Revert of Implement AudioReceiveStream::GetStats(). (patchset #19 id:360001 of https://codereview.webrtc.org/1390753002/ )
Reason for revert:
webrtc_perf_tests started failing on Win32 Release, Mac32 Release and Linux64 Release (all running large tests). These failures were not caught by the try bots.
Original issue's description:
> Implement AudioReceiveStream::GetStats().
>
> R=tommi@webrtc.org
> TBR=hta@webrtc.org
> BUG=webrtc:4690
>
> Committed: a457752f4a
TBR=tommi@webrtc.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:4690
Review URL: https://codereview.webrtc.org/1411083006
Cr-Commit-Position: refs/heads/master@{#10340}
Parent: 5a197dd617
Commit: 43e83d44f0
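Note on the fixed-point conversions that recur in the diff below: NetEq's expand/accelerate/preemptive rates are reported in Q14 and RTCP's fractionLost in Q8, so both the reverted AudioReceiveStream::GetStats() and the restored WebRtcVoiceMediaChannel::GetStats() divide by (1 << 14) and (1 << 8) respectively. A minimal standalone sketch of those conversions follows; Q14ToFloat() matches the deleted webrtc/audio/conversion.h helper, while Q8ToFloat() is only an illustrative name for a division the production code writes inline.

#include <cstdint>

// Q14 fixed point: 16384 represents 1.0, so e.g. an expand rate of 8192
// converts to 0.5.
inline float Q14ToFloat(uint16_t v) {
  return static_cast<float>(v) / (1 << 14);
}

// Q8 fixed point, as used for RTCP fractionLost: 256 represents 1.0.
// (Illustrative helper; the production code performs this division inline.)
inline float Q8ToFloat(uint8_t v) {
  return static_cast<float>(v) / (1 << 8);
}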
@@ -40,16 +40,15 @@ FakeAudioReceiveStream::FakeAudioReceiveStream(
   RTC_DCHECK(config.voe_channel_id != -1);
 }
 
+webrtc::AudioReceiveStream::Stats FakeAudioReceiveStream::GetStats() const {
+  return webrtc::AudioReceiveStream::Stats();
+}
+
 const webrtc::AudioReceiveStream::Config&
     FakeAudioReceiveStream::GetConfig() const {
   return config_;
 }
 
-void FakeAudioReceiveStream::SetStats(
-    const webrtc::AudioReceiveStream::Stats& stats) {
-  stats_ = stats;
-}
-
 void FakeAudioReceiveStream::IncrementReceivedPackets() {
   received_packets_++;
 }
@@ -42,8 +42,11 @@ class FakeAudioReceiveStream : public webrtc::AudioReceiveStream {
   explicit FakeAudioReceiveStream(
       const webrtc::AudioReceiveStream::Config& config);
 
+  // webrtc::AudioReceiveStream implementation.
+  webrtc::AudioReceiveStream::Stats GetStats() const override;
+
   const webrtc::AudioReceiveStream::Config& GetConfig() const;
-  void SetStats(const webrtc::AudioReceiveStream::Stats& stats);
+
   int received_packets() const { return received_packets_; }
   void IncrementReceivedPackets();
 
@@ -61,13 +64,7 @@ class FakeAudioReceiveStream : public webrtc::AudioReceiveStream {
     return true;
   }
 
-  // webrtc::AudioReceiveStream implementation.
-  webrtc::AudioReceiveStream::Stats GetStats() const override {
-    return stats_;
-  }
-
   webrtc::AudioReceiveStream::Config config_;
-  webrtc::AudioReceiveStream::Stats stats_;
   int received_packets_;
 };
 
@@ -65,6 +65,25 @@ static const int kOpusBandwidthWb = 8000;
 static const int kOpusBandwidthSwb = 12000;
 static const int kOpusBandwidthFb = 20000;
 
+static const webrtc::NetworkStatistics kNetStats = {
+    1,     // uint16_t currentBufferSize;
+    2,     // uint16_t preferredBufferSize;
+    true,  // bool jitterPeaksFound;
+    1234,  // uint16_t currentPacketLossRate;
+    567,   // uint16_t currentDiscardRate;
+    8901,  // uint16_t currentExpandRate;
+    234,   // uint16_t currentSpeechExpandRate;
+    5678,  // uint16_t currentPreemptiveRate;
+    9012,  // uint16_t currentAccelerateRate;
+    3456,  // uint16_t currentSecondaryDecodedRate;
+    7890,  // int32_t clockDriftPPM;
+    54,    // meanWaitingTimeMs;
+    32,    // int medianWaitingTimeMs;
+    1,     // int minWaitingTimeMs;
+    98,    // int maxWaitingTimeMs;
+    7654,  // int addedSamples;
+};  // These random but non-trivial numbers are used for testing.
+
 #define WEBRTC_CHECK_CHANNEL(channel) \
   if (channels_.find(channel) == channels_.end()) return -1;
 
@@ -162,9 +181,9 @@ class FakeAudioProcessing : public webrtc::AudioProcessing {
 class FakeWebRtcVoiceEngine
     : public webrtc::VoEAudioProcessing,
       public webrtc::VoEBase, public webrtc::VoECodec, public webrtc::VoEDtmf,
-      public webrtc::VoEHardware,
+      public webrtc::VoEHardware, public webrtc::VoENetEqStats,
       public webrtc::VoENetwork, public webrtc::VoERTP_RTCP,
-      public webrtc::VoEVolumeControl {
+      public webrtc::VoEVideoSync, public webrtc::VoEVolumeControl {
  public:
  struct DtmfInfo {
    DtmfInfo()
@@ -508,7 +527,26 @@ class FakeWebRtcVoiceEngine
     return 0;
   }
   WEBRTC_STUB(SetBitRate, (int channel, int bitrate_bps));
-  WEBRTC_STUB(GetRecCodec, (int channel, webrtc::CodecInst& codec));
+  WEBRTC_FUNC(GetRecCodec, (int channel, webrtc::CodecInst& codec)) {
+    WEBRTC_CHECK_CHANNEL(channel);
+    const Channel* c = channels_[channel];
+    for (std::list<std::string>::const_iterator it_packet = c->packets.begin();
+        it_packet != c->packets.end(); ++it_packet) {
+      int pltype;
+      if (!GetRtpPayloadType(it_packet->data(), it_packet->length(), &pltype)) {
+        continue;
+      }
+      for (std::vector<webrtc::CodecInst>::const_iterator it_codec =
+          c->recv_codecs.begin(); it_codec != c->recv_codecs.end();
+          ++it_codec) {
+        if (it_codec->pltype == pltype) {
+          codec = *it_codec;
+          return 0;
+        }
+      }
+    }
+    return -1;
+  }
   WEBRTC_FUNC(SetRecPayloadType, (int channel,
                                   const webrtc::CodecInst& codec)) {
     WEBRTC_CHECK_CHANNEL(channel);
@@ -687,6 +725,20 @@ class FakeWebRtcVoiceEngine
   WEBRTC_STUB(EnableBuiltInNS, (bool enable));
   virtual bool BuiltInNSIsAvailable() const { return false; }
 
+  // webrtc::VoENetEqStats
+  WEBRTC_FUNC(GetNetworkStatistics, (int channel,
+                                     webrtc::NetworkStatistics& ns)) {
+    WEBRTC_CHECK_CHANNEL(channel);
+    memcpy(&ns, &kNetStats, sizeof(webrtc::NetworkStatistics));
+    return 0;
+  }
+
+  WEBRTC_FUNC_CONST(GetDecodingCallStatistics, (int channel,
+                                                webrtc::AudioDecodingCallStats*)) {
+    WEBRTC_CHECK_CHANNEL(channel);
+    return 0;
+  }
+
   // webrtc::VoENetwork
   WEBRTC_FUNC(RegisterExternalTransport, (int channel,
                                           webrtc::Transport& transport)) {
@@ -835,6 +887,18 @@ class FakeWebRtcVoiceEngine
     return 0;
   }
 
+  // webrtc::VoEVideoSync
+  WEBRTC_STUB(GetPlayoutBufferSize, (int& bufferMs));
+  WEBRTC_STUB(GetPlayoutTimestamp, (int channel, unsigned int& timestamp));
+  WEBRTC_STUB(GetRtpRtcp, (int, webrtc::RtpRtcp**, webrtc::RtpReceiver**));
+  WEBRTC_STUB(SetInitTimestamp, (int channel, unsigned int timestamp));
+  WEBRTC_STUB(SetInitSequenceNumber, (int channel, short sequenceNumber));
+  WEBRTC_STUB(SetMinimumPlayoutDelay, (int channel, int delayMs));
+  WEBRTC_STUB(SetInitialPlayoutDelay, (int channel, int delay_ms));
+  WEBRTC_STUB(GetDelayEstimate, (int channel, int* jitter_buffer_delay_ms,
+                                 int* playout_buffer_delay_ms));
+  WEBRTC_STUB_CONST(GetLeastRequiredDelayMs, (int channel));
+
   // webrtc::VoEVolumeControl
   WEBRTC_STUB(SetSpeakerVolume, (unsigned int));
   WEBRTC_STUB(GetSpeakerVolume, (unsigned int&));
@@ -38,9 +38,13 @@
 #include "webrtc/voice_engine/include/voe_codec.h"
 #include "webrtc/voice_engine/include/voe_dtmf.h"
 #include "webrtc/voice_engine/include/voe_errors.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/include/voe_file.h"
 #include "webrtc/voice_engine/include/voe_hardware.h"
+#include "webrtc/voice_engine/include/voe_neteq_stats.h"
 #include "webrtc/voice_engine/include/voe_network.h"
 #include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
+#include "webrtc/voice_engine/include/voe_video_sync.h"
 #include "webrtc/voice_engine/include/voe_volume_control.h"
 
 namespace cricket {
@@ -92,16 +96,18 @@ class VoEWrapper {
   VoEWrapper()
       : engine_(webrtc::VoiceEngine::Create()), processing_(engine_),
         base_(engine_), codec_(engine_), dtmf_(engine_),
-        hw_(engine_), network_(engine_),
-        rtp_(engine_), volume_(engine_) {
+        hw_(engine_), neteq_(engine_), network_(engine_),
+        rtp_(engine_), sync_(engine_), volume_(engine_) {
   }
   VoEWrapper(webrtc::VoEAudioProcessing* processing,
              webrtc::VoEBase* base,
              webrtc::VoECodec* codec,
             webrtc::VoEDtmf* dtmf,
             webrtc::VoEHardware* hw,
+             webrtc::VoENetEqStats* neteq,
             webrtc::VoENetwork* network,
             webrtc::VoERTP_RTCP* rtp,
+             webrtc::VoEVideoSync* sync,
             webrtc::VoEVolumeControl* volume)
       : engine_(NULL),
         processing_(processing),
@@ -109,8 +115,10 @@ class VoEWrapper {
         codec_(codec),
         dtmf_(dtmf),
         hw_(hw),
+        neteq_(neteq),
         network_(network),
         rtp_(rtp),
+        sync_(sync),
         volume_(volume) {
   }
   ~VoEWrapper() {}
@@ -120,8 +128,10 @@ class VoEWrapper {
   webrtc::VoECodec* codec() const { return codec_.get(); }
   webrtc::VoEDtmf* dtmf() const { return dtmf_.get(); }
   webrtc::VoEHardware* hw() const { return hw_.get(); }
+  webrtc::VoENetEqStats* neteq() const { return neteq_.get(); }
   webrtc::VoENetwork* network() const { return network_.get(); }
   webrtc::VoERTP_RTCP* rtp() const { return rtp_.get(); }
+  webrtc::VoEVideoSync* sync() const { return sync_.get(); }
   webrtc::VoEVolumeControl* volume() const { return volume_.get(); }
   int error() { return base_->LastError(); }
 
@@ -132,8 +142,10 @@ class VoEWrapper {
   scoped_voe_ptr<webrtc::VoECodec> codec_;
   scoped_voe_ptr<webrtc::VoEDtmf> dtmf_;
   scoped_voe_ptr<webrtc::VoEHardware> hw_;
+  scoped_voe_ptr<webrtc::VoENetEqStats> neteq_;
   scoped_voe_ptr<webrtc::VoENetwork> network_;
   scoped_voe_ptr<webrtc::VoERTP_RTCP> rtp_;
+  scoped_voe_ptr<webrtc::VoEVideoSync> sync_;
   scoped_voe_ptr<webrtc::VoEVolumeControl> volume_;
 };
 
@@ -2694,6 +2694,11 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
     }
   }
 
+  webrtc::CallStatistics cs;
+  unsigned int ssrc;
+  webrtc::CodecInst codec;
+  unsigned int level;
+
   for (const auto& ch : send_channels_) {
     const int channel = ch.second->channel();
 
@@ -2701,8 +2706,6 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
     // remote side told us it got from its RTCP report.
     VoiceSenderInfo sinfo;
 
-    webrtc::CallStatistics cs = {0};
-    unsigned int ssrc = 0;
     if (engine()->voe()->rtp()->GetRTCPStatistics(channel, cs) == -1 ||
         engine()->voe()->rtp()->GetLocalSSRC(channel, ssrc) == -1) {
       continue;
@@ -2723,7 +2726,6 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
     sinfo.packets_lost = -1;
     sinfo.ext_seqnum = -1;
     std::vector<webrtc::ReportBlock> receive_blocks;
-    webrtc::CodecInst codec = {0};
     if (engine()->voe()->rtp()->GetRemoteRTCPReportBlocks(
             channel, &receive_blocks) != -1 &&
         engine()->voe()->codec()->GetSendCodec(channel, codec) != -1) {
@@ -2744,7 +2746,6 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
     }
 
     // Local speech level.
-    unsigned int level = 0;
     sinfo.audio_level = (engine()->voe()->volume()->
         GetSpeechInputLevelFullRange(level) != -1) ? level : -1;
 
@@ -2765,36 +2766,76 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
   }
 
   // Get the SSRC and stats for each receiver.
-  info->receivers.clear();
-  for (const auto& stream : receive_streams_) {
-    webrtc::AudioReceiveStream::Stats stats = stream.second->GetStats();
-    VoiceReceiverInfo rinfo;
-    rinfo.add_ssrc(stats.remote_ssrc);
-    rinfo.bytes_rcvd = stats.bytes_rcvd;
-    rinfo.packets_rcvd = stats.packets_rcvd;
-    rinfo.packets_lost = stats.packets_lost;
-    rinfo.fraction_lost = stats.fraction_lost;
-    rinfo.codec_name = stats.codec_name;
-    rinfo.ext_seqnum = stats.ext_seqnum;
-    rinfo.jitter_ms = stats.jitter_ms;
-    rinfo.jitter_buffer_ms = stats.jitter_buffer_ms;
-    rinfo.jitter_buffer_preferred_ms = stats.jitter_buffer_preferred_ms;
-    rinfo.delay_estimate_ms = stats.delay_estimate_ms;
-    rinfo.audio_level = stats.audio_level;
-    rinfo.expand_rate = stats.expand_rate;
-    rinfo.speech_expand_rate = stats.speech_expand_rate;
-    rinfo.secondary_decoded_rate = stats.secondary_decoded_rate;
-    rinfo.accelerate_rate = stats.accelerate_rate;
-    rinfo.preemptive_expand_rate = stats.preemptive_expand_rate;
-    rinfo.decoding_calls_to_silence_generator =
-        stats.decoding_calls_to_silence_generator;
-    rinfo.decoding_calls_to_neteq = stats.decoding_calls_to_neteq;
-    rinfo.decoding_normal = stats.decoding_normal;
-    rinfo.decoding_plc = stats.decoding_plc;
-    rinfo.decoding_cng = stats.decoding_cng;
-    rinfo.decoding_plc_cng = stats.decoding_plc_cng;
-    rinfo.capture_start_ntp_time_ms = stats.capture_start_ntp_time_ms;
-    info->receivers.push_back(rinfo);
+  for (const auto& ch : receive_channels_) {
+    int ch_id = ch.second->channel();
+    memset(&cs, 0, sizeof(cs));
+    if (engine()->voe()->rtp()->GetRemoteSSRC(ch_id, ssrc) != -1 &&
+        engine()->voe()->rtp()->GetRTCPStatistics(ch_id, cs) != -1 &&
+        engine()->voe()->codec()->GetRecCodec(ch_id, codec) != -1) {
+      VoiceReceiverInfo rinfo;
+      rinfo.add_ssrc(ssrc);
+      rinfo.bytes_rcvd = cs.bytesReceived;
+      rinfo.packets_rcvd = cs.packetsReceived;
+      // The next four fields are from the most recently sent RTCP report.
+      // Convert Q8 to floating point.
+      rinfo.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
+      rinfo.packets_lost = cs.cumulativeLost;
+      rinfo.ext_seqnum = cs.extendedMax;
+      rinfo.capture_start_ntp_time_ms = cs.capture_start_ntp_time_ms_;
+      if (codec.pltype != -1) {
+        rinfo.codec_name = codec.plname;
+      }
+      // Convert samples to milliseconds.
+      if (codec.plfreq / 1000 > 0) {
+        rinfo.jitter_ms = cs.jitterSamples / (codec.plfreq / 1000);
+      }
+
+      // Get jitter buffer and total delay (alg + jitter + playout) stats.
+      webrtc::NetworkStatistics ns;
+      if (engine()->voe()->neteq() &&
+          engine()->voe()->neteq()->GetNetworkStatistics(
+              ch_id, ns) != -1) {
+        rinfo.jitter_buffer_ms = ns.currentBufferSize;
+        rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
+        rinfo.expand_rate =
+            static_cast<float>(ns.currentExpandRate) / (1 << 14);
+        rinfo.speech_expand_rate =
+            static_cast<float>(ns.currentSpeechExpandRate) / (1 << 14);
+        rinfo.secondary_decoded_rate =
+            static_cast<float>(ns.currentSecondaryDecodedRate) / (1 << 14);
+        rinfo.accelerate_rate =
+            static_cast<float>(ns.currentAccelerateRate) / (1 << 14);
+        rinfo.preemptive_expand_rate =
+            static_cast<float>(ns.currentPreemptiveRate) / (1 << 14);
+      }
+
+      webrtc::AudioDecodingCallStats ds;
+      if (engine()->voe()->neteq() &&
+          engine()->voe()->neteq()->GetDecodingCallStatistics(
+              ch_id, &ds) != -1) {
+        rinfo.decoding_calls_to_silence_generator =
+            ds.calls_to_silence_generator;
+        rinfo.decoding_calls_to_neteq = ds.calls_to_neteq;
+        rinfo.decoding_normal = ds.decoded_normal;
+        rinfo.decoding_plc = ds.decoded_plc;
+        rinfo.decoding_cng = ds.decoded_cng;
+        rinfo.decoding_plc_cng = ds.decoded_plc_cng;
+      }
+
+      if (engine()->voe()->sync()) {
+        int jitter_buffer_delay_ms = 0;
+        int playout_buffer_delay_ms = 0;
+        engine()->voe()->sync()->GetDelayEstimate(
+            ch_id, &jitter_buffer_delay_ms, &playout_buffer_delay_ms);
+        rinfo.delay_estimate_ms = jitter_buffer_delay_ms +
+            playout_buffer_delay_ms;
+      }
+
+      // Get speech level.
+      rinfo.audio_level = (engine()->voe()->volume()->
+          GetSpeechOutputLevelFullRange(ch_id, level) != -1) ? level : -1;
+      info->receivers.push_back(rinfo);
+    }
   }
 
   return true;
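The restored receiver path above converts RTCP jitter from timestamp units to milliseconds with cs.jitterSamples / (codec.plfreq / 1000), guarded against a zero divisor. A small self-contained sketch of that arithmetic; the sample numbers are illustrative, not taken from the tests.

#include <cstdio>

int main() {
  // Jitter is reported in timestamp units, so divide by samples per ms.
  const unsigned int jitter_samples = 1600;
  const int plfreq = 16000;  // e.g. ISAC wideband at 16 kHz.
  if (plfreq / 1000 > 0) {
    std::printf("jitter_ms = %u\n", jitter_samples / (plfreq / 1000));  // 100
  }
  return 0;
}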
@@ -38,27 +38,27 @@
 #include "webrtc/p2p/base/faketransportcontroller.h"
 #include "talk/session/media/channel.h"
 
+// Tests for the WebRtcVoiceEngine/VoiceChannel code.
 
 using cricket::kRtpAudioLevelHeaderExtension;
 using cricket::kRtpAbsoluteSenderTimeHeaderExtension;
 
-namespace {
-const cricket::AudioCodec kPcmuCodec(0, "PCMU", 8000, 64000, 1, 0);
-const cricket::AudioCodec kIsacCodec(103, "ISAC", 16000, 32000, 1, 0);
-const cricket::AudioCodec kOpusCodec(111, "opus", 48000, 64000, 2, 0);
-const cricket::AudioCodec kG722CodecVoE(9, "G722", 16000, 64000, 1, 0);
-const cricket::AudioCodec kG722CodecSdp(9, "G722", 8000, 64000, 1, 0);
-const cricket::AudioCodec kRedCodec(117, "red", 8000, 0, 1, 0);
-const cricket::AudioCodec kCn8000Codec(13, "CN", 8000, 0, 1, 0);
-const cricket::AudioCodec kCn16000Codec(105, "CN", 16000, 0, 1, 0);
-const cricket::AudioCodec kTelephoneEventCodec(106, "telephone-event", 8000, 0,
-                                               1, 0);
-const cricket::AudioCodec* const kAudioCodecs[] = {
+static const cricket::AudioCodec kPcmuCodec(0, "PCMU", 8000, 64000, 1, 0);
+static const cricket::AudioCodec kIsacCodec(103, "ISAC", 16000, 32000, 1, 0);
+static const cricket::AudioCodec kOpusCodec(111, "opus", 48000, 64000, 2, 0);
+static const cricket::AudioCodec kG722CodecVoE(9, "G722", 16000, 64000, 1, 0);
+static const cricket::AudioCodec kG722CodecSdp(9, "G722", 8000, 64000, 1, 0);
+static const cricket::AudioCodec kRedCodec(117, "red", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCn8000Codec(13, "CN", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCn16000Codec(105, "CN", 16000, 0, 1, 0);
+static const cricket::AudioCodec
+    kTelephoneEventCodec(106, "telephone-event", 8000, 0, 1, 0);
+static const cricket::AudioCodec* const kAudioCodecs[] = {
     &kPcmuCodec, &kIsacCodec, &kOpusCodec, &kG722CodecVoE, &kRedCodec,
     &kCn8000Codec, &kCn16000Codec, &kTelephoneEventCodec,
 };
-const uint32_t kSsrc1 = 0x99;
-const uint32_t kSsrc2 = 0x98;
+static uint32_t kSsrc1 = 0x99;
+static uint32_t kSsrc2 = 0x98;
 
 class FakeVoEWrapper : public cricket::VoEWrapper {
  public:
@@ -68,8 +68,10 @@ class FakeVoEWrapper : public cricket::VoEWrapper {
                         engine,  // codec
                         engine,  // dtmf
                         engine,  // hw
+                        engine,  // neteq
                         engine,  // network
                         engine,  // rtp
+                        engine,  // sync
                         engine) {  // volume
   }
 };
@@ -84,7 +86,6 @@ class FakeVoETraceWrapper : public cricket::VoETraceWrapper {
   int SetTraceCallback(webrtc::TraceCallback* callback) override { return 0; }
   unsigned int filter_;
 };
-}  // namespace
 
 class WebRtcVoiceEngineTestFake : public testing::Test {
  public:
@@ -292,71 +293,6 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
     EXPECT_EQ(-1, voe_.GetReceiveRtpExtensionId(new_channel_num, ext));
   }
 
-  const webrtc::AudioReceiveStream::Stats& GetAudioReceiveStreamStats() const {
-    static webrtc::AudioReceiveStream::Stats stats;
-    if (stats.remote_ssrc == 0) {
-      stats.remote_ssrc = 123;
-      stats.bytes_rcvd = 456;
-      stats.packets_rcvd = 768;
-      stats.packets_lost = 101;
-      stats.fraction_lost = 23.45f;
-      stats.codec_name = "codec_name";
-      stats.ext_seqnum = 678;
-      stats.jitter_ms = 901;
-      stats.jitter_buffer_ms = 234;
-      stats.jitter_buffer_preferred_ms = 567;
-      stats.delay_estimate_ms = 890;
-      stats.audio_level = 1234;
-      stats.expand_rate = 5.67f;
-      stats.speech_expand_rate = 8.90f;
-      stats.secondary_decoded_rate = 1.23f;
-      stats.accelerate_rate = 4.56f;
-      stats.preemptive_expand_rate = 7.89f;
-      stats.decoding_calls_to_silence_generator = 012;
-      stats.decoding_calls_to_neteq = 345;
-      stats.decoding_normal = 67890;
-      stats.decoding_plc = 1234;
-      stats.decoding_cng = 5678;
-      stats.decoding_plc_cng = 9012;
-      stats.capture_start_ntp_time_ms = 3456;
-    }
-    return stats;
-  }
-  void SetAudioReceiveStreamStats() {
-    for (auto* s : call_.GetAudioReceiveStreams()) {
-      s->SetStats(GetAudioReceiveStreamStats());
-    }
-  }
-  void VerifyVoiceReceiverInfo(const cricket::VoiceReceiverInfo& info) {
-    const auto& kStats = GetAudioReceiveStreamStats();
-    EXPECT_EQ(info.local_stats.front().ssrc, kStats.remote_ssrc);
-    EXPECT_EQ(info.bytes_rcvd, kStats.bytes_rcvd);
-    EXPECT_EQ(info.packets_rcvd, kStats.packets_rcvd);
-    EXPECT_EQ(info.packets_lost, kStats.packets_lost);
-    EXPECT_EQ(info.fraction_lost, kStats.fraction_lost);
-    EXPECT_EQ(info.codec_name, kStats.codec_name);
-    EXPECT_EQ(info.ext_seqnum, kStats.ext_seqnum);
-    EXPECT_EQ(info.jitter_ms, kStats.jitter_ms);
-    EXPECT_EQ(info.jitter_buffer_ms, kStats.jitter_buffer_ms);
-    EXPECT_EQ(info.jitter_buffer_preferred_ms,
-              kStats.jitter_buffer_preferred_ms);
-    EXPECT_EQ(info.delay_estimate_ms, kStats.delay_estimate_ms);
-    EXPECT_EQ(info.audio_level, kStats.audio_level);
-    EXPECT_EQ(info.expand_rate, kStats.expand_rate);
-    EXPECT_EQ(info.speech_expand_rate, kStats.speech_expand_rate);
-    EXPECT_EQ(info.secondary_decoded_rate, kStats.secondary_decoded_rate);
-    EXPECT_EQ(info.accelerate_rate, kStats.accelerate_rate);
-    EXPECT_EQ(info.preemptive_expand_rate, kStats.preemptive_expand_rate);
-    EXPECT_EQ(info.decoding_calls_to_silence_generator,
-              kStats.decoding_calls_to_silence_generator);
-    EXPECT_EQ(info.decoding_calls_to_neteq, kStats.decoding_calls_to_neteq);
-    EXPECT_EQ(info.decoding_normal, kStats.decoding_normal);
-    EXPECT_EQ(info.decoding_plc, kStats.decoding_plc);
-    EXPECT_EQ(info.decoding_cng, kStats.decoding_cng);
-    EXPECT_EQ(info.decoding_plc_cng, kStats.decoding_plc_cng);
-    EXPECT_EQ(info.capture_start_ntp_time_ms, kStats.capture_start_ntp_time_ms);
-  }
-
  protected:
   cricket::FakeCall call_;
   cricket::FakeWebRtcVoiceEngine voe_;
@@ -2072,23 +2008,38 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
     EXPECT_EQ(cricket::kIntStatValue, info.senders[i].jitter_ms);
     EXPECT_EQ(kPcmuCodec.name, info.senders[i].codec_name);
   }
+  EXPECT_EQ(0u, info.receivers.size());
 
-  // We have added one receive stream. We should see empty stats.
-  EXPECT_EQ(info.receivers.size(), 1u);
-  EXPECT_EQ(info.receivers[0].local_stats.front().ssrc, 0);
+  // Registered stream's remote SSRC is kSsrc2. Send a packet with SSRC=1.
+  // We should drop the packet and no stats should be available.
+  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
 
-  // Remove the kSsrc2 stream. No receiver stats.
-  EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc2));
   EXPECT_EQ(true, channel_->GetStats(&info));
   EXPECT_EQ(0u, info.receivers.size());
 
-  // Deliver a new packet - a default receive stream should be created and we
-  // should see stats again.
+  // Remove the kSsrc2 stream and deliver a new packet - a default receive
+  // stream should be created and we should see stats.
+  EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc2));
   DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
-  SetAudioReceiveStreamStats();
   EXPECT_EQ(true, channel_->GetStats(&info));
   EXPECT_EQ(1u, info.receivers.size());
-  VerifyVoiceReceiverInfo(info.receivers[0]);
+  EXPECT_EQ(cricket::kIntStatValue, info.receivers[0].bytes_rcvd);
+  EXPECT_EQ(cricket::kIntStatValue, info.receivers[0].packets_rcvd);
+  EXPECT_EQ(cricket::kIntStatValue, info.receivers[0].packets_lost);
+  EXPECT_EQ(cricket::kIntStatValue, info.receivers[0].ext_seqnum);
+  EXPECT_EQ(kPcmuCodec.name, info.receivers[0].codec_name);
+  EXPECT_EQ(static_cast<float>(cricket::kNetStats.currentExpandRate) /
+      (1 << 14), info.receivers[0].expand_rate);
+  EXPECT_EQ(static_cast<float>(cricket::kNetStats.currentSpeechExpandRate) /
+      (1 << 14), info.receivers[0].speech_expand_rate);
+  EXPECT_EQ(static_cast<float>(cricket::kNetStats.currentSecondaryDecodedRate) /
+      (1 << 14), info.receivers[0].secondary_decoded_rate);
+  EXPECT_EQ(
+      static_cast<float>(cricket::kNetStats.currentAccelerateRate) / (1 << 14),
+      info.receivers[0].accelerate_rate);
+  EXPECT_EQ(
+      static_cast<float>(cricket::kNetStats.currentPreemptiveRate) / (1 << 14),
+      info.receivers[0].preemptive_expand_rate);
 }
 
 // Test that we can add and remove receive streams, and do proper send/playout.
@@ -2349,22 +2300,33 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStats) {
   // EXPECT_EQ(cricket::kIntStatValue, info.senders[0].echo_return_loss);
   // EXPECT_EQ(cricket::kIntStatValue,
   //           info.senders[0].echo_return_loss_enhancement);
-  // We have added one receive stream. We should see empty stats.
-  EXPECT_EQ(info.receivers.size(), 1u);
-  EXPECT_EQ(info.receivers[0].local_stats.front().ssrc, 0);
+  EXPECT_EQ(0u, info.receivers.size());
 
-  // Remove the kSsrc2 stream. No receiver stats.
-  EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc2));
+  // Registered stream's remote SSRC is kSsrc2. Send a packet with SSRC=1.
+  // We should drop the packet and no stats should be available.
+  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
   EXPECT_EQ(true, channel_->GetStats(&info));
   EXPECT_EQ(0u, info.receivers.size());
 
-  // Deliver a new packet - a default receive stream should be created and we
-  // should see stats again.
+  // Remove the kSsrc2 stream and deliver a new packet - a default receive
+  // stream should be created and we should see stats.
+  EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc2));
   DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
-  SetAudioReceiveStreamStats();
  EXPECT_EQ(true, channel_->GetStats(&info));
   EXPECT_EQ(1u, info.receivers.size());
-  VerifyVoiceReceiverInfo(info.receivers[0]);
+  EXPECT_EQ(cricket::kIntStatValue, info.receivers[0].bytes_rcvd);
+  EXPECT_EQ(cricket::kIntStatValue, info.receivers[0].packets_rcvd);
+  EXPECT_EQ(cricket::kIntStatValue, info.receivers[0].packets_lost);
+  EXPECT_EQ(cricket::kIntStatValue, info.receivers[0].ext_seqnum);
+  EXPECT_EQ(kPcmuCodec.name, info.receivers[0].codec_name);
+  EXPECT_EQ(static_cast<float>(cricket::kNetStats.currentExpandRate) /
+      (1 << 14), info.receivers[0].expand_rate);
+  EXPECT_EQ(static_cast<float>(cricket::kNetStats.currentSpeechExpandRate) /
+      (1 << 14), info.receivers[0].speech_expand_rate);
+  EXPECT_EQ(static_cast<float>(cricket::kNetStats.currentSecondaryDecodedRate) /
+      (1 << 14), info.receivers[0].secondary_decoded_rate);
+  // TODO(sriniv): Add testing for more receiver fields.
 }
 
 // Test that we can set the outgoing SSRC properly with multiple streams.
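The restored unit tests above check the receiver rates against cricket::kNetStats after the same Q14 scaling. As a quick sanity check of what those expectations evaluate to, here is an illustrative standalone program; the two constants are copied from the kNetStats initializer in the fake voice engine.

#include <cstdio>

int main() {
  // Values taken from kNetStats (Q14 fixed point).
  const unsigned short kCurrentExpandRate = 8901;
  const unsigned short kCurrentSpeechExpandRate = 234;
  // Same conversion the restored WebRtcVoiceMediaChannel::GetStats() applies.
  std::printf("expand_rate = %f\n",
              static_cast<float>(kCurrentExpandRate) / (1 << 14));
  std::printf("speech_expand_rate = %f\n",
              static_cast<float>(kCurrentSpeechExpandRate) / (1 << 14));
  return 0;
}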
@@ -14,8 +14,6 @@ source_set("audio") {
     "audio_receive_stream.h",
     "audio_send_stream.cc",
     "audio_send_stream.h",
-    "conversion.h",
-    "scoped_voe_interface.h",
   ]
 
   configs += [ "..:common_config" ]
@@ -12,17 +12,10 @@
 
 #include <string>
 
-#include "webrtc/audio/conversion.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
 #include "webrtc/system_wrappers/interface/tick_util.h"
-#include "webrtc/voice_engine/include/voe_base.h"
-#include "webrtc/voice_engine/include/voe_codec.h"
-#include "webrtc/voice_engine/include/voe_neteq_stats.h"
-#include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
-#include "webrtc/voice_engine/include/voe_video_sync.h"
-#include "webrtc/voice_engine/include/voe_volume_control.h"
 
 namespace webrtc {
 std::string AudioReceiveStream::Config::Rtp::ToString() const {
@@ -31,9 +24,8 @@ std::string AudioReceiveStream::Config::Rtp::ToString() const {
   ss << ", extensions: [";
   for (size_t i = 0; i < extensions.size(); ++i) {
     ss << extensions[i].ToString();
-    if (i != extensions.size() - 1) {
+    if (i != extensions.size() - 1)
       ss << ", ";
-    }
   }
   ss << ']';
   ss << '}';
@@ -44,9 +36,8 @@ std::string AudioReceiveStream::Config::ToString() const {
   std::stringstream ss;
   ss << "{rtp: " << rtp.ToString();
   ss << ", voe_channel_id: " << voe_channel_id;
-  if (!sync_group.empty()) {
+  if (!sync_group.empty())
     ss << ", sync_group: " << sync_group;
-  }
   ss << '}';
   return ss.str();
 }
@@ -54,18 +45,13 @@ std::string AudioReceiveStream::Config::ToString() const {
 namespace internal {
 AudioReceiveStream::AudioReceiveStream(
     RemoteBitrateEstimator* remote_bitrate_estimator,
-    const webrtc::AudioReceiveStream::Config& config,
-    VoiceEngine* voice_engine)
+    const webrtc::AudioReceiveStream::Config& config)
     : remote_bitrate_estimator_(remote_bitrate_estimator),
       config_(config),
-      voice_engine_(voice_engine),
-      voe_base_(voice_engine),
       rtp_header_parser_(RtpHeaderParser::Create()) {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   LOG(LS_INFO) << "AudioReceiveStream: " << config_.ToString();
   RTC_DCHECK(config.voe_channel_id != -1);
   RTC_DCHECK(remote_bitrate_estimator_ != nullptr);
-  RTC_DCHECK(voice_engine_ != nullptr);
   RTC_DCHECK(rtp_header_parser_ != nullptr);
   for (const auto& ext : config.rtp.extensions) {
     // One-byte-extension local identifiers are in the range 1-14 inclusive.
@@ -87,117 +73,33 @@ AudioReceiveStream::AudioReceiveStream(
 }
 
 AudioReceiveStream::~AudioReceiveStream() {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   LOG(LS_INFO) << "~AudioReceiveStream: " << config_.ToString();
 }
 
 webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  webrtc::AudioReceiveStream::Stats stats;
-  stats.remote_ssrc = config_.rtp.remote_ssrc;
-  ScopedVoEInterface<VoECodec> codec(voice_engine_);
-  ScopedVoEInterface<VoENetEqStats> neteq(voice_engine_);
-  ScopedVoEInterface<VoERTP_RTCP> rtp(voice_engine_);
-  ScopedVoEInterface<VoEVideoSync> sync(voice_engine_);
-  ScopedVoEInterface<VoEVolumeControl> volume(voice_engine_);
-  unsigned int ssrc = 0;
-  webrtc::CallStatistics cs = {0};
-  webrtc::CodecInst ci = {0};
-  // Only collect stats if we have seen some traffic with the SSRC.
-  if (rtp->GetRemoteSSRC(config_.voe_channel_id, ssrc) == -1 ||
-      rtp->GetRTCPStatistics(config_.voe_channel_id, cs) == -1 ||
-      codec->GetRecCodec(config_.voe_channel_id, ci) == -1) {
-    return stats;
-  }
-
-  stats.bytes_rcvd = cs.bytesReceived;
-  stats.packets_rcvd = cs.packetsReceived;
-  stats.packets_lost = cs.cumulativeLost;
-  stats.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
-  if (ci.pltype != -1) {
-    stats.codec_name = ci.plname;
-  }
-
-  stats.ext_seqnum = cs.extendedMax;
-  if (ci.plfreq / 1000 > 0) {
-    stats.jitter_ms = cs.jitterSamples / (ci.plfreq / 1000);
-  }
-  {
-    int jitter_buffer_delay_ms = 0;
-    int playout_buffer_delay_ms = 0;
-    sync->GetDelayEstimate(config_.voe_channel_id, &jitter_buffer_delay_ms,
-                           &playout_buffer_delay_ms);
-    stats.delay_estimate_ms =
-        jitter_buffer_delay_ms + playout_buffer_delay_ms;
-  }
-  {
-    unsigned int level = 0;
-    if (volume->GetSpeechOutputLevelFullRange(config_.voe_channel_id, level)
-        != -1) {
-      stats.audio_level = static_cast<int32_t>(level);
-    }
-  }
-
-  webrtc::NetworkStatistics ns = {0};
-  if (neteq->GetNetworkStatistics(config_.voe_channel_id, ns) != -1) {
-    // Get jitter buffer and total delay (alg + jitter + playout) stats.
-    stats.jitter_buffer_ms = ns.currentBufferSize;
-    stats.jitter_buffer_preferred_ms = ns.preferredBufferSize;
-    stats.expand_rate = Q14ToFloat(ns.currentExpandRate);
-    stats.speech_expand_rate = Q14ToFloat(ns.currentSpeechExpandRate);
-    stats.secondary_decoded_rate = Q14ToFloat(ns.currentSecondaryDecodedRate);
-    stats.accelerate_rate = Q14ToFloat(ns.currentAccelerateRate);
-    stats.preemptive_expand_rate = Q14ToFloat(ns.currentPreemptiveRate);
-  }
-
-  webrtc::AudioDecodingCallStats ds;
-  if (neteq->GetDecodingCallStatistics(config_.voe_channel_id, &ds) != -1) {
-    stats.decoding_calls_to_silence_generator =
-        ds.calls_to_silence_generator;
-    stats.decoding_calls_to_neteq = ds.calls_to_neteq;
-    stats.decoding_normal = ds.decoded_normal;
-    stats.decoding_plc = ds.decoded_plc;
-    stats.decoding_cng = ds.decoded_cng;
-    stats.decoding_plc_cng = ds.decoded_plc_cng;
-  }
-
-  stats.capture_start_ntp_time_ms = cs.capture_start_ntp_time_ms_;
-
-  return stats;
+  return webrtc::AudioReceiveStream::Stats();
 }
 
 const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return config_;
 }
 
 void AudioReceiveStream::Start() {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 }
 
 void AudioReceiveStream::Stop() {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 }
 
 void AudioReceiveStream::SignalNetworkState(NetworkState state) {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 }
 
 bool AudioReceiveStream::DeliverRtcp(const uint8_t* packet, size_t length) {
-  // TODO(solenberg): Tests call this function on a network thread, libjingle
-  // calls on the worker thread. We should move towards always using a network
-  // thread. Then this check can be enabled.
-  // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
   return false;
 }
 
 bool AudioReceiveStream::DeliverRtp(const uint8_t* packet,
                                     size_t length,
                                     const PacketTime& packet_time) {
-  // TODO(solenberg): Tests call this function on a network thread, libjingle
-  // calls on the worker thread. We should move towards always using a network
-  // thread. Then this check can be enabled.
-  // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
   RTPHeader header;
 
   if (!rtp_header_parser_->Parse(packet, length, &header)) {
@@ -12,23 +12,18 @@
 #define WEBRTC_AUDIO_AUDIO_RECEIVE_STREAM_H_
 
 #include "webrtc/audio_receive_stream.h"
-#include "webrtc/audio/scoped_voe_interface.h"
-#include "webrtc/base/thread_checker.h"
 #include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/voice_engine/include/voe_base.h"
 
 namespace webrtc {
 
 class RemoteBitrateEstimator;
-class VoiceEngine;
 
 namespace internal {
 
 class AudioReceiveStream : public webrtc::AudioReceiveStream {
  public:
   AudioReceiveStream(RemoteBitrateEstimator* remote_bitrate_estimator,
-                     const webrtc::AudioReceiveStream::Config& config,
-                     VoiceEngine* voice_engine);
+                     const webrtc::AudioReceiveStream::Config& config);
   ~AudioReceiveStream() override;
 
   // webrtc::ReceiveStream implementation.
@@ -46,12 +41,8 @@ class AudioReceiveStream : public webrtc::AudioReceiveStream {
   const webrtc::AudioReceiveStream::Config& config() const;
 
  private:
-  rtc::ThreadChecker thread_checker_;
   RemoteBitrateEstimator* const remote_bitrate_estimator_;
   const webrtc::AudioReceiveStream::Config config_;
-  VoiceEngine* voice_engine_;
-  // We hold one interface pointer to the VoE to make sure it is kept alive.
-  ScopedVoEInterface<VoEBase> voe_base_;
   rtc::scoped_ptr<RtpHeaderParser> rtp_header_parser_;
 };
 }  // namespace internal
@@ -11,14 +11,10 @@
 #include "testing/gtest/include/gtest/gtest.h"
 
 #include "webrtc/audio/audio_receive_stream.h"
-#include "webrtc/audio/conversion.h"
 #include "webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h"
 #include "webrtc/modules/rtp_rtcp/source/byte_io.h"
-#include "webrtc/test/fake_voice_engine.h"
 
-namespace {
+namespace webrtc {
 
-using webrtc::ByteWriter;
-
 const size_t kAbsoluteSendTimeLength = 4;
 
@@ -49,28 +45,23 @@ size_t CreateRtpHeaderWithAbsSendTime(uint8_t* header,
   ByteWriter<uint16_t>::WriteBigEndian(header + 2, 0x1234);  // Sequence number.
   ByteWriter<uint32_t>::WriteBigEndian(header + 4, 0x5678);  // Timestamp.
   ByteWriter<uint32_t>::WriteBigEndian(header + 8, 0x4321);  // SSRC.
-  int32_t rtp_header_length = webrtc::kRtpHeaderSize;
+  int32_t rtp_header_length = kRtpHeaderSize;
 
   BuildAbsoluteSendTimeExtension(header + rtp_header_length, extension_id,
                                  abs_send_time);
   rtp_header_length += kAbsoluteSendTimeLength;
   return rtp_header_length;
 }
-}  // namespace
 
-namespace webrtc {
-namespace test {
-
 TEST(AudioReceiveStreamTest, AudioPacketUpdatesBweWithTimestamp) {
   MockRemoteBitrateEstimator rbe;
-  FakeVoiceEngine fve;
   AudioReceiveStream::Config config;
   config.combined_audio_video_bwe = true;
-  config.voe_channel_id = fve.kReceiveChannelId;
+  config.voe_channel_id = 1;
   const int kAbsSendTimeId = 3;
   config.rtp.extensions.push_back(
       RtpExtension(RtpExtension::kAbsSendTime, kAbsSendTimeId));
-  internal::AudioReceiveStream recv_stream(&rbe, config, &fve);
+  internal::AudioReceiveStream recv_stream(&rbe, config);
   uint8_t rtp_packet[30];
   const int kAbsSendTimeValue = 1234;
   CreateRtpHeaderWithAbsSendTime(rtp_packet, kAbsSendTimeId, kAbsSendTimeValue);
@@ -83,57 +74,4 @@ TEST(AudioReceiveStreamTest, AudioPacketUpdatesBweWithTimestamp) {
   EXPECT_TRUE(
       recv_stream.DeliverRtp(rtp_packet, sizeof(rtp_packet), packet_time));
 }
-
-TEST(AudioReceiveStreamTest, GetStats) {
-  const uint32_t kSsrc1 = 667;
-
-  MockRemoteBitrateEstimator rbe;
-  FakeVoiceEngine fve;
-  AudioReceiveStream::Config config;
-  config.rtp.remote_ssrc = kSsrc1;
-  config.voe_channel_id = fve.kReceiveChannelId;
-  internal::AudioReceiveStream recv_stream(&rbe, config, &fve);
-
-  AudioReceiveStream::Stats stats = recv_stream.GetStats();
-  const CallStatistics& call_stats = fve.GetRecvCallStats();
-  const CodecInst& codec_inst = fve.GetRecvRecCodecInst();
-  const NetworkStatistics& net_stats = fve.GetRecvNetworkStats();
-  const AudioDecodingCallStats& decode_stats =
-      fve.GetRecvAudioDecodingCallStats();
-  EXPECT_EQ(kSsrc1, stats.remote_ssrc);
-  EXPECT_EQ(static_cast<int64_t>(call_stats.bytesReceived), stats.bytes_rcvd);
-  EXPECT_EQ(static_cast<uint32_t>(call_stats.packetsReceived),
-            stats.packets_rcvd);
-  EXPECT_EQ(call_stats.cumulativeLost, stats.packets_lost);
-  EXPECT_EQ(static_cast<float>(call_stats.fractionLost) / 256,
-            stats.fraction_lost);
-  EXPECT_EQ(std::string(codec_inst.plname), stats.codec_name);
-  EXPECT_EQ(call_stats.extendedMax, stats.ext_seqnum);
-  EXPECT_EQ(call_stats.jitterSamples / (codec_inst.plfreq / 1000),
-            stats.jitter_ms);
-  EXPECT_EQ(net_stats.currentBufferSize, stats.jitter_buffer_ms);
-  EXPECT_EQ(net_stats.preferredBufferSize, stats.jitter_buffer_preferred_ms);
-  EXPECT_EQ(static_cast<uint32_t>(fve.kRecvJitterBufferDelay +
-      fve.kRecvPlayoutBufferDelay), stats.delay_estimate_ms);
-  EXPECT_EQ(static_cast<int32_t>(fve.kRecvSpeechOutputLevel),
-            stats.audio_level);
-  EXPECT_EQ(Q14ToFloat(net_stats.currentExpandRate), stats.expand_rate);
-  EXPECT_EQ(Q14ToFloat(net_stats.currentSpeechExpandRate),
-            stats.speech_expand_rate);
-  EXPECT_EQ(Q14ToFloat(net_stats.currentSecondaryDecodedRate),
-            stats.secondary_decoded_rate);
-  EXPECT_EQ(Q14ToFloat(net_stats.currentAccelerateRate), stats.accelerate_rate);
-  EXPECT_EQ(Q14ToFloat(net_stats.currentPreemptiveRate),
-            stats.preemptive_expand_rate);
-  EXPECT_EQ(decode_stats.calls_to_silence_generator,
-            stats.decoding_calls_to_silence_generator);
-  EXPECT_EQ(decode_stats.calls_to_neteq, stats.decoding_calls_to_neteq);
-  EXPECT_EQ(decode_stats.decoded_normal, stats.decoding_normal);
-  EXPECT_EQ(decode_stats.decoded_plc, stats.decoding_plc);
-  EXPECT_EQ(decode_stats.decoded_cng, stats.decoding_cng);
-  EXPECT_EQ(decode_stats.decoded_plc_cng, stats.decoding_plc_cng);
-  EXPECT_EQ(call_stats.capture_start_ntp_time_ms_,
-            stats.capture_start_ntp_time_ms);
-}
-}  // namespace test
 }  // namespace webrtc
@@ -1,21 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_AUDIO_CONVERSION_H_
-#define WEBRTC_AUDIO_CONVERSION_H_
-
-namespace webrtc {
-
-inline float Q14ToFloat(uint16_t v) {
-  return static_cast<float>(v) / (1 << 14);
-}
-}  // namespace webrtc
-
-#endif  // WEBRTC_AUDIO_CONVERSION_H_
@@ -1,43 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_AUDIO_SCOPED_VOE_INTERFACE_H_
-#define WEBRTC_AUDIO_SCOPED_VOE_INTERFACE_H_
-
-#include "webrtc/base/checks.h"
-
-namespace webrtc {
-
-class VoiceEngine;
-
-namespace internal {
-
-template<class T> class ScopedVoEInterface {
- public:
-  explicit ScopedVoEInterface(webrtc::VoiceEngine* e)
-      : ptr_(T::GetInterface(e)) {
-    RTC_DCHECK(ptr_);
-  }
-  ~ScopedVoEInterface() {
-    if (ptr_) {
-      ptr_->Release();
-    }
-  }
-  T* operator->() {
-    RTC_DCHECK(ptr_);
-    return ptr_;
-  }
- private:
-  T* ptr_;
-};
-}  // namespace internal
-}  // namespace webrtc
-
-#endif  // WEBRTC_AUDIO_SCOPED_VOE_INTERFACE_H_
@@ -18,8 +18,6 @@
         'audio/audio_receive_stream.h',
         'audio/audio_send_stream.cc',
         'audio/audio_send_stream.h',
-        'audio/conversion.h',
-        'audio/scoped_voe_interface.h',
       ],
     },
   }
@@ -26,32 +26,7 @@ class AudioDecoder;
 
 class AudioReceiveStream : public ReceiveStream {
  public:
-  struct Stats {
-    uint32_t remote_ssrc = 0;
-    int64_t bytes_rcvd = 0;
-    uint32_t packets_rcvd = 0;
-    uint32_t packets_lost = 0;
-    float fraction_lost = 0.0f;
-    std::string codec_name;
-    uint32_t ext_seqnum = 0;
-    uint32_t jitter_ms = 0;
-    uint32_t jitter_buffer_ms = 0;
-    uint32_t jitter_buffer_preferred_ms = 0;
-    uint32_t delay_estimate_ms = 0;
-    int32_t audio_level = -1;
-    float expand_rate = 0.0f;
-    float speech_expand_rate = 0.0f;
-    float secondary_decoded_rate = 0.0f;
-    float accelerate_rate = 0.0f;
-    float preemptive_expand_rate = 0.0f;
-    int32_t decoding_calls_to_silence_generator = 0;
-    int32_t decoding_calls_to_neteq = 0;
-    int32_t decoding_normal = 0;
-    int32_t decoding_plc = 0;
-    int32_t decoding_cng = 0;
-    int32_t decoding_plc_cng = 0;
-    int64_t capture_start_ntp_time_ms = 0;
-  };
+  struct Stats {};
 
   struct Config {
     std::string ToString() const;
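The members stripped from Stats above are what the reverted AudioReceiveStream::GetStats() reported. Below is a rough, hedged sketch of how such a GetStats() could be wired up, reconstructed only from accessors that appear elsewhere in this diff (GetRTCPStatistics, GetNetworkStatistics, GetDecodingCallStatistics, GetDelayEstimate, GetSpeechOutputLevelFullRange) plus the deleted conversion and scoped-interface helpers; the include paths and every member marked "assumed" are guesses, and the actual reverted implementation certainly differs in detail:

    #include "webrtc/audio/conversion.h"                      // Q14ToFloat, deleted above.
    #include "webrtc/audio/scoped_voe_interface.h"            // Deleted above.
    #include "webrtc/audio_receive_stream.h"
    #include "webrtc/voice_engine/include/voe_neteq_stats.h"  // Assumed paths from here on.
    #include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
    #include "webrtc/voice_engine/include/voe_video_sync.h"
    #include "webrtc/voice_engine/include/voe_volume_control.h"

    namespace webrtc {

    AudioReceiveStream::Stats GetStatsSketch(VoiceEngine* voice_engine,
                                             int channel_id,
                                             uint32_t remote_ssrc) {
      AudioReceiveStream::Stats stats;
      stats.remote_ssrc = remote_ssrc;

      internal::ScopedVoEInterface<VoERTP_RTCP> rtp(voice_engine);
      CallStatistics call_stats = {0};
      rtp->GetRTCPStatistics(channel_id, call_stats);
      stats.bytes_rcvd = call_stats.bytesReceived;              // Assumed member name.
      stats.packets_rcvd = call_stats.packetsReceived;          // Assumed member name.
      stats.capture_start_ntp_time_ms = call_stats.capture_start_ntp_time_ms_;

      internal::ScopedVoEInterface<VoENetEqStats> neteq(voice_engine);
      NetworkStatistics network_stats = {0};
      neteq->GetNetworkStatistics(channel_id, network_stats);
      stats.jitter_buffer_ms = network_stats.currentBufferSize;              // Assumed.
      stats.jitter_buffer_preferred_ms = network_stats.preferredBufferSize;  // Assumed.
      stats.expand_rate = Q14ToFloat(network_stats.currentExpandRate);       // Assumed Q14 field.

      AudioDecodingCallStats decode_stats;
      neteq->GetDecodingCallStatistics(channel_id, &decode_stats);
      stats.decoding_calls_to_silence_generator =
          decode_stats.calls_to_silence_generator;
      stats.decoding_calls_to_neteq = decode_stats.calls_to_neteq;
      stats.decoding_normal = decode_stats.decoded_normal;
      stats.decoding_plc = decode_stats.decoded_plc;
      stats.decoding_cng = decode_stats.decoded_cng;
      stats.decoding_plc_cng = decode_stats.decoded_plc_cng;

      internal::ScopedVoEInterface<VoEVideoSync> sync(voice_engine);
      int jitter_buffer_delay_ms = 0;
      int playout_buffer_delay_ms = 0;
      sync->GetDelayEstimate(channel_id, &jitter_buffer_delay_ms,
                             &playout_buffer_delay_ms);
      stats.delay_estimate_ms =
          jitter_buffer_delay_ms + playout_buffer_delay_ms;     // Assumed combination.

      internal::ScopedVoEInterface<VoEVolumeControl> volume(voice_engine);
      unsigned int level = 0;
      volume->GetSpeechOutputLevelFullRange(channel_id, level);
      stats.audio_level = static_cast<int32_t>(level);

      return stats;
    }

    }  // namespace webrtc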
@@ -25,7 +25,6 @@
 #include "webrtc/test/encoder_settings.h"
 #include "webrtc/test/fake_decoder.h"
 #include "webrtc/test/fake_encoder.h"
-#include "webrtc/test/fake_voice_engine.h"
 #include "webrtc/test/frame_generator_capturer.h"
 
 namespace webrtc {
@@ -131,10 +130,8 @@ class BitrateEstimatorTest : public test::CallTest {
   }
 
   virtual void SetUp() {
-    Call::Config config;
-    config.voice_engine = &fake_voice_engine_;
-    receiver_call_.reset(Call::Create(config));
-    sender_call_.reset(Call::Create(config));
+    receiver_call_.reset(Call::Create(Call::Config()));
+    sender_call_.reset(Call::Create(Call::Config()));
 
     send_transport_.SetReceiver(receiver_call_->Receiver());
     receive_transport_.SetReceiver(sender_call_->Receiver());
@@ -268,7 +265,6 @@ class BitrateEstimatorTest : public test::CallTest {
     test::FakeDecoder fake_decoder_;
   };
 
-  test::FakeVoiceEngine fake_voice_engine_;
   TraceObserver receiver_trace_;
   test::DirectTransport send_transport_;
   test::DirectTransport receive_transport_;
@@ -123,8 +123,7 @@ class Call : public webrtc::Call, public PacketReceiver {
 
   VideoSendStream::RtpStateMap suspended_video_send_ssrcs_;
 
-  RtcEventLog* event_log_ = nullptr;
-  VoECodec* voe_codec_ = nullptr;
+  RtcEventLog* event_log_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(Call);
 };
@@ -143,7 +142,8 @@ Call::Call(const Call::Config& config)
       config_(config),
       network_enabled_(true),
       receive_crit_(RWLockWrapper::CreateRWLock()),
-      send_crit_(RWLockWrapper::CreateRWLock()) {
+      send_crit_(RWLockWrapper::CreateRWLock()),
+      event_log_(nullptr) {
   RTC_DCHECK(configuration_thread_checker_.CalledOnValidThread());
   RTC_DCHECK_GE(config.bitrate_config.min_bitrate_bps, 0);
   RTC_DCHECK_GE(config.bitrate_config.start_bitrate_bps,
@@ -153,11 +153,11 @@ Call::Call(const Call::Config& config)
                   config.bitrate_config.start_bitrate_bps);
   }
   if (config.voice_engine) {
-    // Keep a reference to VoECodec, so we're sure the VoiceEngine lives for the
-    // duration of the call.
-    voe_codec_ = VoECodec::GetInterface(config.voice_engine);
-    if (voe_codec_)
-      event_log_ = voe_codec_->GetEventLog();
+    VoECodec* voe_codec = VoECodec::GetInterface(config.voice_engine);
+    if (voe_codec) {
+      event_log_ = voe_codec->GetEventLog();
+      voe_codec->Release();
+    }
   }
 
   Trace::CreateTrace();
@@ -179,9 +179,6 @@ Call::~Call() {
 
   module_process_thread_->Stop();
   Trace::ReturnTrace();
-
-  if (voe_codec_)
-    voe_codec_->Release();
 }
 
 PacketReceiver* Call::Receiver() {
@@ -232,8 +229,7 @@ webrtc::AudioReceiveStream* Call::CreateAudioReceiveStream(
   TRACE_EVENT0("webrtc", "Call::CreateAudioReceiveStream");
   RTC_DCHECK(configuration_thread_checker_.CalledOnValidThread());
   AudioReceiveStream* receive_stream = new AudioReceiveStream(
-      channel_group_->GetRemoteBitrateEstimator(false), config,
-      config_.voice_engine);
+      channel_group_->GetRemoteBitrateEstimator(false), config);
   {
     WriteLockScoped write_lock(*receive_crit_);
     RTC_DCHECK(audio_receive_ssrcs_.find(config.rtp.remote_ssrc) ==
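The constructor hunk above captures the design difference: the reverted code kept the VoECodec interface (and with it a reference on the VoiceEngine) for the whole lifetime of the Call, releasing it in ~Call(), whereas the restored code reads the event log and releases the interface immediately. For illustration, a hedged sketch of an RAII variant built on the deleted ScopedVoEInterface helper; the EventLogHolder class is hypothetical and not part of either version:

    #include "webrtc/audio/scoped_voe_interface.h"      // Deleted by this revert.
    #include "webrtc/voice_engine/include/voe_codec.h"  // Assumed path for VoECodec.

    namespace webrtc {

    // Hypothetical holder: keeps VoECodec (and so the VoiceEngine) alive for as
    // long as the owning object lives, mirroring the intent of the reverted code.
    class EventLogHolder {
     public:
      explicit EventLogHolder(VoiceEngine* voice_engine)
          : voe_codec_(voice_engine),                    // GetInterface() in the ctor.
            event_log_(voe_codec_->GetEventLog()) {}
      RtcEventLog* event_log() const { return event_log_; }

     private:
      internal::ScopedVoEInterface<VoECodec> voe_codec_;  // Release() in the dtor.
      RtcEventLog* event_log_;
    };

    }  // namespace webrtc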
@@ -13,21 +13,19 @@
 #include "testing/gtest/include/gtest/gtest.h"
 
 #include "webrtc/call.h"
-#include "webrtc/test/fake_voice_engine.h"
 
 namespace {
 
 struct CallHelper {
-  CallHelper() : voice_engine_(new webrtc::test::FakeVoiceEngine()) {
+  CallHelper() {
     webrtc::Call::Config config;
-    config.voice_engine = voice_engine_.get();
+    // TODO(solenberg): Fill in with VoiceEngine* etc.
     call_.reset(webrtc::Call::Create(config));
   }
 
   webrtc::Call* operator->() { return call_.get(); }
 
  private:
-  rtc::scoped_ptr<webrtc::test::FakeVoiceEngine> voice_engine_;
   rtc::scoped_ptr<webrtc::Call> call_;
 };
 }  // namespace
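Both test fixtures touched by this revert lose the same wiring: constructing a Call whose Config::voice_engine points at a (fake) VoiceEngine so the Call can pick up the RtcEventLog. A small hedged sketch of that wiring in one place; the CreateCallWithAudio helper is hypothetical, everything else appears in the hunks above:

    #include "webrtc/call.h"

    namespace webrtc {
    class VoiceEngine;  // As forward-declared by the deleted helpers.
    }  // namespace webrtc

    // Hypothetical helper: create a Call that can pull its RtcEventLog from an
    // existing VoiceEngine, the way the reverted fixtures did. The caller owns
    // the returned object (e.g. via rtc::scoped_ptr, as in the hunk above).
    webrtc::Call* CreateCallWithAudio(webrtc::VoiceEngine* voice_engine) {
      webrtc::Call::Config config;
      config.voice_engine = voice_engine;
      return webrtc::Call::Create(config);
    }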
@@ -1,421 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_AUDIO_FAKE_VOICE_ENGINE_H_
-#define WEBRTC_AUDIO_FAKE_VOICE_ENGINE_H_
-
-#include <vector>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-#include "webrtc/voice_engine/voice_engine_impl.h"
-
-namespace webrtc {
-namespace test {
-
-// NOTE: This class inherits from VoiceEngineImpl so that its clients will be
-// able to get the various interfaces as usual, via T::GetInterface().
-class FakeVoiceEngine final : public VoiceEngineImpl {
- public:
-  const int kSendChannelId = 1;
-  const int kReceiveChannelId = 2;
-
-  const int kRecvJitterBufferDelay = -7;
-  const int kRecvPlayoutBufferDelay = 302;
-  const unsigned int kRecvSpeechOutputLevel = 99;
-
-  FakeVoiceEngine() : VoiceEngineImpl(new Config(), true) {
-    // Increase ref count so this object isn't automatically deleted whenever
-    // interfaces are Release():d.
-    ++_ref_count;
-  }
-  ~FakeVoiceEngine() override {
-    // Decrease ref count before base class d-tor is called; otherwise it will
-    // trigger an assertion.
-    --_ref_count;
-  }
-
-  const CallStatistics& GetRecvCallStats() const {
-    static const CallStatistics kStats = {
-      345, 678, 901, 234, -1, 0, 0, 567, 890, 123
-    };
-    return kStats;
-  }
-
-  const CodecInst& GetRecvRecCodecInst() const {
-    static const CodecInst kStats = {
-      123, "codec_name", 96000, -1, -1, -1
-    };
-    return kStats;
-  }
-
-  const NetworkStatistics& GetRecvNetworkStats() const {
-    static const NetworkStatistics kStats = {
-      123, 456, false, 0, 0, 789, 12, 345, 678, 901, -1, -1, -1, -1, -1, 0
-    };
-    return kStats;
-  }
-
-  const AudioDecodingCallStats& GetRecvAudioDecodingCallStats() const {
-    static AudioDecodingCallStats stats;
-    if (stats.calls_to_silence_generator == 0) {
-      stats.calls_to_silence_generator = 234;
-      stats.calls_to_neteq = 567;
-      stats.decoded_normal = 890;
-      stats.decoded_plc = 123;
-      stats.decoded_cng = 456;
-      stats.decoded_plc_cng = 789;
-    }
-    return stats;
-  }
-
-  // VoEBase
-  int RegisterVoiceEngineObserver(VoiceEngineObserver& observer) override {
-    return -1;
-  }
-  int DeRegisterVoiceEngineObserver() override { return -1; }
-  int Init(AudioDeviceModule* external_adm = NULL,
-           AudioProcessing* audioproc = NULL) override { return -1; }
-  AudioProcessing* audio_processing() override { return nullptr; }
-  int Terminate() override { return -1; }
-  int CreateChannel() override { return -1; }
-  int CreateChannel(const Config& config) override { return -1; }
-  int DeleteChannel(int channel) override { return -1; }
-  int StartReceive(int channel) override { return -1; }
-  int StopReceive(int channel) override { return -1; }
-  int StartPlayout(int channel) override { return -1; }
-  int StopPlayout(int channel) override { return -1; }
-  int StartSend(int channel) override { return -1; }
-  int StopSend(int channel) override { return -1; }
-  int GetVersion(char version[1024]) override { return -1; }
-  int LastError() override { return -1; }
-  AudioTransport* audio_transport() { return nullptr; }
-  int AssociateSendChannel(int channel, int accociate_send_channel) override {
-    return -1;
-  }
-
-  // VoECodec
-  int NumOfCodecs() override { return -1; }
-  int GetCodec(int index, CodecInst& codec) override { return -1; }
-  int SetSendCodec(int channel, const CodecInst& codec) override { return -1; }
-  int GetSendCodec(int channel, CodecInst& codec) override { return -1; }
-  int SetBitRate(int channel, int bitrate_bps) override { return -1; }
-  int GetRecCodec(int channel, CodecInst& codec) override {
-    EXPECT_EQ(channel, kReceiveChannelId);
-    codec = GetRecvRecCodecInst();
-    return 0;
-  }
-  int SetRecPayloadType(int channel, const CodecInst& codec) override {
-    return -1;
-  }
-  int GetRecPayloadType(int channel, CodecInst& codec) override { return -1; }
-  int SetSendCNPayloadType(int channel, int type,
-      PayloadFrequencies frequency = kFreq16000Hz) override { return -1; }
-  int SetVADStatus(int channel,
-                   bool enable,
-                   VadModes mode = kVadConventional,
-                   bool disableDTX = false) override { return -1; }
-  int GetVADStatus(int channel,
-                   bool& enabled,
-                   VadModes& mode,
-                   bool& disabledDTX) override { return -1; }
-  int SetOpusMaxPlaybackRate(int channel, int frequency_hz) override {
-    return -1;
-  }
-  int SetOpusDtx(int channel, bool enable_dtx) override { return -1; }
-  RtcEventLog* GetEventLog() override { return nullptr; }
-
-  // VoEDtmf
-  int SendTelephoneEvent(int channel,
-                         int eventCode,
-                         bool outOfBand = true,
-                         int lengthMs = 160,
-                         int attenuationDb = 10) override { return -1; }
-  int SetSendTelephoneEventPayloadType(int channel,
-                                       unsigned char type) override {
-    return -1;
-  }
-  int GetSendTelephoneEventPayloadType(int channel,
-                                       unsigned char& type) override {
-    return -1;
-  }
-  int SetDtmfFeedbackStatus(bool enable,
-                            bool directFeedback = false) override { return -1; }
-  int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) override {
-    return -1;
-  }
-  int PlayDtmfTone(int eventCode,
-                   int lengthMs = 200,
-                   int attenuationDb = 10) override { return -1; }
-
-  // VoEExternalMedia
-  int RegisterExternalMediaProcessing(
-      int channel,
-      ProcessingTypes type,
-      VoEMediaProcess& processObject) override { return -1; }
-  int DeRegisterExternalMediaProcessing(int channel,
-                                        ProcessingTypes type) override {
-    return -1;
-  }
-  int GetAudioFrame(int channel,
-                    int desired_sample_rate_hz,
-                    AudioFrame* frame) override { return -1; }
-  int SetExternalMixing(int channel, bool enable) override { return -1; }
-
-  // VoEFile
-  int StartPlayingFileLocally(
-      int channel,
-      const char fileNameUTF8[1024],
-      bool loop = false,
-      FileFormats format = kFileFormatPcm16kHzFile,
-      float volumeScaling = 1.0,
-      int startPointMs = 0,
-      int stopPointMs = 0) override { return -1; }
-  int StartPlayingFileLocally(
-      int channel,
-      InStream* stream,
-      FileFormats format = kFileFormatPcm16kHzFile,
-      float volumeScaling = 1.0,
-      int startPointMs = 0,
-      int stopPointMs = 0) override { return -1; }
-  int StopPlayingFileLocally(int channel) override { return -1; }
-  int IsPlayingFileLocally(int channel) override { return -1; }
-  int StartPlayingFileAsMicrophone(
-      int channel,
-      const char fileNameUTF8[1024],
-      bool loop = false,
-      bool mixWithMicrophone = false,
-      FileFormats format = kFileFormatPcm16kHzFile,
-      float volumeScaling = 1.0) override { return -1; }
-  int StartPlayingFileAsMicrophone(
-      int channel,
-      InStream* stream,
-      bool mixWithMicrophone = false,
-      FileFormats format = kFileFormatPcm16kHzFile,
-      float volumeScaling = 1.0) override { return -1; }
-  int StopPlayingFileAsMicrophone(int channel) override { return -1; }
-  int IsPlayingFileAsMicrophone(int channel) override { return -1; }
-  int StartRecordingPlayout(int channel,
-                            const char* fileNameUTF8,
-                            CodecInst* compression = NULL,
-                            int maxSizeBytes = -1) override { return -1; }
-  int StopRecordingPlayout(int channel) override { return -1; }
-  int StartRecordingPlayout(int channel,
-                            OutStream* stream,
-                            CodecInst* compression = NULL) override {
-    return -1;
-  }
-  int StartRecordingMicrophone(const char* fileNameUTF8,
-                               CodecInst* compression = NULL,
-                               int maxSizeBytes = -1) override { return -1; }
-  int StartRecordingMicrophone(OutStream* stream,
-                               CodecInst* compression = NULL) override {
-    return -1;
-  }
-  int StopRecordingMicrophone() override { return -1; }
-
-  // VoEHardware
-  int GetNumOfRecordingDevices(int& devices) override { return -1; }
-
-  // Gets the number of audio devices available for playout.
-  int GetNumOfPlayoutDevices(int& devices) override { return -1; }
-
-  // Gets the name of a specific recording device given by an |index|.
-  // On Windows Vista/7, it also retrieves an additional unique ID
-  // (GUID) for the recording device.
-  int GetRecordingDeviceName(int index,
-                             char strNameUTF8[128],
-                             char strGuidUTF8[128]) override { return -1; }
-
-  // Gets the name of a specific playout device given by an |index|.
-  // On Windows Vista/7, it also retrieves an additional unique ID
-  // (GUID) for the playout device.
-  int GetPlayoutDeviceName(int index,
-                           char strNameUTF8[128],
-                           char strGuidUTF8[128]) override { return -1; }
-
-  // Sets the audio device used for recording.
-  int SetRecordingDevice(
-      int index,
-      StereoChannel recordingChannel = kStereoBoth) override { return -1; }
-
-  // Sets the audio device used for playout.
-  int SetPlayoutDevice(int index) override { return -1; }
-
-  // Sets the type of audio device layer to use.
-  int SetAudioDeviceLayer(AudioLayers audioLayer) override { return -1; }
-
-  // Gets the currently used (active) audio device layer.
-  int GetAudioDeviceLayer(AudioLayers& audioLayer) override { return -1; }
-
-  // Native sample rate controls (samples/sec)
-  int SetRecordingSampleRate(unsigned int samples_per_sec) override {
-    return -1;
-  }
-  int RecordingSampleRate(unsigned int* samples_per_sec) const override {
-    return -1;
-  }
-  int SetPlayoutSampleRate(unsigned int samples_per_sec) override {
-    return -1;
-  }
-  int PlayoutSampleRate(unsigned int* samples_per_sec) const override {
-    return -1;
-  }
-
-  // Queries and controls platform audio effects on Android devices.
-  bool BuiltInAECIsAvailable() const override { return false; }
-  int EnableBuiltInAEC(bool enable) override { return -1; }
-  bool BuiltInAGCIsAvailable() const override { return false; }
-  int EnableBuiltInAGC(bool enable) override { return -1; }
-  bool BuiltInNSIsAvailable() const override { return false; }
-  int EnableBuiltInNS(bool enable) override { return -1; }
-
-  // VoENetwork
-  int RegisterExternalTransport(int channel, Transport& transport) override {
-    return -1;
-  }
-  int DeRegisterExternalTransport(int channel) override { return -1; }
-  int ReceivedRTPPacket(int channel,
-                        const void* data,
-                        size_t length) override { return -1; }
-  int ReceivedRTPPacket(int channel,
-                        const void* data,
-                        size_t length,
-                        const PacketTime& packet_time) override { return -1; }
-  int ReceivedRTCPPacket(int channel,
-                         const void* data,
-                         size_t length) { return -1; }
-
-  // VoENetEqStats
-  int GetNetworkStatistics(int channel, NetworkStatistics& stats) override {
-    EXPECT_EQ(channel, kReceiveChannelId);
-    stats = GetRecvNetworkStats();
-    return 0;
-  }
-  int GetDecodingCallStatistics(int channel,
-      AudioDecodingCallStats* stats) const override {
-    EXPECT_EQ(channel, kReceiveChannelId);
-    EXPECT_NE(nullptr, stats);
-    *stats = GetRecvAudioDecodingCallStats();
-    return 0;
-  }
-
-  // VoERTP_RTCP
-  int SetLocalSSRC(int channel, unsigned int ssrc) override { return -1; }
-  int GetLocalSSRC(int channel, unsigned int& ssrc) override { return -1; }
-  int GetRemoteSSRC(int channel, unsigned int& ssrc) override {
-    EXPECT_EQ(channel, kReceiveChannelId);
-    ssrc = 0;
-    return 0;
-  }
-  int SetSendAudioLevelIndicationStatus(int channel,
-                                        bool enable,
-                                        unsigned char id = 1) override {
-    return -1;
-  }
-  int SetSendAbsoluteSenderTimeStatus(int channel,
-                                      bool enable,
-                                      unsigned char id) override { return -1; }
-  int SetReceiveAbsoluteSenderTimeStatus(int channel,
-                                         bool enable,
-                                         unsigned char id) override {
-    return -1;
-  }
-  int SetRTCPStatus(int channel, bool enable) override { return -1; }
-  int GetRTCPStatus(int channel, bool& enabled) override { return -1; }
-  int SetRTCP_CNAME(int channel, const char cName[256]) override { return -1; }
-  int GetRTCP_CNAME(int channel, char cName[256]) { return -1; }
-  int GetRemoteRTCP_CNAME(int channel, char cName[256]) override { return -1; }
-  int GetRemoteRTCPData(int channel,
-                        unsigned int& NTPHigh,
-                        unsigned int& NTPLow,
-                        unsigned int& timestamp,
-                        unsigned int& playoutTimestamp,
-                        unsigned int* jitter = NULL,
-                        unsigned short* fractionLost = NULL) override {
-    return -1;
-  }
-  int GetRTPStatistics(int channel,
-                       unsigned int& averageJitterMs,
-                       unsigned int& maxJitterMs,
-                       unsigned int& discardedPackets) override { return -1; }
-  int GetRTCPStatistics(int channel, CallStatistics& stats) override {
-    EXPECT_EQ(channel, kReceiveChannelId);
-    stats = GetRecvCallStats();
-    return 0;
-  }
-  int GetRemoteRTCPReportBlocks(
-      int channel,
-      std::vector<ReportBlock>* receive_blocks) override { return -1; }
-  int SetNACKStatus(int channel, bool enable, int maxNoPackets) override {
-    return -1;
-  }
-
-  // VoEVideoSync
-  int GetPlayoutBufferSize(int& buffer_ms) override { return -1; }
-  int SetMinimumPlayoutDelay(int channel, int delay_ms) override { return -1; }
-  int SetInitialPlayoutDelay(int channel, int delay_ms) override { return -1; }
-  int GetDelayEstimate(int channel,
-                       int* jitter_buffer_delay_ms,
-                       int* playout_buffer_delay_ms) override {
-    EXPECT_EQ(channel, kReceiveChannelId);
-    *jitter_buffer_delay_ms = kRecvJitterBufferDelay;
-    *playout_buffer_delay_ms = kRecvPlayoutBufferDelay;
-    return 0;
-  }
-  int GetLeastRequiredDelayMs(int channel) const override { return -1; }
-  int SetInitTimestamp(int channel, unsigned int timestamp) override {
-    return -1;
-  }
-  int SetInitSequenceNumber(int channel, short sequenceNumber) override {
-    return -1;
-  }
-  int GetPlayoutTimestamp(int channel, unsigned int& timestamp) override {
-    return -1;
-  }
-  int GetRtpRtcp(int channel,
-                 RtpRtcp** rtpRtcpModule,
-                 RtpReceiver** rtp_receiver) override { return -1; }
-
-  // VoEVolumeControl
-  int SetSpeakerVolume(unsigned int volume) override { return -1; }
-  int GetSpeakerVolume(unsigned int& volume) override { return -1; }
-  int SetMicVolume(unsigned int volume) override { return -1; }
-  int GetMicVolume(unsigned int& volume) override { return -1; }
-  int SetInputMute(int channel, bool enable) override { return -1; }
-  int GetInputMute(int channel, bool& enabled) override { return -1; }
-  int GetSpeechInputLevel(unsigned int& level) override { return -1; }
-  int GetSpeechOutputLevel(int channel, unsigned int& level) override {
-    return -1;
-  }
-  int GetSpeechInputLevelFullRange(unsigned int& level) override { return -1; }
-  int GetSpeechOutputLevelFullRange(int channel,
-                                    unsigned int& level) override {
-    EXPECT_EQ(channel, kReceiveChannelId);
-    level = kRecvSpeechOutputLevel;
-    return 0;
-  }
-  int SetChannelOutputVolumeScaling(int channel, float scaling) override {
-    return -1;
-  }
-  int GetChannelOutputVolumeScaling(int channel, float& scaling) override {
-    return -1;
-  }
-  int SetOutputVolumePan(int channel, float left, float right) override {
-    return -1;
-  }
-  int GetOutputVolumePan(int channel, float& left, float& right) override {
-    return -1;
-  }
-};
-}  // namespace test
-}  // namespace webrtc
-
-#endif  // WEBRTC_AUDIO_FAKE_VOICE_ENGINE_H_
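Because the deleted fake increments the inherited _ref_count in its constructor, it can live on the stack while callers acquire and release VoE interfaces in the usual way. A hedged sketch of the kind of assertion it enables; the test itself is hypothetical, but every call and canned value is taken from the deleted header above (pltype as the first CodecInst member is an assumption):

    #include "testing/gtest/include/gtest/gtest.h"
    #include "webrtc/test/fake_voice_engine.h"          // Deleted by this revert.
    #include "webrtc/voice_engine/include/voe_codec.h"  // Assumed path for VoECodec.

    TEST(FakeVoiceEngineSketch, HandsOutCannedReceiveCodec) {
      webrtc::test::FakeVoiceEngine voe;                // Stack-allocated; see ctor comment.
      webrtc::VoECodec* codec = webrtc::VoECodec::GetInterface(&voe);
      ASSERT_NE(nullptr, codec);
      webrtc::CodecInst codec_inst = {0};
      EXPECT_EQ(0, codec->GetRecCodec(voe.kReceiveChannelId, codec_inst));
      EXPECT_EQ(123, codec_inst.pltype);                // First canned value above.
      codec->Release();                                 // Fake survives thanks to ++_ref_count.
    }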
@@ -30,7 +30,6 @@
         'fake_encoder.h',
         'fake_network_pipe.cc',
         'fake_network_pipe.h',
-        'fake_voice_engine.h',
         'frame_generator_capturer.cc',
         'frame_generator_capturer.h',
         'layer_filtering_transport.cc',
@@ -128,9 +128,7 @@ class VoiceEngineImpl : public voe::SharedData, // Must be the first base class
   // This implements the Release() method for all the inherited interfaces.
   int Release() override;
 
-  // This is *protected* so that FakeVoiceEngine can inherit from the class and
-  // manipulate the reference count. See: fake_voice_engine.h.
- protected:
+ private:
   Atomic32 _ref_count;
   rtc::scoped_ptr<const Config> own_config_;
 };