- Removes voe_conference_test.

- Adds a new AudioStatsTest, with better coverage of the same features, based on call_test.
- Adds an AudioEndToEndTest utility, which AudioStatsTest and LowBandwidthAudioTest use.

BUG=webrtc:4690
R=kwiberg@webrtc.org

Review-Url: https://codereview.webrtc.org/3008273002 .
Cr-Commit-Position: refs/heads/master@{#19833}
This commit is contained in:
Fredrik Solenberg 2017-09-14 14:46:47 +02:00
parent 7d1f493a8b
commit 73276ad7ed
15 changed files with 367 additions and 924 deletions

View File

@ -58,6 +58,27 @@ rtc_static_library("audio") {
]
}
if (rtc_include_tests) {
rtc_source_set("audio_end_to_end_test") {
testonly = true
sources = [
"test/audio_end_to_end_test.cc",
"test/audio_end_to_end_test.h",
]
deps = [
":audio",
"../system_wrappers:system_wrappers",
"../test:fake_audio_device",
"../test:test_common",
"../test:test_support",
]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
}
rtc_source_set("audio_tests") {
testonly = true
@ -80,6 +101,7 @@ if (rtc_include_tests) {
]
deps = [
":audio",
":audio_end_to_end_test",
"../api:mock_audio_mixer",
"../call:rtp_receiver",
"../modules/audio_device:mock_audio_device",
@ -96,6 +118,11 @@ if (rtc_include_tests) {
"//testing/gtest",
]
if (!rtc_use_memcheck) {
# This test is timing dependent, which rules out running on memcheck bots.
sources += [ "test/audio_stats_test.cc" ]
}
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
@ -108,10 +135,10 @@ if (rtc_include_tests) {
sources = [
"test/low_bandwidth_audio_test.cc",
"test/low_bandwidth_audio_test.h",
]
deps = [
":audio_end_to_end_test",
"../common_audio",
"../rtc_base:rtc_base_approved",
"../system_wrappers",

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <algorithm>
#include "webrtc/audio/test/audio_end_to_end_test.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/test/fake_audio_device.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
namespace {
// Wait half a second between stopping sending and stopping receiving audio.
constexpr int kExtraRecordTimeMs = 500;
// Sample rate used by both the fake capturer and the fake renderer.
constexpr int kSampleRate = 48000;
}  // namespace
// Uses the default CallTest timeout for the whole end-to-end run.
AudioEndToEndTest::AudioEndToEndTest()
: EndToEndTest(CallTest::kDefaultTimeoutMs) {}
// Default network simulation: no delay, loss or capacity limit. Subclasses
// override this to model specific network conditions.
FakeNetworkPipe::Config AudioEndToEndTest::GetNetworkPipeConfig() const {
return FakeNetworkPipe::Config();
}
// Audio-only fixture: exactly one audio stream, no video and no FlexFEC.
size_t AudioEndToEndTest::GetNumVideoStreams() const {
return 0;
}
size_t AudioEndToEndTest::GetNumAudioStreams() const {
return 1;
}
size_t AudioEndToEndTest::GetNumFlexfecStreams() const {
return 0;
}
// Captures pulsed noise at kSampleRate; 32000 is the max amplitude, which
// keeps samples within the int16 range.
std::unique_ptr<test::FakeAudioDevice::Capturer>
AudioEndToEndTest::CreateCapturer() {
return test::FakeAudioDevice::CreatePulsedNoiseCapturer(32000, kSampleRate);
}
// The base test does not inspect rendered audio, so it is discarded.
// Subclasses may override to record the output instead.
std::unique_ptr<test::FakeAudioDevice::Renderer>
AudioEndToEndTest::CreateRenderer() {
return test::FakeAudioDevice::CreateDiscardRenderer(kSampleRate);
}
// Keeps a pointer to the sending fake audio device so PerformTest() can wait
// for its recording to end; the receiving device is not needed here.
void AudioEndToEndTest::OnFakeAudioDevicesCreated(
test::FakeAudioDevice* send_audio_device,
test::FakeAudioDevice* recv_audio_device) {
send_audio_device_ = send_audio_device;
}
// Both transports are built from GetNetworkPipeConfig(), so a subclass that
// overrides the pipe config shapes traffic in both directions.
test::PacketTransport* AudioEndToEndTest::CreateSendTransport(
SingleThreadedTaskQueueForTesting* task_queue,
Call* sender_call) {
return new test::PacketTransport(
task_queue, sender_call, this, test::PacketTransport::kSender,
test::CallTest::payload_type_map_, GetNetworkPipeConfig());
}
// The receive transport has no associated Call, hence the nullptr.
test::PacketTransport* AudioEndToEndTest::CreateReceiveTransport(
SingleThreadedTaskQueueForTesting* task_queue) {
return new test::PacketTransport(
task_queue, nullptr, this, test::PacketTransport::kReceiver,
test::CallTest::payload_type_map_, GetNetworkPipeConfig());
}
// Configures the send stream to use stereo Opus at 48 kHz with the default
// CallTest audio payload type. Receive configs are left untouched.
void AudioEndToEndTest::ModifyAudioConfigs(
AudioSendStream::Config* send_config,
std::vector<AudioReceiveStream::Config>* receive_configs) {
// Large bitrate by default.
const webrtc::SdpAudioFormat kDefaultFormat("opus", 48000, 2,
{{"stereo", "1"}});
send_config->send_codec_spec =
rtc::Optional<AudioSendStream::Config::SendCodecSpec>(
{test::CallTest::kAudioSendPayloadType, kDefaultFormat});
}
// Stores the stream pointers so subclasses can query stats via
// send_stream()/receive_stream(). Exactly one receive stream is expected,
// matching GetNumAudioStreams().
void AudioEndToEndTest::OnAudioStreamsCreated(
AudioSendStream* send_stream,
const std::vector<AudioReceiveStream*>& receive_streams) {
ASSERT_NE(nullptr, send_stream);
ASSERT_EQ(1u, receive_streams.size());
ASSERT_NE(nullptr, receive_streams[0]);
send_stream_ = send_stream;
receive_stream_ = receive_streams[0];
}
// Default test body: run until the fake capturer stops recording, then give
// in-flight packets time to arrive before streams are torn down.
void AudioEndToEndTest::PerformTest() {
// Wait until the input audio file is done...
send_audio_device_->WaitForRecordingEnd();
// and some extra time to account for network delay.
SleepMs(GetNetworkPipeConfig().queue_delay_ms + kExtraRecordTimeMs);
}
} // namespace test
} // namespace webrtc

View File

@ -7,28 +7,28 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_AUDIO_TEST_LOW_BANDWIDTH_AUDIO_TEST_H_
#define WEBRTC_AUDIO_TEST_LOW_BANDWIDTH_AUDIO_TEST_H_
#ifndef WEBRTC_AUDIO_TEST_AUDIO_END_TO_END_TEST_H_
#define WEBRTC_AUDIO_TEST_AUDIO_END_TO_END_TEST_H_
#include <memory>
#include <string>
#include <vector>
#include "webrtc/test/call_test.h"
#include "webrtc/test/fake_audio_device.h"
namespace webrtc {
namespace test {
class AudioQualityTest : public test::EndToEndTest {
class AudioEndToEndTest : public test::EndToEndTest {
public:
AudioQualityTest();
AudioEndToEndTest();
protected:
virtual std::string AudioInputFile();
virtual std::string AudioOutputFile();
test::FakeAudioDevice* send_audio_device() { return send_audio_device_; }
const AudioSendStream* send_stream() const { return send_stream_; }
const AudioReceiveStream* receive_stream() const { return receive_stream_; }
virtual FakeNetworkPipe::Config GetNetworkPipeConfig();
virtual FakeNetworkPipe::Config GetNetworkPipeConfig() const;
size_t GetNumVideoStreams() const override;
size_t GetNumAudioStreams() const override;
@ -50,15 +50,19 @@ class AudioQualityTest : public test::EndToEndTest {
void ModifyAudioConfigs(
AudioSendStream::Config* send_config,
std::vector<AudioReceiveStream::Config>* receive_configs) override;
void OnAudioStreamsCreated(
AudioSendStream* send_stream,
const std::vector<AudioReceiveStream*>& receive_streams) override;
void PerformTest() override;
void OnTestFinished() override;
private:
test::FakeAudioDevice* send_audio_device_;
test::FakeAudioDevice* send_audio_device_ = nullptr;
AudioSendStream* send_stream_ = nullptr;
AudioReceiveStream* receive_stream_ = nullptr;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_AUDIO_TEST_LOW_BANDWIDTH_AUDIO_TEST_H_
#endif // WEBRTC_AUDIO_TEST_AUDIO_END_TO_END_TEST_H_

View File

@ -0,0 +1,118 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/audio/test/audio_end_to_end_test.h"
#include "webrtc/rtc_base/safe_compare.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
namespace {
// True iff |v| deviates from |reference| by at most 10% of |reference|,
// plus one unit of slack so tiny reference values still allow deviation.
bool IsNear(int reference, int v) {
  const int allowed_error = reference / 10 + 1;
  const int deviation = std::abs(reference - v);
  return deviation <= allowed_error;
}
// Sends pulsed-noise audio for 8 seconds over a loss-free pipe with a
// simulated 100 ms RTT, then verifies the send- and receive-stream stats.
class NoLossTest : public AudioEndToEndTest {
 public:
  // Expected totals for the 8 second Opus stream; byte/packet counts are
  // checked with a 10% margin (see IsNear).
  const int kTestDurationMs = 8000;
  const int kBytesSent = 69351;
  const int32_t kPacketsSent = 400;
  const int64_t kRttMs = 100;

  NoLossTest() = default;

  // Symmetric one-way delay of RTT/2; no loss or capacity limit.
  FakeNetworkPipe::Config GetNetworkPipeConfig() const override {
    FakeNetworkPipe::Config pipe_config;
    pipe_config.queue_delay_ms = kRttMs / 2;
    return pipe_config;
  }

  // Run for the fixed duration, then stop capturing and let the base class
  // wait out the network delay.
  void PerformTest() override {
    SleepMs(kTestDurationMs);
    send_audio_device()->StopRecording();
    AudioEndToEndTest::PerformTest();
  }

  // Called by CallTest after streams have been stopped but before they are
  // destroyed, so GetStats() is still valid here.
  void OnStreamsStopped() override {
    AudioSendStream::Stats send_stats = send_stream()->GetStats();
    EXPECT_PRED2(IsNear, kBytesSent, send_stats.bytes_sent);
    EXPECT_PRED2(IsNear, kPacketsSent, send_stats.packets_sent);
    EXPECT_EQ(0, send_stats.packets_lost);
    EXPECT_EQ(0.0f, send_stats.fraction_lost);
    EXPECT_EQ("opus", send_stats.codec_name);
    // send_stats.jitter_ms
    EXPECT_PRED2(IsNear, kRttMs, send_stats.rtt_ms);
    // Send level is 0 because it is cleared in TransmitMixer::StopSend().
    EXPECT_EQ(0, send_stats.audio_level);
    // send_stats.total_input_energy
    // send_stats.total_input_duration
    EXPECT_EQ(-1.0f, send_stats.aec_quality_min);
    EXPECT_EQ(-1, send_stats.echo_delay_median_ms);
    EXPECT_EQ(-1, send_stats.echo_delay_std_ms);
    EXPECT_EQ(-100, send_stats.echo_return_loss);
    EXPECT_EQ(-100, send_stats.echo_return_loss_enhancement);
    EXPECT_EQ(0.0f, send_stats.residual_echo_likelihood);
    EXPECT_EQ(0.0f, send_stats.residual_echo_likelihood_recent_max);
    EXPECT_EQ(false, send_stats.typing_noise_detected);

    AudioReceiveStream::Stats recv_stats = receive_stream()->GetStats();
    EXPECT_PRED2(IsNear, kBytesSent, recv_stats.bytes_rcvd);
    EXPECT_PRED2(IsNear, kPacketsSent, recv_stats.packets_rcvd);
    EXPECT_EQ(0u, recv_stats.packets_lost);
    EXPECT_EQ(0.0f, recv_stats.fraction_lost);
    // Fixed: verify the receive-side codec name; previously this line
    // re-checked send_stats.codec_name, leaving recv_stats unverified.
    EXPECT_EQ("opus", recv_stats.codec_name);
    // recv_stats.jitter_ms
    // recv_stats.jitter_buffer_ms
    EXPECT_EQ(20u, recv_stats.jitter_buffer_preferred_ms);
    // recv_stats.delay_estimate_ms
    // Receive level is 0 because it is cleared in Channel::StopPlayout().
    EXPECT_EQ(0, recv_stats.audio_level);
    // recv_stats.total_output_energy
    // recv_stats.total_samples_received
    // recv_stats.total_output_duration
    // recv_stats.concealed_samples
    // recv_stats.expand_rate
    // recv_stats.speech_expand_rate
    EXPECT_EQ(0.0, recv_stats.secondary_decoded_rate);
    EXPECT_EQ(0.0, recv_stats.secondary_discarded_rate);
    EXPECT_EQ(0.0, recv_stats.accelerate_rate);
    EXPECT_EQ(0.0, recv_stats.preemptive_expand_rate);
    EXPECT_EQ(0, recv_stats.decoding_calls_to_silence_generator);
    // recv_stats.decoding_calls_to_neteq
    // recv_stats.decoding_normal
    // recv_stats.decoding_plc
    EXPECT_EQ(0, recv_stats.decoding_cng);
    // recv_stats.decoding_plc_cng
    // recv_stats.decoding_muted_output
    // Capture start time is -1 because we do not have an associated send
    // stream on the receiver side.
    EXPECT_EQ(-1, recv_stats.capture_start_ntp_time_ms);

    // Match these stats between caller and receiver.
    EXPECT_EQ(send_stats.local_ssrc, recv_stats.remote_ssrc);
    EXPECT_EQ(*send_stats.codec_payload_type, *recv_stats.codec_payload_type);
    EXPECT_TRUE(rtc::SafeEq(send_stats.ext_seqnum, recv_stats.ext_seqnum));
  }
};
} // namespace
// AudioStatsTest is a plain CallTest; the alias only gives the suite a
// descriptive name in test output.
using AudioStatsTest = CallTest;
TEST_F(AudioStatsTest, NoLoss) {
NoLossTest test;
RunBaseTest(&test);
}
} // namespace test
} // namespace webrtc

View File

@ -8,16 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <algorithm>
#include "webrtc/audio/test/low_bandwidth_audio_test.h"
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/audio/test/audio_end_to_end_test.h"
#include "webrtc/rtc_base/flags.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/test/gtest.h"
#include "webrtc/test/testsupport/fileutils.h"
DEFINE_int(sample_rate_hz, 16000,
"Sample rate (Hz) of the produced audio files.");
@ -25,122 +20,59 @@ DEFINE_bool(quick, false,
"Don't do the full audio recording. "
"Used to quickly check that the test runs without crashing.");
namespace webrtc {
namespace test {
namespace {
// Wait half a second between stopping sending and stopping receiving audio.
constexpr int kExtraRecordTimeMs = 500;
std::string FileSampleRateSuffix() {
return std::to_string(FLAG_sample_rate_hz / 1000);
}
} // namespace
class AudioQualityTest : public AudioEndToEndTest {
public:
AudioQualityTest() = default;
namespace webrtc {
namespace test {
AudioQualityTest::AudioQualityTest()
: EndToEndTest(CallTest::kDefaultTimeoutMs) {}
size_t AudioQualityTest::GetNumVideoStreams() const {
return 0;
}
size_t AudioQualityTest::GetNumAudioStreams() const {
return 1;
}
size_t AudioQualityTest::GetNumFlexfecStreams() const {
return 0;
}
std::string AudioQualityTest::AudioInputFile() {
return test::ResourcePath("voice_engine/audio_tiny" + FileSampleRateSuffix(),
"wav");
}
std::string AudioQualityTest::AudioOutputFile() {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
return webrtc::test::OutputPath() + "LowBandwidth_" + test_info->name() +
"_" + FileSampleRateSuffix() + ".wav";
}
std::unique_ptr<test::FakeAudioDevice::Capturer>
AudioQualityTest::CreateCapturer() {
return test::FakeAudioDevice::CreateWavFileReader(AudioInputFile());
}
std::unique_ptr<test::FakeAudioDevice::Renderer>
AudioQualityTest::CreateRenderer() {
return test::FakeAudioDevice::CreateBoundedWavFileWriter(
AudioOutputFile(), FLAG_sample_rate_hz);
}
void AudioQualityTest::OnFakeAudioDevicesCreated(
test::FakeAudioDevice* send_audio_device,
test::FakeAudioDevice* recv_audio_device) {
send_audio_device_ = send_audio_device;
}
FakeNetworkPipe::Config AudioQualityTest::GetNetworkPipeConfig() {
return FakeNetworkPipe::Config();
}
test::PacketTransport* AudioQualityTest::CreateSendTransport(
SingleThreadedTaskQueueForTesting* task_queue,
Call* sender_call) {
return new test::PacketTransport(
task_queue, sender_call, this, test::PacketTransport::kSender,
test::CallTest::payload_type_map_, GetNetworkPipeConfig());
}
test::PacketTransport* AudioQualityTest::CreateReceiveTransport(
SingleThreadedTaskQueueForTesting* task_queue) {
return new test::PacketTransport(
task_queue, nullptr, this, test::PacketTransport::kReceiver,
test::CallTest::payload_type_map_, GetNetworkPipeConfig());
}
void AudioQualityTest::ModifyAudioConfigs(
AudioSendStream::Config* send_config,
std::vector<AudioReceiveStream::Config>* receive_configs) {
// Large bitrate by default.
const webrtc::SdpAudioFormat kDefaultFormat("OPUS", 48000, 2,
{{"stereo", "1"}});
send_config->send_codec_spec =
rtc::Optional<AudioSendStream::Config::SendCodecSpec>(
{test::CallTest::kAudioSendPayloadType, kDefaultFormat});
}
void AudioQualityTest::PerformTest() {
if (FLAG_quick) {
// Let the recording run for a small amount of time to check if it works.
SleepMs(1000);
} else {
// Wait until the input audio file is done...
send_audio_device_->WaitForRecordingEnd();
// and some extra time to account for network delay.
SleepMs(GetNetworkPipeConfig().queue_delay_ms + kExtraRecordTimeMs);
private:
std::string AudioInputFile() const {
return test::ResourcePath(
"voice_engine/audio_tiny" + FileSampleRateSuffix(), "wav");
}
}
void AudioQualityTest::OnTestFinished() {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::string AudioOutputFile() const {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
return webrtc::test::OutputPath() + "LowBandwidth_" + test_info->name() +
"_" + FileSampleRateSuffix() + ".wav";
}
// Output information about the input and output audio files so that further
// processing can be done by an external process.
printf("TEST %s %s %s\n", test_info->name(),
AudioInputFile().c_str(), AudioOutputFile().c_str());
}
std::unique_ptr<test::FakeAudioDevice::Capturer> CreateCapturer() override {
return test::FakeAudioDevice::CreateWavFileReader(AudioInputFile());
}
std::unique_ptr<test::FakeAudioDevice::Renderer> CreateRenderer() override {
return test::FakeAudioDevice::CreateBoundedWavFileWriter(
AudioOutputFile(), FLAG_sample_rate_hz);
}
using LowBandwidthAudioTest = CallTest;
void PerformTest() override {
if (FLAG_quick) {
// Let the recording run for a small amount of time to check if it works.
SleepMs(1000);
} else {
AudioEndToEndTest::PerformTest();
}
}
TEST_F(LowBandwidthAudioTest, GoodNetworkHighBitrate) {
AudioQualityTest test;
RunBaseTest(&test);
}
void OnStreamsStopped() override {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
// Output information about the input and output audio files so that further
// processing can be done by an external process.
printf("TEST %s %s %s\n", test_info->name(),
AudioInputFile().c_str(), AudioOutputFile().c_str());
}
};
class Mobile2GNetworkTest : public AudioQualityTest {
void ModifyAudioConfigs(AudioSendStream::Config* send_config,
@ -156,7 +88,7 @@ class Mobile2GNetworkTest : public AudioQualityTest {
{"stereo", "1"}}}});
}
FakeNetworkPipe::Config GetNetworkPipeConfig() override {
FakeNetworkPipe::Config GetNetworkPipeConfig() const override {
FakeNetworkPipe::Config pipe_config;
pipe_config.link_capacity_kbps = 12;
pipe_config.queue_length_packets = 1500;
@ -164,11 +96,18 @@ class Mobile2GNetworkTest : public AudioQualityTest {
return pipe_config;
}
};
} // namespace
using LowBandwidthAudioTest = CallTest;
TEST_F(LowBandwidthAudioTest, GoodNetworkHighBitrate) {
AudioQualityTest test;
RunBaseTest(&test);
}
TEST_F(LowBandwidthAudioTest, Mobile2GNetwork) {
Mobile2GNetworkTest test;
RunBaseTest(&test);
}
} // namespace test
} // namespace webrtc

View File

@ -1914,6 +1914,33 @@ TEST_F(PeerConnectionIntegrationTest, GetBytesSentStatsWithOldStatsApi) {
EXPECT_GT(caller()->OldGetStatsForTrack(video_track)->BytesSent(), 0);
}
// Test that we can get capture start ntp time.
TEST_F(PeerConnectionIntegrationTest, GetCaptureStartNtpTimeWithOldStatsApi) {
ASSERT_TRUE(CreatePeerConnectionWrappers());
ConnectFakeSignaling();
caller()->AddAudioOnlyMediaStream();
auto audio_track = callee()->CreateLocalAudioTrack();
callee()->AddMediaStreamFromTracks(audio_track, nullptr);
// Do offer/answer, wait for the callee to receive some frames.
caller()->CreateAndSetAndSignalOffer();
ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
// Get the remote audio track created on the receiver, so they can be used as
// GetStats filters.
StreamCollectionInterface* remote_streams = callee()->remote_streams();
ASSERT_EQ(1u, remote_streams->count());
ASSERT_EQ(1u, remote_streams->at(0)->GetAudioTracks().size());
MediaStreamTrackInterface* remote_audio_track =
remote_streams->at(0)->GetAudioTracks()[0];
// Wait for the capture start NTP time stat. Note that it is not available
// until an RTCP packet has been received.
EXPECT_TRUE_WAIT(callee()->OldGetStatsForTrack(remote_audio_track)->
CaptureStartNtpTime() > 0, 2 * kMaxWaitForFramesMs);
}
// Test that we can get stats (using the new stats implementation) for
// unsignaled streams, i.e. when SSRCs/MSIDs aren't signaled explicitly in
// SDP.

View File

@ -125,6 +125,8 @@ class MockStatsObserver : public webrtc::StatsObserver {
&stats_.bytes_received);
GetIntValue(r, StatsReport::kStatsValueNameBytesSent,
&stats_.bytes_sent);
GetInt64Value(r, StatsReport::kStatsValueNameCaptureStartNtpTimeMs,
&stats_.capture_start_ntp_time);
} else if (r->type() == StatsReport::kStatsReportTypeBwe) {
stats_.timestamp = r->timestamp();
GetIntValue(r, StatsReport::kStatsValueNameAvailableReceiveBandwidth,
@ -163,6 +165,11 @@ class MockStatsObserver : public webrtc::StatsObserver {
return stats_.bytes_sent;
}
int64_t CaptureStartNtpTime() const {
RTC_CHECK(called_);
return stats_.capture_start_ntp_time;
}
int AvailableReceiveBandwidth() const {
RTC_CHECK(called_);
return stats_.available_receive_bandwidth;
@ -190,6 +197,17 @@ class MockStatsObserver : public webrtc::StatsObserver {
return v != nullptr;
}
// Looks up |name| in |report| and, if found, parses its string representation
// into |value|. Returns true iff the value was present; |value| is left
// untouched otherwise.
bool GetInt64Value(const StatsReport* report,
StatsReport::StatsValueName name,
int64_t* value) {
const StatsReport::Value* v = report->FindValue(name);
if (v) {
// TODO(tommi): We should really just be using an int here :-/
*value = rtc::FromString<int64_t>(v->ToString());
}
return v != nullptr;
}
bool GetStringValue(const StatsReport* report,
StatsReport::StatsValueName name,
std::string* value) {
@ -208,6 +226,7 @@ class MockStatsObserver : public webrtc::StatsObserver {
audio_input_level = 0;
bytes_received = 0;
bytes_sent = 0;
capture_start_ntp_time = 0;
available_receive_bandwidth = 0;
dtls_cipher.clear();
srtp_cipher.clear();
@ -219,6 +238,7 @@ class MockStatsObserver : public webrtc::StatsObserver {
int audio_input_level;
int bytes_received;
int bytes_sent;
int64_t capture_start_ntp_time;
int available_receive_bandwidth;
std::string dtls_cipher;
std::string srtp_cipher;

View File

@ -153,8 +153,9 @@ void CallTest::RunBaseTest(BaseTest* test) {
test->PerformTest();
task_queue_.SendTask([this]() {
task_queue_.SendTask([this, test]() {
Stop();
test->OnStreamsStopped();
DestroyStreams();
send_transport_.reset();
receive_transport_.reset();
@ -162,8 +163,6 @@ void CallTest::RunBaseTest(BaseTest* test) {
if (num_audio_streams_ > 0)
DestroyVoiceEngines();
});
test->OnTestFinished();
}
void CallTest::CreateCalls(const Call::Config& sender_config,
@ -223,7 +222,7 @@ void CallTest::CreateSendConfig(size_t num_video_streams,
audio_send_config_.rtp.ssrc = kAudioSendSsrc;
audio_send_config_.send_codec_spec =
rtc::Optional<AudioSendStream::Config::SendCodecSpec>(
{kAudioSendPayloadType, {"OPUS", 48000, 2, {{"stereo", "1"}}}});
{kAudioSendPayloadType, {"opus", 48000, 2, {{"stereo", "1"}}}});
audio_send_config_.encoder_factory = encoder_factory_;
}
@ -590,7 +589,7 @@ void BaseTest::OnFrameGeneratorCapturerCreated(
FrameGeneratorCapturer* frame_generator_capturer) {
}
void BaseTest::OnTestFinished() {
void BaseTest::OnStreamsStopped() {
}
SendTest::SendTest(unsigned int timeout_ms) : BaseTest(timeout_ms) {

View File

@ -223,7 +223,7 @@ class BaseTest : public RtpRtcpObserver {
virtual void OnFrameGeneratorCapturerCreated(
FrameGeneratorCapturer* frame_generator_capturer);
virtual void OnTestFinished();
virtual void OnStreamsStopped();
std::unique_ptr<webrtc::RtcEventLog> event_log_;
};

View File

@ -266,10 +266,6 @@ if (rtc_include_tests) {
sources = [
"test/auto_test/automated_mode.cc",
"test/auto_test/fakes/conference_transport.cc",
"test/auto_test/fakes/conference_transport.h",
"test/auto_test/fakes/loudest_filter.cc",
"test/auto_test/fakes/loudest_filter.h",
"test/auto_test/fixtures/after_initialization_fixture.cc",
"test/auto_test/fixtures/after_initialization_fixture.h",
"test/auto_test/fixtures/after_streaming_fixture.cc",
@ -284,7 +280,6 @@ if (rtc_include_tests) {
"test/auto_test/standard/rtp_rtcp_before_streaming_test.cc",
"test/auto_test/standard/rtp_rtcp_extensions.cc",
"test/auto_test/standard/rtp_rtcp_test.cc",
"test/auto_test/voe_conference_test.cc",
"test/auto_test/voe_standard_test.cc",
"test/auto_test/voe_standard_test.h",
"test/auto_test/voe_test_defines.h",

View File

@ -1,307 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/voice_engine/test/auto_test/fakes/conference_transport.h"
#include <string>
#include "webrtc/rtc_base/byteorder.h"
#include "webrtc/rtc_base/timeutils.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/voice_engine/channel_proxy.h"
#include "webrtc/voice_engine/voice_engine_impl.h"
namespace webrtc {
namespace voetest {
namespace {
// SSRCs for the simulated conference: the reflector, the local participant,
// and the first remote participant (later remote streams get consecutive
// values, see AddStream()).
static const unsigned int kReflectorSsrc = 0x0000;
static const unsigned int kLocalSsrc = 0x0001;
static const unsigned int kFirstRemoteSsrc = 0x0002;
// Opus: payload type 120, 48 kHz, 960-sample frames, stereo, 64 kbps.
static const webrtc::CodecInst kCodecInst = {120, "opus", 48000, 960, 2, 64000};
// RTP header extension id used for the audio level indication.
static const int kAudioLevelHeaderId = 1;
// Reads the sender SSRC (at byte offset 4) out of a raw RTCP packet.
// Returns 0 when the packet is too short to contain one.
static unsigned int ParseRtcpSsrc(const void* data, size_t len) {
const size_t ssrc_pos = 4;
unsigned int ssrc = 0;
if (len >= (ssrc_pos + sizeof(ssrc))) {
ssrc = rtc::GetBE32(static_cast<const char*>(data) + ssrc_pos);
}
return ssrc;
}
} // namespace
// Sets up two VoiceEngines - one for the local participant's channels, one
// for the remote/reflector side - creates the local send channel and the
// reflector channel, registers |this| as the external transport for both,
// and starts the packet-dispatch thread.
ConferenceTransport::ConferenceTransport()
: packet_event_(webrtc::EventWrapper::Create()),
thread_(Run, this, "ConferenceTransport"),
rtt_ms_(0),
stream_count_(0),
rtp_header_parser_(webrtc::RtpHeaderParser::Create()) {
rtp_header_parser_->
RegisterRtpHeaderExtension(webrtc::kRtpExtensionAudioLevel,
kAudioLevelHeaderId);
// Local engine: hosts the local sender and all local receive channels.
local_voe_ = webrtc::VoiceEngine::Create();
local_base_ = webrtc::VoEBase::GetInterface(local_voe_);
local_network_ = webrtc::VoENetwork::GetInterface(local_voe_);
local_rtp_rtcp_ = webrtc::VoERTP_RTCP::GetInterface(local_voe_);
local_apm_ = webrtc::AudioProcessing::Create();
local_base_->Init(nullptr, local_apm_.get(), nullptr);
// In principle, we can use one VoiceEngine to achieve the same goal. Well, in
// here, we use two engines to make it more like reality.
remote_voe_ = webrtc::VoiceEngine::Create();
remote_base_ = webrtc::VoEBase::GetInterface(remote_voe_);
remote_codec_ = webrtc::VoECodec::GetInterface(remote_voe_);
remote_network_ = webrtc::VoENetwork::GetInterface(remote_voe_);
remote_rtp_rtcp_ = webrtc::VoERTP_RTCP::GetInterface(remote_voe_);
remote_file_ = webrtc::VoEFile::GetInterface(remote_voe_);
remote_apm_ = webrtc::AudioProcessing::Create();
remote_base_->Init(nullptr, remote_apm_.get(), nullptr);
// Local sender channel: sends with kLocalSsrc and the audio-level extension.
local_sender_ = local_base_->CreateChannel();
static_cast<webrtc::VoiceEngineImpl*>(local_voe_)
->GetChannelProxy(local_sender_)
->RegisterLegacyReceiveCodecs();
EXPECT_EQ(0, local_network_->RegisterExternalTransport(local_sender_, *this));
EXPECT_EQ(0, local_rtp_rtcp_->SetLocalSSRC(local_sender_, kLocalSsrc));
EXPECT_EQ(0, local_rtp_rtcp_->
SetSendAudioLevelIndicationStatus(local_sender_, true,
kAudioLevelHeaderId));
EXPECT_EQ(0, local_base_->StartSend(local_sender_));
// Reflector channel on the remote engine, identified by kReflectorSsrc.
reflector_ = remote_base_->CreateChannel();
static_cast<webrtc::VoiceEngineImpl*>(remote_voe_)
->GetChannelProxy(reflector_)
->RegisterLegacyReceiveCodecs();
EXPECT_EQ(0, remote_network_->RegisterExternalTransport(reflector_, *this));
EXPECT_EQ(0, remote_rtp_rtcp_->SetLocalSSRC(reflector_, kReflectorSsrc));
thread_.Start();
thread_.SetPriority(rtc::kHighPriority);
}
// Tears down all streams, stops the dispatch thread, releases every engine
// sub-interface and finally deletes both VoiceEngines.
ConferenceTransport::~ConferenceTransport() {
// Must stop sending, otherwise DispatchPackets() cannot quit.
EXPECT_EQ(0, remote_network_->DeRegisterExternalTransport(reflector_));
EXPECT_EQ(0, local_network_->DeRegisterExternalTransport(local_sender_));
// Remove remaining conference streams before stopping the thread.
while (!streams_.empty()) {
auto stream = streams_.begin();
RemoveStream(stream->first);
}
thread_.Stop();
// Release sub-interfaces before deleting the engines; Delete() is expected
// to report success (true) for both.
remote_file_->Release();
remote_rtp_rtcp_->Release();
remote_network_->Release();
remote_base_->Release();
local_rtp_rtcp_->Release();
local_network_->Release();
local_base_->Release();
EXPECT_TRUE(webrtc::VoiceEngine::Delete(remote_voe_));
EXPECT_TRUE(webrtc::VoiceEngine::Delete(local_voe_));
}
// External-transport callbacks: outgoing packets are queued and delivered
// later (with simulated RTT delay) by the dispatcher thread.
bool ConferenceTransport::SendRtp(const uint8_t* data,
size_t len,
const webrtc::PacketOptions& options) {
StorePacket(Packet::Rtp, data, len);
return true;
}
bool ConferenceTransport::SendRtcp(const uint8_t* data, size_t len) {
StorePacket(Packet::Rtcp, data, len);
return true;
}
// Maps a remote sender SSRC to the local receive channel created for it in
// AddStream(). Returns -1 when no such stream exists.
int ConferenceTransport::GetReceiverChannelForSsrc(unsigned int sender_ssrc)
const {
rtc::CritScope lock(&stream_crit_);
auto it = streams_.find(sender_ssrc);
if (it != streams_.end()) {
return it->second.second;
}
return -1;
}
// Appends a timestamped copy of the packet to the queue and wakes the
// dispatcher thread.
void ConferenceTransport::StorePacket(Packet::Type type,
const void* data,
size_t len) {
{
rtc::CritScope lock(&pq_crit_);
packet_queue_.push_back(Packet(type, data, len, rtc::TimeMillis()));
}
packet_event_->Set();
}
// This simulates the flow of RTP and RTCP packets. Complications like that
// a packet is first sent to the reflector, and then forwarded to the receiver
// are simplified, in this particular case, to a direct link between the sender
// and the receiver.
void ConferenceTransport::SendPacket(const Packet& packet) {
int destination = -1;
switch (packet.type_) {
case Packet::Rtp: {
webrtc::RTPHeader rtp_header;
rtp_header_parser_->Parse(packet.data_, packet.len_, &rtp_header);
if (rtp_header.ssrc == kLocalSsrc) {
// Local participant's packet goes to the reflector.
remote_network_->ReceivedRTPPacket(reflector_, packet.data_,
packet.len_, webrtc::PacketTime());
} else {
// Remote packet: only forwarded if the loudest filter lets it through.
if (loudest_filter_.ForwardThisPacket(rtp_header)) {
destination = GetReceiverChannelForSsrc(rtp_header.ssrc);
if (destination != -1) {
local_network_->ReceivedRTPPacket(destination, packet.data_,
packet.len_,
webrtc::PacketTime());
}
}
}
break;
}
case Packet::Rtcp: {
// Route RTCP by the sender SSRC embedded in the packet.
unsigned int sender_ssrc = ParseRtcpSsrc(packet.data_, packet.len_);
if (sender_ssrc == kLocalSsrc) {
remote_network_->ReceivedRTCPPacket(reflector_, packet.data_,
packet.len_);
} else if (sender_ssrc == kReflectorSsrc) {
local_network_->ReceivedRTCPPacket(local_sender_, packet.data_,
packet.len_);
} else {
destination = GetReceiverChannelForSsrc(sender_ssrc);
if (destination != -1) {
local_network_->ReceivedRTCPPacket(destination, packet.data_,
packet.len_);
}
}
break;
}
}
}
// Drains the packet queue, delaying each packet by half the configured RTT
// before delivering it. Returns true so the dispatcher thread keeps running.
bool ConferenceTransport::DispatchPackets() {
switch (packet_event_->Wait(1000)) {
case webrtc::kEventSignaled:
break;
case webrtc::kEventTimeout:
// No packets within a second; loop and wait again.
return true;
case webrtc::kEventError:
ADD_FAILURE() << "kEventError encountered.";
return true;
}
while (true) {
Packet packet;
{
rtc::CritScope lock(&pq_crit_);
if (packet_queue_.empty())
break;
packet = packet_queue_.front();
packet_queue_.pop_front();
}
int32_t elapsed_time_ms = rtc::TimeSince(packet.send_time_ms_);
int32_t sleep_ms = rtt_ms_ / 2 - elapsed_time_ms;
if (sleep_ms > 0) {
// Every packet should be delayed by half of RTT.
webrtc::SleepMs(sleep_ms);
}
SendPacket(packet);
}
return true;
}
// Sets the simulated round-trip time; each packet is delayed by rtt_ms / 2.
void ConferenceTransport::SetRtt(unsigned int rtt_ms) {
rtt_ms_ = rtt_ms;
}
// Adds a simulated remote participant playing |file_name| as its microphone
// input: creates a send channel on the remote engine and a matching receive
// channel on the local engine. Returns the new remote SSRC.
unsigned int ConferenceTransport::AddStream(std::string file_name,
webrtc::FileFormats format) {
const int new_sender = remote_base_->CreateChannel();
static_cast<webrtc::VoiceEngineImpl*>(remote_voe_)
->GetChannelProxy(new_sender)
->RegisterLegacyReceiveCodecs();
EXPECT_EQ(0, remote_network_->RegisterExternalTransport(new_sender, *this));
// Each stream gets the next consecutive SSRC after kFirstRemoteSsrc.
const unsigned int remote_ssrc = kFirstRemoteSsrc + stream_count_++;
EXPECT_EQ(0, remote_rtp_rtcp_->SetLocalSSRC(new_sender, remote_ssrc));
EXPECT_EQ(0, remote_rtp_rtcp_->
SetSendAudioLevelIndicationStatus(new_sender, true, kAudioLevelHeaderId));
EXPECT_EQ(0, remote_codec_->SetSendCodec(new_sender, kCodecInst));
EXPECT_EQ(0, remote_base_->StartSend(new_sender));
EXPECT_EQ(0, remote_file_->StartPlayingFileAsMicrophone(
new_sender, file_name.c_str(), true, false, format, 1.0));
const int new_receiver = local_base_->CreateChannel();
static_cast<webrtc::VoiceEngineImpl*>(local_voe_)
->GetChannelProxy(new_receiver)
->RegisterLegacyReceiveCodecs();
EXPECT_EQ(0, local_base_->AssociateSendChannel(new_receiver, local_sender_));
EXPECT_EQ(0, local_network_->RegisterExternalTransport(new_receiver, *this));
// Receive channels have to have the same SSRC in order to send receiver
// reports with this SSRC.
EXPECT_EQ(0, local_rtp_rtcp_->SetLocalSSRC(new_receiver, kLocalSsrc));
{
rtc::CritScope lock(&stream_crit_);
streams_[remote_ssrc] = std::make_pair(new_sender, new_receiver);
}
return remote_ssrc; // remote ssrc used as stream id.
}
bool ConferenceTransport::RemoveStream(unsigned int id) {
rtc::CritScope lock(&stream_crit_);
auto it = streams_.find(id);
if (it == streams_.end()) {
return false;
}
EXPECT_EQ(0, remote_network_->
DeRegisterExternalTransport(it->second.second));
EXPECT_EQ(0, local_network_->
DeRegisterExternalTransport(it->second.first));
EXPECT_EQ(0, remote_base_->DeleteChannel(it->second.second));
EXPECT_EQ(0, local_base_->DeleteChannel(it->second.first));
streams_.erase(it);
return true;
}
// Starts playing out stream |id| on the default device. Returns false when
// no receive channel is associated with |id|.
bool ConferenceTransport::StartPlayout(unsigned int id) {
  const int receive_channel = GetReceiverChannelForSsrc(id);
  if (receive_channel == -1)
    return false;
  EXPECT_EQ(0, local_base_->StartPlayout(receive_channel));
  return true;
}
// Fetches RTCP call statistics for stream |id| into |stats|. Returns false
// when no receive channel is associated with |id|.
bool ConferenceTransport::GetReceiverStatistics(unsigned int id,
                                                webrtc::CallStatistics* stats) {
  const int receive_channel = GetReceiverChannelForSsrc(id);
  if (receive_channel == -1)
    return false;
  EXPECT_EQ(0, local_rtp_rtcp_->GetRTCPStatistics(receive_channel, *stats));
  return true;
}
} // namespace voetest
} // namespace webrtc

View File

@ -1,168 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_TEST_AUTO_TEST_FAKES_CONFERENCE_TRANSPORT_H_
#define WEBRTC_VOICE_ENGINE_TEST_AUTO_TEST_FAKES_CONFERENCE_TRANSPORT_H_
#include <deque>
#include <map>
#include <memory>
#include <utility>
#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/rtc_base/basictypes.h"
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/rtc_base/platform_thread.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/test/gtest.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_codec.h"
#include "webrtc/voice_engine/include/voe_file.h"
#include "webrtc/voice_engine/include/voe_network.h"
#include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
#include "webrtc/voice_engine/test/auto_test/fakes/loudest_filter.h"
namespace webrtc {
namespace voetest {
static const size_t kMaxPacketSizeByte = 1500;
// This class is to simulate a conference call. There are two Voice Engines, one
// for local channels and the other for remote channels. There is a simulated
// reflector, which exchanges RTCP with local channels. For simplicity, it
// also uses the Voice Engine for remote channels. One can add streams by
// calling AddStream(), which creates a remote sender channel and a local
// receive channel. The remote sender channel plays a file as microphone in a
// looped fashion. Received streams are mixed and played.
class ConferenceTransport: public webrtc::Transport {
 public:
  ConferenceTransport();
  virtual ~ConferenceTransport();

  /* SetRtt()
   * Set RTT between local channels and reflector.
   *
   * Input:
   *   rtt_ms : RTT in milliseconds.
   */
  void SetRtt(unsigned int rtt_ms);

  /* AddStream()
   * Adds a stream in the conference.
   *
   * Input:
   *   file_name : name of the file to be added as microphone input.
   *   format : format of the input file.
   *
   * Returns stream id.
   */
  unsigned int AddStream(std::string file_name, webrtc::FileFormats format);

  /* RemoveStream()
   * Removes a stream with specified ID from the conference.
   *
   * Input:
   *   id : stream id.
   *
   * Returns false if the specified stream does not exist, true if succeeds.
   */
  bool RemoveStream(unsigned int id);

  /* StartPlayout()
   * Starts playing out the stream with specified ID, using the default device.
   *
   * Input:
   *   id : stream id.
   *
   * Returns false if the specified stream does not exist, true if succeeds.
   */
  bool StartPlayout(unsigned int id);

  /* GetReceiverStatistics()
   * Gets RTCP statistics of the stream with specified ID.
   *
   * Input:
   *   id : stream id;
   *   stats : pointer to a CallStatistics to store the result.
   *
   * Returns false if the specified stream does not exist, true if succeeds.
   */
  bool GetReceiverStatistics(unsigned int id, webrtc::CallStatistics* stats);

  // Inherit from class webrtc::Transport.
  bool SendRtp(const uint8_t* data,
               size_t len,
               const webrtc::PacketOptions& options) override;
  bool SendRtcp(const uint8_t *data, size_t len) override;

 private:
  // A copy of one RTP or RTCP packet, stamped with its send time so the
  // dispatcher thread can delay delivery to simulate RTT.
  struct Packet {
    enum Type { Rtp, Rtcp, } type_;

    Packet() : len_(0) {}
    Packet(Type type, const void* data, size_t len, int64_t time_ms)
        : type_(type), len_(len), send_time_ms_(time_ms) {
      EXPECT_LE(len_, kMaxPacketSizeByte);
      memcpy(data_, data, len_);
    }

    uint8_t data_[kMaxPacketSizeByte];
    size_t len_;
    int64_t send_time_ms_;
  };

  // Thread entry point; forwards to DispatchPackets().
  static bool Run(void* transport) {
    return static_cast<ConferenceTransport*>(transport)->DispatchPackets();
  }

  int GetReceiverChannelForSsrc(unsigned int sender_ssrc) const;
  void StorePacket(Packet::Type type, const void* data, size_t len);
  void SendPacket(const Packet& packet);
  bool DispatchPackets();

  rtc::CriticalSection pq_crit_;      // Guards |packet_queue_|.
  rtc::CriticalSection stream_crit_;  // Guards |streams_|.
  const std::unique_ptr<webrtc::EventWrapper> packet_event_;
  rtc::PlatformThread thread_;

  unsigned int rtt_ms_;
  unsigned int stream_count_;

  // Maps stream id (remote SSRC) to the channel-id pair
  // (remote sender channel, local receiver channel); see AddStream().
  std::map<unsigned int, std::pair<int, int>> streams_
      RTC_GUARDED_BY(stream_crit_);
  std::deque<Packet> packet_queue_ RTC_GUARDED_BY(pq_crit_);

  int local_sender_;  // Channel Id of local sender
  int reflector_;

  // Engine and sub-API handles for the local side.
  webrtc::VoiceEngine* local_voe_;
  webrtc::VoEBase* local_base_;
  webrtc::VoERTP_RTCP* local_rtp_rtcp_;
  webrtc::VoENetwork* local_network_;
  rtc::scoped_refptr<webrtc::AudioProcessing> local_apm_;

  // Engine and sub-API handles for the remote side.
  webrtc::VoiceEngine* remote_voe_;
  webrtc::VoEBase* remote_base_;
  webrtc::VoECodec* remote_codec_;
  webrtc::VoERTP_RTCP* remote_rtp_rtcp_;
  webrtc::VoENetwork* remote_network_;
  webrtc::VoEFile* remote_file_;
  rtc::scoped_refptr<webrtc::AudioProcessing> remote_apm_;

  // Selects which streams the simulated reflector forwards.
  LoudestFilter loudest_filter_;

  const std::unique_ptr<webrtc::RtpHeaderParser> rtp_header_parser_;
};
} // namespace voetest
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_TEST_AUTO_TEST_FAKES_CONFERENCE_TRANSPORT_H_

View File

@ -1,82 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/voice_engine/test/auto_test/fakes/loudest_filter.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace voetest {
// Drops every tracked stream whose most recent packet is older than
// kStreamTimeOutMs relative to |time_ms|.
void LoudestFilter::RemoveTimeoutStreams(int64_t time_ms) {
  for (auto it = stream_levels_.begin(); it != stream_levels_.end();) {
    const bool timed_out =
        rtc::TimeDiff(time_ms, it->second.last_time_ms) > kStreamTimeOutMs;
    // map::erase returns the iterator following the removed element.
    it = timed_out ? stream_levels_.erase(it) : std::next(it);
  }
}
// Returns the SSRC of the quietest currently-forwarded stream, or 0 when no
// streams are tracked. Note that a larger audio level value corresponds to a
// quieter sound.
unsigned int LoudestFilter::FindQuietestStream() {
  int quietest_level = kInvalidAudioLevel;
  unsigned int quietest_ssrc = 0;
  // Iterate by const reference to avoid copying each map entry
  // (the original range-for copied every pair).
  for (const auto& stream : stream_levels_) {
    // A smaller value of audio level corresponds to a louder sound.
    if (quietest_level == kInvalidAudioLevel ||
        stream.second.audio_level > quietest_level) {
      quietest_level = stream.second.audio_level;
      quietest_ssrc = stream.first;
    }
  }
  return quietest_ssrc;
}
// Decides whether a packet should be forwarded. Packets without an audio
// level extension are always forwarded; otherwise at most kMaxMixSize
// streams are forwarded at a time, and a new stream displaces the quietest
// tracked stream only when it is louder.
bool LoudestFilter::ForwardThisPacket(const webrtc::RTPHeader& rtp_header) {
  int64_t time_now_ms = rtc::TimeMillis();
  // Expire streams that have been silent for too long before deciding.
  RemoveTimeoutStreams(time_now_ms);

  int source_ssrc = rtp_header.ssrc;
  int audio_level = rtp_header.extension.hasAudioLevel ?
      rtp_header.extension.audioLevel : kInvalidAudioLevel;

  if (audio_level == kInvalidAudioLevel) {
    // Always forward streams with unknown audio level, and don't keep their
    // states.
    return true;
  }

  auto it = stream_levels_.find(source_ssrc);
  if (it != stream_levels_.end()) {
    // Stream has been forwarded. Update and continue to forward.
    it->second.audio_level = audio_level;
    it->second.last_time_ms = time_now_ms;
    return true;
  }

  if (stream_levels_.size() < kMaxMixSize) {
    // Room left in the mix: start forwarding this stream.
    stream_levels_[source_ssrc].Set(audio_level, time_now_ms);
    return true;
  }

  unsigned int quietest_ssrc = FindQuietestStream();
  RTC_CHECK_NE(0, quietest_ssrc);
  // A smaller value of audio level corresponds to a louder sound.
  if (audio_level < stream_levels_[quietest_ssrc].audio_level) {
    // New stream is louder than the quietest forwarded one: swap them.
    stream_levels_.erase(quietest_ssrc);
    stream_levels_[source_ssrc].Set(audio_level, time_now_ms);
    return true;
  }
  return false;
}
} // namespace voetest
} // namespace webrtc

View File

@ -1,55 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_VOICE_ENGINE_TEST_AUTO_TEST_FAKES_LOUDEST_FILTER_H_
#define WEBRTC_VOICE_ENGINE_TEST_AUTO_TEST_FAKES_LOUDEST_FILTER_H_
#include <map>
#include "webrtc/common_types.h"
#include "webrtc/rtc_base/timeutils.h"
namespace webrtc {
namespace voetest {
// Simulates a conference reflector's stream selection: forwards only the
// loudest few streams, based on the audio level RTP header extension.
class LoudestFilter {
 public:
  /* ForwardThisPacket()
   * Decide whether to forward a RTP packet, given its header.
   *
   * Input:
   *   rtp_header : Header of the RTP packet of interest.
   */
  bool ForwardThisPacket(const webrtc::RTPHeader& rtp_header);

 private:
  // Last known audio level and packet arrival time of one tracked stream.
  struct Status {
    void Set(int audio_level, int64_t last_time_ms) {
      this->audio_level = audio_level;
      this->last_time_ms = last_time_ms;
    }
    int audio_level;       // Level from the RTP header extension.
    int64_t last_time_ms;  // Arrival time of the stream's last packet.
  };

  // Drops streams with no packet for kStreamTimeOutMs before |time_ms|.
  void RemoveTimeoutStreams(int64_t time_ms);
  // Returns the SSRC of the quietest tracked stream (0 when none).
  unsigned int FindQuietestStream();

  // Keeps the streams being forwarded in pair<SSRC, Status>.
  std::map<unsigned int, Status> stream_levels_;

  const int32_t kStreamTimeOutMs = 5000;  // Stream inactivity timeout.
  const size_t kMaxMixSize = 3;           // Max streams forwarded at once.
  const int kInvalidAudioLevel = 128;     // Sentinel: level unknown.
};
} // namespace voetest
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_TEST_AUTO_TEST_FAKES_LOUDEST_FILTER_H_

View File

@ -1,179 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <queue>
#include "webrtc/rtc_base/format_macros.h"
#include "webrtc/rtc_base/timeutils.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/test/gtest.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/voice_engine/test/auto_test/fakes/conference_transport.h"
namespace webrtc {
namespace {
const int kRttMs = 25;
// Returns true iff |comp| is within |error| of |ref| (inclusive), i.e.
// |ref - comp| <= error.
bool IsNear(int ref, int comp, int error) {
  // The previous implementation tested "ref - comp <= error" twice (its two
  // conditions were algebraically identical), so any |comp| larger than
  // |ref| passed regardless of |error|.
  return std::abs(ref - comp) <= error;
}
// Writes one second of 16-bit PCM silence (all-zero samples) at
// |sample_rate_hz| samples/second to |silence_file|. One second is enough
// because callers play the file in a loop.
void CreateSilenceFile(const std::string& silence_file, int sample_rate_hz) {
  FILE* fid = fopen(silence_file.c_str(), "wb");
  // Guard against fopen failure; the original dereferenced a null FILE*.
  if (fid == nullptr)
    return;
  int16_t zero = 0;
  for (int i = 0; i < sample_rate_hz; ++i) {
    // Write 1 second, but it does not matter since the file will be looped.
    fwrite(&zero, sizeof(int16_t), 1, fid);
  }
  fclose(fid);
}
} // namespace
namespace voetest {
// Verifies that receiver-side RTT estimates converge to the simulated RTT,
// and that capture-start NTP times reflect when each stream started playing
// out (checked indirectly via the delta between two streams' start times).
TEST(VoeConferenceTest, RttAndStartNtpTime) {
  // Snapshot of the values checked each polling interval, kept for
  // diagnostics when the test fails.
  struct Stats {
    Stats(int64_t rtt_receiver_1, int64_t rtt_receiver_2, int64_t ntp_delay)
        : rtt_receiver_1_(rtt_receiver_1),
          rtt_receiver_2_(rtt_receiver_2),
          ntp_delay_(ntp_delay) {
    }
    int64_t rtt_receiver_1_;
    int64_t rtt_receiver_2_;
    int64_t ntp_delay_;
  };

  const std::string input_file =
      webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
  const webrtc::FileFormats kInputFormat = webrtc::kFileFormatPcm32kHzFile;

  // Deliberate gap between the two playout starts; must show up as the
  // difference between the streams' capture-start NTP times.
  const int kDelayMs = 987;
  ConferenceTransport trans;
  trans.SetRtt(kRttMs);

  unsigned int id_1 = trans.AddStream(input_file, kInputFormat);
  unsigned int id_2 = trans.AddStream(input_file, kInputFormat);

  EXPECT_TRUE(trans.StartPlayout(id_1));
  // Start NTP time is the time when a stream is played out, rather than
  // when it is added.
  webrtc::SleepMs(kDelayMs);
  EXPECT_TRUE(trans.StartPlayout(id_2));

  const int kMaxRunTimeMs = 25000;
  const int kNeedSuccessivePass = 3;
  const int kStatsRequestIntervalMs = 1000;
  const int kStatsBufferSize = 3;

  int64_t deadline = rtc::TimeAfter(kMaxRunTimeMs);
  // Run the following up to |kMaxRunTimeMs| milliseconds.
  int successive_pass = 0;
  webrtc::CallStatistics stats_1;
  webrtc::CallStatistics stats_2;
  std::queue<Stats> stats_buffer;

  // Poll statistics once per second until the estimates have been within
  // tolerance for kNeedSuccessivePass consecutive polls, or time runs out.
  while (rtc::TimeMillis() < deadline &&
         successive_pass < kNeedSuccessivePass) {
    webrtc::SleepMs(kStatsRequestIntervalMs);

    EXPECT_TRUE(trans.GetReceiverStatistics(id_1, &stats_1));
    EXPECT_TRUE(trans.GetReceiverStatistics(id_2, &stats_2));

    // It is not easy to verify the NTP time directly. We verify it by testing
    // the difference of two start NTP times.
    int64_t captured_start_ntp_delay = stats_2.capture_start_ntp_time_ms_ -
        stats_1.capture_start_ntp_time_ms_;

    // For the checks of RTT and start NTP time, We allow 10% accuracy.
    if (IsNear(kRttMs, stats_1.rttMs, kRttMs / 10 + 1) &&
        IsNear(kRttMs, stats_2.rttMs, kRttMs / 10 + 1) &&
        IsNear(kDelayMs, captured_start_ntp_delay, kDelayMs / 10 + 1)) {
      successive_pass++;
    } else {
      successive_pass = 0;
    }

    // Keep only the last kStatsBufferSize snapshots for failure diagnostics.
    if (stats_buffer.size() >= kStatsBufferSize) {
      stats_buffer.pop();
    }
    stats_buffer.push(Stats(stats_1.rttMs, stats_2.rttMs,
                            captured_start_ntp_delay));
  }

  EXPECT_GE(successive_pass, kNeedSuccessivePass) << "Expected to get RTT and"
      " start NTP time estimate within 10% of the correct value over "
      << kStatsRequestIntervalMs * kNeedSuccessivePass / 1000
      << " seconds.";

  // On failure, dump the most recent snapshots to aid debugging.
  if (successive_pass < kNeedSuccessivePass) {
    printf("The most recent values (RTT for receiver 1, RTT for receiver 2, "
           "NTP delay between receiver 1 and 2) are (from oldest):\n");
    while (!stats_buffer.empty()) {
      Stats stats = stats_buffer.front();
      printf("(%" PRId64 ", %" PRId64 ", %" PRId64 ")\n", stats.rtt_receiver_1_,
             stats.rtt_receiver_2_, stats.ntp_delay_);
      stats_buffer.pop();
    }
  }
}
// Verifies the loudest-stream filtering: with four streams and a mix size of
// three, the silent stream should be filtered out (near-zero packets
// received) while the three audible streams receive roughly all packets.
TEST(VoeConferenceTest, ReceivedPackets) {
  const int kPackets = 50;
  const int kPacketDurationMs = 20;  // Correspond to Opus.

  const std::string input_file =
      webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
  const webrtc::FileFormats kInputFormat = webrtc::kFileFormatPcm32kHzFile;

  const std::string silence_file =
      webrtc::test::TempFilename(webrtc::test::OutputPath(), "silence");
  CreateSilenceFile(silence_file, 32000);

  // Scope the transport so it is torn down before the temp file is removed.
  {
    ConferenceTransport trans;
    // Add silence to stream 0, so that it will be filtered out.
    unsigned int id_0 = trans.AddStream(silence_file, kInputFormat);
    unsigned int id_1 = trans.AddStream(input_file, kInputFormat);
    unsigned int id_2 = trans.AddStream(input_file, kInputFormat);
    unsigned int id_3 = trans.AddStream(input_file, kInputFormat);

    EXPECT_TRUE(trans.StartPlayout(id_0));
    EXPECT_TRUE(trans.StartPlayout(id_1));
    EXPECT_TRUE(trans.StartPlayout(id_2));
    EXPECT_TRUE(trans.StartPlayout(id_3));

    // Let roughly |kPackets| packets flow.
    webrtc::SleepMs(kPacketDurationMs * kPackets);

    webrtc::CallStatistics stats_0;
    webrtc::CallStatistics stats_1;
    webrtc::CallStatistics stats_2;
    webrtc::CallStatistics stats_3;
    EXPECT_TRUE(trans.GetReceiverStatistics(id_0, &stats_0));
    EXPECT_TRUE(trans.GetReceiverStatistics(id_1, &stats_1));
    EXPECT_TRUE(trans.GetReceiverStatistics(id_2, &stats_2));
    EXPECT_TRUE(trans.GetReceiverStatistics(id_3, &stats_3));

    // We expect stream 0 to be filtered out totally, but since it may join the
    // call earlier than other streams and the beginning packets might have got
    // through. So we only expect |packetsReceived| to be close to zero.
    EXPECT_NEAR(stats_0.packetsReceived, 0, 2);
    // We expect |packetsReceived| to match |kPackets|, but the actual value
    // depends on the sleep timer. So we allow a small off from |kPackets|.
    EXPECT_NEAR(stats_1.packetsReceived, kPackets, 2);
    EXPECT_NEAR(stats_2.packetsReceived, kPackets, 2);
    EXPECT_NEAR(stats_3.packetsReceived, kPackets, 2);
  }

  remove(silence_file.c_str());
}
} // namespace voetest
} // namespace webrtc