diff --git a/webrtc/api/test/fakeaudiocapturemodule_unittest.cc b/webrtc/api/test/fakeaudiocapturemodule_unittest.cc
index d0dcd85012..53fb5f2036 100644
--- a/webrtc/api/test/fakeaudiocapturemodule_unittest.cc
+++ b/webrtc/api/test/fakeaudiocapturemodule_unittest.cc
@@ -62,6 +62,21 @@ class FakeAdmTest : public testing::Test,
     return 0;
   }
 
+  void PushCaptureData(int voe_channel,
+                       const void* audio_data,
+                       int bits_per_sample,
+                       int sample_rate,
+                       size_t number_of_channels,
+                       size_t number_of_frames) override {}
+
+  void PullRenderData(int bits_per_sample,
+                      int sample_rate,
+                      size_t number_of_channels,
+                      size_t number_of_frames,
+                      void* audio_data,
+                      int64_t* elapsed_time_ms,
+                      int64_t* ntp_time_ms) override {}
+
   // ADM is pulling data.
   int32_t NeedMorePlayData(const size_t nSamples,
                            const size_t nBytesPerSample,
diff --git a/webrtc/media/engine/webrtcvoiceengine.cc b/webrtc/media/engine/webrtcvoiceengine.cc
index be1888eea3..afe0975c07 100644
--- a/webrtc/media/engine/webrtcvoiceengine.cc
+++ b/webrtc/media/engine/webrtcvoiceengine.cc
@@ -1218,12 +1218,9 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
     RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread());
     RTC_DCHECK(audio_capture_thread_checker_.CalledOnValidThread());
     RTC_DCHECK(voe_audio_transport_);
-    voe_audio_transport_->OnData(config_.voe_channel_id,
-                                 audio_data,
-                                 bits_per_sample,
-                                 sample_rate,
-                                 number_of_channels,
-                                 number_of_frames);
+    voe_audio_transport_->PushCaptureData(config_.voe_channel_id, audio_data,
+                                          bits_per_sample, sample_rate,
+                                          number_of_channels, number_of_frames);
   }
 
   // Callback from the |source_| when it is going away. In case Start() has
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index 48944f5772..78c83e9830 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -390,6 +390,7 @@ class MockAudioTransport : public AudioTransport {
                              const uint32_t currentMicLevel,
                              const bool keyPressed,
                              uint32_t& newMicLevel));
+
   MOCK_METHOD8(NeedMorePlayData,
                int32_t(const size_t nSamples,
                        const size_t nBytesPerSample,
@@ -400,6 +401,23 @@ class MockAudioTransport : public AudioTransport {
                        int64_t* elapsed_time_ms,
                        int64_t* ntp_time_ms));
 
+  MOCK_METHOD6(PushCaptureData,
+               void(int voe_channel,
+                    const void* audio_data,
+                    int bits_per_sample,
+                    int sample_rate,
+                    size_t number_of_channels,
+                    size_t number_of_frames));
+
+  MOCK_METHOD7(PullRenderData,
+               void(int bits_per_sample,
+                    int sample_rate,
+                    size_t number_of_channels,
+                    size_t number_of_frames,
+                    void* audio_data,
+                    int64_t* elapsed_time_ms,
+                    int64_t* ntp_time_ms));
+
   // Set default actions of the mock object. We are delegating to fake
   // implementations (of AudioStreamInterface) here.
   void HandleCallbacks(EventWrapper* test_is_done,
diff --git a/webrtc/modules/audio_device/include/audio_device_defines.h b/webrtc/modules/audio_device/include/audio_device_defines.h
index b847729f05..ccc263c57b 100644
--- a/webrtc/modules/audio_device/include/audio_device_defines.h
+++ b/webrtc/modules/audio_device/include/audio_device_defines.h
@@ -66,58 +66,16 @@ class AudioTransport {
                                    int64_t* elapsed_time_ms,
                                    int64_t* ntp_time_ms) = 0;
 
-  // Method to pass captured data directly and unmixed to network channels.
-  // |channel_ids| contains a list of VoE channels which are the
-  // sinks to the capture data. |audio_delay_milliseconds| is the sum of
-  // recording delay and playout delay of the hardware. |current_volume| is
-  // in the range of [0, 255], representing the current microphone analog
-  // volume. |key_pressed| is used by the typing detection.
-  // |need_audio_processing| specify if the data needs to be processed by APM.
-  // Currently WebRtc supports only one APM, and Chrome will make sure only
-  // one stream goes through APM. When |need_audio_processing| is false, the
-  // values of |audio_delay_milliseconds|, |current_volume| and |key_pressed|
-  // will be ignored.
-  // The return value is the new microphone volume, in the range of |0, 255].
-  // When the volume does not need to be updated, it returns 0.
-  // TODO(xians): Remove this interface after Chrome and Libjingle switches
-  // to OnData().
-  virtual int OnDataAvailable(const int voe_channels[],
-                              size_t number_of_voe_channels,
-                              const int16_t* audio_data,
-                              int sample_rate,
-                              size_t number_of_channels,
-                              size_t number_of_frames,
-                              int audio_delay_milliseconds,
-                              int current_volume,
-                              bool key_pressed,
-                              bool need_audio_processing) {
-    return 0;
-  }
-
-  // Method to pass the captured audio data to the specific VoE channel.
-  // |voe_channel| is the id of the VoE channel which is the sink to the
-  // capture data.
-  // TODO(xians): Remove this interface after Libjingle switches to
-  // PushCaptureData().
-  virtual void OnData(int voe_channel,
-                      const void* audio_data,
-                      int bits_per_sample,
-                      int sample_rate,
-                      size_t number_of_channels,
-                      size_t number_of_frames) {}
-
   // Method to push the captured audio data to the specific VoE channel.
   // The data will not undergo audio processing.
   // |voe_channel| is the id of the VoE channel which is the sink to the
   // capture data.
-  // TODO(xians): Make the interface pure virtual after Libjingle
-  // has its implementation.
   virtual void PushCaptureData(int voe_channel,
                                const void* audio_data,
                                int bits_per_sample,
                                int sample_rate,
                                size_t number_of_channels,
-                               size_t number_of_frames) {}
+                               size_t number_of_frames) = 0;
 
   // Method to pull mixed render audio data from all active VoE channels.
   // The data will not be passed as reference for audio processing internally.
@@ -129,7 +87,7 @@ class AudioTransport {
                              size_t number_of_frames,
                              void* audio_data,
                              int64_t* elapsed_time_ms,
-                             int64_t* ntp_time_ms) {}
+                             int64_t* ntp_time_ms) = 0;
 
  protected:
   virtual ~AudioTransport() {}
diff --git a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
index ec10119a11..96081d0f38 100644
--- a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
+++ b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -380,6 +380,7 @@ class MockAudioTransport : public AudioTransport {
                              const uint32_t currentMicLevel,
                              const bool keyPressed,
                              uint32_t& newMicLevel));
+
   MOCK_METHOD8(NeedMorePlayData,
                int32_t(const size_t nSamples,
                        const size_t nBytesPerSample,
@@ -390,6 +391,23 @@ class MockAudioTransport : public AudioTransport {
                        int64_t* elapsed_time_ms,
                        int64_t* ntp_time_ms));
 
+  MOCK_METHOD6(PushCaptureData,
+               void(int voe_channel,
+                    const void* audio_data,
+                    int bits_per_sample,
+                    int sample_rate,
+                    size_t number_of_channels,
+                    size_t number_of_frames));
+
+  MOCK_METHOD7(PullRenderData,
+               void(int bits_per_sample,
+                    int sample_rate,
+                    size_t number_of_channels,
+                    size_t number_of_frames,
+                    void* audio_data,
+                    int64_t* elapsed_time_ms,
+                    int64_t* ntp_time_ms));
+
   // Set default actions of the mock object. We are delegating to fake
   // implementations (of AudioStreamInterface) here.
   void HandleCallbacks(EventWrapper* test_is_done,
diff --git a/webrtc/modules/audio_device/test/audio_device_test_api.cc b/webrtc/modules/audio_device/test/audio_device_test_api.cc
index dad42a0c0b..1b3c428067 100644
--- a/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -54,14 +54,14 @@ class AudioEventObserverAPI: public AudioDeviceObserver {
         warning_(kRecordingWarning),
         audio_device_(audioDevice) {}
 
-  ~AudioEventObserverAPI() {}
+  ~AudioEventObserverAPI() override {}
 
-  virtual void OnErrorIsReported(const ErrorCode error) {
+  void OnErrorIsReported(const ErrorCode error) override {
     TEST_LOG("\n[*** ERROR ***] => OnErrorIsReported(%d)\n\n", error);
     error_ = error;
   }
 
-  virtual void OnWarningIsReported(const WarningCode warning) {
+  void OnWarningIsReported(const WarningCode warning) override {
     TEST_LOG("\n[*** WARNING ***] => OnWarningIsReported(%d)\n\n", warning);
     warning_ = warning;
     EXPECT_EQ(0, audio_device_->StopRecording());
@@ -82,7 +82,7 @@ class AudioTransportAPI: public AudioTransport {
         play_count_(0) {
   }
 
-  ~AudioTransportAPI() {}
+  ~AudioTransportAPI() override {}
 
   int32_t RecordedDataIsAvailable(const void* audioSamples,
                                   const size_t nSamples,
@@ -130,6 +130,21 @@ class AudioTransportAPI: public AudioTransport {
     return 0;
   }
 
+  void PushCaptureData(int voe_channel,
+                       const void* audio_data,
+                       int bits_per_sample,
+                       int sample_rate,
+                       size_t number_of_channels,
+                       size_t number_of_frames) override {}
+
+  void PullRenderData(int bits_per_sample,
+                      int sample_rate,
+                      size_t number_of_channels,
+                      size_t number_of_frames,
+                      void* audio_data,
+                      int64_t* elapsed_time_ms,
+                      int64_t* ntp_time_ms) override {}
+
  private:
   uint32_t rec_count_;
   uint32_t play_count_;
@@ -139,7 +154,7 @@ class AudioDeviceAPITest: public testing::Test {
 protected:
   AudioDeviceAPITest() {}
 
-  virtual ~AudioDeviceAPITest() {}
+  ~AudioDeviceAPITest() override {}
 
   static void SetUpTestCase() {
     process_thread_ = ProcessThread::Create("ProcessThread");
@@ -258,7 +273,7 @@ class AudioDeviceAPITest: public testing::Test {
     PRINT_TEST_RESULTS;
   }
 
-  void SetUp() {
+  void SetUp() override {
    if (linux_alsa_) {
      FAIL() << "API Test is not available on ALSA on Linux!";
    }
@@ -266,9 +281,7 @@ class AudioDeviceAPITest: public testing::Test {
     EXPECT_TRUE(audio_device_->Initialized());
   }
 
-  void TearDown() {
-    EXPECT_EQ(0, audio_device_->Terminate());
-  }
+  void TearDown() override { EXPECT_EQ(0, audio_device_->Terminate()); }
 
   void CheckVolume(uint32_t expected, uint32_t actual) {
     // Mac and Windows have lower resolution on the volume settings.
diff --git a/webrtc/modules/audio_device/test/func_test_manager.cc b/webrtc/modules/audio_device/test/func_test_manager.cc
index 6e58841b22..da79688d8b 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -563,6 +563,21 @@ int32_t AudioTransportImpl::NeedMorePlayData(
     return 0;
 }
 
+void AudioTransportImpl::PushCaptureData(int voe_channel,
+                                         const void* audio_data,
+                                         int bits_per_sample,
+                                         int sample_rate,
+                                         size_t number_of_channels,
+                                         size_t number_of_frames) {}
+
+void AudioTransportImpl::PullRenderData(int bits_per_sample,
+                                        int sample_rate,
+                                        size_t number_of_channels,
+                                        size_t number_of_frames,
+                                        void* audio_data,
+                                        int64_t* elapsed_time_ms,
+                                        int64_t* ntp_time_ms) {}
+
 FuncTestManager::FuncTestManager()
     : _audioDevice(NULL),
       _audioEventObserver(NULL),
diff --git a/webrtc/modules/audio_device/test/func_test_manager.h b/webrtc/modules/audio_device/test/func_test_manager.h
index 2b8a19beb1..d81bf03131 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.h
+++ b/webrtc/modules/audio_device/test/func_test_manager.h
@@ -106,6 +106,21 @@ public:
                              int64_t* elapsed_time_ms,
                              int64_t* ntp_time_ms) override;
 
+    void PushCaptureData(int voe_channel,
+                         const void* audio_data,
+                         int bits_per_sample,
+                         int sample_rate,
+                         size_t number_of_channels,
+                         size_t number_of_frames) override;
+
+    void PullRenderData(int bits_per_sample,
+                        int sample_rate,
+                        size_t number_of_channels,
+                        size_t number_of_frames,
+                        void* audio_data,
+                        int64_t* elapsed_time_ms,
+                        int64_t* ntp_time_ms) override;
+
     AudioTransportImpl(AudioDeviceModule* audioDevice);
     ~AudioTransportImpl();
 
diff --git a/webrtc/voice_engine/voe_base_impl.cc b/webrtc/voice_engine/voe_base_impl.cc
index bfc85fc02a..9c917b5881 100644
--- a/webrtc/voice_engine/voe_base_impl.cc
+++ b/webrtc/voice_engine/voe_base_impl.cc
@@ -107,43 +107,6 @@ int32_t VoEBaseImpl::NeedMorePlayData(const size_t nSamples,
   return 0;
 }
 
-int VoEBaseImpl::OnDataAvailable(const int voe_channels[],
-                                 size_t number_of_voe_channels,
-                                 const int16_t* audio_data, int sample_rate,
-                                 size_t number_of_channels,
-                                 size_t number_of_frames,
-                                 int audio_delay_milliseconds, int volume,
-                                 bool key_pressed, bool need_audio_processing) {
-  if (number_of_voe_channels == 0) return 0;
-
-  if (need_audio_processing) {
-    return ProcessRecordedDataWithAPM(
-        voe_channels, number_of_voe_channels, audio_data, sample_rate,
-        number_of_channels, number_of_frames, audio_delay_milliseconds, 0,
-        volume, key_pressed);
-  }
-
-  // No need to go through the APM, demultiplex the data to each VoE channel,
-  // encode and send to the network.
-  for (size_t i = 0; i < number_of_voe_channels; ++i) {
-    // TODO(ajm): In the case where multiple channels are using the same codec
-    // rate, this path needlessly does extra conversions. We should convert once
-    // and share between channels.
-    PushCaptureData(voe_channels[i], audio_data, 16, sample_rate,
-                    number_of_channels, number_of_frames);
-  }
-
-  // Return 0 to indicate no need to change the volume.
-  return 0;
-}
-
-void VoEBaseImpl::OnData(int voe_channel, const void* audio_data,
-                         int bits_per_sample, int sample_rate,
-                         size_t number_of_channels, size_t number_of_frames) {
-  PushCaptureData(voe_channel, audio_data, bits_per_sample, sample_rate,
-                  number_of_channels, number_of_frames);
-}
-
 void VoEBaseImpl::PushCaptureData(int voe_channel, const void* audio_data,
                                   int bits_per_sample, int sample_rate,
                                   size_t number_of_channels,
diff --git a/webrtc/voice_engine/voe_base_impl.h b/webrtc/voice_engine/voe_base_impl.h
index 192518a025..93e1cf5e86 100644
--- a/webrtc/voice_engine/voe_base_impl.h
+++ b/webrtc/voice_engine/voe_base_impl.h
@@ -78,22 +78,6 @@ class VoEBaseImpl : public VoEBase,
                        size_t& nSamplesOut,
                        int64_t* elapsed_time_ms,
                        int64_t* ntp_time_ms) override;
-  int OnDataAvailable(const int voe_channels[],
-                      size_t number_of_voe_channels,
-                      const int16_t* audio_data,
-                      int sample_rate,
-                      size_t number_of_channels,
-                      size_t number_of_frames,
-                      int audio_delay_milliseconds,
-                      int current_volume,
-                      bool key_pressed,
-                      bool need_audio_processing) override;
-  void OnData(int voe_channel,
-              const void* audio_data,
-              int bits_per_sample,
-              int sample_rate,
-              size_t number_of_channels,
-              size_t number_of_frames) override;
   void PushCaptureData(int voe_channel,
                        const void* audio_data,
                        int bits_per_sample,
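Because PushCaptureData() and PullRenderData() become pure virtual on webrtc::AudioTransport, any AudioTransport subclass outside the files touched here needs matching overrides before it compiles again. A minimal sketch of the required no-op stubs follows; the class name FakeAudioTransport is hypothetical, the signatures are copied from audio_device_defines.h above, and the pre-existing pure-virtual callbacks are elided.

// Hypothetical out-of-tree implementer of webrtc::AudioTransport, showing the
// overrides that this change makes mandatory. Not part of the patch.
#include "webrtc/modules/audio_device/include/audio_device_defines.h"

class FakeAudioTransport : public webrtc::AudioTransport {
 public:
  // RecordedDataIsAvailable() and NeedMorePlayData() overrides unchanged...

  // Newly pure virtual: captured audio pushed to one VoE channel, unprocessed.
  void PushCaptureData(int voe_channel,
                       const void* audio_data,
                       int bits_per_sample,
                       int sample_rate,
                       size_t number_of_channels,
                       size_t number_of_frames) override {}

  // Newly pure virtual: mixed render audio pulled from all active VoE channels.
  void PullRenderData(int bits_per_sample,
                      int sample_rate,
                      size_t number_of_channels,
                      size_t number_of_frames,
                      void* audio_data,
                      int64_t* elapsed_time_ms,
                      int64_t* ntp_time_ms) override {}
};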