Remove VoEAudioProcessing interface.
BUG=webrtc:4690
Review-Url: https://codereview.webrtc.org/2738543002
Cr-Commit-Position: refs/heads/master@{#17185}
parent ca37cf6691
commit fe7dd6d9ff
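For context, a minimal sketch (not part of this commit) of what replaces the removed sub-API: callers configure the AudioProcessing module directly, using the same component calls that VoEAudioProcessingImpl forwarded to (visible in the deleted voe_audio_processing_impl.cc below). The helper name ConfigureApm and the assumption that the application holds a webrtc::AudioProcessing* are hypothetical.

#include "webrtc/modules/audio_processing/include/audio_processing.h"

// Sketch only: apply the settings the VoE wrapper used to manage, directly
// on the APM. `apm` is assumed to be the application's AudioProcessing
// instance.
void ConfigureApm(webrtc::AudioProcessing* apm) {
  // Roughly SetNsStatus(true, kNsHighSuppression).
  apm->noise_suppression()->set_level(webrtc::NoiseSuppression::kHigh);
  apm->noise_suppression()->Enable(true);
  // Roughly SetAgcStatus(true, kAgcAdaptiveDigital).
  apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveDigital);
  apm->gain_control()->Enable(true);
  // Roughly SetEcStatus(true, kEcAec).
  apm->echo_cancellation()->set_suppression_level(
      webrtc::EchoCancellation::kModerateSuppression);
  apm->echo_cancellation()->Enable(true);
}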
@@ -76,46 +76,6 @@ class MockVoiceEngine : public VoiceEngineImpl {
    return std::unique_ptr<voe::ChannelProxy>(ChannelProxyFactory(channel_id));
  }

  // VoEAudioProcessing
  MOCK_METHOD2(SetNsStatus, int(bool enable, NsModes mode));
  MOCK_METHOD2(GetNsStatus, int(bool& enabled, NsModes& mode));
  MOCK_METHOD2(SetAgcStatus, int(bool enable, AgcModes mode));
  MOCK_METHOD2(GetAgcStatus, int(bool& enabled, AgcModes& mode));
  MOCK_METHOD1(SetAgcConfig, int(AgcConfig config));
  MOCK_METHOD1(GetAgcConfig, int(AgcConfig& config));
  MOCK_METHOD2(SetEcStatus, int(bool enable, EcModes mode));
  MOCK_METHOD2(GetEcStatus, int(bool& enabled, EcModes& mode));
  MOCK_METHOD1(EnableDriftCompensation, int(bool enable));
  MOCK_METHOD0(DriftCompensationEnabled, bool());
  MOCK_METHOD1(SetDelayOffsetMs, void(int offset));
  MOCK_METHOD0(DelayOffsetMs, int());
  MOCK_METHOD2(SetAecmMode, int(AecmModes mode, bool enableCNG));
  MOCK_METHOD2(GetAecmMode, int(AecmModes& mode, bool& enabledCNG));
  MOCK_METHOD1(EnableHighPassFilter, int(bool enable));
  MOCK_METHOD0(IsHighPassFilterEnabled, bool());
  MOCK_METHOD1(VoiceActivityIndicator, int(int channel));
  MOCK_METHOD1(SetEcMetricsStatus, int(bool enable));
  MOCK_METHOD1(GetEcMetricsStatus, int(bool& enabled));
  MOCK_METHOD4(GetEchoMetrics, int(int& ERL, int& ERLE, int& RERL, int& A_NLP));
  MOCK_METHOD3(GetEcDelayMetrics,
               int(int& delay_median,
                   int& delay_std,
                   float& fraction_poor_delays));
  MOCK_METHOD1(StartDebugRecording, int(const char* fileNameUTF8));
  MOCK_METHOD1(StartDebugRecording, int(FILE* file_handle));
  MOCK_METHOD0(StopDebugRecording, int());
  MOCK_METHOD1(SetTypingDetectionStatus, int(bool enable));
  MOCK_METHOD1(GetTypingDetectionStatus, int(bool& enabled));
  MOCK_METHOD1(TimeSinceLastTyping, int(int& seconds));
  MOCK_METHOD5(SetTypingDetectionParameters,
               int(int timeWindow,
                   int costPerTyping,
                   int reportingThreshold,
                   int penaltyDecay,
                   int typeEventDelay));
  MOCK_METHOD1(EnableStereoChannelSwapping, void(bool enable));
  MOCK_METHOD0(IsStereoChannelSwappingEnabled, bool());

  // VoEBase
  MOCK_METHOD1(RegisterVoiceEngineObserver, int(VoiceEngineObserver& observer));
  MOCK_METHOD0(DeRegisterVoiceEngineObserver, int());

@@ -74,7 +74,6 @@ rtc_static_library("voice_engine") {
    "channel_manager.h",
    "channel_proxy.cc",
    "channel_proxy.h",
    "include/voe_audio_processing.h",
    "include/voe_base.h",
    "include/voe_codec.h",
    "include/voe_errors.h",
@@ -96,8 +95,6 @@ rtc_static_library("voice_engine") {
    "transport_feedback_packet_loss_tracker.h",
    "utility.cc",
    "utility.h",
    "voe_audio_processing_impl.cc",
    "voe_audio_processing_impl.h",
    "voe_base_impl.cc",
    "voe_base_impl.h",
    "voe_codec_impl.cc",
@@ -209,7 +206,6 @@ if (rtc_include_tests) {
      "file_player_unittests.cc",
      "transport_feedback_packet_loss_tracker_unittest.cc",
      "utility_unittest.cc",
      "voe_audio_processing_unittest.cc",
      "voe_base_unittest.cc",
      "voe_codec_unittest.cc",
      "voe_network_unittest.cc",
@@ -262,8 +258,6 @@ if (rtc_include_tests) {

    sources = [
      "test/auto_test/automated_mode.cc",
      "test/auto_test/extended/agc_config_test.cc",
      "test/auto_test/extended/ec_metrics_test.cc",
      "test/auto_test/fakes/conference_transport.cc",
      "test/auto_test/fakes/conference_transport.h",
      "test/auto_test/fakes/loudest_filter.cc",
@@ -276,7 +270,6 @@ if (rtc_include_tests) {
      "test/auto_test/fixtures/before_initialization_fixture.h",
      "test/auto_test/fixtures/before_streaming_fixture.cc",
      "test/auto_test/fixtures/before_streaming_fixture.h",
      "test/auto_test/standard/audio_processing_test.cc",
      "test/auto_test/standard/codec_before_streaming_test.cc",
      "test/auto_test/standard/codec_test.cc",
      "test/auto_test/standard/dtmf_test.cc",

@@ -446,15 +446,6 @@ int32_t Channel::SendData(FrameType frameType,
  return 0;
}

int32_t Channel::InFrameType(FrameType frame_type) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
               "Channel::InFrameType(frame_type=%d)", frame_type);

  rtc::CritScope cs(&_callbackCritSect);
  _sendFrameType = (frame_type == kAudioFrameSpeech);
  return 0;
}

bool Channel::SendRtp(const uint8_t* data,
                      size_t len,
                      const PacketOptions& options) {
@@ -893,7 +884,6 @@ Channel::Channel(int32_t channelId,
      _voiceEngineObserverPtr(NULL),
      _callbackCritSectPtr(NULL),
      _transportPtr(NULL),
      _sendFrameType(0),
      input_mute_(false),
      previous_frame_muted_(false),
      _outputGain(1.0f),
@@ -1026,10 +1016,7 @@ int32_t Channel::Init() {
  // RTCP is enabled by default.
  _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);
  // --- Register all permanent callbacks
  const bool fail = (audio_coding_->RegisterTransportCallback(this) == -1) ||
                    (audio_coding_->RegisterVADCallback(this) == -1);

  if (fail) {
  if (audio_coding_->RegisterTransportCallback(this) == -1) {
    _engineStatisticsPtr->SetLastError(
        VE_CANNOT_INIT_CHANNEL, kTraceError,
        "Channel::Init() callbacks not registered");
@@ -2294,11 +2281,6 @@ int Channel::SetSendTelephoneEventPayloadType(int payload_type,
  return 0;
}

int Channel::VoiceActivityIndicator(int& activity) {
  activity = _sendFrameType;
  return 0;
}

int Channel::SetLocalSSRC(unsigned int ssrc) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
               "Channel::SetLocalSSRC()");

@@ -30,7 +30,6 @@
#include "webrtc/voice_engine/audio_level.h"
#include "webrtc/voice_engine/file_player.h"
#include "webrtc/voice_engine/file_recorder.h"
#include "webrtc/voice_engine/include/voe_audio_processing.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_network.h"
#include "webrtc/voice_engine/shared_data.h"
@@ -134,7 +133,6 @@ class Channel
      public Transport,
      public AudioPacketizationCallback,  // receive encoded packets from the
                                          // ACM
      public ACMVADCallback,  // receive voice activity from the ACM
      public MixerParticipant,  // supplies output mixer with audio frames
      public OverheadObserver {
 public:
@@ -265,9 +263,6 @@ class Channel
  int SendTelephoneEventOutband(int event, int duration_ms);
  int SetSendTelephoneEventPayloadType(int payload_type, int payload_frequency);

  // VoEAudioProcessingImpl
  int VoiceActivityIndicator(int& activity);

  // VoERTP_RTCP
  int SetLocalSSRC(unsigned int ssrc);
  int GetLocalSSRC(unsigned int& ssrc);
@@ -307,9 +302,6 @@ class Channel
                size_t payloadSize,
                const RTPFragmentationHeader* fragmentation) override;

  // From ACMVADCallback in the ACM
  int32_t InFrameType(FrameType frame_type) override;

  // From RtpData in the RTP/RTCP module
  int32_t OnReceivedPayloadData(const uint8_t* payloadData,
                                size_t payloadSize,
@@ -456,6 +448,8 @@ class Channel

  // Timestamp of the audio pulled from NetEq.
  rtc::Optional<uint32_t> jitter_buffer_playout_timestamp_;

  rtc::CriticalSection video_sync_lock_;
  uint32_t playout_timestamp_rtp_ GUARDED_BY(video_sync_lock_);
  uint32_t playout_delay_ms_ GUARDED_BY(video_sync_lock_);
  uint16_t send_sequence_number_;
@@ -479,7 +473,6 @@ class Channel
  rtc::CriticalSection* _callbackCritSectPtr;  // owned by base
  Transport* _transportPtr;  // WebRtc socket or external transport
  RmsLevel rms_level_;
  int32_t _sendFrameType;  // Send data is voice, 1-voice, 0-otherwise
  bool input_mute_ GUARDED_BY(volume_settings_critsect_);
  bool previous_frame_muted_;  // Only accessed from PrepareEncodeAndSend().
  float _outputGain GUARDED_BY(volume_settings_critsect_);
@@ -494,9 +487,7 @@ class Channel
  rtc::CriticalSection overhead_per_packet_lock_;
  // VoENetwork
  AudioFrame::SpeechType _outputSpeechType;
  // VoEVideoSync
  rtc::CriticalSection video_sync_lock_;
  // VoEAudioProcessing
  // DTX.
  bool restored_packet_in_use_;
  // RtcpBandwidthObserver
  std::unique_ptr<VoERtcpObserver> rtcp_observer_;

@@ -1,195 +0,0 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

// This sub-API supports the following functionalities:
//
// - Noise Suppression (NS).
// - Automatic Gain Control (AGC).
// - Echo Control (EC).
// - Receiving side VAD, NS and AGC.
// - Measurements of instantaneous speech, noise and echo levels.
// - Generation of AP debug recordings.
// - Detection of keyboard typing which can disrupt a voice conversation.
//
// Usage example, omitting error checking:
//
// using namespace webrtc;
// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface();
// VoEAudioProcessing* ap = VoEAudioProcessing::GetInterface(voe);
// base->Init();
// ap->SetEcStatus(true, kAgcAdaptiveAnalog);
// ...
// base->Terminate();
// base->Release();
// ap->Release();
// VoiceEngine::Delete(voe);
//
#ifndef WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
#define WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H

#include <stdio.h>

#include "webrtc/common_types.h"

namespace webrtc {

class VoiceEngine;

// VoEAudioProcessing
class WEBRTC_DLLEXPORT VoEAudioProcessing {
 public:
  // Factory for the VoEAudioProcessing sub-API. Increases an internal
  // reference counter if successful. Returns NULL if the API is not
  // supported or if construction fails.
  static VoEAudioProcessing* GetInterface(VoiceEngine* voiceEngine);

  // Releases the VoEAudioProcessing sub-API and decreases an internal
  // reference counter. Returns the new reference count. This value should
  // be zero for all sub-API:s before the VoiceEngine object can be safely
  // deleted.
  virtual int Release() = 0;

  // Sets Noise Suppression (NS) status and mode.
  // The NS reduces noise in the microphone signal.
  virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) = 0;

  // Gets the NS status and mode.
  virtual int GetNsStatus(bool& enabled, NsModes& mode) = 0;

  // Sets the Automatic Gain Control (AGC) status and mode.
  // The AGC adjusts the microphone signal to an appropriate level.
  virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) = 0;

  // Gets the AGC status and mode.
  virtual int GetAgcStatus(bool& enabled, AgcModes& mode) = 0;

  // Sets the AGC configuration.
  // Should only be used in situations where the working environment
  // is well known.
  virtual int SetAgcConfig(AgcConfig config) = 0;

  // Gets the AGC configuration.
  virtual int GetAgcConfig(AgcConfig& config) = 0;

  // Sets the Echo Control (EC) status and mode.
  // The EC mitigates acoustic echo where a user can hear their own
  // speech repeated back due to an acoustic coupling between the
  // speaker and the microphone at the remote end.
  virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) = 0;

  // Gets the EC status and mode.
  virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0;

  // Enables the compensation of clock drift between the capture and render
  // streams by the echo canceller (i.e. only using EcMode==kEcAec). It will
  // only be enabled if supported on the current platform; otherwise an error
  // will be returned. Check if the platform is supported by calling
  // |DriftCompensationSupported()|.
  virtual int EnableDriftCompensation(bool enable) = 0;
  virtual bool DriftCompensationEnabled() = 0;
  static bool DriftCompensationSupported();

  // Sets a delay |offset| in ms to add to the system delay reported by the
  // OS, which is used by the AEC to synchronize far- and near-end streams.
  // In some cases a system may introduce a delay which goes unreported by the
  // OS, but which is known to the user. This method can be used to compensate
  // for the unreported delay.
  virtual void SetDelayOffsetMs(int offset) = 0;
  virtual int DelayOffsetMs() = 0;

  // Modifies settings for the AEC designed for mobile devices (AECM).
  virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
                          bool enableCNG = true) = 0;

  // Gets settings for the AECM.
  virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG) = 0;

  // Enables a high pass filter on the capture signal. This removes DC bias
  // and low-frequency noise. Recommended to be enabled.
  virtual int EnableHighPassFilter(bool enable) = 0;
  virtual bool IsHighPassFilterEnabled() = 0;

  // Gets the VAD/DTX activity for the specified |channel|.
  // The returned value is 1 if frames of audio contains speech
  // and 0 if silence. The output is always 1 if VAD is disabled.
  virtual int VoiceActivityIndicator(int channel) = 0;

  // Enables or disables the possibility to retrieve echo metrics and delay
  // logging values during an active call. The metrics are only supported in
  // AEC.
  virtual int SetEcMetricsStatus(bool enable) = 0;

  // Gets the current EC metric status.
  virtual int GetEcMetricsStatus(bool& enabled) = 0;

  // Gets the instantaneous echo level metrics.
  virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) = 0;

  // Gets the EC internal |delay_median| and |delay_std| in ms between
  // near-end and far-end. The metric |fraction_poor_delays| is the amount of
  // delay values that potentially can break the EC. The values are aggregated
  // over one second and the last updated metrics are returned.
  virtual int GetEcDelayMetrics(int& delay_median,
                                int& delay_std,
                                float& fraction_poor_delays) = 0;

  // Enables recording of Audio Processing (AP) debugging information.
  // The file can later be used for off-line analysis of the AP performance.
  virtual int StartDebugRecording(const char* fileNameUTF8) = 0;

  // Same as above but sets and uses an existing file handle. Takes ownership
  // of |file_handle| and passes it on to the audio processing module.
  virtual int StartDebugRecording(FILE* file_handle) = 0;

  // Disables recording of AP debugging information.
  virtual int StopDebugRecording() = 0;

  // Enables or disables detection of disturbing keyboard typing.
  // An error notification will be given as a callback upon detection.
  virtual int SetTypingDetectionStatus(bool enable) = 0;

  // Gets the current typing detection status.
  virtual int GetTypingDetectionStatus(bool& enabled) = 0;

  // Reports the lower of:
  // * Time in seconds since the last typing event.
  // * Time in seconds since the typing detection was enabled.
  // Returns error if typing detection is disabled.
  virtual int TimeSinceLastTyping(int& seconds) = 0;

  // Optional setting of typing detection parameters
  // Parameter with value == 0 will be ignored
  // and left with default config.
  // TODO(niklase) Remove default argument as soon as libJingle is updated!
  virtual int SetTypingDetectionParameters(int timeWindow,
                                           int costPerTyping,
                                           int reportingThreshold,
                                           int penaltyDecay,
                                           int typeEventDelay = 0) = 0;

  // Swaps the capture-side left and right audio channels when enabled. It
  // only has an effect when using a stereo send codec. The setting is
  // persistent; it will be applied whenever a stereo send codec is enabled.
  //
  // The swap is applied only to the captured audio, and not mixed files. The
  // swap will appear in file recordings and when accessing audio through the
  // external media interface.
  virtual void EnableStereoChannelSwapping(bool enable) = 0;
  virtual bool IsStereoChannelSwappingEnabled() = 0;

 protected:
  VoEAudioProcessing() {}
  virtual ~VoEAudioProcessing() {}
};

}  // namespace webrtc

#endif  // WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
@@ -1,69 +0,0 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"

class AgcConfigTest : public AfterStreamingFixture {
 protected:
  void SetUp() {
    // These should be defaults for the AGC config.
    default_agc_config_.digitalCompressionGaindB = 9;
    default_agc_config_.limiterEnable = true;
    default_agc_config_.targetLeveldBOv = 3;
  }

  webrtc::AgcConfig default_agc_config_;
};

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AgcConfigTest, HasCorrectDefaultConfiguration) {
  webrtc::AgcConfig agc_config;

  EXPECT_EQ(0, voe_apm_->GetAgcConfig(agc_config));

  EXPECT_EQ(default_agc_config_.targetLeveldBOv, agc_config.targetLeveldBOv);
  EXPECT_EQ(default_agc_config_.digitalCompressionGaindB,
            agc_config.digitalCompressionGaindB);
  EXPECT_EQ(default_agc_config_.limiterEnable, agc_config.limiterEnable);
}

// Not needed anymore - we're not returning errors anymore, just logging.
TEST_F(AgcConfigTest, DealsWithInvalidParameters) {
  webrtc::AgcConfig agc_config = default_agc_config_;
  agc_config.digitalCompressionGaindB = 91;
  EXPECT_EQ(-1, voe_apm_->SetAgcConfig(agc_config)) << "Should not be able "
      "to set gain to more than 90 dB.";
  EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());

  agc_config = default_agc_config_;
  agc_config.targetLeveldBOv = 32;
  EXPECT_EQ(-1, voe_apm_->SetAgcConfig(agc_config)) << "Should not be able "
      "to set target level to more than 31.";
  EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AgcConfigTest, CanGetAndSetAgcStatus) {
  webrtc::AgcConfig agc_config;
  agc_config.digitalCompressionGaindB = 17;
  agc_config.targetLeveldBOv = 11;
  agc_config.limiterEnable = false;

  webrtc::AgcConfig actual_config;
  EXPECT_EQ(0, voe_apm_->SetAgcConfig(agc_config));
  EXPECT_EQ(0, voe_apm_->GetAgcConfig(actual_config));

  EXPECT_EQ(agc_config.digitalCompressionGaindB,
            actual_config.digitalCompressionGaindB);
  EXPECT_EQ(agc_config.limiterEnable,
            actual_config.limiterEnable);
  EXPECT_EQ(agc_config.targetLeveldBOv,
            actual_config.targetLeveldBOv);
}
@@ -1,104 +0,0 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"

class EcMetricsTest : public AfterStreamingFixture {
};

// Duplicated in apm_helpers_unittest.cc.
TEST_F(EcMetricsTest, EcMetricsAreOnByDefault) {
  // AEC must be enabled first.
  EXPECT_EQ(0, voe_apm_->SetEcStatus(true, webrtc::kEcAec));

  bool enabled = false;
  EXPECT_EQ(0, voe_apm_->GetEcMetricsStatus(enabled));
  EXPECT_TRUE(enabled);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(EcMetricsTest, CanEnableAndDisableEcMetrics) {
  // AEC must be enabled first.
  EXPECT_EQ(0, voe_apm_->SetEcStatus(true, webrtc::kEcAec));

  EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
  bool ec_on = false;
  EXPECT_EQ(0, voe_apm_->GetEcMetricsStatus(ec_on));
  ASSERT_TRUE(ec_on);
  EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(false));
  EXPECT_EQ(0, voe_apm_->GetEcMetricsStatus(ec_on));
  ASSERT_FALSE(ec_on);
}

// TODO(solenberg): Do we have higher or lower level tests that verify metrics?
// It's not the right test for this level.
TEST_F(EcMetricsTest, ManualTestEcMetrics) {
  SwitchToManualMicrophone();

  EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));

  // Must enable AEC to get valid echo metrics.
  EXPECT_EQ(0, voe_apm_->SetEcStatus(true, webrtc::kEcAec));

  TEST_LOG("Speak into microphone and check metrics for 5 seconds...\n");
  int erl, erle, rerl, a_nlp;
  int delay_median = 0;
  int delay_std = 0;
  float fraction_poor_delays = 0;

  for (int i = 0; i < 5; i++) {
    Sleep(1000);
    EXPECT_EQ(0, voe_apm_->GetEchoMetrics(erl, erle, rerl, a_nlp));
    EXPECT_EQ(0, voe_apm_->GetEcDelayMetrics(delay_median, delay_std,
                                             fraction_poor_delays));
    TEST_LOG("  Echo  : ERL=%5d, ERLE=%5d, RERL=%5d, A_NLP=%5d [dB], "
             " delay median=%3d, delay std=%3d [ms], "
             "fraction_poor_delays=%3.1f [%%]\n", erl, erle, rerl, a_nlp,
             delay_median, delay_std, fraction_poor_delays * 100);
  }

  EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(false));
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(EcMetricsTest, GetEcMetricsFailsIfEcNotEnabled) {
  int dummy = 0;
  EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
  EXPECT_EQ(-1, voe_apm_->GetEchoMetrics(dummy, dummy, dummy, dummy));
  EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(EcMetricsTest, GetEcDelayMetricsFailsIfEcNotEnabled) {
  int dummy = 0;
  float dummy_f = 0;
  EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
  EXPECT_EQ(-1, voe_apm_->GetEcDelayMetrics(dummy, dummy, dummy_f));
  EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
}

// TODO(solenberg): Do we have higher or lower level tests that verify metrics?
// It's not the right test for this level.
TEST_F(EcMetricsTest, ManualVerifyEcDelayMetrics) {
  SwitchToManualMicrophone();
  TEST_LOG("Verify EC Delay metrics:");
  EXPECT_EQ(0, voe_apm_->SetEcStatus(true));
  EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));

  for (int i = 0; i < 5; i++) {
    int delay, delay_std;
    float fraction_poor_delays;
    EXPECT_EQ(0, voe_apm_->GetEcDelayMetrics(delay, delay_std,
                                             fraction_poor_delays));
    TEST_LOG("Delay = %d, Delay Std = %d, Fraction poor delays = %3.1f\n",
             delay, delay_std, fraction_poor_delays * 100);
    Sleep(1000);
  }
}
@@ -19,7 +19,6 @@ BeforeInitializationFixture::BeforeInitializationFixture()
  voe_base_ = webrtc::VoEBase::GetInterface(voice_engine_);
  voe_codec_ = webrtc::VoECodec::GetInterface(voice_engine_);
  voe_rtp_rtcp_ = webrtc::VoERTP_RTCP::GetInterface(voice_engine_);
  voe_apm_ = webrtc::VoEAudioProcessing::GetInterface(voice_engine_);
  voe_network_ = webrtc::VoENetwork::GetInterface(voice_engine_);
  voe_file_ = webrtc::VoEFile::GetInterface(voice_engine_);
  voe_hardware_ = webrtc::VoEHardware::GetInterface(voice_engine_);
@@ -30,7 +29,6 @@ BeforeInitializationFixture::~BeforeInitializationFixture() {
  voe_base_->Release();
  voe_codec_->Release();
  voe_rtp_rtcp_->Release();
  voe_apm_->Release();
  voe_network_->Release();
  voe_file_->Release();
  voe_hardware_->Release();

@@ -15,7 +15,6 @@
#include "webrtc/test/gmock.h"
#include "webrtc/test/gtest.h"
#include "webrtc/typedefs.h"
#include "webrtc/voice_engine/include/voe_audio_processing.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_codec.h"
#include "webrtc/voice_engine/include/voe_errors.h"
@@ -50,7 +49,6 @@ class BeforeInitializationFixture : public testing::Test {
  webrtc::VoEBase* voe_base_;
  webrtc::VoECodec* voe_codec_;
  webrtc::VoERTP_RTCP* voe_rtp_rtcp_;
  webrtc::VoEAudioProcessing* voe_apm_;
  webrtc::VoENetwork* voe_network_;
  webrtc::VoEFile* voe_file_;
  webrtc::VoEHardware* voe_hardware_;

@@ -1,300 +0,0 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
#include "webrtc/voice_engine/test/auto_test/voe_standard_test.h"

class AudioProcessingTest : public AfterStreamingFixture {
 protected:
  // Note: Be careful with this one, it is used in the
  // Android / iPhone part too.
  void TryEnablingAgcWithMode(webrtc::AgcModes agc_mode_to_set) {
    EXPECT_EQ(0, voe_apm_->SetAgcStatus(true, agc_mode_to_set));

    bool agc_enabled = false;
    webrtc::AgcModes agc_mode = webrtc::kAgcDefault;

    EXPECT_EQ(0, voe_apm_->GetAgcStatus(agc_enabled, agc_mode));
    EXPECT_TRUE(agc_enabled);
    EXPECT_EQ(agc_mode_to_set, agc_mode);
  }

  // EC modes can map to other EC modes, so we have a separate parameter
  // for what we expect the EC mode to be set to.
  void TryEnablingEcWithMode(webrtc::EcModes ec_mode_to_set,
                             webrtc::EcModes expected_mode) {
    EXPECT_EQ(0, voe_apm_->SetEcStatus(true, ec_mode_to_set));

    bool ec_enabled = true;
    webrtc::EcModes ec_mode = webrtc::kEcDefault;

    EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));

    EXPECT_EQ(expected_mode, ec_mode);
  }

  // Here, the CNG mode will be expected to be on or off depending on the mode.
  void TryEnablingAecmWithMode(webrtc::AecmModes aecm_mode_to_set,
                               bool cng_enabled_to_set) {
    EXPECT_EQ(0, voe_apm_->SetAecmMode(aecm_mode_to_set, cng_enabled_to_set));

    bool cng_enabled = false;
    webrtc::AecmModes aecm_mode = webrtc::kAecmEarpiece;

    voe_apm_->GetAecmMode(aecm_mode, cng_enabled);

    EXPECT_EQ(cng_enabled_to_set, cng_enabled);
    EXPECT_EQ(aecm_mode_to_set, aecm_mode);
  }

  void TryEnablingNsWithMode(webrtc::NsModes ns_mode_to_set,
                             webrtc::NsModes expected_ns_mode) {
    EXPECT_EQ(0, voe_apm_->SetNsStatus(true, ns_mode_to_set));

    bool ns_status = true;
    webrtc::NsModes ns_mode = webrtc::kNsDefault;
    EXPECT_EQ(0, voe_apm_->GetNsStatus(ns_status, ns_mode));

    EXPECT_TRUE(ns_status);
    EXPECT_EQ(expected_ns_mode, ns_mode);
  }
};

#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, AgcIsOnByDefault) {
  bool agc_enabled = false;
  webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;

  EXPECT_EQ(0, voe_apm_->GetAgcStatus(agc_enabled, agc_mode));
  EXPECT_TRUE(agc_enabled);
  EXPECT_EQ(webrtc::kAgcAdaptiveAnalog, agc_mode);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, CanEnableAgcWithAllModes) {
  TryEnablingAgcWithMode(webrtc::kAgcAdaptiveDigital);
  TryEnablingAgcWithMode(webrtc::kAgcAdaptiveAnalog);
  TryEnablingAgcWithMode(webrtc::kAgcFixedDigital);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, EcIsDisabledAndAecIsDefaultEcMode) {
  bool ec_enabled = true;
  webrtc::EcModes ec_mode = webrtc::kEcDefault;

  EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));
  EXPECT_FALSE(ec_enabled);
  EXPECT_EQ(webrtc::kEcAec, ec_mode);
}

// Not needed anymore - apm_helpers::SetEcStatus() doesn't take kEcAec.
TEST_F(AudioProcessingTest, EnablingEcAecShouldEnableEcAec) {
  TryEnablingEcWithMode(webrtc::kEcAec, webrtc::kEcAec);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, EnablingEcConferenceShouldEnableEcAec) {
  TryEnablingEcWithMode(webrtc::kEcConference, webrtc::kEcAec);
}

// Not needed anymore - apm_helpers::SetEcStatus() doesn't take kEcDefault.
TEST_F(AudioProcessingTest, EcModeIsPreservedWhenEcIsTurnedOff) {
  TryEnablingEcWithMode(webrtc::kEcConference, webrtc::kEcAec);

  EXPECT_EQ(0, voe_apm_->SetEcStatus(false));

  bool ec_enabled = true;
  webrtc::EcModes ec_mode = webrtc::kEcDefault;
  EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));

  EXPECT_FALSE(ec_enabled);
  EXPECT_EQ(webrtc::kEcAec, ec_mode);
}

// Not needed anymore - apm_helpers::SetEcStatus() doesn't take kEcDefault.
TEST_F(AudioProcessingTest, CanEnableAndDisableEcModeSeveralTimesInARow) {
  for (int i = 0; i < 10; i++) {
    EXPECT_EQ(0, voe_apm_->SetEcStatus(true));
    EXPECT_EQ(0, voe_apm_->SetEcStatus(false));
  }

  bool ec_enabled = true;
  webrtc::EcModes ec_mode = webrtc::kEcDefault;
  EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));

  EXPECT_FALSE(ec_enabled);
  EXPECT_EQ(webrtc::kEcAec, ec_mode);
}

#endif  // !WEBRTC_IOS && !WEBRTC_ANDROID

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, EnablingEcAecmShouldEnableEcAecm) {
  // This one apparently applies to Android and iPhone as well.
  TryEnablingEcWithMode(webrtc::kEcAecm, webrtc::kEcAecm);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, EcAecmModeIsEnabledAndSpeakerphoneByDefault) {
  bool cng_enabled = false;
  webrtc::AecmModes aecm_mode = webrtc::kAecmEarpiece;

  voe_apm_->GetAecmMode(aecm_mode, cng_enabled);

  EXPECT_TRUE(cng_enabled);
  EXPECT_EQ(webrtc::kAecmSpeakerphone, aecm_mode);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, CanSetAecmMode) {
  EXPECT_EQ(0, voe_apm_->SetEcStatus(true, webrtc::kEcAecm));

  // Try some AECM mode - CNG enabled combinations.
  TryEnablingAecmWithMode(webrtc::kAecmEarpiece, true);
  TryEnablingAecmWithMode(webrtc::kAecmEarpiece, false);
  TryEnablingAecmWithMode(webrtc::kAecmLoudEarpiece, true);
  TryEnablingAecmWithMode(webrtc::kAecmLoudSpeakerphone, false);
  TryEnablingAecmWithMode(webrtc::kAecmQuietEarpieceOrHeadset, true);
  TryEnablingAecmWithMode(webrtc::kAecmSpeakerphone, false);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, NsIsOffWithModerateSuppressionByDefault) {
  bool ns_status = true;
  webrtc::NsModes ns_mode = webrtc::kNsDefault;
  EXPECT_EQ(0, voe_apm_->GetNsStatus(ns_status, ns_mode));

  EXPECT_FALSE(ns_status);
  EXPECT_EQ(webrtc::kNsModerateSuppression, ns_mode);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, CanSetNsMode) {
  // Concrete suppression values map to themselves.
  TryEnablingNsWithMode(webrtc::kNsHighSuppression,
                        webrtc::kNsHighSuppression);
  TryEnablingNsWithMode(webrtc::kNsLowSuppression,
                        webrtc::kNsLowSuppression);
  TryEnablingNsWithMode(webrtc::kNsModerateSuppression,
                        webrtc::kNsModerateSuppression);
  TryEnablingNsWithMode(webrtc::kNsVeryHighSuppression,
                        webrtc::kNsVeryHighSuppression);

  // Conference and Default map to concrete values.
  TryEnablingNsWithMode(webrtc::kNsConference,
                        webrtc::kNsHighSuppression);
  TryEnablingNsWithMode(webrtc::kNsDefault,
                        webrtc::kNsModerateSuppression);
}

// TODO(solenberg): Duplicate this test at the voe::Channel layer.
// Not needed anymore - API is unused.
TEST_F(AudioProcessingTest, VadIsDisabledByDefault) {
  bool vad_enabled;
  bool disabled_dtx;
  webrtc::VadModes vad_mode;

  EXPECT_EQ(0, voe_codec_->GetVADStatus(
      channel_, vad_enabled, vad_mode, disabled_dtx));

  EXPECT_FALSE(vad_enabled);
}

// Not needed anymore - API is unused.
TEST_F(AudioProcessingTest, VoiceActivityIndicatorReturns1WithSpeechOn) {
  // This sleep is necessary since the voice detection algorithm needs some
  // time to detect the speech from the fake microphone.
  Sleep(500);
  EXPECT_EQ(1, voe_apm_->VoiceActivityIndicator(channel_));
}

// Not needed anymore - API is unused.
TEST_F(AudioProcessingTest, CanSetDelayOffset) {
  voe_apm_->SetDelayOffsetMs(50);
  EXPECT_EQ(50, voe_apm_->DelayOffsetMs());
  voe_apm_->SetDelayOffsetMs(-50);
  EXPECT_EQ(-50, voe_apm_->DelayOffsetMs());
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, HighPassFilterIsOnByDefault) {
  EXPECT_TRUE(voe_apm_->IsHighPassFilterEnabled());
}

// TODO(solenberg): Check that sufficient testing is done in APM.
// Not needed anymore - API is unused.
TEST_F(AudioProcessingTest, CanSetHighPassFilter) {
  EXPECT_EQ(0, voe_apm_->EnableHighPassFilter(true));
  EXPECT_TRUE(voe_apm_->IsHighPassFilterEnabled());
  EXPECT_EQ(0, voe_apm_->EnableHighPassFilter(false));
  EXPECT_FALSE(voe_apm_->IsHighPassFilterEnabled());
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, StereoChannelSwappingIsOffByDefault) {
  EXPECT_FALSE(voe_apm_->IsStereoChannelSwappingEnabled());
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, CanSetStereoChannelSwapping) {
  voe_apm_->EnableStereoChannelSwapping(true);
  EXPECT_TRUE(voe_apm_->IsStereoChannelSwappingEnabled());
  voe_apm_->EnableStereoChannelSwapping(false);
  EXPECT_FALSE(voe_apm_->IsStereoChannelSwappingEnabled());
}

// TODO(solenberg): Check that sufficient testing is done in APM.
TEST_F(AudioProcessingTest, CanStartAndStopDebugRecording) {
  std::string output_path = webrtc::test::OutputPath();
  std::string output_file = output_path + "apm_debug.txt";

  EXPECT_EQ(0, voe_apm_->StartDebugRecording(output_file.c_str()));
  Sleep(1000);
  EXPECT_EQ(0, voe_apm_->StopDebugRecording());
}

#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID)

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, AgcIsOffByDefaultAndDigital) {
  bool agc_enabled = true;
  webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;

  EXPECT_EQ(0, voe_apm_->GetAgcStatus(agc_enabled, agc_mode));
  EXPECT_FALSE(agc_enabled);
  EXPECT_EQ(webrtc::kAgcAdaptiveDigital, agc_mode);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, CanEnableAgcInAdaptiveDigitalMode) {
  TryEnablingAgcWithMode(webrtc::kAgcAdaptiveDigital);
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, AgcIsPossibleExceptInAdaptiveAnalogMode) {
  EXPECT_EQ(-1, voe_apm_->SetAgcStatus(true, webrtc::kAgcAdaptiveAnalog));
  EXPECT_EQ(0, voe_apm_->SetAgcStatus(true, webrtc::kAgcFixedDigital));
  EXPECT_EQ(0, voe_apm_->SetAgcStatus(true, webrtc::kAgcAdaptiveDigital));
}

// Duplicated in apm_helpers_unittest.cc.
TEST_F(AudioProcessingTest, EcIsDisabledAndAecmIsDefaultEcMode) {
  bool ec_enabled = true;
  webrtc::EcModes ec_mode = webrtc::kEcDefault;

  EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));
  EXPECT_FALSE(ec_enabled);
  EXPECT_EQ(webrtc::kEcAecm, ec_mode);
}

#endif  // WEBRTC_IOS || WEBRTC_ANDROID
@@ -1065,33 +1065,6 @@ void TransmitMixer::TypingDetection(bool keyPressed)
}
#endif

#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::TimeSinceLastTyping(int &seconds)
{
  // We check in VoEAudioProcessingImpl that this is only called when
  // typing detection is active.
  seconds = _typingDetection.TimeSinceLastDetectionInSeconds();
  return 0;
}
#endif

#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::SetTypingDetectionParameters(int timeWindow,
                                                int costPerTyping,
                                                int reportingThreshold,
                                                int penaltyDecay,
                                                int typeEventDelay)
{
  _typingDetection.SetParameters(timeWindow,
                                 costPerTyping,
                                 reportingThreshold,
                                 penaltyDecay,
                                 typeEventDelay,
                                 0);
  return 0;
}
#endif

void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
  swap_stereo_channels_ = enable;
}

@@ -145,16 +145,6 @@ public:

  void RecordFileEnded(const int32_t id);

#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
  // Typing detection
  int TimeSinceLastTyping(int &seconds);
  int SetTypingDetectionParameters(int timeWindow,
                                   int costPerTyping,
                                   int reportingThreshold,
                                   int penaltyDecay,
                                   int typeEventDelay);
#endif

  // Virtual to allow mocking.
  virtual void EnableStereoChannelSwapping(bool enable);
  bool IsStereoChannelSwappingEnabled();

@ -1,774 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/voice_engine/voe_audio_processing_impl.h"
|
||||
|
||||
#include "webrtc/base/logging.h"
|
||||
#include "webrtc/modules/audio_processing/include/audio_processing.h"
|
||||
#include "webrtc/system_wrappers/include/trace.h"
|
||||
#include "webrtc/voice_engine/channel.h"
|
||||
#include "webrtc/voice_engine/include/voe_errors.h"
|
||||
#include "webrtc/voice_engine/transmit_mixer.h"
|
||||
#include "webrtc/voice_engine/voice_engine_impl.h"
|
||||
|
||||
// TODO(andrew): move to a common place.
|
||||
#define WEBRTC_VOICE_INIT_CHECK() \
|
||||
do { \
|
||||
if (!_shared->statistics().Initialized()) { \
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError); \
|
||||
return -1; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define WEBRTC_VOICE_INIT_CHECK_BOOL() \
|
||||
do { \
|
||||
if (!_shared->statistics().Initialized()) { \
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError); \
|
||||
return false; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
|
||||
static const EcModes kDefaultEcMode = kEcAecm;
|
||||
#else
|
||||
static const EcModes kDefaultEcMode = kEcAec;
|
||||
#endif
|
||||
|
||||
VoEAudioProcessing* VoEAudioProcessing::GetInterface(VoiceEngine* voiceEngine) {
|
||||
if (NULL == voiceEngine) {
|
||||
return NULL;
|
||||
}
|
||||
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
|
||||
s->AddRef();
|
||||
return s;
|
||||
}
|
||||
|
||||
VoEAudioProcessingImpl::VoEAudioProcessingImpl(voe::SharedData* shared)
|
||||
: _isAecMode(kDefaultEcMode == kEcAec), _shared(shared) {
|
||||
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
|
||||
"VoEAudioProcessingImpl::VoEAudioProcessingImpl() - ctor");
|
||||
}
|
||||
|
||||
VoEAudioProcessingImpl::~VoEAudioProcessingImpl() {
|
||||
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
|
||||
"VoEAudioProcessingImpl::~VoEAudioProcessingImpl() - dtor");
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::SetNsStatus(bool enable, NsModes mode) {
|
||||
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
|
||||
"SetNsStatus(enable=%d, mode=%d)", enable, mode);
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
NoiseSuppression::Level nsLevel = kDefaultNsMode;
|
||||
switch (mode) {
|
||||
case kNsDefault:
|
||||
nsLevel = kDefaultNsMode;
|
||||
break;
|
||||
case kNsUnchanged:
|
||||
nsLevel = _shared->audio_processing()->noise_suppression()->level();
|
||||
break;
|
||||
case kNsConference:
|
||||
nsLevel = NoiseSuppression::kHigh;
|
||||
break;
|
||||
case kNsLowSuppression:
|
||||
nsLevel = NoiseSuppression::kLow;
|
||||
break;
|
||||
case kNsModerateSuppression:
|
||||
nsLevel = NoiseSuppression::kModerate;
|
||||
break;
|
||||
case kNsHighSuppression:
|
||||
nsLevel = NoiseSuppression::kHigh;
|
||||
break;
|
||||
case kNsVeryHighSuppression:
|
||||
nsLevel = NoiseSuppression::kVeryHigh;
|
||||
break;
|
||||
}
|
||||
|
||||
if (_shared->audio_processing()->noise_suppression()->set_level(nsLevel) !=
|
||||
0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetNsStatus() failed to set Ns mode");
|
||||
return -1;
|
||||
}
|
||||
if (_shared->audio_processing()->noise_suppression()->Enable(enable) != 0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetNsStatus() failed to set Ns state");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::GetNsStatus(bool& enabled, NsModes& mode) {
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
enabled = _shared->audio_processing()->noise_suppression()->is_enabled();
|
||||
NoiseSuppression::Level nsLevel =
|
||||
_shared->audio_processing()->noise_suppression()->level();
|
||||
|
||||
switch (nsLevel) {
|
||||
case NoiseSuppression::kLow:
|
||||
mode = kNsLowSuppression;
|
||||
break;
|
||||
case NoiseSuppression::kModerate:
|
||||
mode = kNsModerateSuppression;
|
||||
break;
|
||||
case NoiseSuppression::kHigh:
|
||||
mode = kNsHighSuppression;
|
||||
break;
|
||||
case NoiseSuppression::kVeryHigh:
|
||||
mode = kNsVeryHighSuppression;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode) {
|
||||
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
|
||||
"SetAgcStatus(enable=%d, mode=%d)", enable, mode);
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
#if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
|
||||
if (mode == kAgcAdaptiveAnalog) {
|
||||
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
|
||||
"SetAgcStatus() invalid Agc mode for mobile device");
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
GainControl::Mode agcMode = kDefaultAgcMode;
|
||||
switch (mode) {
|
||||
case kAgcDefault:
|
||||
agcMode = kDefaultAgcMode;
|
||||
break;
|
||||
case kAgcUnchanged:
|
||||
agcMode = _shared->audio_processing()->gain_control()->mode();
|
||||
break;
|
||||
case kAgcFixedDigital:
|
||||
agcMode = GainControl::kFixedDigital;
|
||||
break;
|
||||
case kAgcAdaptiveAnalog:
|
||||
agcMode = GainControl::kAdaptiveAnalog;
|
||||
break;
|
||||
case kAgcAdaptiveDigital:
|
||||
agcMode = GainControl::kAdaptiveDigital;
|
||||
break;
|
||||
}
|
||||
|
||||
if (_shared->audio_processing()->gain_control()->set_mode(agcMode) != 0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetAgcStatus() failed to set Agc mode");
|
||||
return -1;
|
||||
}
|
||||
if (_shared->audio_processing()->gain_control()->Enable(enable) != 0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetAgcStatus() failed to set Agc state");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (agcMode != GainControl::kFixedDigital) {
|
||||
// Set Agc state in the ADM when adaptive Agc mode has been selected.
|
||||
// Note that we also enable the ADM Agc when Adaptive Digital mode is
|
||||
// used since we want to be able to provide the APM with updated mic
|
||||
// levels when the user modifies the mic level manually.
|
||||
if (_shared->audio_device()->SetAGC(enable) != 0) {
|
||||
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
|
||||
"SetAgcStatus() failed to set Agc mode");
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::GetAgcStatus(bool& enabled, AgcModes& mode) {
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
enabled = _shared->audio_processing()->gain_control()->is_enabled();
|
||||
GainControl::Mode agcMode =
|
||||
_shared->audio_processing()->gain_control()->mode();
|
||||
|
||||
switch (agcMode) {
|
||||
case GainControl::kFixedDigital:
|
||||
mode = kAgcFixedDigital;
|
||||
break;
|
||||
case GainControl::kAdaptiveAnalog:
|
||||
mode = kAgcAdaptiveAnalog;
|
||||
break;
|
||||
case GainControl::kAdaptiveDigital:
|
||||
mode = kAgcAdaptiveDigital;
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::SetAgcConfig(AgcConfig config) {
|
||||
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
|
||||
"SetAgcConfig()");
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (_shared->audio_processing()->gain_control()->set_target_level_dbfs(
|
||||
config.targetLeveldBOv) != 0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetAgcConfig() failed to set target peak |level|"
|
||||
" (or envelope) of the Agc");
|
||||
return -1;
|
||||
}
|
||||
if (_shared->audio_processing()->gain_control()->set_compression_gain_db(
|
||||
config.digitalCompressionGaindB) != 0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetAgcConfig() failed to set the range in |gain| "
|
||||
"the digital compression stage may apply");
|
||||
return -1;
|
||||
}
|
||||
if (_shared->audio_processing()->gain_control()->enable_limiter(
|
||||
config.limiterEnable) != 0) {
|
||||
_shared->SetLastError(
|
||||
VE_APM_ERROR, kTraceError,
|
||||
"SetAgcConfig() failed to set hard limiter to the signal");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::GetAgcConfig(AgcConfig& config) {
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
config.targetLeveldBOv =
|
||||
_shared->audio_processing()->gain_control()->target_level_dbfs();
|
||||
config.digitalCompressionGaindB =
|
||||
_shared->audio_processing()->gain_control()->compression_gain_db();
|
||||
config.limiterEnable =
|
||||
_shared->audio_processing()->gain_control()->is_limiter_enabled();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool VoEAudioProcessing::DriftCompensationSupported() {
|
||||
#if defined(WEBRTC_DRIFT_COMPENSATION_SUPPORTED)
|
||||
return true;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::EnableDriftCompensation(bool enable) {
|
||||
WEBRTC_VOICE_INIT_CHECK();
|
||||
|
||||
if (!DriftCompensationSupported()) {
|
||||
_shared->SetLastError(
|
||||
VE_APM_ERROR, kTraceWarning,
|
||||
"Drift compensation is not supported on this platform.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
EchoCancellation* aec = _shared->audio_processing()->echo_cancellation();
|
||||
if (aec->enable_drift_compensation(enable) != 0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"aec->enable_drift_compensation() failed");
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool VoEAudioProcessingImpl::DriftCompensationEnabled() {
|
||||
WEBRTC_VOICE_INIT_CHECK_BOOL();
|
||||
|
||||
EchoCancellation* aec = _shared->audio_processing()->echo_cancellation();
|
||||
return aec->is_drift_compensation_enabled();
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::SetEcStatus(bool enable, EcModes mode) {
|
||||
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
|
||||
"SetEcStatus(enable=%d, mode=%d)", enable, mode);
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// AEC mode
|
||||
if ((mode == kEcDefault) || (mode == kEcConference) || (mode == kEcAec) ||
|
||||
((mode == kEcUnchanged) && (_isAecMode == true))) {
|
||||
if (enable) {
|
||||
// Disable the AECM before enable the AEC
|
||||
if (_shared->audio_processing()->echo_control_mobile()->is_enabled()) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceWarning,
|
||||
"SetEcStatus() disable AECM before enabling AEC");
|
||||
if (_shared->audio_processing()->echo_control_mobile()->Enable(false) !=
|
||||
0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetEcStatus() failed to disable AECM");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (_shared->audio_processing()->echo_cancellation()->Enable(enable) != 0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetEcStatus() failed to set AEC state");
|
||||
return -1;
|
||||
}
|
||||
if (mode == kEcConference) {
|
||||
if (_shared->audio_processing()
|
||||
->echo_cancellation()
|
||||
->set_suppression_level(EchoCancellation::kHighSuppression) !=
|
||||
0) {
|
||||
_shared->SetLastError(
|
||||
VE_APM_ERROR, kTraceError,
|
||||
"SetEcStatus() failed to set aggressiveness to high");
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
if (_shared->audio_processing()
|
||||
->echo_cancellation()
|
||||
->set_suppression_level(EchoCancellation::kModerateSuppression) !=
|
||||
0) {
|
||||
_shared->SetLastError(
|
||||
VE_APM_ERROR, kTraceError,
|
||||
"SetEcStatus() failed to set aggressiveness to moderate");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
_isAecMode = true;
|
||||
} else if ((mode == kEcAecm) ||
|
||||
((mode == kEcUnchanged) && (_isAecMode == false))) {
|
||||
if (enable) {
|
||||
// Disable the AEC before enable the AECM
|
||||
if (_shared->audio_processing()->echo_cancellation()->is_enabled()) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceWarning,
|
||||
"SetEcStatus() disable AEC before enabling AECM");
|
||||
if (_shared->audio_processing()->echo_cancellation()->Enable(false) !=
|
||||
0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetEcStatus() failed to disable AEC");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (_shared->audio_processing()->echo_control_mobile()->Enable(enable) !=
|
||||
0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetEcStatus() failed to set AECM state");
|
||||
return -1;
|
||||
}
|
||||
_isAecMode = false;
|
||||
} else {
|
||||
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
|
||||
"SetEcStatus() invalid EC mode");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::GetEcStatus(bool& enabled, EcModes& mode) {
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (_isAecMode == true) {
|
||||
mode = kEcAec;
|
||||
enabled = _shared->audio_processing()->echo_cancellation()->is_enabled();
|
||||
} else {
|
||||
mode = kEcAecm;
|
||||
enabled = _shared->audio_processing()->echo_control_mobile()->is_enabled();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void VoEAudioProcessingImpl::SetDelayOffsetMs(int offset) {
|
||||
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
|
||||
"SetDelayOffsetMs(offset = %d)", offset);
|
||||
_shared->audio_processing()->set_delay_offset_ms(offset);
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::DelayOffsetMs() {
|
||||
return _shared->audio_processing()->delay_offset_ms();
|
||||
}
|
||||
|
||||
int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG) {
|
||||
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
|
||||
"SetAECMMode(mode = %d)", mode);
|
||||
if (!_shared->statistics().Initialized()) {
|
||||
_shared->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
|
||||
EchoControlMobile::RoutingMode aecmMode(
|
||||
EchoControlMobile::kQuietEarpieceOrHeadset);
|
||||
|
||||
switch (mode) {
|
||||
case kAecmQuietEarpieceOrHeadset:
|
||||
aecmMode = EchoControlMobile::kQuietEarpieceOrHeadset;
|
||||
break;
|
||||
case kAecmEarpiece:
|
||||
aecmMode = EchoControlMobile::kEarpiece;
|
||||
break;
|
||||
case kAecmLoudEarpiece:
|
||||
aecmMode = EchoControlMobile::kLoudEarpiece;
|
||||
break;
|
||||
case kAecmSpeakerphone:
|
||||
aecmMode = EchoControlMobile::kSpeakerphone;
|
||||
break;
|
||||
case kAecmLoudSpeakerphone:
|
||||
aecmMode = EchoControlMobile::kLoudSpeakerphone;
|
||||
break;
|
||||
}
|
||||
|
||||
if (_shared->audio_processing()->echo_control_mobile()->set_routing_mode(
|
||||
aecmMode) != 0) {
|
||||
_shared->SetLastError(VE_APM_ERROR, kTraceError,
|
||||
"SetAECMMode() failed to set AECM routing mode");
|
||||
return -1;
|
||||
}
|
||||
if (_shared->audio_processing()->echo_control_mobile()->enable_comfort_noise(
|
||||
enableCNG) != 0) {
|
||||
_shared->SetLastError(
|
||||
VE_APM_ERROR, kTraceError,
|
||||
"SetAECMMode() failed to set comfort noise state for AECM");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}

int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG) {
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  EchoControlMobile::RoutingMode aecmMode =
      _shared->audio_processing()->echo_control_mobile()->routing_mode();
  enabledCNG = _shared->audio_processing()
                   ->echo_control_mobile()
                   ->is_comfort_noise_enabled();

  switch (aecmMode) {
    case EchoControlMobile::kQuietEarpieceOrHeadset:
      mode = kAecmQuietEarpieceOrHeadset;
      break;
    case EchoControlMobile::kEarpiece:
      mode = kAecmEarpiece;
      break;
    case EchoControlMobile::kLoudEarpiece:
      mode = kAecmLoudEarpiece;
      break;
    case EchoControlMobile::kSpeakerphone:
      mode = kAecmSpeakerphone;
      break;
    case EchoControlMobile::kLoudSpeakerphone:
      mode = kAecmLoudSpeakerphone;
      break;
  }

  return 0;
}
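
// The switch above, together with its counterpart in SetAecmMode(), forms an
// exact 1:1 mapping between the public AecmModes enum and
// EchoControlMobile::RoutingMode, so a set/get round trip returns the value
// that was set. Hypothetical illustration (|voe_apm| is assumed):
//
//   AecmModes mode = kAecmQuietEarpieceOrHeadset;
//   bool cng = false;
//   voe_apm->SetAecmMode(kAecmSpeakerphone, true);
//   voe_apm->GetAecmMode(mode, cng);  // mode == kAecmSpeakerphone, cng == true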

int VoEAudioProcessingImpl::EnableHighPassFilter(bool enable) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "EnableHighPassFilter(%d)", enable);
  if (_shared->audio_processing()->high_pass_filter()->Enable(enable) !=
      AudioProcessing::kNoError) {
    _shared->SetLastError(VE_APM_ERROR, kTraceError,
                          "HighPassFilter::Enable() failed.");
    return -1;
  }

  return 0;
}

bool VoEAudioProcessingImpl::IsHighPassFilterEnabled() {
  return _shared->audio_processing()->high_pass_filter()->is_enabled();
}

int VoEAudioProcessingImpl::VoiceActivityIndicator(int channel) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoiceActivityIndicator(channel=%d)", channel);
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
  voe::Channel* channelPtr = ch.channel();
  if (channelPtr == NULL) {
    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
                          "VoiceActivityIndicator() failed to locate channel");
    return -1;
  }
  int activity(-1);
  channelPtr->VoiceActivityIndicator(activity);

  return activity;
}
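
// Note that the return value of VoiceActivityIndicator() is overloaded: -1 is
// returned both by the error paths above and whenever the channel leaves
// |activity| at its initial value, so a caller that needs to distinguish the
// two cases has to consult the last-error state as well.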

int VoEAudioProcessingImpl::SetEcMetricsStatus(bool enable) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "SetEcMetricsStatus(enable=%d)", enable);
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  if ((_shared->audio_processing()->echo_cancellation()->enable_metrics(
           enable) != 0) ||
      (_shared->audio_processing()->echo_cancellation()->enable_delay_logging(
           enable) != 0)) {
    _shared->SetLastError(VE_APM_ERROR, kTraceError,
                          "SetEcMetricsStatus() unable to set EC metrics mode");
    return -1;
  }
  return 0;
}

int VoEAudioProcessingImpl::GetEcMetricsStatus(bool& enabled) {
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  bool echo_mode =
      _shared->audio_processing()->echo_cancellation()->are_metrics_enabled();
  bool delay_mode = _shared->audio_processing()
                        ->echo_cancellation()
                        ->is_delay_logging_enabled();

  if (echo_mode != delay_mode) {
    _shared->SetLastError(
        VE_APM_ERROR, kTraceError,
        "GetEcMetricsStatus() delay logging and echo mode are not the same");
    return -1;
  }

  enabled = echo_mode;

  return 0;
}
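
// SetEcMetricsStatus() above deliberately couples the AEC's quality metrics
// and its delay logging behind a single flag; that is why this getter treats
// any mismatch between the two as an APM error instead of favoring one.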

int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
                                           int& ERLE,
                                           int& RERL,
                                           int& A_NLP) {
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) {
    _shared->SetLastError(
        VE_APM_ERROR, kTraceWarning,
        "GetEchoMetrics() AudioProcessingModule AEC is not enabled");
    return -1;
  }

  // Get echo metrics from the audio processing module.
  EchoCancellation::Metrics echoMetrics;
  if (_shared->audio_processing()->echo_cancellation()->GetMetrics(
          &echoMetrics)) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "GetEchoMetrics(), AudioProcessingModule metrics error");
    return -1;
  }

  // Echo quality metrics.
  ERL = echoMetrics.echo_return_loss.instant;
  ERLE = echoMetrics.echo_return_loss_enhancement.instant;
  RERL = echoMetrics.residual_echo_return_loss.instant;
  A_NLP = echoMetrics.a_nlp.instant;

  return 0;
}
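
// A hypothetical reader of these metrics (|voe_apm| is assumed). Only the
// "instant" value of each APM statistic is surfaced here; the other
// aggregates in EchoCancellation::Metrics are not exposed through this API:
//
//   int erl = 0, erle = 0, rerl = 0, a_nlp = 0;
//   if (voe_apm->GetEchoMetrics(erl, erle, rerl, a_nlp) == 0) {
//     // Four instantaneous echo measurements from the AEC.
//   }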

int VoEAudioProcessingImpl::GetEcDelayMetrics(int& delay_median,
                                              int& delay_std,
                                              float& fraction_poor_delays) {
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) {
    _shared->SetLastError(
        VE_APM_ERROR, kTraceWarning,
        "GetEcDelayMetrics() AudioProcessingModule AEC is not enabled");
    return -1;
  }

  int median = 0;
  int std = 0;
  float poor_fraction = 0;
  // Get delay-logging values from the audio processing module.
  if (_shared->audio_processing()->echo_cancellation()->GetDelayMetrics(
          &median, &std, &poor_fraction)) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "GetEcDelayMetrics(), AudioProcessingModule delay-logging "
                 "error");
    return -1;
  }

  // EC delay-logging metrics.
  delay_median = median;
  delay_std = std;
  fraction_poor_delays = poor_fraction;

  return 0;
}
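
// |delay_median| and |delay_std| are reported in milliseconds, and
// |fraction_poor_delays| is the share of delay estimates the AEC classified
// as out of range; all three are passed through unchanged from APM's delay
// logging.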

int VoEAudioProcessingImpl::StartDebugRecording(const char* fileNameUTF8) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "StartDebugRecording()");
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  return _shared->audio_processing()->StartDebugRecording(fileNameUTF8, -1);
}

int VoEAudioProcessingImpl::StartDebugRecording(FILE* file_handle) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "StartDebugRecording()");
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  return _shared->audio_processing()->StartDebugRecording(file_handle, -1);
}
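
// In both overloads the trailing -1 is forwarded as APM's maximum log size
// in bytes; a negative value means the debug recording is not size-limited.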

int VoEAudioProcessingImpl::StopDebugRecording() {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "StopDebugRecording()");
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  return _shared->audio_processing()->StopDebugRecording();
}

int VoEAudioProcessingImpl::SetTypingDetectionStatus(bool enable) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "SetTypingDetectionStatus()");
#if !WEBRTC_VOICE_ENGINE_TYPING_DETECTION
  NOT_SUPPORTED(_shared->statistics());
#else
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  // Use the VAD state to determine whether typing detection should be
  // enabled or not.
  if (_shared->audio_processing()->voice_detection()->Enable(enable)) {
    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
                          "SetTypingDetectionStatus() failed to set VAD state");
    return -1;
  }
  if (_shared->audio_processing()->voice_detection()->set_likelihood(
          VoiceDetection::kVeryLowLikelihood)) {
    _shared->SetLastError(
        VE_APM_ERROR, kTraceWarning,
        "SetTypingDetectionStatus() failed to set VAD likelihood to low");
    return -1;
  }

  return 0;
#endif
}
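
// As the comment above indicates, typing detection has no dedicated on/off
// switch of its own: enabling it amounts to enabling APM's voice detection
// at kVeryLowLikelihood, and GetTypingDetectionStatus() below simply reads
// that VAD state back.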

int VoEAudioProcessingImpl::GetTypingDetectionStatus(bool& enabled) {
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  // The VAD state reflects whether typing detection is enabled.
  enabled = _shared->audio_processing()->voice_detection()->is_enabled();

  return 0;
}

int VoEAudioProcessingImpl::TimeSinceLastTyping(int& seconds) {
#if !WEBRTC_VOICE_ENGINE_TYPING_DETECTION
  NOT_SUPPORTED(_shared->statistics());
#else
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  // Check if typing detection is enabled.
  bool enabled = _shared->audio_processing()->voice_detection()->is_enabled();
  if (enabled) {
    _shared->transmit_mixer()->TimeSinceLastTyping(seconds);
    return 0;
  } else {
    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
                          "TimeSinceLastTyping() typing detection is not "
                          "enabled");
    return -1;
  }
#endif
}
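
// Hypothetical polling sequence (|voe_apm| is assumed), valid only after
// typing detection has been enabled:
//
//   int seconds = 0;
//   if (voe_apm->TimeSinceLastTyping(seconds) == 0) {
//     // |seconds| holds the time since the last detected keystroke.
//   }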

int VoEAudioProcessingImpl::SetTypingDetectionParameters(int timeWindow,
                                                         int costPerTyping,
                                                         int reportingThreshold,
                                                         int penaltyDecay,
                                                         int typeEventDelay) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "SetTypingDetectionParameters()");
#if !WEBRTC_VOICE_ENGINE_TYPING_DETECTION
  NOT_SUPPORTED(_shared->statistics());
#else
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  return _shared->transmit_mixer()->SetTypingDetectionParameters(
      timeWindow, costPerTyping, reportingThreshold, penaltyDecay,
      typeEventDelay);
#endif
}

void VoEAudioProcessingImpl::EnableStereoChannelSwapping(bool enable) {
  _shared->transmit_mixer()->EnableStereoChannelSwapping(enable);
}

bool VoEAudioProcessingImpl::IsStereoChannelSwappingEnabled() {
  return _shared->transmit_mixer()->IsStereoChannelSwappingEnabled();
}

}  // namespace webrtc
@ -1,94 +0,0 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
#define WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H

#include "webrtc/voice_engine/include/voe_audio_processing.h"

#include "webrtc/voice_engine/shared_data.h"

namespace webrtc {

class VoEAudioProcessingImpl : public VoEAudioProcessing {
 public:
  int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) override;
  int GetNsStatus(bool& enabled, NsModes& mode) override;

  int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) override;
  int GetAgcStatus(bool& enabled, AgcModes& mode) override;

  int SetAgcConfig(AgcConfig config) override;
  int GetAgcConfig(AgcConfig& config) override;

  int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) override;
  int GetEcStatus(bool& enabled, EcModes& mode) override;

  int EnableDriftCompensation(bool enable) override;
  bool DriftCompensationEnabled() override;

  void SetDelayOffsetMs(int offset) override;
  int DelayOffsetMs() override;

  int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
                  bool enableCNG = true) override;
  int GetAecmMode(AecmModes& mode, bool& enabledCNG) override;

  int EnableHighPassFilter(bool enable) override;
  bool IsHighPassFilterEnabled() override;

  int VoiceActivityIndicator(int channel) override;

  int SetEcMetricsStatus(bool enable) override;
  int GetEcMetricsStatus(bool& enabled) override;

  int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) override;

  int GetEcDelayMetrics(int& delay_median,
                        int& delay_std,
                        float& fraction_poor_delays) override;

  int StartDebugRecording(const char* fileNameUTF8) override;
  int StartDebugRecording(FILE* file_handle) override;
  int StopDebugRecording() override;

  int SetTypingDetectionStatus(bool enable) override;
  int GetTypingDetectionStatus(bool& enabled) override;
  int TimeSinceLastTyping(int& seconds) override;

  // TODO(niklase): Remove the default argument as soon as libJingle is
  // updated!
  int SetTypingDetectionParameters(int timeWindow,
                                   int costPerTyping,
                                   int reportingThreshold,
                                   int penaltyDecay,
                                   int typeEventDelay = 0) override;

  void EnableStereoChannelSwapping(bool enable) override;
  bool IsStereoChannelSwappingEnabled() override;

 protected:
  VoEAudioProcessingImpl(voe::SharedData* shared);
  ~VoEAudioProcessingImpl() override;

 private:
  bool _isAecMode;
  voe::SharedData* _shared;
};

}  // namespace webrtc

#endif  // WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
@ -1,66 +0,0 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/include/voe_audio_processing.h"

#include "webrtc/test/gtest.h"
#include "webrtc/voice_engine/include/voe_base.h"

namespace webrtc {
namespace voe {
namespace {

class VoEAudioProcessingTest : public ::testing::Test {
 protected:
  VoEAudioProcessingTest()
      : voe_(VoiceEngine::Create()),
        base_(VoEBase::GetInterface(voe_)),
        audioproc_(VoEAudioProcessing::GetInterface(voe_)) {}

  virtual ~VoEAudioProcessingTest() {
    base_->Terminate();
    audioproc_->Release();
    base_->Release();
    VoiceEngine::Delete(voe_);
  }

  VoiceEngine* voe_;
  VoEBase* base_;
  VoEAudioProcessing* audioproc_;
};

TEST_F(VoEAudioProcessingTest, FailureIfNotInitialized) {
  EXPECT_EQ(-1, audioproc_->EnableDriftCompensation(true));
  EXPECT_EQ(-1, audioproc_->EnableDriftCompensation(false));
  EXPECT_FALSE(audioproc_->DriftCompensationEnabled());
}

// TODO(andrew): Investigate race conditions triggered by this test:
// https://code.google.com/p/webrtc/issues/detail?id=788
TEST_F(VoEAudioProcessingTest, DISABLED_DriftCompensationIsEnabledIfSupported) {
  ASSERT_EQ(0, base_->Init());
  // TODO(andrew): Ideally, DriftCompensationSupported() would be mocked.
  bool supported = VoEAudioProcessing::DriftCompensationSupported();
  if (supported) {
    EXPECT_EQ(0, audioproc_->EnableDriftCompensation(true));
    EXPECT_TRUE(audioproc_->DriftCompensationEnabled());
    EXPECT_EQ(0, audioproc_->EnableDriftCompensation(false));
    EXPECT_FALSE(audioproc_->DriftCompensationEnabled());
  } else {
    EXPECT_EQ(-1, audioproc_->EnableDriftCompensation(true));
    EXPECT_FALSE(audioproc_->DriftCompensationEnabled());
    EXPECT_EQ(-1, audioproc_->EnableDriftCompensation(false));
    EXPECT_FALSE(audioproc_->DriftCompensationEnabled());
  }
}

}  // namespace
}  // namespace voe
}  // namespace webrtc
@ -16,7 +16,6 @@
#include "webrtc/system_wrappers/include/atomic32.h"
#include "webrtc/typedefs.h"
#include "webrtc/voice_engine/voe_base_impl.h"
#include "webrtc/voice_engine/voe_audio_processing_impl.h"
#include "webrtc/voice_engine/voe_codec_impl.h"
#include "webrtc/voice_engine/voe_file_impl.h"
#include "webrtc/voice_engine/voe_hardware_impl.h"
@ -31,7 +30,6 @@ class ChannelProxy;

class VoiceEngineImpl : public voe::SharedData,  // Must be the first base class
                        public VoiceEngine,
                        public VoEAudioProcessingImpl,
                        public VoECodecImpl,
                        public VoEFileImpl,
                        public VoEHardwareImpl,
@ -42,7 +40,6 @@ class VoiceEngineImpl : public voe::SharedData, // Must be the first base class
 public:
  VoiceEngineImpl()
      : SharedData(),
        VoEAudioProcessingImpl(this),
        VoECodecImpl(this),
        VoEFileImpl(this),
        VoEHardwareImpl(this),
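
// VoiceEngineImpl aggregates every VoE sub-API by inheriting its *Impl class
// and passing itself (as voe::SharedData) to each base constructor. Removing
// an interface therefore touches three places in this file: the include, the
// base-class list, and the constructor initializer list, as the three hunks
// above show.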