Making the Analog AGC properly support multi-channel

This CL adds proper multi-channel support to the analog AGC.

Beyond that, it prepares for adding multi-channel support to the digital
AGC by removing the tight dependency between the analog and digital
AGC code.

Bug: webrtc:10859
Change-Id: I4414ccbc3db5dbb5ae069fdf426cbd038375ca7b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/159480
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Commit-Queue: Per Åhgren <peah@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29878}
Author: Per Åhgren, 2019-11-22 12:11:40 +01:00, committed by Commit Bot
Parent: 5b82ba37cc
Commit: 3daedb6c88
9 changed files with 426 additions and 250 deletions
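To see the decoupling at a glance: the analog AGC no longer writes the digital compression gain into GainControl itself; instead the caller polls the recommended gain after Process() and applies it. The following is a minimal sketch of that caller-side pattern, assuming agc_manager (an AgcManagerDirect*), gain_control (a GainControl*) and capture_buffer (an AudioBuffer*) are already set up; the names follow the diff below, the surrounding setup is not part of this CL.

  // Sketch of the decoupled analog/digital interaction introduced by this CL.
  agc_manager->AnalyzePreProcess(capture_buffer);  // Clipping handling on the pre-processing side.
  agc_manager->Process(capture_buffer);            // Per-channel analog AGC update.
  absl::optional<int> new_digital_gain = agc_manager->GetDigitalComressionGain();
  if (new_digital_gain) {
    gain_control->set_compression_gain_db(*new_digital_gain);
  }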


@@ -25,12 +25,14 @@ rtc_library("agc") {
     ":gain_map",
     ":level_estimation",
     "..:apm_logging",
+    "..:audio_buffer",
     "../../../common_audio",
     "../../../common_audio:common_audio_c",
     "../../../rtc_base:checks",
     "../../../rtc_base:gtest_prod",
     "../../../rtc_base:logging",
     "../../../rtc_base:macromagic",
+    "../../../rtc_base:rtc_base_approved",
     "../../../rtc_base:safe_minmax",
     "../../../system_wrappers:field_trial",
     "../../../system_wrappers:metrics",


@@ -13,14 +13,11 @@
 #include <algorithm>
 #include <cmath>
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-#include <cstdio>
-#endif
 
 #include "common_audio/include/audio_util.h"
 #include "modules/audio_processing/agc/gain_control.h"
 #include "modules/audio_processing/agc/gain_map_internal.h"
 #include "modules/audio_processing/agc2/adaptive_mode_level_estimator_agc.h"
+#include "rtc_base/atomic_ops.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/numerics/safe_minmax.h"
@@ -29,8 +26,6 @@
 namespace webrtc {
 
-int AgcManagerDirect::instance_counter_ = 0;
-
 namespace {
 
 // Amount the microphone level is lowered with every clipping event.
@@ -61,10 +56,6 @@ const int kMaxResidualGainChange = 15;
 // restrictions from clipping events.
 const int kSurplusCompressionGain = 6;
 
-// Maximum number of channels and number of samples per channel supported.
-constexpr size_t kMaxNumSamplesPerChannel = 1920;
-constexpr size_t kMaxNumChannels = 4;
-
 // Returns kMinMicLevel if no field trial exists or if it has been disabled.
 // Returns a value between 0 and 255 depending on the field-trial string.
 // Example: 'WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-80' => returns 80.
@@ -138,45 +129,31 @@ float ComputeClippedRatio(const float* const* audio,
 }  // namespace
 
-AgcManagerDirect::AgcManagerDirect(Agc* agc,
-                                   int startup_min_level,
-                                   int clipped_level_min)
-    : AgcManagerDirect(startup_min_level, clipped_level_min, false, false) {
-  RTC_DCHECK(agc_);
-  agc_.reset(agc);
-}
-
-AgcManagerDirect::AgcManagerDirect(int startup_min_level,
-                                   int clipped_level_min,
-                                   bool use_agc2_level_estimation,
-                                   bool disable_digital_adaptive)
-    : data_dumper_(new ApmDataDumper(instance_counter_)),
-      frames_since_clipped_(kClippedWaitFrames),
-      level_(0),
+MonoAgc::MonoAgc(ApmDataDumper* data_dumper,
+                 int startup_min_level,
+                 int clipped_level_min,
+                 bool use_agc2_level_estimation,
+                 bool disable_digital_adaptive,
+                 int min_mic_level)
+    : min_mic_level_(min_mic_level),
+      disable_digital_adaptive_(disable_digital_adaptive),
       max_level_(kMaxMicLevel),
       max_compression_gain_(kMaxCompressionGain),
       target_compression_(kDefaultCompressionGain),
      compression_(target_compression_),
       compression_accumulator_(compression_),
-      capture_muted_(false),
-      check_volume_on_next_process_(true),  // Check at startup.
-      startup_(true),
-      min_mic_level_(GetMinMicLevel()),
-      disable_digital_adaptive_(disable_digital_adaptive),
       startup_min_level_(ClampLevel(startup_min_level, min_mic_level_)),
       clipped_level_min_(clipped_level_min) {
-  instance_counter_++;
   if (use_agc2_level_estimation) {
-    agc_ = std::make_unique<AdaptiveModeLevelEstimatorAgc>(data_dumper_.get());
+    agc_ = std::make_unique<AdaptiveModeLevelEstimatorAgc>(data_dumper);
   } else {
     agc_ = std::make_unique<Agc>();
   }
 }
 
-AgcManagerDirect::~AgcManagerDirect() {}
+MonoAgc::~MonoAgc() = default;
 
-void AgcManagerDirect::Initialize() {
-  RTC_DLOG(LS_INFO) << "AgcManagerDirect::Initialize";
+void MonoAgc::Initialize() {
   max_level_ = kMaxMicLevel;
   max_compression_gain_ = kMaxCompressionGain;
   target_compression_ = disable_digital_adaptive_ ? 0 : kDefaultCompressionGain;
@@ -184,94 +161,12 @@ void AgcManagerDirect::Initialize() {
   compression_accumulator_ = compression_;
   capture_muted_ = false;
   check_volume_on_next_process_ = true;
-  // TODO(bjornv): Investigate if we need to reset |startup_| as well. For
-  // example, what happens when we change devices.
-
-  data_dumper_->InitiateNewSetOfRecordings();
 }
 
-void AgcManagerDirect::ConfigureGainControl(GainControl* gain_control) const {
-  if (gain_control->set_mode(GainControl::kFixedDigital) != 0) {
-    RTC_LOG(LS_ERROR) << "set_mode(GainControl::kFixedDigital) failed.";
-  }
-  const int target_level_dbfs = disable_digital_adaptive_ ? 0 : 2;
-  if (gain_control->set_target_level_dbfs(target_level_dbfs) != 0) {
-    RTC_LOG(LS_ERROR) << "set_target_level_dbfs() failed.";
-  }
-  const int compression_gain_db =
-      disable_digital_adaptive_ ? 0 : kDefaultCompressionGain;
-  if (gain_control->set_compression_gain_db(compression_gain_db) != 0) {
-    RTC_LOG(LS_ERROR) << "set_compression_gain_db() failed.";
-  }
-  const bool enable_limiter = !disable_digital_adaptive_;
-  if (gain_control->enable_limiter(enable_limiter) != 0) {
-    RTC_LOG(LS_ERROR) << "enable_limiter() failed.";
-  }
-}
-
-void AgcManagerDirect::AnalyzePreProcess(const float* const* audio,
-                                         int num_channels,
-                                         size_t samples_per_channel) {
-  RTC_DCHECK(audio);
-  if (capture_muted_) {
-    return;
-  }
-
-  if (frames_since_clipped_ < kClippedWaitFrames) {
-    ++frames_since_clipped_;
-    return;
-  }
-
-  // Check for clipped samples, as the AGC has difficulty detecting pitch
-  // under clipping distortion. We do this in the preprocessing phase in order
-  // to catch clipped echo as well.
-  //
-  // If we find a sufficiently clipped frame, drop the current microphone level
-  // and enforce a new maximum level, dropped the same amount from the current
-  // maximum. This harsh treatment is an effort to avoid repeated clipped echo
-  // events. As compensation for this restriction, the maximum compression
-  // gain is increased, through SetMaxLevel().
-  float clipped_ratio =
-      ComputeClippedRatio(audio, num_channels, samples_per_channel);
-
-  if (clipped_ratio > kClippedRatioThreshold) {
-    RTC_DLOG(LS_INFO) << "[agc] Clipping detected. clipped_ratio="
-                      << clipped_ratio;
-    // Always decrease the maximum level, even if the current level is below
-    // threshold.
-    SetMaxLevel(std::max(clipped_level_min_, max_level_ - kClippedLevelStep));
-    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.AgcClippingAdjustmentAllowed",
-                          level_ - kClippedLevelStep >= clipped_level_min_);
-    if (level_ > clipped_level_min_) {
-      // Don't try to adjust the level if we're already below the limit. As
-      // a consequence, if the user has brought the level above the limit, we
-      // will still not react until the postproc updates the level.
-      SetLevel(std::max(clipped_level_min_, level_ - kClippedLevelStep));
-      // Reset the AGC since the level has changed.
-      agc_->Reset();
-    }
-    frames_since_clipped_ = 0;
-  }
-}
-
-void AgcManagerDirect::Process(const float* audio,
-                               size_t length,
-                               int sample_rate_hz,
-                               GainControl* gain_control) {
-  if (capture_muted_) {
-    return;
-  }
-
-  std::array<int16_t, kMaxNumSamplesPerChannel * kMaxNumChannels> audio_data;
-  const int16_t* audio_fix;
-  size_t safe_length;
-  if (audio) {
-    audio_fix = audio_data.data();
-    safe_length = std::min(audio_data.size(), length);
-    FloatS16ToS16(audio, length, audio_data.data());
-  } else {
-    audio_fix = nullptr;
-    safe_length = length;
-  }
+void MonoAgc::Process(const int16_t* audio,
+                      size_t samples_per_channel,
+                      int sample_rate_hz) {
+  new_compression_to_set_ = absl::nullopt;
 
   if (check_volume_on_next_process_) {
     check_volume_on_next_process_ = false;
@@ -280,25 +175,33 @@ void AgcManagerDirect::Process(const float* audio,
     CheckVolumeAndReset();
   }
 
-  agc_->Process(audio_fix, safe_length, sample_rate_hz);
+  agc_->Process(audio, samples_per_channel, sample_rate_hz);
 
   UpdateGain();
   if (!disable_digital_adaptive_) {
     UpdateCompressor();
   }
-
-  if (new_compression_to_set_) {
-    if (gain_control->set_compression_gain_db(*new_compression_to_set_) != 0) {
-      RTC_LOG(LS_ERROR) << "set_compression_gain_db(" << compression_
-                        << ") failed.";
-    }
-  }
-  new_compression_to_set_ = absl::nullopt;
-
-  data_dumper_->DumpRaw("experimental_gain_control_compression_gain_db", 1,
-                        &compression_);
 }
 
-void AgcManagerDirect::SetLevel(int new_level) {
+void MonoAgc::HandleClipping() {
+  // Always decrease the maximum level, even if the current level is below
+  // threshold.
+  SetMaxLevel(std::max(clipped_level_min_, max_level_ - kClippedLevelStep));
+  if (log_to_histograms_) {
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.AgcClippingAdjustmentAllowed",
+                          level_ - kClippedLevelStep >= clipped_level_min_);
+  }
+  if (level_ > clipped_level_min_) {
+    // Don't try to adjust the level if we're already below the limit. As
+    // a consequence, if the user has brought the level above the limit, we
+    // will still not react until the postproc updates the level.
+    SetLevel(std::max(clipped_level_min_, level_ - kClippedLevelStep));
+    // Reset the AGCs for all channels since the level has changed.
+    agc_->Reset();
+  }
+}
+
+void MonoAgc::SetLevel(int new_level) {
   int voe_level = stream_analog_level_;
   if (voe_level == 0) {
     RTC_DLOG(LS_INFO)
@@ -325,6 +228,7 @@ void AgcManagerDirect::SetLevel(int new_level) {
     // was manually adjusted. The compressor will still provide some of the
     // desired gain change.
     agc_->Reset();
     return;
   }
@@ -340,7 +244,7 @@ void AgcManagerDirect::SetLevel(int new_level) {
   level_ = new_level;
 }
 
-void AgcManagerDirect::SetMaxLevel(int level) {
+void MonoAgc::SetMaxLevel(int level) {
   RTC_DCHECK_GE(level, clipped_level_min_);
   max_level_ = level;
   // Scale the |kSurplusCompressionGain| linearly across the restricted
@@ -354,7 +258,7 @@ void AgcManagerDirect::SetMaxLevel(int level) {
                     << ", max_compression_gain_=" << max_compression_gain_;
 }
 
-void AgcManagerDirect::SetCaptureMuted(bool muted) {
+void MonoAgc::SetCaptureMuted(bool muted) {
   if (capture_muted_ == muted) {
     return;
   }
@@ -366,11 +270,7 @@ void AgcManagerDirect::SetCaptureMuted(bool muted) {
   }
 }
 
-float AgcManagerDirect::voice_probability() {
-  return agc_->voice_probability();
-}
-
-int AgcManagerDirect::CheckVolumeAndReset() {
+int MonoAgc::CheckVolumeAndReset() {
   int level = stream_analog_level_;
   // Reasons for taking action at startup:
   // 1) A person starting a call is expected to be heard.
@@ -407,7 +307,7 @@ int AgcManagerDirect::CheckVolumeAndReset() {
 //
 // If the slider needs to be moved, we check first if the user has adjusted
 // it, in which case we take no action and cache the updated level.
-void AgcManagerDirect::UpdateGain() {
+void MonoAgc::UpdateGain() {
   int rms_error = 0;
   if (!agc_->GetRmsErrorDb(&rms_error)) {
     // No error update ready.
@@ -460,7 +360,7 @@ void AgcManagerDirect::UpdateGain() {
   }
 }
 
-void AgcManagerDirect::UpdateCompressor() {
+void MonoAgc::UpdateCompressor() {
   calls_since_last_gain_log_++;
   if (calls_since_last_gain_log_ == 100) {
     calls_since_last_gain_log_ = 0;
@@ -501,4 +401,191 @@ void AgcManagerDirect::UpdateCompressor() {
   }
 }
 
+int AgcManagerDirect::instance_counter_ = 0;
+
+AgcManagerDirect::AgcManagerDirect(Agc* agc,
+                                   int startup_min_level,
+                                   int clipped_level_min,
+                                   int sample_rate_hz)
+    : AgcManagerDirect(/*num_capture_channels*/ 1,
+                       startup_min_level,
+                       clipped_level_min,
+                       /*use_agc2_level_estimation*/ false,
+                       /*disable_digital_adaptive*/ false,
+                       sample_rate_hz) {
+  RTC_DCHECK(channel_agcs_[0]);
+  RTC_DCHECK(agc);
+  channel_agcs_[0]->set_agc(agc);
+}
+
+AgcManagerDirect::AgcManagerDirect(int num_capture_channels,
+                                   int startup_min_level,
+                                   int clipped_level_min,
+                                   bool use_agc2_level_estimation,
+                                   bool disable_digital_adaptive,
+                                   int sample_rate_hz)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_counter_))),
+      sample_rate_hz_(sample_rate_hz),
+      num_capture_channels_(num_capture_channels),
+      disable_digital_adaptive_(disable_digital_adaptive),
+      frames_since_clipped_(kClippedWaitFrames),
+      capture_muted_(false),
+      channel_agcs_(num_capture_channels),
+      new_compressions_to_set_(num_capture_channels) {
+  const int min_mic_level = GetMinMicLevel();
+  for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+    ApmDataDumper* data_dumper_ch = ch == 0 ? data_dumper_.get() : nullptr;
+
+    channel_agcs_[ch] = std::make_unique<MonoAgc>(
+        data_dumper_ch, startup_min_level, clipped_level_min,
+        use_agc2_level_estimation, disable_digital_adaptive_, min_mic_level);
+  }
+  RTC_DCHECK_LT(0, channel_agcs_.size());
+  channel_agcs_[0]->ActivateLogging();
+}
+
+AgcManagerDirect::~AgcManagerDirect() {}
+
+void AgcManagerDirect::Initialize() {
+  RTC_DLOG(LS_INFO) << "AgcManagerDirect::Initialize";
+  data_dumper_->InitiateNewSetOfRecordings();
+  for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+    channel_agcs_[ch]->Initialize();
+  }
+  capture_muted_ = false;
+
+  AggregateChannelLevels();
+}
+
+void AgcManagerDirect::SetupDigitalGainControl(
+    GainControl* gain_control) const {
+  RTC_DCHECK(gain_control);
+  if (gain_control->set_mode(GainControl::kFixedDigital) != 0) {
+    RTC_LOG(LS_ERROR) << "set_mode(GainControl::kFixedDigital) failed.";
+  }
+  const int target_level_dbfs = disable_digital_adaptive_ ? 0 : 2;
+  if (gain_control->set_target_level_dbfs(target_level_dbfs) != 0) {
+    RTC_LOG(LS_ERROR) << "set_target_level_dbfs() failed.";
+  }
+  const int compression_gain_db =
+      disable_digital_adaptive_ ? 0 : kDefaultCompressionGain;
+  if (gain_control->set_compression_gain_db(compression_gain_db) != 0) {
+    RTC_LOG(LS_ERROR) << "set_compression_gain_db() failed.";
+  }
+  const bool enable_limiter = !disable_digital_adaptive_;
+  if (gain_control->enable_limiter(enable_limiter) != 0) {
+    RTC_LOG(LS_ERROR) << "enable_limiter() failed.";
+  }
+}
+
+void AgcManagerDirect::AnalyzePreProcess(const AudioBuffer* audio) {
+  RTC_DCHECK(audio);
+  AnalyzePreProcess(audio->channels_const(), audio->num_frames());
+}
+
+void AgcManagerDirect::AnalyzePreProcess(const float* const* audio,
+                                         size_t samples_per_channel) {
+  RTC_DCHECK(audio);
+  AggregateChannelLevels();
+  if (capture_muted_) {
+    return;
+  }
+
+  if (frames_since_clipped_ < kClippedWaitFrames) {
+    ++frames_since_clipped_;
+    return;
+  }
+
+  // Check for clipped samples, as the AGC has difficulty detecting pitch
+  // under clipping distortion. We do this in the preprocessing phase in order
+  // to catch clipped echo as well.
+  //
+  // If we find a sufficiently clipped frame, drop the current microphone level
+  // and enforce a new maximum level, dropped the same amount from the current
+  // maximum. This harsh treatment is an effort to avoid repeated clipped echo
+  // events. As compensation for this restriction, the maximum compression
+  // gain is increased, through SetMaxLevel().
+  float clipped_ratio =
+      ComputeClippedRatio(audio, num_capture_channels_, samples_per_channel);
+
+  if (clipped_ratio > kClippedRatioThreshold) {
+    RTC_DLOG(LS_INFO) << "[agc] Clipping detected. clipped_ratio="
+                      << clipped_ratio;
+    for (auto& state_ch : channel_agcs_) {
+      state_ch->HandleClipping();
+    }
+    frames_since_clipped_ = 0;
+  }
+  AggregateChannelLevels();
+}
+
+void AgcManagerDirect::Process(const AudioBuffer* audio) {
+  AggregateChannelLevels();
+
+  if (capture_muted_) {
+    return;
+  }
+
+  for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+    int16_t* audio_use = nullptr;
+    std::array<int16_t, AudioBuffer::kMaxSampleRate / 100> audio_data;
+    int num_frames_per_band;
+    if (audio) {
+      FloatS16ToS16(audio->split_bands_const_f(ch)[0],
+                    audio->num_frames_per_band(), audio_data.data());
+      audio_use = audio_data.data();
+      num_frames_per_band = audio->num_frames_per_band();
+    } else {
+      // Only used for testing.
+      // TODO(peah): Change unittests to only allow on non-null audio input.
+      num_frames_per_band = 320;
+    }
+    channel_agcs_[ch]->Process(audio_use, num_frames_per_band, sample_rate_hz_);
+    new_compressions_to_set_[ch] = channel_agcs_[ch]->new_compression();
+  }
+
+  AggregateChannelLevels();
+}
+
+absl::optional<int> AgcManagerDirect::GetDigitalComressionGain() {
+  return new_compressions_to_set_[channel_controlling_gain_];
+}
+
+void AgcManagerDirect::SetCaptureMuted(bool muted) {
+  for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+    channel_agcs_[ch]->SetCaptureMuted(muted);
+  }
+  capture_muted_ = muted;
+}
+
+float AgcManagerDirect::voice_probability() const {
+  float max_prob = 0.f;
+  for (const auto& state_ch : channel_agcs_) {
+    max_prob = std::max(max_prob, state_ch->voice_probability());
+  }
+
+  return max_prob;
+}
+
+void AgcManagerDirect::set_stream_analog_level(int level) {
+  for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) {
+    channel_agcs_[ch]->set_stream_analog_level(level);
+  }
+
+  AggregateChannelLevels();
+}
+
+void AgcManagerDirect::AggregateChannelLevels() {
+  stream_analog_level_ = channel_agcs_[0]->stream_analog_level();
+  channel_controlling_gain_ = 0;
+  for (size_t ch = 1; ch < channel_agcs_.size(); ++ch) {
+    int level = channel_agcs_[ch]->stream_analog_level();
+    if (level < stream_analog_level_) {
+      stream_analog_level_ = level;
+      channel_controlling_gain_ = static_cast<int>(ch);
+    }
+  }
+}
+
 }  // namespace webrtc
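For orientation, a hypothetical construction/usage sketch of the refactored multi-channel interface. The parameter values, mic_level and capture_buffer are illustrative assumptions, not taken from this CL; the method names and signatures follow the diff above.

  // Illustrative only: exercising the new multi-channel AgcManagerDirect.
  constexpr int kSampleRateHz = 48000;  // Assumed rate for this sketch.
  AgcManagerDirect manager(/*num_capture_channels=*/2,
                           /*startup_min_level=*/85, /*clipped_level_min=*/70,
                           /*use_agc2_level_estimation=*/false,
                           /*disable_digital_adaptive=*/false, kSampleRateHz);
  manager.Initialize();
  manager.set_stream_analog_level(mic_level);  // Level reported by the device.
  manager.AnalyzePreProcess(capture_buffer);   // All channels checked for clipping.
  manager.Process(capture_buffer);             // One MonoAgc per channel.
  int new_mic_level = manager.stream_analog_level();  // Minimum across channels.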


@@ -15,12 +15,13 @@
 #include "absl/types/optional.h"
 #include "modules/audio_processing/agc/agc.h"
+#include "modules/audio_processing/audio_buffer.h"
 #include "modules/audio_processing/logging/apm_data_dumper.h"
-#include "rtc_base/constructor_magic.h"
 #include "rtc_base/gtest_prod_util.h"
 
 namespace webrtc {
 
+class MonoAgc;
 class AudioFrame;
 class GainControl;
@@ -35,34 +36,36 @@ class AgcManagerDirect final {
   // responsible for processing the audio using it after the call to Process.
   // The operating range of startup_min_level is [12, 255] and any input value
   // outside that range will be clamped.
-  AgcManagerDirect(int startup_min_level,
+  AgcManagerDirect(int num_capture_channels,
+                   int startup_min_level,
                    int clipped_level_min,
                    bool use_agc2_level_estimation,
-                   bool disable_digital_adaptive);
+                   bool disable_digital_adaptive,
+                   int sample_rate_hz);
   ~AgcManagerDirect();
+  AgcManagerDirect(const AgcManagerDirect&) = delete;
+  AgcManagerDirect& operator=(const AgcManagerDirect&) = delete;
 
   void Initialize();
-  void ConfigureGainControl(GainControl* gain_control) const;
-  void AnalyzePreProcess(const float* const* audio,
-                         int num_channels,
-                         size_t samples_per_channel);
-  void Process(const float* audio,
-               size_t length,
-               int sample_rate_hz,
-               GainControl* gain_control);
+  void SetupDigitalGainControl(GainControl* gain_control) const;
+  void AnalyzePreProcess(const AudioBuffer* audio);
+  void Process(const AudioBuffer* audio);
 
   // Call when the capture stream has been muted/unmuted. This causes the
   // manager to disregard all incoming audio; chances are good it's background
   // noise to which we'd like to avoid adapting.
   void SetCaptureMuted(bool muted);
-  bool capture_muted() { return capture_muted_; }
 
-  float voice_probability();
+  float voice_probability() const;
   int stream_analog_level() const { return stream_analog_level_; }
-  void set_stream_analog_level(int level) { stream_analog_level_ = level; }
+  void set_stream_analog_level(int level);
+  int num_channels() const { return num_capture_channels_; }
+  int sample_rate_hz() const { return sample_rate_hz_; }
+
+  // If available, returns a new compression gain for the digital gain control.
+  absl::optional<int> GetDigitalComressionGain();
 
  private:
   friend class AgcManagerDirectTest;
@@ -76,11 +79,64 @@
   // by the manager.
   AgcManagerDirect(Agc* agc,
                    int startup_min_level,
-                   int clipped_level_min);
+                   int clipped_level_min,
+                   int sample_rate_hz);
+
+  void AnalyzePreProcess(const float* const* audio, size_t samples_per_channel);
+
+  void AggregateChannelLevels();
+
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  static int instance_counter_;
+
+  const int sample_rate_hz_;
+  const int num_capture_channels_;
+  const bool disable_digital_adaptive_;
+
+  int frames_since_clipped_;
+  int stream_analog_level_ = 0;
+  bool capture_muted_;
+  int channel_controlling_gain_ = 0;
+
+  std::vector<std::unique_ptr<MonoAgc>> channel_agcs_;
+  std::vector<absl::optional<int>> new_compressions_to_set_;
+};
+
+class MonoAgc {
+ public:
+  MonoAgc(ApmDataDumper* data_dumper,
+          int startup_min_level,
+          int clipped_level_min,
+          bool use_agc2_level_estimation,
+          bool disable_digital_adaptive,
+          int min_mic_level);
+  ~MonoAgc();
+  MonoAgc(const MonoAgc&) = delete;
+  MonoAgc& operator=(const MonoAgc&) = delete;
+
+  void Initialize();
+  void SetCaptureMuted(bool muted);
+
+  void HandleClipping();
+
+  void Process(const int16_t* audio,
+               size_t samples_per_channel,
+               int sample_rate_hz);
+
+  void set_stream_analog_level(int level) { stream_analog_level_ = level; }
+  int stream_analog_level() const { return stream_analog_level_; }
+  float voice_probability() const { return agc_->voice_probability(); }
+  void ActivateLogging() { log_to_histograms_ = true; }
+  absl::optional<int> new_compression() const {
+    return new_compression_to_set_;
+  }
+
+  // Only used for testing.
+  void set_agc(Agc* agc) { agc_.reset(agc); }
+
   int min_mic_level() const { return min_mic_level_; }
   int startup_min_level() const { return startup_min_level_; }
 
+ private:
   // Sets a new microphone level, after first checking that it hasn't been
   // updated by the user, in which case no action is taken.
   void SetLevel(int new_level);
@@ -94,30 +150,24 @@
   void UpdateGain();
   void UpdateCompressor();
 
-  std::unique_ptr<ApmDataDumper> data_dumper_;
-  static int instance_counter_;
+  const int min_mic_level_;
+  const bool disable_digital_adaptive_;
   std::unique_ptr<Agc> agc_;
-
-  int frames_since_clipped_;
-  int level_;
+  int level_ = 0;
   int max_level_;
   int max_compression_gain_;
   int target_compression_;
   int compression_;
   float compression_accumulator_;
-  bool capture_muted_;
-  bool check_volume_on_next_process_;
-  bool startup_;
-  const int min_mic_level_;
-  const bool disable_digital_adaptive_;
+  bool capture_muted_ = false;
+  bool check_volume_on_next_process_ = true;
+  bool startup_ = true;
   int startup_min_level_;
-  const int clipped_level_min_;
   int calls_since_last_gain_log_ = 0;
   int stream_analog_level_ = 0;
   absl::optional<int> new_compression_to_set_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(AgcManagerDirect);
+  bool log_to_histograms_ = false;
+  const int clipped_level_min_;
 };
} // namespace webrtc } // namespace webrtc


@@ -61,12 +61,12 @@ class AgcManagerDirectTest : public ::testing::Test {
  protected:
   AgcManagerDirectTest()
       : agc_(new MockAgc),
-        manager_(agc_, kInitialVolume, kClippedMin),
+        manager_(agc_, kInitialVolume, kClippedMin, kSampleRateHz),
         audio(kNumChannels),
         audio_data(kNumChannels * kSamplesPerChannel, 0.f) {
     ExpectInitialize();
     manager_.Initialize();
-    manager_.ConfigureGainControl(&gctrl_);
+    manager_.SetupDigitalGainControl(&gctrl_);
     for (size_t ch = 0; ch < kNumChannels; ++ch) {
       audio[ch] = &audio_data[ch * kSamplesPerChannel];
     }
@@ -98,7 +98,12 @@ class AgcManagerDirectTest : public ::testing::Test {
   void CallProcess(int num_calls) {
     for (int i = 0; i < num_calls; ++i) {
       EXPECT_CALL(*agc_, Process(_, _, _)).WillOnce(Return());
-      manager_.Process(nullptr, kSamplesPerChannel, kSampleRateHz, &gctrl_);
+      manager_.Process(nullptr);
+      absl::optional<int> new_digital_gain =
+          manager_.GetDigitalComressionGain();
+      if (new_digital_gain) {
+        gctrl_.set_compression_gain_db(*new_digital_gain);
+      }
     }
   }
@@ -113,8 +118,7 @@
     }
 
     for (int i = 0; i < num_calls; ++i) {
-      manager_.AnalyzePreProcess(audio.data(), kNumChannels,
-                                 kSamplesPerChannel);
+      manager_.AnalyzePreProcess(audio.data(), kSamplesPerChannel);
     }
   }
@@ -364,7 +368,11 @@ TEST_F(AgcManagerDirectTest, CompressorReachesMinimum) {
 TEST_F(AgcManagerDirectTest, NoActionWhileMuted) {
   manager_.SetCaptureMuted(true);
-  manager_.Process(nullptr, kSamplesPerChannel, kSampleRateHz, &gctrl_);
+  manager_.Process(nullptr);
+  absl::optional<int> new_digital_gain = manager_.GetDigitalComressionGain();
+  if (new_digital_gain) {
+    gctrl_.set_compression_gain_db(*new_digital_gain);
+  }
 }
 
 TEST_F(AgcManagerDirectTest, UnmutingChecksVolumeWithoutRaising) {
@@ -683,9 +691,10 @@ TEST_F(AgcManagerDirectTest, TakesNoActionOnZeroMicVolume) {
 TEST(AgcManagerDirectStandaloneTest, DisableDigitalDisablesDigital) {
   auto agc = std::unique_ptr<Agc>(new ::testing::NiceMock<MockAgc>());
   MockGainControl gctrl;
-  AgcManagerDirect manager(kInitialVolume, kClippedMin,
+  AgcManagerDirect manager(/* num_capture_channels */ 1, kInitialVolume,
+                           kClippedMin,
                            /* use agc2 level estimation */ false,
-                           /* disable digital adaptive */ true);
+                           /* disable digital adaptive */ true, kSampleRateHz);
 
   EXPECT_CALL(gctrl, set_mode(GainControl::kFixedDigital));
   EXPECT_CALL(gctrl, set_target_level_dbfs(0));
@@ -693,38 +702,42 @@ TEST(AgcManagerDirectStandaloneTest, DisableDigitalDisablesDigital) {
   EXPECT_CALL(gctrl, enable_limiter(false));
 
   manager.Initialize();
-  manager.ConfigureGainControl(&gctrl);
+  manager.SetupDigitalGainControl(&gctrl);
 }
 
 TEST(AgcManagerDirectStandaloneTest, AgcMinMicLevelExperiment) {
-  auto agc_man = std::unique_ptr<AgcManagerDirect>(
-      new AgcManagerDirect(kInitialVolume, kClippedMin, true, true));
-  EXPECT_EQ(agc_man->min_mic_level(), kMinMicLevel);
-  EXPECT_EQ(agc_man->startup_min_level(), kInitialVolume);
+  auto agc_man = std::unique_ptr<AgcManagerDirect>(new AgcManagerDirect(
+      /* num_capture_channels */ 1, kInitialVolume, kClippedMin, true, true,
+      kSampleRateHz));
+  EXPECT_EQ(agc_man->channel_agcs_[0]->min_mic_level(), kMinMicLevel);
+  EXPECT_EQ(agc_man->channel_agcs_[0]->startup_min_level(), kInitialVolume);
   {
     test::ScopedFieldTrials field_trial(
         "WebRTC-Audio-AgcMinMicLevelExperiment/Disabled/");
-    agc_man.reset(
-        new AgcManagerDirect(kInitialVolume, kClippedMin, true, true));
-    EXPECT_EQ(agc_man->min_mic_level(), kMinMicLevel);
-    EXPECT_EQ(agc_man->startup_min_level(), kInitialVolume);
+    agc_man.reset(new AgcManagerDirect(
+        /* num_capture_channels */ 1, kInitialVolume, kClippedMin, true, true,
+        kSampleRateHz));
+    EXPECT_EQ(agc_man->channel_agcs_[0]->min_mic_level(), kMinMicLevel);
+    EXPECT_EQ(agc_man->channel_agcs_[0]->startup_min_level(), kInitialVolume);
   }
   {
     // Valid range of field-trial parameter is [0,255].
     test::ScopedFieldTrials field_trial(
         "WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-256/");
-    agc_man.reset(
-        new AgcManagerDirect(kInitialVolume, kClippedMin, true, true));
-    EXPECT_EQ(agc_man->min_mic_level(), kMinMicLevel);
-    EXPECT_EQ(agc_man->startup_min_level(), kInitialVolume);
+    agc_man.reset(new AgcManagerDirect(
+        /* num_capture_channels */ 1, kInitialVolume, kClippedMin, true, true,
+        kSampleRateHz));
+    EXPECT_EQ(agc_man->channel_agcs_[0]->min_mic_level(), kMinMicLevel);
+    EXPECT_EQ(agc_man->channel_agcs_[0]->startup_min_level(), kInitialVolume);
   }
   {
     test::ScopedFieldTrials field_trial(
         "WebRTC-Audio-AgcMinMicLevelExperiment/Enabled--1/");
-    agc_man.reset(
-        new AgcManagerDirect(kInitialVolume, kClippedMin, true, true));
-    EXPECT_EQ(agc_man->min_mic_level(), kMinMicLevel);
-    EXPECT_EQ(agc_man->startup_min_level(), kInitialVolume);
+    agc_man.reset(new AgcManagerDirect(
+        /* num_capture_channels */ 1, kInitialVolume, kClippedMin, true, true,
+        kSampleRateHz));
+    EXPECT_EQ(agc_man->channel_agcs_[0]->min_mic_level(), kMinMicLevel);
+    EXPECT_EQ(agc_man->channel_agcs_[0]->startup_min_level(), kInitialVolume);
   }
   {
     // Verify that a valid experiment changes the minimum microphone level.
@@ -732,10 +745,11 @@
     // be changed.
     test::ScopedFieldTrials field_trial(
         "WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-50/");
-    agc_man.reset(
-        new AgcManagerDirect(kInitialVolume, kClippedMin, true, true));
-    EXPECT_EQ(agc_man->min_mic_level(), 50);
-    EXPECT_EQ(agc_man->startup_min_level(), kInitialVolume);
+    agc_man.reset(new AgcManagerDirect(
+        /* num_capture_channels */ 1, kInitialVolume, kClippedMin, true, true,
+        kSampleRateHz));
+    EXPECT_EQ(agc_man->channel_agcs_[0]->min_mic_level(), 50);
+    EXPECT_EQ(agc_man->channel_agcs_[0]->startup_min_level(), kInitialVolume);
   }
   {
     // Use experiment to reduce the default minimum microphone level, start at
@@ -743,9 +757,10 @@
     // level set by the experiment.
     test::ScopedFieldTrials field_trial(
         "WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-50/");
-    agc_man.reset(new AgcManagerDirect(30, kClippedMin, true, true));
-    EXPECT_EQ(agc_man->min_mic_level(), 50);
-    EXPECT_EQ(agc_man->startup_min_level(), 50);
+    agc_man.reset(new AgcManagerDirect(/* num_capture_channels */ 1, 30,
+                                       kClippedMin, true, true, kSampleRateHz));
+    EXPECT_EQ(agc_man->channel_agcs_[0]->min_mic_level(), 50);
+    EXPECT_EQ(agc_man->channel_agcs_[0]->startup_min_level(), 50);
   }
 }


@@ -100,10 +100,12 @@ void AdaptiveModeLevelEstimator::Reset() {
 }
 
 void AdaptiveModeLevelEstimator::DebugDumpEstimate() {
-  apm_data_dumper_->DumpRaw("agc2_adaptive_level_estimate_with_offset_dbfs",
-                            last_estimate_with_offset_dbfs_);
-  apm_data_dumper_->DumpRaw("agc2_adaptive_level_estimate_dbfs",
-                            LatestLevelEstimate());
+  if (apm_data_dumper_) {
+    apm_data_dumper_->DumpRaw("agc2_adaptive_level_estimate_with_offset_dbfs",
+                              last_estimate_with_offset_dbfs_);
+    apm_data_dumper_->DumpRaw("agc2_adaptive_level_estimate_dbfs",
+                              LatestLevelEstimate());
+  }
   saturation_protector_.DebugDumpEstimate();
 }
 
 }  // namespace webrtc


@@ -93,10 +93,13 @@ void SaturationProtector::Reset() {
 }
 
 void SaturationProtector::DebugDumpEstimate() const {
-  apm_data_dumper_->DumpRaw(
-      "agc2_adaptive_saturation_protector_delayed_peak_dbfs",
-      peak_enveloper_.Query());
-  apm_data_dumper_->DumpRaw("agc2_adaptive_saturation_margin_db", last_margin_);
+  if (apm_data_dumper_) {
+    apm_data_dumper_->DumpRaw(
+        "agc2_adaptive_saturation_protector_delayed_peak_dbfs",
+        peak_enveloper_.Query());
+    apm_data_dumper_->DumpRaw("agc2_adaptive_saturation_margin_db",
+                              last_margin_);
+  }
 }
 
 }  // namespace webrtc


@@ -323,20 +323,18 @@ AudioProcessingImpl::AudioProcessingImpl(
       submodules_(std::move(capture_post_processor),
                   std::move(render_pre_processor),
                   std::move(echo_detector),
-                  std::move(capture_analyzer),
-                  config.Get<ExperimentalAgc>().startup_min_volume,
+                  std::move(capture_analyzer)),
+      constants_(config.Get<ExperimentalAgc>().startup_min_volume,
                  config.Get<ExperimentalAgc>().clipped_level_min,
 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
                  /* enabled= */ false,
                  /* enabled_agc2_level_estimator= */ false,
-                 /* digital_adaptive_disabled= */ false
+                 /* digital_adaptive_disabled= */ false,
 #else
                  config.Get<ExperimentalAgc>().enabled,
                  config.Get<ExperimentalAgc>().enabled_agc2_level_estimator,
-                 config.Get<ExperimentalAgc>().digital_adaptive_disabled
+                 config.Get<ExperimentalAgc>().digital_adaptive_disabled,
 #endif
-                 ),
-      constants_(config.Get<ExperimentalAgc>().clipped_level_min,
                  !field_trial::IsEnabled(
                      "WebRTC-ApmExperimentalMultiChannelRenderKillSwitch"),
                  !field_trial::IsEnabled(
@@ -478,9 +476,21 @@ int AudioProcessingImpl::InitializeLocked() {
   submodules_.gain_control->Initialize(num_proc_channels(),
                                        proc_sample_rate_hz());
-  if (submodules_.agc_manager) {
+  if (constants_.use_experimental_agc) {
+    if (!submodules_.agc_manager.get() ||
+        submodules_.agc_manager->num_channels() !=
+            static_cast<int>(num_proc_channels()) ||
+        submodules_.agc_manager->sample_rate_hz() !=
+            capture_nonlocked_.split_rate) {
+      submodules_.agc_manager.reset(new AgcManagerDirect(
+          num_proc_channels(), constants_.agc_startup_min_volume,
+          constants_.agc_clipped_level_min,
+          constants_.use_experimental_agc_agc2_level_estimation,
+          constants_.use_experimental_agc_agc2_digital_adaptive,
+          capture_nonlocked_.split_rate));
+    }
     submodules_.agc_manager->Initialize();
-    submodules_.agc_manager->ConfigureGainControl(
+    submodules_.agc_manager->SetupDigitalGainControl(
         submodules_.gain_control.get());
     submodules_.agc_manager->SetCaptureMuted(capture_.output_will_be_muted);
   }
@@ -1262,10 +1272,9 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() {
     submodules_.echo_controller->AnalyzeCapture(capture_buffer);
   }
 
-  if (submodules_.agc_manager && submodules_.gain_control->is_enabled()) {
-    submodules_.agc_manager->AnalyzePreProcess(
-        capture_buffer->channels_const(), capture_buffer->num_channels(),
-        capture_nonlocked_.capture_processing_format.num_frames());
+  if (constants_.use_experimental_agc &&
+      submodules_.gain_control->is_enabled()) {
+    submodules_.agc_manager->AnalyzePreProcess(capture_buffer);
   }
 
   if (submodule_states_.CaptureMultiBandSubModulesActive() &&
@@ -1350,11 +1359,15 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() {
     capture_.stats.voice_detected = absl::nullopt;
   }
 
-  if (submodules_.agc_manager && submodules_.gain_control->is_enabled()) {
-    submodules_.agc_manager->Process(
-        capture_buffer->split_bands_const_f(0)[kBand0To8kHz],
-        capture_buffer->num_frames_per_band(), capture_nonlocked_.split_rate,
-        submodules_.gain_control.get());
+  if (constants_.use_experimental_agc &&
+      submodules_.gain_control->is_enabled()) {
+    submodules_.agc_manager->Process(capture_buffer);
+
+    absl::optional<int> new_digital_gain =
+        submodules_.agc_manager->GetDigitalComressionGain();
+    if (new_digital_gain) {
+      submodules_.gain_control->set_compression_gain_db(*new_digital_gain);
+    }
   }
   // TODO(peah): Add reporting from AEC3 whether there is echo.
   RETURN_ON_ERR(submodules_.gain_control->ProcessCaptureAudio(


@@ -325,23 +325,11 @@ class AudioProcessingImpl : public AudioProcessing {
     Submodules(std::unique_ptr<CustomProcessing> capture_post_processor,
                std::unique_ptr<CustomProcessing> render_pre_processor,
               rtc::scoped_refptr<EchoDetector> echo_detector,
-               std::unique_ptr<CustomAudioAnalyzer> capture_analyzer,
-               int agc_startup_min_volume,
-               int agc_clipped_level_min,
-               bool use_experimental_agc,
-               bool use_experimental_agc_agc2_level_estimation,
-               bool use_experimental_agc_agc2_digital_adaptive)
+               std::unique_ptr<CustomAudioAnalyzer> capture_analyzer)
         : echo_detector(std::move(echo_detector)),
           capture_post_processor(std::move(capture_post_processor)),
           render_pre_processor(std::move(render_pre_processor)),
-          capture_analyzer(std::move(capture_analyzer)) {
-      if (use_experimental_agc) {
-        agc_manager = std::make_unique<AgcManagerDirect>(
-            agc_startup_min_volume, agc_clipped_level_min,
-            use_experimental_agc_agc2_level_estimation,
-            use_experimental_agc_agc2_digital_adaptive);
-      }
-    }
+          capture_analyzer(std::move(capture_analyzer)) {}
     // Accessed internally from capture or during initialization.
     std::unique_ptr<AgcManagerDirect> agc_manager;
     std::unique_ptr<GainControlImpl> gain_control;
@@ -381,15 +369,29 @@
   // APM constants.
   const struct ApmConstants {
-    ApmConstants(int agc_clipped_level_min,
+    ApmConstants(int agc_startup_min_volume,
+                 int agc_clipped_level_min,
+                 bool use_experimental_agc,
+                 bool use_experimental_agc_agc2_level_estimation,
+                 bool use_experimental_agc_agc2_digital_adaptive,
                  bool experimental_multi_channel_render_support,
                  bool experimental_multi_channel_capture_support)
-        : agc_clipped_level_min(agc_clipped_level_min),
+        : agc_startup_min_volume(agc_startup_min_volume),
+          agc_clipped_level_min(agc_clipped_level_min),
+          use_experimental_agc(use_experimental_agc),
+          use_experimental_agc_agc2_level_estimation(
+              use_experimental_agc_agc2_level_estimation),
+          use_experimental_agc_agc2_digital_adaptive(
+              use_experimental_agc_agc2_digital_adaptive),
           experimental_multi_channel_render_support(
              experimental_multi_channel_render_support),
          experimental_multi_channel_capture_support(
              experimental_multi_channel_capture_support) {}
+    int agc_startup_min_volume;
     int agc_clipped_level_min;
+    bool use_experimental_agc;
+    bool use_experimental_agc_agc2_level_estimation;
+    bool use_experimental_agc_agc2_digital_adaptive;
     bool experimental_multi_channel_render_support;
     bool experimental_multi_channel_capture_support;
   } constants_;


@@ -19,6 +19,7 @@
 #include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/constructor_magic.h"
+#include "rtc_base/logging.h"
 
 namespace webrtc {
@@ -380,6 +381,7 @@ int GainControlImpl::target_level_dbfs() const {
 int GainControlImpl::set_compression_gain_db(int gain) {
   if (gain < 0 || gain > 90) {
+    RTC_LOG(LS_ERROR) << "set_compression_gain_db(" << gain << ") failed.";
     return AudioProcessing::kBadParameterError;
   }
   compression_gain_db_ = gain;