AGC analog ClippingPredictor refactoring 2/2

Unit test code readability improvements.

Bug: webrtc:12774
Change-Id: I66f552d23680ddb03824618dab869946e0940334
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/221900
Commit-Queue: Alessio Bazzica <alessiob@webrtc.org>
Reviewed-by: Hanna Silen <silen@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34300}
This commit is contained in:
Alessio Bazzica 2021-06-16 11:24:31 +02:00 committed by WebRTC LUCI CQ
parent 08be9baaa3
commit 98ff0280ce

View File

@ -10,6 +10,8 @@
#include "modules/audio_processing/agc/clipping_predictor.h"
#include <cstdint>
#include <limits>
#include <tuple>
#include "rtc_base/checks.h"
@ -21,23 +23,25 @@ namespace {
using ::testing::Eq;
using ::testing::Optional;
constexpr int kSampleRateHz = 32000;
constexpr int kNumChannels = 1;
constexpr int kSamplesPerChannel = kSampleRateHz / 100;
constexpr int kWindowLength = 5;
constexpr int kReferenceWindowLength = 5;
constexpr int kReferenceWindowDelay = 5;
constexpr int kMaxMicLevel = 255;
constexpr int kMinMicLevel = 12;
constexpr int kDefaultClippedLevelStep = 15;
using ClippingPredictorConfig = AudioProcessing::Config::GainController1::
AnalogGainController::ClippingPredictor;
using ClippingPredictorMode = AudioProcessing::Config::GainController1::
AnalogGainController::ClippingPredictor::Mode;
void CallProcess(int num_calls,
constexpr int kSampleRateHz = 32000;
constexpr int kNumChannels = 1;
constexpr int kSamplesPerChannel = kSampleRateHz / 100;
constexpr int kMaxMicLevel = 255;
constexpr int kMinMicLevel = 12;
constexpr int kDefaultClippedLevelStep = 15;
constexpr float kMaxSampleS16 =
static_cast<float>(std::numeric_limits<int16_t>::max());
// Threshold in dB corresponding to a signal with an amplitude equal to 99% of
// the dynamic range - i.e., computed as `20*log10(0.99)`.
constexpr float kClippingThresholdDb = -0.08729610804900176f;
void CallAnalyze(int num_calls,
const AudioFrameView<const float>& frame,
ClippingPredictor& predictor) {
for (int i = 0; i < num_calls; ++i) {
@ -45,35 +49,35 @@ void CallProcess(int num_calls,
}
}
// Creates and processes an audio frame with a non-zero (approx. 4.15dB) crest
// Creates and analyzes an audio frame with a non-zero (approx. 4.15dB) crest
// factor.
void ProcessNonZeroCrestFactorAudio(int num_calls,
void AnalyzeNonZeroCrestFactorAudio(int num_calls,
int num_channels,
float peak_ratio,
ClippingPredictor& predictor) {
RTC_DCHECK_GT(num_calls, 0);
RTC_DCHECK_GT(num_channels, 0);
RTC_DCHECK_LE(peak_ratio, 1.f);
RTC_DCHECK_LE(peak_ratio, 1.0f);
std::vector<float*> audio(num_channels);
std::vector<float> audio_data(num_channels * kSamplesPerChannel, 0.f);
std::vector<float> audio_data(num_channels * kSamplesPerChannel, 0.0f);
for (int channel = 0; channel < num_channels; ++channel) {
audio[channel] = &audio_data[channel * kSamplesPerChannel];
for (int sample = 0; sample < kSamplesPerChannel; sample += 10) {
audio[channel][sample] = 0.1f * peak_ratio * 32767.f;
audio[channel][sample + 1] = 0.2f * peak_ratio * 32767.f;
audio[channel][sample + 2] = 0.3f * peak_ratio * 32767.f;
audio[channel][sample + 3] = 0.4f * peak_ratio * 32767.f;
audio[channel][sample + 4] = 0.5f * peak_ratio * 32767.f;
audio[channel][sample + 5] = 0.6f * peak_ratio * 32767.f;
audio[channel][sample + 6] = 0.7f * peak_ratio * 32767.f;
audio[channel][sample + 7] = 0.8f * peak_ratio * 32767.f;
audio[channel][sample + 8] = 0.9f * peak_ratio * 32767.f;
audio[channel][sample + 9] = 1.f * peak_ratio * 32767.f;
audio[channel][sample] = 0.1f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 1] = 0.2f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 2] = 0.3f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 3] = 0.4f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 4] = 0.5f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 5] = 0.6f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 6] = 0.7f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 7] = 0.8f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 8] = 0.9f * peak_ratio * kMaxSampleS16;
audio[channel][sample + 9] = 1.0f * peak_ratio * kMaxSampleS16;
}
}
auto frame = AudioFrameView<const float>(audio.data(), num_channels,
kSamplesPerChannel);
CallProcess(num_calls, frame, predictor);
AudioFrameView<const float> frame(audio.data(), num_channels,
kSamplesPerChannel);
CallAnalyze(num_calls, frame, predictor);
}
void CheckChannelEstimatesWithValue(int num_channels,
@ -84,6 +88,7 @@ void CheckChannelEstimatesWithValue(int num_channels,
const ClippingPredictor& predictor,
int expected) {
for (int i = 0; i < num_channels; ++i) {
SCOPED_TRACE(i);
EXPECT_THAT(predictor.EstimateClippedLevelStep(
i, level, default_step, min_mic_level, max_mic_level),
Optional(Eq(expected)));
@ -97,14 +102,15 @@ void CheckChannelEstimatesWithoutValue(int num_channels,
int max_mic_level,
const ClippingPredictor& predictor) {
for (int i = 0; i < num_channels; ++i) {
SCOPED_TRACE(i);
EXPECT_EQ(predictor.EstimateClippedLevelStep(i, level, default_step,
min_mic_level, max_mic_level),
absl::nullopt);
}
}
// Creates and processes an audio frame with a zero crest factor.
void ProcessZeroCrestFactorAudio(int num_calls,
// Creates and analyzes an audio frame with a zero crest factor.
void AnalyzeZeroCrestFactorAudio(int num_calls,
int num_channels,
float peak_ratio,
ClippingPredictor& predictor) {
@ -116,137 +122,156 @@ void ProcessZeroCrestFactorAudio(int num_calls,
for (int channel = 0; channel < num_channels; ++channel) {
audio[channel] = &audio_data[channel * kSamplesPerChannel];
for (int sample = 0; sample < kSamplesPerChannel; ++sample) {
audio[channel][sample] = peak_ratio * 32767.f;
audio[channel][sample] = peak_ratio * kMaxSampleS16;
}
}
auto frame = AudioFrameView<const float>(audio.data(), num_channels,
kSamplesPerChannel);
CallProcess(num_calls, frame, predictor);
CallAnalyze(num_calls, frame, predictor);
}
// Checks that no clipping predictor is created when the feature is disabled
// in the configuration.
TEST(ClippingPeakPredictorTest, NoPredictorCreated) {
  auto predictor =
      CreateClippingPredictor(kNumChannels, /*config=*/{/*enabled=*/false});
  EXPECT_FALSE(predictor);
}
// Checks that a predictor is created when the clipping event prediction mode
// is enabled.
TEST(ClippingPeakPredictorTest, ClippingEventPredictionCreated) {
  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
  auto predictor = CreateClippingPredictor(
      kNumChannels,
      /*config=*/{/*enabled=*/true,
                  /*mode=*/ClippingPredictorMode::kClippingEventPrediction});
  EXPECT_TRUE(predictor);
}
// Checks that a predictor is created when the adaptive step clipping peak
// prediction mode is enabled.
TEST(ClippingPeakPredictorTest, AdaptiveStepClippingPeakPredictionCreated) {
  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
  auto predictor = CreateClippingPredictor(
      kNumChannels, /*config=*/{
          /*enabled=*/true,
          /*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction});
  EXPECT_TRUE(predictor);
}
// Checks that a predictor is created when the fixed step clipping peak
// prediction mode is enabled.
TEST(ClippingPeakPredictorTest, FixedStepClippingPeakPredictionCreated) {
  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
  auto predictor = CreateClippingPredictor(
      kNumChannels, /*config=*/{
          /*enabled=*/true,
          /*mode=*/ClippingPredictorMode::kFixedStepClippingPeakPrediction});
  EXPECT_TRUE(predictor);
}
// Parametrizes tests over (number of channels, window length, reference
// window length, reference window delay).
class ClippingPredictorParameterization
    : public ::testing::TestWithParam<std::tuple<int, int, int, int>> {
 protected:
  int num_channels() const { return std::get<0>(GetParam()); }
  int window_length() const { return std::get<1>(GetParam()); }
  int reference_window_length() const { return std::get<2>(GetParam()); }
  int reference_window_delay() const { return std::get<3>(GetParam()); }
};
class ClippingEventPredictorParameterization
: public ::testing::TestWithParam<std::tuple<float, float>> {
protected:
float clipping_threshold() const { return std::get<0>(GetParam()); }
float crest_factor_margin() const { return std::get<1>(GetParam()); }
};
class ClippingPeakPredictorParameterization
: public ::testing::TestWithParam<std::tuple<bool, float>> {
protected:
float adaptive_step_estimation() const { return std::get<0>(GetParam()); }
float clipping_threshold() const { return std::get<1>(GetParam()); }
ClippingPredictorConfig GetConfig(ClippingPredictorMode mode) const {
// TODO(bugs.webrtc.org/12874): Use designated initializers one fixed.
return {/*enabled=*/true,
/*mode=*/mode,
/*window_length=*/std::get<1>(GetParam()),
/*reference_window_length=*/std::get<2>(GetParam()),
/*reference_window_delay=*/std::get<3>(GetParam()),
/*clipping_threshold=*/-1.0f,
/*crest_factor_margin=*/0.5f};
}
};
TEST_P(ClippingPredictorParameterization,
CheckClippingEventPredictorEstimateAfterCrestFactorDrop) {
if (reference_window_length() + reference_window_delay() > window_length()) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kClippingEventPrediction;
config.window_length = window_length();
config.reference_window_length = reference_window_length();
config.reference_window_delay = reference_window_delay();
config.clipping_threshold = -1.0f;
config.crest_factor_margin = 0.5f;
auto predictor = CreateClippingPredictor(num_channels(), config);
ProcessNonZeroCrestFactorAudio(
reference_window_length() + reference_window_delay() - window_length(),
num_channels(), /*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
ProcessZeroCrestFactorAudio(window_length(), num_channels(),
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithValue(
num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
const ClippingPredictorConfig config =
GetConfig(ClippingPredictorMode::kClippingEventPrediction);
if (config.reference_window_length + config.reference_window_delay <=
config.window_length) {
return;
}
auto predictor = CreateClippingPredictor(num_channels(), config);
AnalyzeNonZeroCrestFactorAudio(
/*num_calls=*/config.reference_window_length +
config.reference_window_delay - config.window_length,
num_channels(), /*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
AnalyzeZeroCrestFactorAudio(config.window_length, num_channels(),
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithValue(
num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
}
TEST_P(ClippingPredictorParameterization,
CheckClippingEventPredictorNoEstimateAfterConstantCrestFactor) {
if (reference_window_length() + reference_window_delay() > window_length()) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kClippingEventPrediction;
config.window_length = window_length();
config.reference_window_length = reference_window_length();
config.reference_window_delay = reference_window_delay();
config.clipping_threshold = -1.0f;
config.crest_factor_margin = 0.5f;
auto predictor = CreateClippingPredictor(num_channels(), config);
ProcessNonZeroCrestFactorAudio(
reference_window_length() + reference_window_delay() - window_length(),
num_channels(), /*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
ProcessNonZeroCrestFactorAudio(window_length(), num_channels(),
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
const ClippingPredictorConfig config =
GetConfig(ClippingPredictorMode::kClippingEventPrediction);
if (config.reference_window_length + config.reference_window_delay <=
config.window_length) {
return;
}
auto predictor = CreateClippingPredictor(num_channels(), config);
AnalyzeNonZeroCrestFactorAudio(
/*num_calls=*/config.reference_window_length +
config.reference_window_delay - config.window_length,
num_channels(), /*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length,
num_channels(),
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
}
TEST_P(ClippingPredictorParameterization,
CheckClippingPeakPredictorEstimateAfterHighCrestFactor) {
if (reference_window_length() + reference_window_delay() > window_length()) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction;
config.window_length = window_length();
config.reference_window_length = reference_window_length();
config.reference_window_delay = reference_window_delay();
config.clipping_threshold = -1.0f;
auto predictor = CreateClippingPredictor(num_channels(), config);
ProcessNonZeroCrestFactorAudio(
reference_window_length() + reference_window_delay() - window_length(),
num_channels(), /*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
ProcessNonZeroCrestFactorAudio(window_length(), num_channels(),
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithValue(
num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
const ClippingPredictorConfig config =
GetConfig(ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction);
if (config.reference_window_length + config.reference_window_delay <=
config.window_length) {
return;
}
auto predictor = CreateClippingPredictor(num_channels(), config);
AnalyzeNonZeroCrestFactorAudio(
/*num_calls=*/config.reference_window_length +
config.reference_window_delay - config.window_length,
num_channels(), /*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length,
num_channels(),
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithValue(
num_channels(), /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
}
TEST_P(ClippingPredictorParameterization,
CheckClippingPeakPredictorNoEstimateAfterLowCrestFactor) {
if (reference_window_length() + reference_window_delay() > window_length()) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction;
config.window_length = window_length();
config.reference_window_length = reference_window_length();
config.reference_window_delay = reference_window_delay();
config.clipping_threshold = -1.0f;
auto predictor = CreateClippingPredictor(num_channels(), config);
ProcessZeroCrestFactorAudio(
reference_window_length() + reference_window_delay() - window_length(),
num_channels(), /*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
ProcessNonZeroCrestFactorAudio(window_length(), num_channels(),
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
const ClippingPredictorConfig config =
GetConfig(ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction);
if (config.reference_window_length + config.reference_window_delay <=
config.window_length) {
return;
}
auto predictor = CreateClippingPredictor(num_channels(), config);
AnalyzeZeroCrestFactorAudio(
/*num_calls=*/config.reference_window_length +
config.reference_window_delay - config.window_length,
num_channels(), /*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.window_length,
num_channels(),
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(num_channels(), /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
}
INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor,
@ -256,26 +281,37 @@ INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor,
::testing::Values(1, 5),
::testing::Values(0, 1, 5)));
// Parametrizes tests over (clipping threshold, crest factor margin) and
// builds a clipping event prediction configuration from them.
class ClippingEventPredictorParameterization
    : public ::testing::TestWithParam<std::tuple<float, float>> {
 protected:
  ClippingPredictorConfig GetConfig() const {
    // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
    return {/*enabled=*/true,
            /*mode=*/ClippingPredictorMode::kClippingEventPrediction,
            /*window_length=*/5,
            /*reference_window_length=*/5,
            /*reference_window_delay=*/5,
            /*clipping_threshold=*/std::get<0>(GetParam()),
            /*crest_factor_margin=*/std::get<1>(GetParam())};
  }
};
TEST_P(ClippingEventPredictorParameterization,
CheckEstimateAfterCrestFactorDrop) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kClippingEventPrediction;
config.window_length = kWindowLength;
config.reference_window_length = kReferenceWindowLength;
config.reference_window_delay = kReferenceWindowDelay;
config.clipping_threshold = clipping_threshold();
config.crest_factor_margin = crest_factor_margin();
const ClippingPredictorConfig config = GetConfig();
auto predictor = CreateClippingPredictor(kNumChannels, config);
ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length,
kNumChannels, /*peak_ratio=*/0.99f,
*predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels, /*peak_ratio=*/0.99f,
*predictor);
if (clipping_threshold() < 20 * std::log10f(0.99f) &&
crest_factor_margin() < 4.15f) {
AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
// TODO(bugs.webrtc.org/12774): Add clarifying comment.
// TODO(bugs.webrtc.org/12774): Remove 4.15f threshold and split tests.
if (config.clipping_threshold < kClippingThresholdDb &&
config.crest_factor_margin < 4.15f) {
CheckChannelEstimatesWithValue(
kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
@ -291,65 +327,90 @@ INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor,
::testing::Combine(::testing::Values(-1.0f, 0.0f),
::testing::Values(3.0f, 4.16f)));
TEST_P(ClippingPeakPredictorParameterization,
CheckEstimateAfterHighCrestFactor) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = adaptive_step_estimation()
? ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction
: ClippingPredictorMode::kFixedStepClippingPeakPrediction;
config.window_length = kWindowLength;
config.reference_window_length = kReferenceWindowLength;
config.reference_window_delay = kReferenceWindowDelay;
config.clipping_threshold = clipping_threshold();
// Parametrizes tests over the clipping prediction mode; the clipping
// threshold (in dBFS) is supplied by each test via `GetConfig()`.
class ClippingPredictorModeParameterization
    : public ::testing::TestWithParam<ClippingPredictorMode> {
 protected:
  ClippingPredictorConfig GetConfig(float clipping_threshold_dbfs) const {
    // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
    return {/*enabled=*/true,
            /*mode=*/GetParam(),
            /*window_length=*/5,
            /*reference_window_length=*/5,
            /*reference_window_delay=*/5,
            /*clipping_threshold=*/clipping_threshold_dbfs,
            /*crest_factor_margin=*/3.0f};
  }
};
TEST_P(ClippingPredictorModeParameterization,
CheckEstimateAfterHighCrestFactorWithNoClippingMargin) {
const ClippingPredictorConfig config = GetConfig(
/*clipping_threshold_dbfs=*/0.0f);
auto predictor = CreateClippingPredictor(kNumChannels, config);
ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels,
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length,
kNumChannels, /*peak_ratio=*/0.99f,
*predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
// Since the clipping threshold is set to 0 dBFS, `EstimateClippedLevelStep()`
// is expected to return an unavailable value.
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
}
TEST_P(ClippingPredictorModeParameterization,
CheckEstimateAfterHighCrestFactorWithClippingMargin) {
const ClippingPredictorConfig config =
GetConfig(/*clipping_threshold_dbfs=*/-1.0f);
auto predictor = CreateClippingPredictor(kNumChannels, config);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/config.reference_window_length,
kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels,
AnalyzeZeroCrestFactorAudio(config.window_length, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
if (clipping_threshold() < 20 * std::log10(0.99f)) {
if (adaptive_step_estimation()) {
CheckChannelEstimatesWithValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor,
/*expected=*/17);
} else {
CheckChannelEstimatesWithValue(
kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor, kDefaultClippedLevelStep);
}
} else {
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
}
// TODO(bugs.webrtc.org/12774): Add clarifying comment.
const float expected_step =
config.mode == ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction
? 17
: kDefaultClippedLevelStep;
CheckChannelEstimatesWithValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor, expected_step);
}
INSTANTIATE_TEST_SUITE_P(GainController1ClippingPredictor,
ClippingPeakPredictorParameterization,
::testing::Combine(::testing::Values(true, false),
::testing::Values(-1.0f, 0.0f)));
INSTANTIATE_TEST_SUITE_P(
GainController1ClippingPredictor,
ClippingPredictorModeParameterization,
::testing::Values(
ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction,
ClippingPredictorMode::kFixedStepClippingPeakPrediction));
TEST(ClippingEventPredictorTest, CheckEstimateAfterReset) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kClippingEventPrediction;
config.window_length = kWindowLength;
config.reference_window_length = kReferenceWindowLength;
config.reference_window_delay = kReferenceWindowDelay;
config.clipping_threshold = -1.0f;
config.crest_factor_margin = 3.0f;
auto predictor = CreateClippingPredictor(kNumChannels, config);
ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels,
// TODO(bugs.webrtc.org/12874): Use designated initializers one fixed.
constexpr ClippingPredictorConfig kConfig{
/*enabled=*/true,
/*mode=*/ClippingPredictorMode::kClippingEventPrediction,
/*window_length=*/5,
/*reference_window_length=*/5,
/*reference_window_delay=*/5,
/*clipping_threshold=*/-1.0f,
/*crest_factor_margin=*/3.0f};
auto predictor = CreateClippingPredictor(kNumChannels, kConfig);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length,
kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
predictor->Reset();
ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels,
AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
@ -357,22 +418,23 @@ TEST(ClippingEventPredictorTest, CheckEstimateAfterReset) {
}
TEST(ClippingPeakPredictorTest, CheckNoEstimateAfterReset) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction;
config.window_length = kWindowLength;
config.reference_window_length = kReferenceWindowLength;
config.reference_window_delay = kReferenceWindowDelay;
config.clipping_threshold = -1.0f;
config.crest_factor_margin = 3.0f;
auto predictor = CreateClippingPredictor(kNumChannels, config);
ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels,
// TODO(bugs.webrtc.org/12874): Use designated initializers one fixed.
constexpr ClippingPredictorConfig kConfig{
/*enabled=*/true,
/*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction,
/*window_length=*/5,
/*reference_window_length=*/5,
/*reference_window_delay=*/5,
/*clipping_threshold=*/-1.0f};
auto predictor = CreateClippingPredictor(kNumChannels, kConfig);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length,
kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
predictor->Reset();
ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels,
AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
@ -380,20 +442,22 @@ TEST(ClippingPeakPredictorTest, CheckNoEstimateAfterReset) {
}
TEST(ClippingPeakPredictorTest, CheckAdaptiveStepEstimate) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction;
config.window_length = kWindowLength;
config.reference_window_length = kReferenceWindowLength;
config.reference_window_delay = kReferenceWindowDelay;
config.clipping_threshold = -1.0f;
auto predictor = CreateClippingPredictor(kNumChannels, config);
ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
// TODO(bugs.webrtc.org/12874): Use designated initializers one fixed.
constexpr ClippingPredictorConfig kConfig{
/*enabled=*/true,
/*mode=*/ClippingPredictorMode::kAdaptiveStepClippingPeakPrediction,
/*window_length=*/5,
/*reference_window_length=*/5,
/*reference_window_delay=*/5,
/*clipping_threshold=*/-1.0f};
auto predictor = CreateClippingPredictor(kNumChannels, kConfig);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length,
kNumChannels, /*peak_ratio=*/0.99f,
*predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels,
AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
@ -401,20 +465,22 @@ TEST(ClippingPeakPredictorTest, CheckAdaptiveStepEstimate) {
}
TEST(ClippingPeakPredictorTest, CheckFixedStepEstimate) {
ClippingPredictorConfig config;
config.enabled = true;
config.mode = ClippingPredictorMode::kFixedStepClippingPeakPrediction;
config.window_length = kWindowLength;
config.reference_window_length = kReferenceWindowLength;
config.reference_window_delay = kReferenceWindowDelay;
config.clipping_threshold = -1.0f;
auto predictor = CreateClippingPredictor(kNumChannels, config);
ProcessNonZeroCrestFactorAudio(kReferenceWindowLength, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
// TODO(bugs.webrtc.org/12874): Use designated initializers one fixed.
constexpr ClippingPredictorConfig kConfig{
/*enabled=*/true,
/*mode=*/ClippingPredictorMode::kFixedStepClippingPeakPrediction,
/*window_length=*/5,
/*reference_window_length=*/5,
/*reference_window_delay=*/5,
/*clipping_threshold=*/-1.0f};
auto predictor = CreateClippingPredictor(kNumChannels, kConfig);
AnalyzeNonZeroCrestFactorAudio(/*num_calls=*/kConfig.reference_window_length,
kNumChannels, /*peak_ratio=*/0.99f,
*predictor);
CheckChannelEstimatesWithoutValue(kNumChannels, /*level=*/255,
kDefaultClippedLevelStep, kMinMicLevel,
kMaxMicLevel, *predictor);
ProcessZeroCrestFactorAudio(kWindowLength, kNumChannels,
AnalyzeZeroCrestFactorAudio(kConfig.window_length, kNumChannels,
/*peak_ratio=*/0.99f, *predictor);
CheckChannelEstimatesWithValue(
kNumChannels, /*level=*/255, kDefaultClippedLevelStep, kMinMicLevel,