Fix and simplify the power estimation in the IntelligibilityEnhancer

R=henrik.lundin@webrtc.org, turaj@webrtc.org

Review URL: https://codereview.webrtc.org/1685703004 .

Cr-Commit-Position: refs/heads/master@{#11663}
This commit is contained in:
Alejandro Luebs 2016-02-17 20:04:19 -08:00
parent ee18220ddd
commit 32348192cc
8 changed files with 189 additions and 658 deletions

View File

@ -8,13 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
//
// Implements core class for intelligibility enhancer.
//
// Details of the model and algorithm can be found in the original paper:
// http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6882788
//
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"
#include <math.h>
@ -32,7 +25,7 @@ namespace webrtc {
namespace {
const size_t kErbResolution = 2;
const int kWindowSizeMs = 2;
const int kWindowSizeMs = 16;
const int kChunkSizeMs = 10; // Size provided by APM.
const float kClipFreq = 200.0f;
const float kConfigRho = 0.02f; // Default production and interpretation SNR.
@ -49,35 +42,30 @@ float DotProduct(const float* a, const float* b, size_t length) {
return ret;
}
// Computes the power across ERB filters from the power spectral density |var|.
// Computes the power across ERB bands from the power spectral density |pow|.
// Stores it in |result|.
void FilterVariance(const float* var,
const std::vector<std::vector<float>>& filter_bank,
float* result) {
void MapToErbBands(const float* pow,
const std::vector<std::vector<float>>& filter_bank,
float* result) {
for (size_t i = 0; i < filter_bank.size(); ++i) {
RTC_DCHECK_GT(filter_bank[i].size(), 0u);
result[i] = DotProduct(&filter_bank[i][0], var, filter_bank[i].size());
result[i] = DotProduct(&filter_bank[i][0], pow, filter_bank[i].size());
}
}
} // namespace
using std::complex;
using std::max;
using std::min;
using VarianceType = intelligibility::VarianceArray::StepType;
IntelligibilityEnhancer::TransformCallback::TransformCallback(
IntelligibilityEnhancer* parent)
: parent_(parent) {
}
void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock(
const complex<float>* const* in_block,
const std::complex<float>* const* in_block,
size_t in_channels,
size_t frames,
size_t /* out_channels */,
complex<float>* const* out_block) {
std::complex<float>* const* out_block) {
RTC_DCHECK_EQ(parent_->freqs_, frames);
for (size_t i = 0; i < in_channels; ++i) {
parent_->ProcessClearBlock(in_block[i], out_block[i]);
@ -101,13 +89,10 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config)
num_render_channels_(config.num_render_channels),
analysis_rate_(config.analysis_rate),
active_(true),
clear_variance_(freqs_,
config.var_type,
config.var_window_size,
config.var_decay_rate),
clear_power_(freqs_, config.decay_rate),
noise_power_(freqs_, 0.f),
filtered_clear_var_(new float[bank_size_]),
filtered_noise_var_(new float[bank_size_]),
filtered_clear_pow_(new float[bank_size_]),
filtered_noise_pow_(new float[bank_size_]),
center_freqs_(new float[bank_size_]),
render_filter_bank_(CreateErbBank(freqs_)),
rho_(new float[bank_size_]),
@ -120,12 +105,12 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config)
analysis_step_(0) {
RTC_DCHECK_LE(config.rho, 1.0f);
memset(filtered_clear_var_.get(),
memset(filtered_clear_pow_.get(),
0,
bank_size_ * sizeof(filtered_clear_var_[0]));
memset(filtered_noise_var_.get(),
bank_size_ * sizeof(filtered_clear_pow_[0]));
memset(filtered_noise_pow_.get(),
0,
bank_size_ * sizeof(filtered_noise_var_[0]));
bank_size_ * sizeof(filtered_noise_pow_[0]));
// Assumes all rho equal.
for (size_t i = 0; i < bank_size_; ++i) {
@ -176,8 +161,9 @@ void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio,
}
}
void IntelligibilityEnhancer::ProcessClearBlock(const complex<float>* in_block,
complex<float>* out_block) {
void IntelligibilityEnhancer::ProcessClearBlock(
const std::complex<float>* in_block,
std::complex<float>* out_block) {
if (block_count_ < 2) {
memset(out_block, 0, freqs_ * sizeof(*out_block));
++block_count_;
@ -186,11 +172,9 @@ void IntelligibilityEnhancer::ProcessClearBlock(const complex<float>* in_block,
// TODO(ekm): Use VAD to |Step| and |AnalyzeClearBlock| only if necessary.
if (true) {
clear_variance_.Step(in_block, false);
clear_power_.Step(in_block);
if (block_count_ % analysis_rate_ == analysis_rate_ - 1) {
const float power_target = std::accumulate(
clear_variance_.variance(), clear_variance_.variance() + freqs_, 0.f);
AnalyzeClearBlock(power_target);
AnalyzeClearBlock();
++analysis_step_;
}
++block_count_;
@ -201,23 +185,26 @@ void IntelligibilityEnhancer::ProcessClearBlock(const complex<float>* in_block,
}
}
void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) {
FilterVariance(clear_variance_.variance(),
render_filter_bank_,
filtered_clear_var_.get());
FilterVariance(&noise_power_[0],
capture_filter_bank_,
filtered_noise_var_.get());
void IntelligibilityEnhancer::AnalyzeClearBlock() {
const float* clear_power = clear_power_.Power();
MapToErbBands(clear_power,
render_filter_bank_,
filtered_clear_pow_.get());
MapToErbBands(&noise_power_[0],
capture_filter_bank_,
filtered_noise_pow_.get());
SolveForGainsGivenLambda(kLambdaTop, start_freq_, gains_eq_.get());
const float power_target = std::accumulate(
clear_power, clear_power + freqs_, 0.f);
const float power_top =
DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_);
DotProduct(gains_eq_.get(), filtered_clear_pow_.get(), bank_size_);
SolveForGainsGivenLambda(kLambdaBot, start_freq_, gains_eq_.get());
const float power_bot =
DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_);
DotProduct(gains_eq_.get(), filtered_clear_pow_.get(), bank_size_);
if (power_target >= power_bot && power_target <= power_top) {
SolveForLambda(power_target, power_bot, power_top);
UpdateErbGains();
} // Else experiencing variance underflow, so do nothing.
} // Else experiencing power underflow, so do nothing.
}
void IntelligibilityEnhancer::SolveForLambda(float power_target,
@ -237,7 +224,7 @@ void IntelligibilityEnhancer::SolveForLambda(float power_target,
const float lambda = lambda_bot + (lambda_top - lambda_bot) / 2.0f;
SolveForGainsGivenLambda(lambda, start_freq_, gains_eq_.get());
const float power =
DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_);
DotProduct(gains_eq_.get(), filtered_clear_pow_.get(), bank_size_);
if (power < power_target) {
lambda_bot = lambda;
} else {
@ -290,22 +277,22 @@ std::vector<std::vector<float>> IntelligibilityEnhancer::CreateErbBank(
size_t lll, ll, rr, rrr;
static const size_t kOne = 1; // Avoids repeated static_cast<>s below.
lll = static_cast<size_t>(round(
center_freqs_[max(kOne, i - lf) - 1] * num_freqs /
center_freqs_[std::max(kOne, i - lf) - 1] * num_freqs /
(0.5f * sample_rate_hz_)));
ll = static_cast<size_t>(round(
center_freqs_[max(kOne, i) - 1] * num_freqs /
center_freqs_[std::max(kOne, i) - 1] * num_freqs /
(0.5f * sample_rate_hz_)));
lll = min(num_freqs, max(lll, kOne)) - 1;
ll = min(num_freqs, max(ll, kOne)) - 1;
lll = std::min(num_freqs, std::max(lll, kOne)) - 1;
ll = std::min(num_freqs, std::max(ll, kOne)) - 1;
rrr = static_cast<size_t>(round(
center_freqs_[min(bank_size_, i + rf) - 1] * num_freqs /
center_freqs_[std::min(bank_size_, i + rf) - 1] * num_freqs /
(0.5f * sample_rate_hz_)));
rr = static_cast<size_t>(round(
center_freqs_[min(bank_size_, i + 1) - 1] * num_freqs /
center_freqs_[std::min(bank_size_, i + 1) - 1] * num_freqs /
(0.5f * sample_rate_hz_)));
rrr = min(num_freqs, max(rrr, kOne)) - 1;
rr = min(num_freqs, max(rr, kOne)) - 1;
rrr = std::min(num_freqs, std::max(rrr, kOne)) - 1;
rr = std::min(num_freqs, std::max(rr, kOne)) - 1;
float step, element;
@ -343,8 +330,8 @@ void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda,
size_t start_freq,
float* sols) {
bool quadratic = (kConfigRho < 1.0f);
const float* var_x0 = filtered_clear_var_.get();
const float* var_n0 = filtered_noise_var_.get();
const float* pow_x0 = filtered_clear_pow_.get();
const float* pow_n0 = filtered_noise_pow_.get();
for (size_t n = 0; n < start_freq; ++n) {
sols[n] = 1.0f;
@ -353,11 +340,11 @@ void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda,
// Analytic solution for optimal gains. See paper for derivation.
for (size_t n = start_freq - 1; n < bank_size_; ++n) {
float alpha0, beta0, gamma0;
gamma0 = 0.5f * rho_[n] * var_x0[n] * var_n0[n] +
lambda * var_x0[n] * var_n0[n] * var_n0[n];
beta0 = lambda * var_x0[n] * (2 - rho_[n]) * var_x0[n] * var_n0[n];
gamma0 = 0.5f * rho_[n] * pow_x0[n] * pow_n0[n] +
lambda * pow_x0[n] * pow_n0[n] * pow_n0[n];
beta0 = lambda * pow_x0[n] * (2 - rho_[n]) * pow_x0[n] * pow_n0[n];
if (quadratic) {
alpha0 = lambda * var_x0[n] * (1 - rho_[n]) * var_x0[n] * var_x0[n];
alpha0 = lambda * pow_x0[n] * (1 - rho_[n]) * pow_x0[n] * pow_x0[n];
sols[n] =
(-beta0 - sqrtf(beta0 * beta0 - 4 * alpha0 * gamma0)) /
(2 * alpha0 + std::numeric_limits<float>::epsilon());

View File

@ -8,10 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
//
// Specifies core class for intelligibility enhancement.
//
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
@ -28,30 +24,25 @@ namespace webrtc {
// Speech intelligibility enhancement module. Reads render and capture
// audio streams and modifies the render stream with a set of gains per
// frequency bin to enhance speech against the noise background.
// Note: assumes speech and noise streams are already separated.
// Details of the model and algorithm can be found in the original paper:
// http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6882788
class IntelligibilityEnhancer {
public:
struct Config {
// |var_*| are parameters for the VarianceArray constructor for the
// clear speech stream.
// TODO(bercic): the |var_*|, |*_rate| and |gain_limit| parameters should
// probably go away once fine tuning is done.
// TODO(bercic): the |decay_rate|, |analysis_rate| and |gain_limit|
// parameters should probably go away once fine tuning is done.
Config()
: sample_rate_hz(16000),
num_capture_channels(1),
num_render_channels(1),
var_type(intelligibility::VarianceArray::kStepDecaying),
var_decay_rate(0.9f),
var_window_size(10),
analysis_rate(800),
decay_rate(0.9f),
analysis_rate(60),
gain_change_limit(0.1f),
rho(0.02f) {}
int sample_rate_hz;
size_t num_capture_channels;
size_t num_render_channels;
intelligibility::VarianceArray::StepType var_type;
float var_decay_rate;
size_t var_window_size;
float decay_rate;
int analysis_rate;
float gain_change_limit;
float rho;
@ -90,13 +81,13 @@ class IntelligibilityEnhancer {
FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest, TestErbCreation);
FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest, TestSolveForGains);
// Updates variance computation and analysis with |in_block_|,
// Updates power computation and analysis with |in_block_|,
// and writes modified speech to |out_block|.
void ProcessClearBlock(const std::complex<float>* in_block,
std::complex<float>* out_block);
// Computes and sets modified gains.
void AnalyzeClearBlock(float power_target);
void AnalyzeClearBlock();
// Bisection search for optimal |lambda|.
void SolveForLambda(float power_target, float power_bot, float power_top);
@ -127,10 +118,10 @@ class IntelligibilityEnhancer {
const bool active_; // Whether render gains are being updated.
// TODO(ekm): Add logic for updating |active_|.
intelligibility::VarianceArray clear_variance_;
intelligibility::PowerEstimator clear_power_;
std::vector<float> noise_power_;
rtc::scoped_ptr<float[]> filtered_clear_var_;
rtc::scoped_ptr<float[]> filtered_noise_var_;
rtc::scoped_ptr<float[]> filtered_clear_pow_;
rtc::scoped_ptr<float[]> filtered_noise_pow_;
rtc::scoped_ptr<float[]> center_freqs_;
std::vector<std::vector<float>> capture_filter_bank_;
std::vector<std::vector<float>> render_filter_bank_;

View File

@ -8,10 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
//
// Unit tests for intelligibility enhancer.
//
#include <math.h>
#include <stdlib.h>
#include <algorithm>
@ -32,28 +28,29 @@ const float kTestCenterFreqs[] = {
13.169f, 26.965f, 41.423f, 56.577f, 72.461f, 89.113f, 106.57f, 124.88f,
144.08f, 164.21f, 185.34f, 207.5f, 230.75f, 255.16f, 280.77f, 307.66f,
335.9f, 365.56f, 396.71f, 429.44f, 463.84f, 500.f};
const float kTestFilterBank[][2] = {{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.f},
{0.055556f, 0.2f},
{0, 0.2f},
{0, 0.2f},
{0, 0.2f},
{0, 0.2f}};
const float kTestFilterBank[][9] = {
{0.2f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.2f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.2f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.2f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.2f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.25f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.f, 0.25f, 0.142857f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.f, 0.25f, 0.285714f, 0.f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.f, 0.f, 0.285714f, 0.142857f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.f, 0.f, 0.285714f, 0.285714f, 0.f, 0.f, 0.f, 0.f},
{0.f, 0.f, 0.f, 0.f, 0.285714f, 0.142857f, 0.f, 0.f, 0.f},
{0.f, 0.f, 0.f, 0.f, 0.285714f, 0.285714f, 0.f, 0.f, 0.f},
{0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.142857f, 0.f, 0.f},
{0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.285714f, 0.f, 0.f},
{0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.142857f, 0.f},
{0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.285714f, 0.f},
{0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.f},
{0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.5f},
{0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.5f}};
static_assert(arraysize(kTestCenterFreqs) == arraysize(kTestFilterBank),
"Test filterbank badly initialized.");
@ -63,14 +60,14 @@ const float kTestZeroVar[] = {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,
1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f, 0.f,
0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
static_assert(arraysize(kTestCenterFreqs) == arraysize(kTestZeroVar),
"Variance test data badly initialized.");
"Power test data badly initialized.");
const float kTestNonZeroVarLambdaTop[] = {
1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,
1.f, 1.f, 1.f, 0.f, 0.f, 0.0351f, 0.0636f, 0.0863f,
0.1037f, 0.1162f, 0.1236f, 0.1251f, 0.1189f, 0.0993f};
static_assert(arraysize(kTestCenterFreqs) ==
arraysize(kTestNonZeroVarLambdaTop),
"Variance test data badly initialized.");
"Power test data badly initialized.");
const float kMaxTestError = 0.005f;
// Enhancer initialization parameters.
@ -81,9 +78,6 @@ const int kFragmentSize = kSampleRate / 100;
} // namespace
using std::vector;
using intelligibility::VarianceArray;
class IntelligibilityEnhancerTest : public ::testing::Test {
protected:
IntelligibilityEnhancerTest()
@ -92,9 +86,8 @@ class IntelligibilityEnhancerTest : public ::testing::Test {
enh_.reset(new IntelligibilityEnhancer(config_));
}
bool CheckUpdate(VarianceArray::StepType step_type) {
bool CheckUpdate() {
config_.sample_rate_hz = kSampleRate;
config_.var_type = step_type;
enh_.reset(new IntelligibilityEnhancer(config_));
float* clear_cursor = &clear_data_[0];
float* noise_cursor = &noise_data_[0];
@ -113,37 +106,25 @@ class IntelligibilityEnhancerTest : public ::testing::Test {
IntelligibilityEnhancer::Config config_;
rtc::scoped_ptr<IntelligibilityEnhancer> enh_;
vector<float> clear_data_;
vector<float> noise_data_;
vector<float> orig_data_;
std::vector<float> clear_data_;
std::vector<float> noise_data_;
std::vector<float> orig_data_;
};
// For each class of generated data, tests that render stream is
// updated when it should be for each variance update method.
// For each class of generated data, tests that render stream is updated when
// it should be.
TEST_F(IntelligibilityEnhancerTest, TestRenderUpdate) {
vector<VarianceArray::StepType> step_types;
step_types.push_back(VarianceArray::kStepInfinite);
step_types.push_back(VarianceArray::kStepDecaying);
step_types.push_back(VarianceArray::kStepWindowed);
step_types.push_back(VarianceArray::kStepBlocked);
step_types.push_back(VarianceArray::kStepBlockBasedMovingAverage);
std::fill(noise_data_.begin(), noise_data_.end(), 0.0f);
std::fill(orig_data_.begin(), orig_data_.end(), 0.0f);
for (auto step_type : step_types) {
std::fill(clear_data_.begin(), clear_data_.end(), 0.0f);
EXPECT_FALSE(CheckUpdate(step_type));
}
std::fill(clear_data_.begin(), clear_data_.end(), 0.0f);
EXPECT_FALSE(CheckUpdate());
std::srand(1);
auto float_rand = []() { return std::rand() * 2.f / RAND_MAX - 1; };
std::generate(noise_data_.begin(), noise_data_.end(), float_rand);
for (auto step_type : step_types) {
EXPECT_FALSE(CheckUpdate(step_type));
}
for (auto step_type : step_types) {
std::generate(clear_data_.begin(), clear_data_.end(), float_rand);
orig_data_ = clear_data_;
EXPECT_TRUE(CheckUpdate(step_type));
}
EXPECT_FALSE(CheckUpdate());
std::generate(clear_data_.begin(), clear_data_.end(), float_rand);
orig_data_ = clear_data_;
EXPECT_TRUE(CheckUpdate());
}
// Tests ERB bank creation, comparing against matlab output.
@ -163,11 +144,11 @@ TEST_F(IntelligibilityEnhancerTest, TestErbCreation) {
// against matlab output.
TEST_F(IntelligibilityEnhancerTest, TestSolveForGains) {
ASSERT_EQ(kTestStartFreq, enh_->start_freq_);
vector<float> sols(enh_->bank_size_);
std::vector<float> sols(enh_->bank_size_);
float lambda = -0.001f;
for (size_t i = 0; i < enh_->bank_size_; i++) {
enh_->filtered_clear_var_[i] = 0.0f;
enh_->filtered_noise_var_[i] = 0.0f;
enh_->filtered_clear_pow_[i] = 0.0f;
enh_->filtered_noise_pow_[i] = 0.0f;
enh_->rho_[i] = 0.02f;
}
enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, &sols[0]);
@ -175,8 +156,8 @@ TEST_F(IntelligibilityEnhancerTest, TestSolveForGains) {
EXPECT_NEAR(kTestZeroVar[i], sols[i], kMaxTestError);
}
for (size_t i = 0; i < enh_->bank_size_; i++) {
enh_->filtered_clear_var_[i] = static_cast<float>(i + 1);
enh_->filtered_noise_var_[i] = static_cast<float>(enh_->bank_size_ - i);
enh_->filtered_clear_pow_[i] = static_cast<float>(i + 1);
enh_->filtered_noise_pow_[i] = static_cast<float>(enh_->bank_size_ - i);
}
enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, &sols[0]);
for (size_t i = 0; i < enh_->bank_size_; i++) {

View File

@ -8,10 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
//
// Implements helper functions and classes for intelligibility enhancement.
//
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
#include <math.h>
@ -19,271 +15,46 @@
#include <string.h>
#include <algorithm>
using std::complex;
using std::min;
namespace webrtc {
namespace intelligibility {
namespace {
// Return |current| changed towards |target|, with the change being at most
// |limit| in magnitude.
float UpdateFactor(float target, float current, float limit) {
  const float delta = fabsf(target - current);
  // Direction of the change; copysign handles the target == current case.
  const float sign = copysign(1.f, target - current);
  return current + sign * fminf(delta, limit);
}
// Passes |value| through unchanged unless it is exactly zero, in which case a
// small random dither in [0, 0.01] is returned instead.
float AddDitherIfZero(float value) {
  if (value != 0.f) {
    return value;
  }
  const float dither = std::rand() * 0.01f / RAND_MAX;
  return dither;
}
} // namespace
complex<float> zerofudge(complex<float> c) {
return complex<float>(AddDitherIfZero(c.real()), AddDitherIfZero(c.imag()));
}
// Incremental mean computation: returns the running mean after folding in
// |data| as the |count|-th sample of the series.
std::complex<float> NewMean(std::complex<float> mean,
                            std::complex<float> data,
                            size_t count) {
  const std::complex<float> correction =
      (data - mean) / static_cast<float>(count);
  return mean + correction;
}
void AddToMean(complex<float> data, size_t count, complex<float>* mean) {
(*mean) = NewMean(*mean, data, count);
}
static const size_t kWindowBlockSize = 10;
VarianceArray::VarianceArray(size_t num_freqs,
StepType type,
size_t window_size,
float decay)
: running_mean_(new complex<float>[num_freqs]()),
running_mean_sq_(new complex<float>[num_freqs]()),
sub_running_mean_(new complex<float>[num_freqs]()),
sub_running_mean_sq_(new complex<float>[num_freqs]()),
variance_(new float[num_freqs]()),
conj_sum_(new float[num_freqs]()),
PowerEstimator::PowerEstimator(size_t num_freqs,
float decay)
: magnitude_(new float[num_freqs]()),
power_(new float[num_freqs]()),
num_freqs_(num_freqs),
window_size_(window_size),
decay_(decay),
history_cursor_(0),
count_(0),
array_mean_(0.0f),
buffer_full_(false) {
history_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
for (size_t i = 0; i < num_freqs_; ++i) {
history_[i].reset(new complex<float>[window_size_]());
}
subhistory_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
for (size_t i = 0; i < num_freqs_; ++i) {
subhistory_[i].reset(new complex<float>[window_size_]());
}
subhistory_sq_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
for (size_t i = 0; i < num_freqs_; ++i) {
subhistory_sq_[i].reset(new complex<float>[window_size_]());
}
switch (type) {
case kStepInfinite:
step_func_ = &VarianceArray::InfiniteStep;
break;
case kStepDecaying:
step_func_ = &VarianceArray::DecayStep;
break;
case kStepWindowed:
step_func_ = &VarianceArray::WindowedStep;
break;
case kStepBlocked:
step_func_ = &VarianceArray::BlockedStep;
break;
case kStepBlockBasedMovingAverage:
step_func_ = &VarianceArray::BlockBasedMovingAverage;
break;
}
decay_(decay) {
memset(magnitude_.get(), 0, sizeof(*magnitude_.get()) * num_freqs_);
memset(power_.get(), 0, sizeof(*power_.get()) * num_freqs_);
}
// Compute the variance with Welford's algorithm, adding some fudge to
// the input in case of all-zeroes.
void VarianceArray::InfiniteStep(const complex<float>* data, bool skip_fudge) {
array_mean_ = 0.0f;
++count_;
for (size_t i = 0; i < num_freqs_; ++i) {
complex<float> sample = data[i];
if (!skip_fudge) {
sample = zerofudge(sample);
}
if (count_ == 1) {
running_mean_[i] = sample;
variance_[i] = 0.0f;
} else {
float old_sum = conj_sum_[i];
complex<float> old_mean = running_mean_[i];
running_mean_[i] =
old_mean + (sample - old_mean) / static_cast<float>(count_);
conj_sum_[i] =
(old_sum + std::conj(sample - old_mean) * (sample - running_mean_[i]))
.real();
variance_[i] =
conj_sum_[i] / (count_ - 1);
}
array_mean_ += (variance_[i] - array_mean_) / (i + 1);
}
}
// Compute the variance from the beginning, with exponential decaying of the
// Compute the magnitude from the beginning, with exponential decaying of the
// series data.
void VarianceArray::DecayStep(const complex<float>* data, bool /*dummy*/) {
array_mean_ = 0.0f;
++count_;
void PowerEstimator::Step(const std::complex<float>* data) {
for (size_t i = 0; i < num_freqs_; ++i) {
complex<float> sample = data[i];
sample = zerofudge(sample);
if (count_ == 1) {
running_mean_[i] = sample;
running_mean_sq_[i] = sample * std::conj(sample);
variance_[i] = 0.0f;
} else {
complex<float> prev = running_mean_[i];
complex<float> prev2 = running_mean_sq_[i];
running_mean_[i] = decay_ * prev + (1.0f - decay_) * sample;
running_mean_sq_[i] =
decay_ * prev2 + (1.0f - decay_) * sample * std::conj(sample);
variance_[i] = (running_mean_sq_[i] -
running_mean_[i] * std::conj(running_mean_[i])).real();
}
array_mean_ += (variance_[i] - array_mean_) / (i + 1);
magnitude_[i] = decay_ * magnitude_[i] +
(1.f - decay_) * std::abs(data[i]);
}
}
// Windowed variance computation. On each step, the variances for the
// window are recomputed from scratch, using Welford's algorithm.
void VarianceArray::WindowedStep(const complex<float>* data, bool /*dummy*/) {
size_t num = min(count_ + 1, window_size_);
array_mean_ = 0.0f;
const float* PowerEstimator::Power() {
for (size_t i = 0; i < num_freqs_; ++i) {
complex<float> mean;
float conj_sum = 0.0f;
history_[i][history_cursor_] = data[i];
mean = history_[i][history_cursor_];
variance_[i] = 0.0f;
for (size_t j = 1; j < num; ++j) {
complex<float> sample =
zerofudge(history_[i][(history_cursor_ + j) % window_size_]);
sample = history_[i][(history_cursor_ + j) % window_size_];
float old_sum = conj_sum;
complex<float> old_mean = mean;
mean = old_mean + (sample - old_mean) / static_cast<float>(j + 1);
conj_sum =
(old_sum + std::conj(sample - old_mean) * (sample - mean)).real();
variance_[i] = conj_sum / (j);
}
array_mean_ += (variance_[i] - array_mean_) / (i + 1);
}
history_cursor_ = (history_cursor_ + 1) % window_size_;
++count_;
}
// Variance with a window of blocks. Within each block, the variances are
// recomputed from scratch at every step, using |Var(X) = E(X^2) - E^2(X)|.
// Once a block is filled with kWindowBlockSize samples, it is added to the
// history window and a new block is started. The variances for the window
// are recomputed from scratch at each of these transitions.
void VarianceArray::BlockedStep(const complex<float>* data, bool /*dummy*/) {
size_t blocks = min(window_size_, history_cursor_ + 1);
for (size_t i = 0; i < num_freqs_; ++i) {
AddToMean(data[i], count_ + 1, &sub_running_mean_[i]);
AddToMean(data[i] * std::conj(data[i]), count_ + 1,
&sub_running_mean_sq_[i]);
subhistory_[i][history_cursor_ % window_size_] = sub_running_mean_[i];
subhistory_sq_[i][history_cursor_ % window_size_] = sub_running_mean_sq_[i];
variance_[i] =
(NewMean(running_mean_sq_[i], sub_running_mean_sq_[i], blocks) -
NewMean(running_mean_[i], sub_running_mean_[i], blocks) *
std::conj(NewMean(running_mean_[i], sub_running_mean_[i], blocks)))
.real();
if (count_ == kWindowBlockSize - 1) {
sub_running_mean_[i] = complex<float>(0.0f, 0.0f);
sub_running_mean_sq_[i] = complex<float>(0.0f, 0.0f);
running_mean_[i] = complex<float>(0.0f, 0.0f);
running_mean_sq_[i] = complex<float>(0.0f, 0.0f);
for (size_t j = 0; j < min(window_size_, history_cursor_); ++j) {
AddToMean(subhistory_[i][j], j + 1, &running_mean_[i]);
AddToMean(subhistory_sq_[i][j], j + 1, &running_mean_sq_[i]);
}
++history_cursor_;
}
}
++count_;
if (count_ == kWindowBlockSize) {
count_ = 0;
}
}
// Recomputes variances for each window from scratch based on previous window.
void VarianceArray::BlockBasedMovingAverage(const std::complex<float>* data,
bool /*dummy*/) {
// TODO(ekmeyerson) To mitigate potential divergence, add a counter so that
// every so often the sums are recomputed from scratch by summing over all
// elements instead of subtracting the oldest and adding the newest.
for (size_t i = 0; i < num_freqs_; ++i) {
sub_running_mean_[i] += data[i];
sub_running_mean_sq_[i] += data[i] * std::conj(data[i]);
}
++count_;
// TODO(ekmeyerson) Make kWindowBlockSize nonconstant to allow
// experimentation with different block size, window size pairs.
if (count_ >= kWindowBlockSize) {
count_ = 0;
for (size_t i = 0; i < num_freqs_; ++i) {
running_mean_[i] -= subhistory_[i][history_cursor_];
running_mean_sq_[i] -= subhistory_sq_[i][history_cursor_];
float scale = 1.f / kWindowBlockSize;
subhistory_[i][history_cursor_] = sub_running_mean_[i] * scale;
subhistory_sq_[i][history_cursor_] = sub_running_mean_sq_[i] * scale;
sub_running_mean_[i] = std::complex<float>(0.0f, 0.0f);
sub_running_mean_sq_[i] = std::complex<float>(0.0f, 0.0f);
running_mean_[i] += subhistory_[i][history_cursor_];
running_mean_sq_[i] += subhistory_sq_[i][history_cursor_];
scale = 1.f / (buffer_full_ ? window_size_ : history_cursor_ + 1);
variance_[i] = std::real(running_mean_sq_[i] * scale -
running_mean_[i] * scale *
std::conj(running_mean_[i]) * scale);
}
++history_cursor_;
if (history_cursor_ >= window_size_) {
buffer_full_ = true;
history_cursor_ = 0;
}
}
}
void VarianceArray::Clear() {
memset(running_mean_.get(), 0, sizeof(*running_mean_.get()) * num_freqs_);
memset(running_mean_sq_.get(), 0,
sizeof(*running_mean_sq_.get()) * num_freqs_);
memset(variance_.get(), 0, sizeof(*variance_.get()) * num_freqs_);
memset(conj_sum_.get(), 0, sizeof(*conj_sum_.get()) * num_freqs_);
history_cursor_ = 0;
count_ = 0;
array_mean_ = 0.0f;
}
void VarianceArray::ApplyScale(float scale) {
array_mean_ = 0.0f;
for (size_t i = 0; i < num_freqs_; ++i) {
variance_[i] *= scale * scale;
array_mean_ += (variance_[i] - array_mean_) / (i + 1);
power_[i] = magnitude_[i] * magnitude_[i];
}
return &power_[0];
}
GainApplier::GainApplier(size_t freqs, float change_limit)
@ -292,17 +63,17 @@ GainApplier::GainApplier(size_t freqs, float change_limit)
target_(new float[freqs]()),
current_(new float[freqs]()) {
for (size_t i = 0; i < freqs; ++i) {
target_[i] = 1.0f;
current_[i] = 1.0f;
target_[i] = 1.f;
current_[i] = 1.f;
}
}
void GainApplier::Apply(const complex<float>* in_block,
complex<float>* out_block) {
void GainApplier::Apply(const std::complex<float>* in_block,
std::complex<float>* out_block) {
for (size_t i = 0; i < num_freqs_; ++i) {
float factor = sqrtf(fabsf(current_[i]));
if (!std::isnormal(factor)) {
factor = 1.0f;
factor = 1.f;
}
out_block[i] = factor * in_block[i];
current_[i] = UpdateFactor(target_[i], current_[i], change_limit_);

View File

@ -8,10 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
//
// Specifies helper classes for intelligibility enhancement.
//
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_
@ -23,115 +19,36 @@ namespace webrtc {
namespace intelligibility {
// Return |current| changed towards |target|, with the change being at most
// |limit|.
float UpdateFactor(float target, float current, float limit);
// Apply a small fudge to degenerate complex values. The numbers in the array
// were chosen randomly, so that even a series of all zeroes has some small
// variability.
std::complex<float> zerofudge(std::complex<float> c);
// Incremental mean computation. Return the mean of the series with the
// mean |mean| with added |data|.
std::complex<float> NewMean(std::complex<float> mean,
std::complex<float> data,
size_t count);
// Updates |mean| with added |data|;
void AddToMean(std::complex<float> data,
size_t count,
std::complex<float>* mean);
// Internal helper for computing the power of a stream of arrays.
// The result is an array of power per position: the i-th power is the power of
// the stream of data on the i-th positions in the input arrays.
class PowerEstimator {
 public:
  // Construct an instance for the given input array length (|freqs|), with the
  // appropriate parameters. |decay| is the forgetting factor of the
  // exponentially-decaying running power estimate.
  PowerEstimator(size_t freqs, float decay);

  // Add a new data point to the series.
  void Step(const std::complex<float>* data);

  // The current power array.
  const float* Power();

 private:
  // TODO(ekmeyerson): Switch the following running means
  // and histories from rtc::scoped_ptr to std::vector.

  // The current average X and X^2.
  rtc::scoped_ptr<std::complex<float>[]> running_mean_;
  rtc::scoped_ptr<std::complex<float>[]> running_mean_sq_;

  // The current magnitude array.
  rtc::scoped_ptr<float[]> magnitude_;

  // The current power array.
  rtc::scoped_ptr<float[]> power_;

  const size_t num_freqs_;
  const float decay_;
};
// Helper class for smoothing gain changes. On each application step, the
// currently used gains are changed towards a set of settable target gains,
// constrained by a limit on the magnitude of the changes.
class GainApplier {

View File

@ -8,169 +8,69 @@
* be found in the AUTHORS file in the root of the source tree.
*/
//
// Unit tests for intelligibility utils.
//
#include <math.h>
#include <cmath>
#include <complex>
#include <iostream>
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/arraysize.h"
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
using std::complex;
using std::vector;
namespace webrtc {
namespace intelligibility {
// Generates |samples| arrays of |freqs| deterministic complex values, used as
// synthetic input for the tests below. Every value lies strictly inside
// (0, 1) on both the real and imaginary axes.
std::vector<std::vector<std::complex<float>>> GenerateTestData(size_t freqs,
                                                               size_t samples) {
  std::vector<std::vector<std::complex<float>>> data(samples);
  for (size_t i = 0; i < samples; ++i) {
    for (size_t j = 0; j < freqs; ++j) {
      // Values decay with both indices but never reach 0 or 1.
      const float val = 0.99f / ((i + 1) * (j + 1));
      data[i].push_back(std::complex<float>(val, val));
    }
  }
  return data;
}
// Tests UpdateFactor.
TEST(IntelligibilityUtilsTest, TestUpdateFactor) {
  // Each case lists the three UpdateFactor arguments and the expected result.
  const struct {
    int target;
    int current;
    int limit;
    int expected;
  } kCases[] = {{0, 0, 0, 0},
                {4, 2, 3, 4},
                {4, 2, 1, 3},
                {2, 4, 3, 2},
                {2, 4, 1, 3}};
  for (const auto& c : kCases) {
    EXPECT_EQ(c.expected,
              intelligibility::UpdateFactor(c.target, c.current, c.limit));
  }
}
// Tests zerofudge.
TEST(IntelligibilityUtilsTest, TestCplx) {
  // Fudging a value with a zero component must move both components away
  // from zero.
  std::complex<float> fudged(1.f, 0.f);
  fudged = intelligibility::zerofudge(fudged);
  EXPECT_NE(fudged.imag(), 0.f);
  EXPECT_NE(fudged.real(), 0.f);
}
// Tests NewMean and AddToMean.
TEST(IntelligibilityUtilsTest, TestMeanUpdate) {
  // |means| holds the expected running mean of |data| after each element,
  // starting from an initial mean of {3, 8}.
  const std::complex<float> data[] = {{3, 8}, {7, 6}, {2, 1}, {8, 9}, {0, 6}};
  const std::complex<float> means[] = {{3, 8}, {5, 7}, {4, 5}, {5, 6}, {4, 6}};
  std::complex<float> running(3, 8);
  for (size_t count = 1; count <= arraysize(data); ++count) {
    const size_t idx = count - 1;
    // NewMean must predict the same value AddToMean stores in place.
    EXPECT_EQ(means[idx], NewMean(running, data[idx], count));
    AddToMean(data[idx], count, &running);
    EXPECT_EQ(means[idx], running);
  }
}
// Tests PowerEstimator: on synthetic input bounded by 1, the power estimates
// must start at zero and stay within [0, 1] as samples are fed in.
TEST(IntelligibilityUtilsTest, TestPowerEstimator) {
  const size_t kFreqs = 10;
  const size_t kSamples = 100;
  const float kDecay = 0.5f;
  const std::vector<std::vector<std::complex<float>>> test_data(
      GenerateTestData(kFreqs, kSamples));
  PowerEstimator power_estimator(kFreqs, kDecay);
  EXPECT_EQ(0, power_estimator.Power()[0]);

  // Makes sure Step is doing something.
  power_estimator.Step(&test_data[0][0]);
  for (size_t i = 1; i < kSamples; ++i) {
    power_estimator.Step(&test_data[i][0]);
    for (size_t j = 0; j < kFreqs; ++j) {
      const float* power = power_estimator.Power();
      EXPECT_GE(power[j], 0.f);
      EXPECT_LE(power[j], 1.f);
    }
  }
}
// Tests gain applier: with inputs strictly inside (0, 1) and a bounded gain
// change limit, outputs must also stay strictly inside (0, 1).
TEST(IntelligibilityUtilsTest, TestGainApplier) {
  const size_t kFreqs = 10;
  const size_t kSamples = 100;
  const float kChangeLimit = 0.1f;
  GainApplier gain_applier(kFreqs, kChangeLimit);
  const std::vector<std::vector<std::complex<float>>> in_data(
      GenerateTestData(kFreqs, kSamples));
  std::vector<std::vector<std::complex<float>>> out_data(GenerateTestData(
      kFreqs, kSamples));
  for (size_t i = 0; i < kSamples; ++i) {
    gain_applier.Apply(&in_data[i][0], &out_data[i][0]);
    for (size_t j = 0; j < kFreqs; ++j) {
      EXPECT_GT(out_data[i][j].real(), 0.f);
      EXPECT_LT(out_data[i][j].real(), 1.f);
      EXPECT_GT(out_data[i][j].imag(), 0.f);
      EXPECT_LT(out_data[i][j].imag(), 1.f);
    }
  }
}

View File

@ -24,6 +24,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/real_fourier.h"
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
@ -35,34 +36,17 @@
#include "webrtc/test/testsupport/fileutils.h"
using std::complex;
using webrtc::intelligibility::VarianceArray;
namespace webrtc {
namespace {
// Flag validator for the clear-data window size: accepts strictly positive
// values only. |flagname| is supplied by the flags library and unused here.
bool ValidateClearWindow(const char* flagname, int32_t value) {
  const bool is_positive = value >= 1;
  return is_positive;
}
DEFINE_int32(clear_type,
webrtc::intelligibility::VarianceArray::kStepDecaying,
"Variance algorithm for clear data.");
DEFINE_double(clear_alpha, 0.9, "Variance decay factor for clear data.");
DEFINE_int32(clear_window,
475,
"Window size for windowed variance for clear data.");
const bool clear_window_dummy =
google::RegisterFlagValidator(&FLAGS_clear_window, &ValidateClearWindow);
DEFINE_double(clear_alpha, 0.9, "Power decay factor for clear data.");
DEFINE_int32(sample_rate,
16000,
"Audio sample rate used in the input and output files.");
DEFINE_int32(ana_rate,
800,
60,
"Analysis rate; gains recalculated every N blocks.");
DEFINE_int32(
var_rate,
2,
"Variance clear rate; history is forgotten every N gain recalculations.");
DEFINE_double(gain_limit, 1000.0, "Maximum gain change in one block.");
DEFINE_string(clear_file, "speech.wav", "Input file with clear speech.");
@ -77,11 +61,7 @@ const size_t kNumChannels = 1;
// void function for gtest
void void_main(int argc, char* argv[]) {
google::SetUsageMessage(
"\n\nVariance algorithm types are:\n"
" 0 - infinite/normal,\n"
" 1 - exponentially decaying,\n"
" 2 - rolling window.\n"
"\nInput files must be little-endian 16-bit signed raw PCM.\n");
"\n\nInput files must be little-endian 16-bit signed raw PCM.\n");
google::ParseCommandLineFlags(&argc, &argv, true);
size_t samples; // Number of samples in input PCM file
@ -105,17 +85,17 @@ void void_main(int argc, char* argv[]) {
WavReader in_file(FLAGS_clear_file);
std::vector<float> in_fpcm(samples);
in_file.ReadSamples(samples, &in_fpcm[0]);
FloatS16ToFloat(&in_fpcm[0], samples, &in_fpcm[0]);
WavReader noise_file(FLAGS_noise_file);
std::vector<float> noise_fpcm(samples);
noise_file.ReadSamples(samples, &noise_fpcm[0]);
FloatS16ToFloat(&noise_fpcm[0], samples, &noise_fpcm[0]);
// Run intelligibility enhancement.
IntelligibilityEnhancer::Config config;
config.sample_rate_hz = FLAGS_sample_rate;
config.var_type = static_cast<VarianceArray::StepType>(FLAGS_clear_type);
config.var_decay_rate = static_cast<float>(FLAGS_clear_alpha);
config.var_window_size = static_cast<size_t>(FLAGS_clear_window);
config.decay_rate = static_cast<float>(FLAGS_clear_alpha);
config.analysis_rate = FLAGS_ana_rate;
config.gain_change_limit = FLAGS_gain_limit;
IntelligibilityEnhancer enh(config);
@ -146,6 +126,8 @@ void void_main(int argc, char* argv[]) {
noise_cursor += fragment_size;
}
FloatToFloatS16(&in_fpcm[0], samples, &in_fpcm[0]);
if (FLAGS_out_file.compare("-") == 0) {
const std::string temp_out_filename =
test::TempFilename(test::WorkingDir(), "temp_wav_file");

View File

@ -177,15 +177,17 @@ std::vector<float> NoiseSuppressionImpl::NoiseEstimate() {
rtc::CritScope cs(crit_);
std::vector<float> noise_estimate;
#if defined(WEBRTC_NS_FLOAT)
const float kNormalizationFactor = 1.f / (1 << 15);
noise_estimate.assign(WebRtcNs_num_freq(), 0.f);
for (auto& suppressor : suppressors_) {
const float* noise = WebRtcNs_noise_estimate(suppressor->state());
for (size_t i = 0; i < noise_estimate.size(); ++i) {
noise_estimate[i] += noise[i] / suppressors_.size();
noise_estimate[i] += kNormalizationFactor *
noise[i] / suppressors_.size();
}
}
#elif defined(WEBRTC_NS_FIXED)
const float kNormalizationFactor = 1.f / (1 << 8);
const float kNormalizationFactor = 1.f / (1 << 23);
noise_estimate.assign(WebRtcNsx_num_freq(), 0.f);
for (auto& suppressor : suppressors_) {
const uint32_t* noise = WebRtcNsx_noise_estimate(suppressor->state());