Remove usage of fmaf in IntelligibilityEnhancer
This produces bit-exact output and doesn't have the performance sensitivity to vectorisation, giving a complexity decrease of about 30x for the IntelligibilityEnhancer on my local machine. This performance issue was highlighted by this CL: https://codereview.webrtc.org/1693823004/ BUG=590998 Review URL: https://codereview.webrtc.org/1755943002 Cr-Commit-Position: refs/heads/master@{#11851}
This commit is contained in:
parent
36f0137fd5
commit
a2abdf2fbe
@ -42,7 +42,7 @@ const float kRho = 0.0004f; // Default production and interpretation SNR.
|
||||
// Returns the dot product of the first |length| elements of |a| and |b|.
// Plain multiply-add accumulation is used instead of fmaf(): the fused form
// was removed because it was dramatically slower here without providing a
// needed accuracy benefit (see BUG=590998).
// Preconditions: |a| and |b| each point to at least |length| floats
// (either may be unused when |length| == 0).
float DotProduct(const float* a, const float* b, size_t length) {
  float ret = 0.f;
  for (size_t i = 0; i < length; ++i) {
    ret += a[i] * b[i];
  }
  return ret;
}
|
||||
@ -180,7 +180,7 @@ void IntelligibilityEnhancer::UpdateErbGains() {
|
||||
for (size_t i = 0; i < freqs_; ++i) {
|
||||
gains[i] = 0.f;
|
||||
for (size_t j = 0; j < bank_size_; ++j) {
|
||||
gains[i] = fmaf(render_filter_bank_[j][i], gains_eq_[j], gains[i]);
|
||||
gains[i] += render_filter_bank_[j][i] * gains_eq_[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user