Changed the digital AGC1 gain to properly support multichannel
Beyond making the digital AGC1 code properly support multichannel, this CL also
- Removes deprecated debug logging code.
- Converts the gain application to be fully in floating point, which
  - Is less computationally complex.
  - Does not quantize the samples to 16 bit before applying the gains.

Bug: webrtc:10859
Change-Id: I6020ba8ae7e311dfc93a72783a2bb68d935f90c5
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/159861
Commit-Queue: Per Åhgren <peah@webrtc.org>
Reviewed-by: Gustaf Ullberg <gustaf@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29886}
This commit is contained in:
parent 3af0cd8de2
commit 77dc19905d
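For reference, a minimal sketch of how the split API introduced by this CL fits together, based on the signatures in the diff below (WebRtcAgc_Analyze computes the per-frame gains, WebRtcAgc_Process applies them). The buffer setup, mic level and frame length used here are illustrative assumptions, not values taken from WebRTC:

// Sketch only: assumes a 16 kHz stream (160 samples per 10 ms frame) and an
// arbitrary mic level of 127; both are placeholders.
#include <stddef.h>
#include <stdint.h>

#include "modules/audio_processing/agc/legacy/gain_control.h"

int RunOneAgcFrame(void* agc, int16_t* const* bands, size_t num_bands) {
  int32_t gains[11];  // One gain per ms, including the frame start and end.
  int32_t new_mic_level = 0;
  uint8_t saturation_warning = 0;

  // Step 1: analyze the frame and compute the digital gains; no samples are
  // modified here.
  if (WebRtcAgc_Analyze(agc, bands, num_bands, /*samples=*/160,
                        /*inMicLevel=*/127, &new_mic_level, /*echo=*/0,
                        &saturation_warning, gains) != 0) {
    return -1;
  }

  // Step 2: apply the precomputed gains; the output may alias the input.
  return WebRtcAgc_Process(agc, gains, bands, num_bands, bands);
}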
@@ -20,9 +20,6 @@
 #include "modules/audio_processing/agc/legacy/analog_agc.h"
 
 #include <stdlib.h>
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-#include <stdio.h>
-#endif
 
 #include "rtc_base/checks.h"
 
@@ -510,12 +507,6 @@ void WebRtcAgc_ZeroCtrl(LegacyAgc* stt, int32_t* inMicLevel, int32_t* env) {
       stt->micVol = *inMicLevel;
     }
 
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt,
-            "\t\tAGC->zeroCntrl, frame %d: 500 ms under threshold,"
-            " micVol: %d\n",
-            stt->fcount, stt->micVol);
-#endif
 
     stt->activeSpeech = 0;
     stt->Rxx16_LPw32Max = 0;
@@ -605,16 +596,8 @@ int32_t WebRtcAgc_ProcessAnalog(void* state,
   inMicLevelTmp = inMicLevel << stt->scale;
 
   if (inMicLevelTmp > stt->maxAnalog) {
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt, "\tAGC->ProcessAnalog, frame %d: micLvl > maxAnalog\n",
-            stt->fcount);
-#endif
     return -1;
   } else if (inMicLevelTmp < stt->minLevel) {
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt, "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel\n",
-            stt->fcount);
-#endif
     return -1;
   }
 
@@ -644,12 +627,6 @@ int32_t WebRtcAgc_ProcessAnalog(void* state,
     stt->micVol = inMicLevelTmp;
 #ifdef MIC_LEVEL_FEEDBACK
     // stt->numBlocksMicLvlSat = 0;
 #endif
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt,
-            "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel by manual"
-            " decrease, raise vol\n",
-            stt->fcount);
-#endif
   }
 
@@ -699,11 +676,6 @@ int32_t WebRtcAgc_ProcessAnalog(void* state,
     }
     inMicLevelTmp = stt->micVol;
 
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt,
-            "\tAGC->ProcessAnalog, frame %d: saturated, micVol = %d\n",
-            stt->fcount, stt->micVol);
-#endif
 
     if (stt->micVol < stt->minOutput) {
       *saturationWarning = 1;
@@ -826,12 +798,6 @@ int32_t WebRtcAgc_ProcessAnalog(void* state,
       stt->Rxx16_LPw32Max = 0;
 #ifdef MIC_LEVEL_FEEDBACK
       // stt->numBlocksMicLvlSat = 0;
 #endif
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-      fprintf(stt->fpt,
-              "\tAGC->ProcessAnalog, frame %d: measure >"
-              " 2ndUpperLim, micVol = %d, maxLevel = %d\n",
-              stt->fcount, stt->micVol, stt->maxLevel);
-#endif
     }
   } else if (stt->Rxx160_LPw32 > stt->upperLimit) {
@@ -865,12 +831,6 @@ int32_t WebRtcAgc_ProcessAnalog(void* state,
 
 #ifdef MIC_LEVEL_FEEDBACK
       // stt->numBlocksMicLvlSat = 0;
 #endif
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-      fprintf(stt->fpt,
-              "\tAGC->ProcessAnalog, frame %d: measure >"
-              " UpperLim, micVol = %d, maxLevel = %d\n",
-              stt->fcount, stt->micVol, stt->maxLevel);
-#endif
     }
   } else if (stt->Rxx160_LPw32 < stt->lowerSecondaryLimit) {
@@ -920,12 +880,6 @@ int32_t WebRtcAgc_ProcessAnalog(void* state,
         stt->numBlocksMicLvlSat++;
         fprintf(stderr, "Sat mic Level: %d\n", stt->numBlocksMicLvlSat);
       }
 #endif
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-      fprintf(stt->fpt,
-              "\tAGC->ProcessAnalog, frame %d: measure <"
-              " 2ndLowerLim, micVol = %d\n",
-              stt->fcount, stt->micVol);
-#endif
     }
   } else if (stt->Rxx160_LPw32 < stt->lowerLimit) {
@@ -975,12 +929,6 @@ int32_t WebRtcAgc_ProcessAnalog(void* state,
         stt->numBlocksMicLvlSat++;
         fprintf(stderr, "Sat mic Level: %d\n", stt->numBlocksMicLvlSat);
       }
 #endif
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-      fprintf(stt->fpt,
-              "\tAGC->ProcessAnalog, frame %d: measure < LowerLim, micVol "
-              "= %d\n",
-              stt->fcount, stt->micVol);
-#endif
     }
   } else {
@@ -1041,24 +989,20 @@ int32_t WebRtcAgc_ProcessAnalog(void* state,
   return 0;
 }
 
-int WebRtcAgc_Process(void* agcInst,
+int WebRtcAgc_Analyze(void* agcInst,
                       const int16_t* const* in_near,
                       size_t num_bands,
                       size_t samples,
-                      int16_t* const* out,
                       int32_t inMicLevel,
                       int32_t* outMicLevel,
                       int16_t echo,
-                      uint8_t* saturationWarning) {
-  LegacyAgc* stt;
-
-  stt = (LegacyAgc*)agcInst;
-
-  //
+                      uint8_t* saturationWarning,
+                      int32_t gains[11]) {
+  LegacyAgc* stt = (LegacyAgc*)agcInst;
+
   if (stt == NULL) {
     return -1;
   }
-  //
 
   if (stt->fs == 8000) {
     if (samples != 80) {
@@ -1076,18 +1020,14 @@ int WebRtcAgc_Process(void* agcInst,
   // TODO(minyue): PUT IN RANGE CHECKING FOR INPUT LEVELS
   *outMicLevel = inMicLevel;
 
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  stt->fcount++;
-#endif
-
-  if (WebRtcAgc_ProcessDigital(&stt->digitalAgc, in_near, num_bands, out,
-                               stt->fs, stt->lowLevelSignal) == -1) {
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt, "AGC->Process, frame %d: Error from DigAGC\n\n",
-            stt->fcount);
-#endif
+  int32_t error =
+      WebRtcAgc_ComputeDigitalGains(&stt->digitalAgc, in_near, num_bands,
+                                    stt->fs, stt->lowLevelSignal, gains);
+  if (error == -1) {
     return -1;
   }
 
   if (stt->agcMode < kAgcModeFixedDigital &&
       (stt->lowLevelSignal == 0 || stt->agcMode != kAgcModeAdaptiveDigital)) {
     if (WebRtcAgc_ProcessAnalog(agcInst, inMicLevel, outMicLevel,
@@ -1096,10 +1036,6 @@ int WebRtcAgc_Process(void* agcInst,
       return -1;
     }
   }
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  fprintf(stt->agcLog, "%5d\t%d\t%d\t%d\t%d\n", stt->fcount, inMicLevel,
-          *outMicLevel, stt->maxLevel, stt->micVol);
-#endif
 
   /* update queue */
   if (stt->inQueue > 1) {
@@ -1114,6 +1050,15 @@ int WebRtcAgc_Process(void* agcInst,
   return 0;
 }
 
+int WebRtcAgc_Process(const void* agcInst,
+                      const int32_t gains[11],
+                      const int16_t* const* in_near,
+                      size_t num_bands,
+                      int16_t* const* out) {
+  const LegacyAgc* stt = (const LegacyAgc*)agcInst;
+  return WebRtcAgc_ApplyDigitalGains(gains, num_bands, stt->fs, in_near, out);
+}
+
 int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig agcConfig) {
   LegacyAgc* stt;
   stt = (LegacyAgc*)agcInst;
@@ -1152,10 +1097,6 @@ int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig agcConfig) {
   if (WebRtcAgc_CalculateGainTable(
           &(stt->digitalAgc.gainTable[0]), stt->compressionGaindB,
           stt->targetLevelDbfs, stt->limiterEnable, stt->analogTarget) == -1) {
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt, "AGC->set_config, frame %d: Error from calcGainTable\n\n",
-            stt->fcount);
-#endif
     return -1;
   }
   /* Store the config in a WebRtcAgcConfig */
@@ -1194,12 +1135,6 @@ int WebRtcAgc_get_config(void* agcInst, WebRtcAgcConfig* config) {
 void* WebRtcAgc_Create() {
   LegacyAgc* stt = malloc(sizeof(LegacyAgc));
 
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  stt->fpt = fopen("./agc_test_log.txt", "wt");
-  stt->agcLog = fopen("./agc_debug_log.txt", "wt");
-  stt->digitalAgc.logFile = fopen("./agc_log.txt", "wt");
-#endif
-
   stt->initFlag = 0;
   stt->lastError = 0;
 
@@ -1210,11 +1145,6 @@ void WebRtcAgc_Free(void* state) {
   LegacyAgc* stt;
 
   stt = (LegacyAgc*)state;
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  fclose(stt->fpt);
-  fclose(stt->agcLog);
-  fclose(stt->digitalAgc.logFile);
-#endif
   free(stt);
 }
 
@@ -1249,14 +1179,7 @@ int WebRtcAgc_Init(void* agcInst,
   *     dBOv)]
   * 3 - Fixed Digital Gain [compressionGaindB (default 8 dB)]
   */
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  stt->fcount = 0;
-  fprintf(stt->fpt, "AGC->Init\n");
-#endif
   if (agcMode < kAgcModeUnchanged || agcMode > kAgcModeFixedDigital) {
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt, "AGC->Init: error, incorrect mode\n\n");
-#endif
     return -1;
   }
   stt->agcMode = agcMode;
@@ -1310,10 +1233,6 @@ int WebRtcAgc_Init(void* agcInst,
   stt->numBlocksMicLvlSat = 0;
   stt->micLvlSat = 0;
 #endif
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  fprintf(stt->fpt, "AGC->Init: minLevel = %d, maxAnalog = %d, maxLevel = %d\n",
-          stt->minLevel, stt->maxAnalog, stt->maxLevel);
-#endif
 
   /* Minimum output volume is 4% higher than the available lowest volume level
    */
@@ -1377,14 +1296,8 @@ int WebRtcAgc_Init(void* agcInst,
 
   /* Only positive values are allowed that are not too large */
   if ((minLevel >= maxLevel) || (maxLevel & 0xFC000000)) {
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt, "minLevel, maxLevel value(s) are invalid\n\n");
-#endif
    return -1;
  } else {
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    fprintf(stt->fpt, "\n");
-#endif
    return 0;
  }
}
@@ -12,9 +12,6 @@
 #define MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_
 
 //#define MIC_LEVEL_FEEDBACK
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-#include <stdio.h>
-#endif
 
 #include "modules/audio_processing/agc/legacy/digital_agc.h"
 #include "modules/audio_processing/agc/legacy/gain_control.h"
@@ -119,12 +116,6 @@ typedef struct {
   AgcVad vadMic;
   DigitalAgc digitalAgc;
 
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  FILE* fpt;
-  FILE* agcLog;
-  int32_t fcount;
-#endif
-
   int16_t lowLevelSignal;
 } LegacyAgc;
 
@@ -15,9 +15,6 @@
 #include "modules/audio_processing/agc/legacy/digital_agc.h"
 
 #include <string.h>
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-#include <stdio.h>
-#endif
 
 #include "rtc_base/checks.h"
 #include "modules/audio_processing/agc/legacy/gain_control.h"
@@ -254,9 +251,6 @@ int32_t WebRtcAgc_InitDigital(DigitalAgc* stt, int16_t agcMode) {
   stt->gain = 65536;
   stt->gatePrevious = 0;
   stt->agcMode = agcMode;
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  stt->frameCounter = 0;
-#endif
 
   // initialize VADs
   WebRtcAgc_InitVad(&stt->vadNearend);
@@ -275,27 +269,25 @@ int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* stt,
   return 0;
 }
 
-int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
-                                 const int16_t* const* in_near,
-                                 size_t num_bands,
-                                 int16_t* const* out,
-                                 uint32_t FS,
-                                 int16_t lowlevelSignal) {
-  // array for gains (one value per ms, incl start & end)
-  int32_t gains[11];
-
-  int32_t out_tmp, tmp32;
+// Gains is an 11 element long array (one value per ms, incl start & end).
+int32_t WebRtcAgc_ComputeDigitalGains(DigitalAgc* stt,
+                                      const int16_t* const* in_near,
+                                      size_t num_bands,
+                                      uint32_t FS,
+                                      int16_t lowlevelSignal,
+                                      int32_t gains[11]) {
+  int32_t tmp32;
   int32_t env[10];
   int32_t max_nrg;
   int32_t cur_level;
-  int32_t gain32, delta;
+  int32_t gain32;
   int16_t logratio;
   int16_t lower_thr, upper_thr;
   int16_t zeros = 0, zeros_fast, frac = 0;
   int16_t decay;
   int16_t gate, gain_adj;
   int16_t k;
-  size_t n, i, L;
+  size_t n, L;
   int16_t L2;  // samples/subframe
 
   // determine number of samples per ms
@@ -309,14 +301,8 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
     return -1;
   }
 
-  for (i = 0; i < num_bands; ++i) {
-    if (in_near[i] != out[i]) {
-      // Only needed if they don't already point to the same place.
-      memcpy(out[i], in_near[i], 10 * L * sizeof(in_near[i][0]));
-    }
-  }
   // VAD for near end
-  logratio = WebRtcAgc_ProcessVad(&stt->vadNearend, out[0], L * 10);
+  logratio = WebRtcAgc_ProcessVad(&stt->vadNearend, in_near[0], L * 10);
 
   // Account for far end VAD
   if (stt->vadFarend.counter > 10) {
@@ -358,18 +344,13 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
       decay = 0;
     }
   }
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  stt->frameCounter++;
-  fprintf(stt->logFile, "%5.2f\t%d\t%d\t%d\t", (float)(stt->frameCounter) / 100,
-          logratio, decay, stt->vadNearend.stdLongTerm);
-#endif
   // Find max amplitude per sub frame
   // iterate over sub frames
   for (k = 0; k < 10; k++) {
     // iterate over samples
     max_nrg = 0;
     for (n = 0; n < L; n++) {
-      int32_t nrg = out[0][k * L + n] * out[0][k * L + n];
+      int32_t nrg = in_near[0][k * L + n] * in_near[0][k * L + n];
       if (nrg > max_nrg) {
         max_nrg = nrg;
       }
@@ -416,12 +397,6 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
     tmp32 = ((stt->gainTable[zeros - 1] - stt->gainTable[zeros]) *
              (int64_t)frac) >> 12;
     gains[k + 1] = stt->gainTable[zeros] + tmp32;
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-    if (k == 0) {
-      fprintf(stt->logFile, "%d\t%d\t%d\t%d\t%d\n", env[0], cur_level,
-              stt->capacitorFast, stt->capacitorSlow, zeros);
-    }
-#endif
   }
 
   // Gate processing (lower gain during absence of speech)
@@ -498,20 +473,47 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
   // save start gain for next frame
   stt->gain = gains[10];
 
+  return 0;
+}
+
+int32_t WebRtcAgc_ApplyDigitalGains(const int32_t gains[11], size_t num_bands,
+                                    uint32_t FS, const int16_t* const* in_near,
+                                    int16_t* const* out) {
   // Apply gain
   // handle first sub frame separately
-  delta = (gains[1] - gains[0]) * (1 << (4 - L2));
-  gain32 = gains[0] * (1 << 4);
+  size_t L;
+  int16_t L2;  // samples/subframe
+
+  // determine number of samples per ms
+  if (FS == 8000) {
+    L = 8;
+    L2 = 3;
+  } else if (FS == 16000 || FS == 32000 || FS == 48000) {
+    L = 16;
+    L2 = 4;
+  } else {
+    return -1;
+  }
+
+  for (size_t i = 0; i < num_bands; ++i) {
+    if (in_near[i] != out[i]) {
+      // Only needed if they don't already point to the same place.
+      memcpy(out[i], in_near[i], 10 * L * sizeof(in_near[i][0]));
+    }
+  }
 
   // iterate over samples
-  for (n = 0; n < L; n++) {
-    for (i = 0; i < num_bands; ++i) {
-      out_tmp = (int64_t)out[i][n] * ((gain32 + 127) >> 7) >> 16;
+  int32_t delta = (gains[1] - gains[0]) * (1 << (4 - L2));
+  int32_t gain32 = gains[0] * (1 << 4);
+  for (size_t n = 0; n < L; n++) {
+    for (size_t i = 0; i < num_bands; ++i) {
+      int32_t out_tmp = (int64_t)out[i][n] * ((gain32 + 127) >> 7) >> 16;
       if (out_tmp > 4095) {
         out[i][n] = (int16_t)32767;
       } else if (out_tmp < -4096) {
        out[i][n] = (int16_t)-32768;
      } else {
-        tmp32 = ((int64_t)out[i][n] * (gain32 >> 4)) >> 16;
+        int32_t tmp32 = ((int64_t)out[i][n] * (gain32 >> 4)) >> 16;
        out[i][n] = (int16_t)tmp32;
      }
    }
@@ -519,12 +521,12 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
     gain32 += delta;
   }
   // iterate over subframes
-  for (k = 1; k < 10; k++) {
+  for (int k = 1; k < 10; k++) {
     delta = (gains[k + 1] - gains[k]) * (1 << (4 - L2));
     gain32 = gains[k] * (1 << 4);
     // iterate over samples
-    for (n = 0; n < L; n++) {
-      for (i = 0; i < num_bands; ++i) {
+    for (size_t n = 0; n < L; n++) {
+      for (size_t i = 0; i < num_bands; ++i) {
         int64_t tmp64 = ((int64_t)(out[i][k * L + n])) * (gain32 >> 4);
         tmp64 = tmp64 >> 16;
         if (tmp64 > 32767) {
@@ -540,7 +542,6 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
       gain32 += delta;
     }
   }
-
   return 0;
 }
 
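As a side note on the fixed-point path kept above: the gains are Q16 values (65536 corresponds to unity, matching stt->gain = 65536 at init and the 1/65536 scaling in the new float path), and WebRtcAgc_ApplyDigitalGains ramps the gain linearly within each 1 ms subframe of L = 1 << L2 samples. The following stand-alone check (not WebRTC code, an illustration under those assumptions) shows that stepping by (gains[k + 1] - gains[k]) << (4 - L2) per sample moves the shifted accumulator exactly from gains[k] << 4 to gains[k + 1] << 4 over one subframe:

#include <cassert>
#include <cstdint>

// Illustrative only: verifies the per-subframe linear gain ramp used by the
// fixed-point gain application, for moderate gain values.
void CheckGainRamp(int32_t gain_start, int32_t gain_end, int L2) {
  const int L = 1 << L2;  // Samples per ms (8 at 8 kHz, 16 otherwise).
  const int32_t delta = (gain_end - gain_start) * (1 << (4 - L2));
  int32_t gain32 = gain_start * (1 << 4);
  for (int n = 0; n < L; ++n) {
    gain32 += delta;  // The real code applies gain32 to a sample, then steps.
  }
  assert(gain32 == gain_end * (1 << 4));  // Lands exactly on the next gain.
}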
@@ -11,9 +11,6 @@
 #ifndef MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_
 #define MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_
 
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-#include <stdio.h>
-#endif
 #include "common_audio/signal_processing/include/signal_processing_library.h"
 
 // the 32 most significant bits of A(19) * B(26) >> 13
@@ -44,20 +41,22 @@ typedef struct {
   int16_t agcMode;
   AgcVad vadNearend;
   AgcVad vadFarend;
-#ifdef WEBRTC_AGC_DEBUG_DUMP
-  FILE* logFile;
-  int frameCounter;
-#endif
 } DigitalAgc;
 
 int32_t WebRtcAgc_InitDigital(DigitalAgc* digitalAgcInst, int16_t agcMode);
 
-int32_t WebRtcAgc_ProcessDigital(DigitalAgc* digitalAgcInst,
-                                 const int16_t* const* inNear,
-                                 size_t num_bands,
-                                 int16_t* const* out,
-                                 uint32_t FS,
-                                 int16_t lowLevelSignal);
+int32_t WebRtcAgc_ComputeDigitalGains(DigitalAgc* digitalAgcInst,
+                                      const int16_t* const* inNear,
+                                      size_t num_bands,
+                                      uint32_t FS,
+                                      int16_t lowLevelSignal,
+                                      int32_t gains[11]);
+
+int32_t WebRtcAgc_ApplyDigitalGains(const int32_t gains[11],
+                                    size_t num_bands,
+                                    uint32_t FS,
+                                    const int16_t* const* in_near,
+                                    int16_t* const* out);
 
 int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* digitalAgcInst,
                                      const int16_t* inFar,
@@ -127,12 +127,12 @@ int WebRtcAgc_VirtualMic(void* agcInst,
                          int32_t* micLevelOut);
 
 /*
- * This function processes a 10 ms frame and adjusts (normalizes) the gain both
- * analog and digitally. The gain adjustments are done only during active
- * periods of speech. The length of the speech vectors must be given in samples
- * (80 when FS=8000, and 160 when FS=16000, FS=32000 or FS=48000). The echo
- * parameter can be used to ensure the AGC will not adjust upward in the
- * presence of echo.
+ * This function analyses a 10 ms frame and produces the analog and digital
+ * gains required to normalize the signal. The gain adjustments are done only
+ * during active periods of speech. The length of the speech vectors must be
+ * given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or
+ * FS=48000). The echo parameter can be used to ensure the AGC will not adjust
+ * upward in the presence of echo.
  *
  * This function should be called after processing the near-end microphone
  * signal, in any case after any echo cancellation.
@@ -150,25 +150,47 @@ int WebRtcAgc_VirtualMic(void* agcInst,
  *
  * Output:
  *      - outMicLevel       : Adjusted microphone volume level
- *      - out               : Gain-adjusted near-end speech vector
- *                          : May be the same vector as the input.
  *      - saturationWarning : A returned value of 1 indicates a saturation event
  *                            has occurred and the volume cannot be further
 *                            reduced. Otherwise will be set to 0.
+ *      - gains             : Vector of gains to apply for digital normalization
 *
 * Return value:
 *                          : 0 - Normal operation.
 *                          : -1 - Error
 */
-int WebRtcAgc_Process(void* agcInst,
+int WebRtcAgc_Analyze(void* agcInst,
                       const int16_t* const* inNear,
                       size_t num_bands,
                       size_t samples,
-                      int16_t* const* out,
                       int32_t inMicLevel,
                       int32_t* outMicLevel,
                       int16_t echo,
-                      uint8_t* saturationWarning);
+                      uint8_t* saturationWarning,
+                      int32_t gains[11]);
+
+/*
+ * This function processes a 10 ms frame by applying precomputed digital gains.
+ *
+ * Input:
+ *      - agcInst           : AGC instance
+ *      - gains             : Vector of gains to apply for digital normalization
+ *      - in_near           : Near-end input speech vector for each band
+ *      - num_bands         : Number of bands in input/output vector
+ *
+ * Output:
+ *      - out               : Gain-adjusted near-end speech vector
+ *                          : May be the same vector as the input.
+ *
+ * Return value:
+ *                          : 0 - Normal operation.
+ *                          : -1 - Error
+ */
+int WebRtcAgc_Process(const void* agcInst,
+                      const int32_t gains[11],
+                      const int16_t* const* in_near,
+                      size_t num_bands,
+                      int16_t* const* out);
 
 /*
 * This function sets the config parameters (targetLevelDbfs,
@@ -18,8 +18,8 @@
 #include "modules/audio_processing/include/audio_processing.h"
 #include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/constructor_magic.h"
 #include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
 
 namespace webrtc {
 
@@ -39,59 +39,65 @@ int16_t MapSetting(GainControl::Mode mode) {
   return -1;
 }
 
+// Checks whether the legacy digital gain application should be used.
+bool UseLegacyDigitalGainApplier() {
+  return field_trial::IsEnabled("WebRTC-UseLegacyDigitalGainApplier");
+}
+
+// Floating point variant of WebRtcAgc_Process.
+void ApplyDigitalGain(const int32_t gains[11],
+                      size_t num_bands,
+                      float* const* out) {
+  constexpr float kScaling = 1.f / 65536.f;
+  constexpr int kNumSubSections = 16;
+  constexpr float kOneByNumSubSections = 1.f / kNumSubSections;
+
+  float gains_scaled[11];
+  for (int k = 0; k < 11; ++k) {
+    gains_scaled[k] = gains[k] * kScaling;
+  }
+
+  for (size_t b = 0; b < num_bands; ++b) {
+    float* out_band = out[b];
+    for (int k = 0, sample = 0; k < 10; ++k) {
+      const float delta =
+          (gains_scaled[k + 1] - gains_scaled[k]) * kOneByNumSubSections;
+      float gain = gains_scaled[k];
+      for (int n = 0; n < kNumSubSections; ++n, ++sample) {
+        RTC_DCHECK_EQ(k * kNumSubSections + n, sample);
+        out_band[sample] *= gain;
+        out_band[sample] =
+            std::min(32767.f, std::max(-32768.f, out_band[sample]));
+        gain += delta;
+      }
+    }
+  }
+}
+
 }  // namespace
 
-class GainControlImpl::GainController {
- public:
-  explicit GainController() {
-    state_ = WebRtcAgc_Create();
-    RTC_CHECK(state_);
+struct GainControlImpl::MonoAgcState {
+  MonoAgcState() {
+    state = WebRtcAgc_Create();
+    RTC_CHECK(state);
   }
 
-  ~GainController() {
-    RTC_DCHECK(state_);
-    WebRtcAgc_Free(state_);
+  ~MonoAgcState() {
+    RTC_DCHECK(state);
+    WebRtcAgc_Free(state);
   }
 
-  Handle* state() {
-    RTC_DCHECK(state_);
-    return state_;
-  }
-
-  void Initialize(int minimum_capture_level,
-                  int maximum_capture_level,
-                  Mode mode,
-                  int sample_rate_hz,
-                  int capture_level) {
-    RTC_DCHECK(state_);
-    int error =
-        WebRtcAgc_Init(state_, minimum_capture_level, maximum_capture_level,
-                       MapSetting(mode), sample_rate_hz);
-    RTC_DCHECK_EQ(0, error);
-
-    set_capture_level(capture_level);
-  }
-
-  void set_capture_level(int capture_level) { capture_level_ = capture_level; }
-
-  int get_capture_level() {
-    RTC_DCHECK(capture_level_);
-    return *capture_level_;
-  }
-
- private:
-  Handle* state_;
-  // TODO(peah): Remove the optional once the initialization is moved into the
-  // ctor.
-  absl::optional<int> capture_level_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(GainController);
+  MonoAgcState(const MonoAgcState&) = delete;
+  MonoAgcState& operator=(const MonoAgcState&) = delete;
+  int32_t gains[11];
+  Handle* state;
 };
 
 int GainControlImpl::instance_counter_ = 0;
 
 GainControlImpl::GainControlImpl()
     : data_dumper_(new ApmDataDumper(instance_counter_)),
+      use_legacy_gain_applier_(UseLegacyDigitalGainApplier()),
       mode_(kAdaptiveAnalog),
       minimum_capture_level_(0),
      maximum_capture_level_(255),
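The float gain applier above is the default path; the WebRTC-UseLegacyDigitalGainApplier field trial checked by UseLegacyDigitalGainApplier() switches back to the fixed-point WebRtcAgc_Process path. As a hedged sketch of how that trial could be enabled at startup (the "Name/Enabled/" string format follows the usual WebRTC field-trial convention; call it before the gain controller is created):

#include "system_wrappers/include/field_trial.h"

void ForceLegacyDigitalGainApplier() {
  // The string literal has static storage duration, which matters because
  // InitFieldTrialsFromString keeps the pointer instead of copying the string.
  webrtc::field_trial::InitFieldTrialsFromString(
      "WebRTC-UseLegacyDigitalGainApplier/Enabled/");
}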
@@ -102,7 +108,7 @@ GainControlImpl::GainControlImpl()
       was_analog_level_set_(false),
       stream_is_saturated_(false) {}
 
-GainControlImpl::~GainControlImpl() {}
+GainControlImpl::~GainControlImpl() = default;
 
 void GainControlImpl::ProcessRenderAudio(
     rtc::ArrayView<const int16_t> packed_render_audio) {
@@ -110,8 +116,8 @@ void GainControlImpl::ProcessRenderAudio(
     return;
   }
 
-  for (auto& gain_controller : gain_controllers_) {
-    WebRtcAgc_AddFarend(gain_controller->state(), packed_render_audio.data(),
+  for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+    WebRtcAgc_AddFarend(mono_agcs_[ch]->state, packed_render_audio.data(),
                         packed_render_audio.size());
   }
 }
@@ -120,27 +126,28 @@ void GainControlImpl::PackRenderAudioBuffer(
     const AudioBuffer& audio,
     std::vector<int16_t>* packed_buffer) {
   RTC_DCHECK_GE(AudioBuffer::kMaxSplitFrameLength, audio.num_frames_per_band());
-  std::array<int16_t, AudioBuffer::kMaxSplitFrameLength> mixed_low_pass_data;
-  rtc::ArrayView<const int16_t> mixed_low_pass(mixed_low_pass_data.data(),
-                                               audio.num_frames_per_band());
+  std::array<int16_t, AudioBuffer::kMaxSplitFrameLength>
+      mixed_16_kHz_render_data;
+  rtc::ArrayView<const int16_t> mixed_16_kHz_render(
+      mixed_16_kHz_render_data.data(), audio.num_frames_per_band());
   if (audio.num_channels() == 1) {
     FloatS16ToS16(audio.split_bands_const(0)[kBand0To8kHz],
-                  audio.num_frames_per_band(), mixed_low_pass_data.data());
+                  audio.num_frames_per_band(), mixed_16_kHz_render_data.data());
   } else {
     const int num_channels = static_cast<int>(audio.num_channels());
     for (size_t i = 0; i < audio.num_frames_per_band(); ++i) {
-      int32_t value =
-          FloatS16ToS16(audio.split_channels_const(kBand0To8kHz)[0][i]);
-      for (int j = 1; j < num_channels; ++j) {
-        value += FloatS16ToS16(audio.split_channels_const(kBand0To8kHz)[j][i]);
+      int32_t sum = 0;
+      for (int ch = 0; ch < num_channels; ++ch) {
+        sum += FloatS16ToS16(audio.split_channels_const(kBand0To8kHz)[ch][i]);
       }
-      mixed_low_pass_data[i] = value / num_channels;
+      mixed_16_kHz_render_data[i] = sum / num_channels;
     }
   }
 
   packed_buffer->clear();
-  packed_buffer->insert(packed_buffer->end(), mixed_low_pass.data(),
-                        (mixed_low_pass.data() + audio.num_frames_per_band()));
+  packed_buffer->insert(
+      packed_buffer->end(), mixed_16_kHz_render.data(),
+      (mixed_16_kHz_render.data() + audio.num_frames_per_band()));
 }
 
 int GainControlImpl::AnalyzeCaptureAudio(const AudioBuffer& audio) {
@@ -151,7 +158,7 @@ int GainControlImpl::AnalyzeCaptureAudio(const AudioBuffer& audio) {
   RTC_DCHECK(num_proc_channels_);
   RTC_DCHECK_GE(AudioBuffer::kMaxSplitFrameLength, audio.num_frames_per_band());
   RTC_DCHECK_EQ(audio.num_channels(), *num_proc_channels_);
-  RTC_DCHECK_LE(*num_proc_channels_, gain_controllers_.size());
+  RTC_DCHECK_LE(*num_proc_channels_, mono_agcs_.size());
 
   int16_t split_band_data[AudioBuffer::kMaxNumBands]
                          [AudioBuffer::kMaxSplitFrameLength];
@@ -159,39 +166,35 @@ int GainControlImpl::AnalyzeCaptureAudio(const AudioBuffer& audio) {
       split_band_data[0], split_band_data[1], split_band_data[2]};
 
   if (mode_ == kAdaptiveAnalog) {
-    int capture_channel = 0;
-    for (auto& gain_controller : gain_controllers_) {
-      gain_controller->set_capture_level(analog_capture_level_);
+    for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+      capture_levels_[ch] = analog_capture_level_;
 
-      audio.ExportSplitChannelData(capture_channel, split_bands);
+      audio.ExportSplitChannelData(ch, split_bands);
 
       int err =
-          WebRtcAgc_AddMic(gain_controller->state(), split_bands,
+          WebRtcAgc_AddMic(mono_agcs_[ch]->state, split_bands,
                            audio.num_bands(), audio.num_frames_per_band());
 
       if (err != AudioProcessing::kNoError) {
         return AudioProcessing::kUnspecifiedError;
       }
-      ++capture_channel;
     }
   } else if (mode_ == kAdaptiveDigital) {
-    int capture_channel = 0;
-    for (auto& gain_controller : gain_controllers_) {
+    for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
       int32_t capture_level_out = 0;
 
-      audio.ExportSplitChannelData(capture_channel, split_bands);
+      audio.ExportSplitChannelData(ch, split_bands);
 
       int err =
-          WebRtcAgc_VirtualMic(gain_controller->state(), split_bands,
+          WebRtcAgc_VirtualMic(mono_agcs_[ch]->state, split_bands,
                                audio.num_bands(), audio.num_frames_per_band(),
                                analog_capture_level_, &capture_level_out);
 
-      gain_controller->set_capture_level(capture_level_out);
+      capture_levels_[ch] = capture_level_out;
 
       if (err != AudioProcessing::kNoError) {
         return AudioProcessing::kUnspecifiedError;
       }
-      ++capture_channel;
     }
   }
 
@@ -214,57 +217,78 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio,
   RTC_DCHECK_EQ(audio->num_channels(), *num_proc_channels_);
 
   stream_is_saturated_ = false;
-  int capture_channel = 0;
-  for (auto& gain_controller : gain_controllers_) {
-    int32_t capture_level_out = 0;
-    uint8_t saturation_warning = 0;
-
+  bool error_reported = false;
+  for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
     int16_t split_band_data[AudioBuffer::kMaxNumBands]
                            [AudioBuffer::kMaxSplitFrameLength];
     int16_t* split_bands[AudioBuffer::kMaxNumBands] = {
         split_band_data[0], split_band_data[1], split_band_data[2]};
-    audio->ExportSplitChannelData(capture_channel, split_bands);
+    audio->ExportSplitChannelData(ch, split_bands);
 
     // The call to stream_has_echo() is ok from a deadlock perspective
     // as the capture lock is allready held.
-    int err = WebRtcAgc_Process(
-        gain_controller->state(), split_bands, audio->num_bands(),
-        audio->num_frames_per_band(), split_bands,
-        gain_controller->get_capture_level(), &capture_level_out,
-        stream_has_echo, &saturation_warning);
+    int32_t new_capture_level = 0;
+    uint8_t saturation_warning = 0;
+    int err_analyze = WebRtcAgc_Analyze(
+        mono_agcs_[ch]->state, split_bands, audio->num_bands(),
+        audio->num_frames_per_band(), capture_levels_[ch], &new_capture_level,
+        stream_has_echo, &saturation_warning, mono_agcs_[ch]->gains);
+    capture_levels_[ch] = new_capture_level;
 
-    audio->ImportSplitChannelData(capture_channel, split_bands);
+    error_reported = error_reported || err_analyze != AudioProcessing::kNoError;
 
-    if (err != AudioProcessing::kNoError) {
-      return AudioProcessing::kUnspecifiedError;
-    }
+    stream_is_saturated_ = stream_is_saturated_ || saturation_warning == 1;
+  }
 
-    gain_controller->set_capture_level(capture_level_out);
-    if (saturation_warning == 1) {
-      stream_is_saturated_ = true;
+  // Choose the minimun gain for application
+  size_t index_to_apply = 0;
+  for (size_t ch = 1; ch < mono_agcs_.size(); ++ch) {
+    if (mono_agcs_[index_to_apply]->gains[10] < mono_agcs_[ch]->gains[10]) {
+      index_to_apply = ch;
     }
+  }
 
-    ++capture_channel;
+  if (use_legacy_gain_applier_) {
+    for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+      int16_t split_band_data[AudioBuffer::kMaxNumBands]
+                             [AudioBuffer::kMaxSplitFrameLength];
+      int16_t* split_bands[AudioBuffer::kMaxNumBands] = {
+          split_band_data[0], split_band_data[1], split_band_data[2]};
+      audio->ExportSplitChannelData(ch, split_bands);
+
+      int err_process = WebRtcAgc_Process(
+          mono_agcs_[ch]->state, mono_agcs_[index_to_apply]->gains, split_bands,
+          audio->num_bands(), split_bands);
+      RTC_DCHECK_EQ(err_process, 0);
+
+      audio->ImportSplitChannelData(ch, split_bands);
+    }
+  } else {
+    for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+      ApplyDigitalGain(mono_agcs_[index_to_apply]->gains, audio->num_bands(),
+                       audio->split_bands(ch));
+    }
   }
 
   RTC_DCHECK_LT(0ul, *num_proc_channels_);
   if (mode_ == kAdaptiveAnalog) {
-    // Take the analog level to be the average across the handles.
-    analog_capture_level_ = 0;
-    for (auto& gain_controller : gain_controllers_) {
-      analog_capture_level_ += gain_controller->get_capture_level();
+    // Take the analog level to be the minimum accross all channels.
+    analog_capture_level_ = capture_levels_[0];
+    for (size_t ch = 1; ch < mono_agcs_.size(); ++ch) {
+      analog_capture_level_ =
+          std::min(analog_capture_level_, capture_levels_[ch]);
     }
+  }
 
-    analog_capture_level_ /= (*num_proc_channels_);
+  if (error_reported) {
+    return AudioProcessing::kUnspecifiedError;
   }
 
   was_analog_level_set_ = false;
 
   return AudioProcessing::kNoError;
 }
 
-int GainControlImpl::compression_gain_db() const {
-  return compression_gain_db_;
-}
-
 // TODO(ajm): ensure this is called under kAdaptiveAnalog.
 int GainControlImpl::set_stream_analog_level(int level) {
@@ -282,9 +306,6 @@ int GainControlImpl::set_stream_analog_level(int level) {
 int GainControlImpl::stream_analog_level() const {
   data_dumper_->DumpRaw("gain_control_stream_analog_level", 1,
                         &analog_capture_level_);
-  // TODO(ajm): enable this assertion?
-  // RTC_DCHECK_EQ(kAdaptiveAnalog, mode_);
-
   return analog_capture_level_;
 }
 
@@ -301,10 +322,6 @@ int GainControlImpl::Enable(bool enable) {
   return AudioProcessing::kNoError;
 }
 
-bool GainControlImpl::is_enabled() const {
-  return enabled_;
-}
-
 int GainControlImpl::set_mode(Mode mode) {
   if (MapSetting(mode) == -1) {
     return AudioProcessing::kBadParameterError;
@@ -317,49 +334,21 @@ int GainControlImpl::set_mode(Mode mode) {
   return AudioProcessing::kNoError;
 }
 
-GainControl::Mode GainControlImpl::mode() const {
-  return mode_;
-}
-
 int GainControlImpl::set_analog_level_limits(int minimum, int maximum) {
-  if (minimum < 0) {
+  if (minimum < 0 || maximum > 65535 || maximum < minimum) {
     return AudioProcessing::kBadParameterError;
   }
 
-  if (maximum > 65535) {
-    return AudioProcessing::kBadParameterError;
-  }
-
-  if (maximum < minimum) {
-    return AudioProcessing::kBadParameterError;
-  }
-
-  size_t num_proc_channels_local = 0u;
-  int sample_rate_hz_local = 0;
-  {
-    minimum_capture_level_ = minimum;
-    maximum_capture_level_ = maximum;
-
-    RTC_DCHECK(num_proc_channels_);
-    RTC_DCHECK(sample_rate_hz_);
-    num_proc_channels_local = *num_proc_channels_;
-    sample_rate_hz_local = *sample_rate_hz_;
-  }
-  Initialize(num_proc_channels_local, sample_rate_hz_local);
+  minimum_capture_level_ = minimum;
+  maximum_capture_level_ = maximum;
+
+  RTC_DCHECK(num_proc_channels_);
+  RTC_DCHECK(sample_rate_hz_);
+  Initialize(*num_proc_channels_, *sample_rate_hz_);
   return AudioProcessing::kNoError;
 }
 
-int GainControlImpl::analog_level_minimum() const {
-  return minimum_capture_level_;
-}
-
-int GainControlImpl::analog_level_maximum() const {
-  return maximum_capture_level_;
-}
-
-bool GainControlImpl::stream_is_saturated() const {
-  return stream_is_saturated_;
-}
-
 int GainControlImpl::set_target_level_dbfs(int level) {
   if (level > 31 || level < 0) {
@@ -369,10 +358,6 @@ int GainControlImpl::set_target_level_dbfs(int level) {
   return Configure();
 }
 
-int GainControlImpl::target_level_dbfs() const {
-  return target_level_dbfs_;
-}
-
 int GainControlImpl::set_compression_gain_db(int gain) {
   if (gain < 0 || gain > 90) {
     RTC_LOG(LS_ERROR) << "set_compression_gain_db(" << gain << ") failed.";
@@ -387,10 +372,6 @@ int GainControlImpl::enable_limiter(bool enable) {
   return Configure();
 }
 
-bool GainControlImpl::is_limiter_enabled() const {
-  return limiter_enabled_;
-}
-
 void GainControlImpl::Initialize(size_t num_proc_channels, int sample_rate_hz) {
   data_dumper_->InitiateNewSetOfRecordings();
 
@@ -401,13 +382,18 @@ void GainControlImpl::Initialize(size_t num_proc_channels, int sample_rate_hz) {
     return;
   }
 
-  gain_controllers_.resize(*num_proc_channels_);
-  for (auto& gain_controller : gain_controllers_) {
-    if (!gain_controller) {
-      gain_controller.reset(new GainController());
+  mono_agcs_.resize(*num_proc_channels_);
+  capture_levels_.resize(*num_proc_channels_);
+  for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+    if (!mono_agcs_[ch]) {
+      mono_agcs_[ch].reset(new MonoAgcState());
     }
-    gain_controller->Initialize(minimum_capture_level_, maximum_capture_level_,
-                                mode_, *sample_rate_hz_, analog_capture_level_);
+
+    int error = WebRtcAgc_Init(mono_agcs_[ch]->state, minimum_capture_level_,
+                               maximum_capture_level_, MapSetting(mode_),
+                               *sample_rate_hz_);
+    RTC_DCHECK_EQ(error, 0);
+    capture_levels_[ch] = analog_capture_level_;
   }
 
   Configure();
@@ -424,11 +410,10 @@ int GainControlImpl::Configure() {
   config.limiterEnable = limiter_enabled_;
 
   int error = AudioProcessing::kNoError;
-  for (auto& gain_controller : gain_controllers_) {
-    const int handle_error =
-        WebRtcAgc_set_config(gain_controller->state(), config);
-    if (handle_error != AudioProcessing::kNoError) {
-      error = handle_error;
+  for (size_t ch = 0; ch < mono_agcs_.size(); ++ch) {
+    int error_ch = WebRtcAgc_set_config(mono_agcs_[ch]->state, config);
+    if (error_ch != AudioProcessing::kNoError) {
+      error = error_ch;
     }
   }
   return error;
@@ -20,7 +20,6 @@
 #include "absl/types/optional.h"
 #include "api/array_view.h"
 #include "modules/audio_processing/agc/gain_control.h"
-#include "rtc_base/constructor_magic.h"
 
 namespace webrtc {
 
@@ -45,13 +44,13 @@ class GainControlImpl : public GainControl {
                              std::vector<int16_t>* packed_buffer);
 
   // GainControl implementation.
-  bool is_enabled() const override;
+  bool is_enabled() const override { return enabled_; }
   int stream_analog_level() const override;
-  bool is_limiter_enabled() const override;
-  Mode mode() const override;
+  bool is_limiter_enabled() const override { return limiter_enabled_; }
+  Mode mode() const override { return mode_; }
   int Enable(bool enable) override;
   int set_mode(Mode mode) override;
-  int compression_gain_db() const override;
+  int compression_gain_db() const override { return compression_gain_db_; }
   int set_analog_level_limits(int minimum, int maximum) override;
   int set_compression_gain_db(int gain) override;
   int set_target_level_dbfs(int level) override;
@@ -59,13 +58,13 @@ class GainControlImpl : public GainControl {
   int set_stream_analog_level(int level) override;
 
  private:
-  class GainController;
+  struct MonoAgcState;
 
   // GainControl implementation.
-  int target_level_dbfs() const override;
-  int analog_level_minimum() const override;
-  int analog_level_maximum() const override;
-  bool stream_is_saturated() const override;
+  int target_level_dbfs() const override { return target_level_dbfs_; }
+  int analog_level_minimum() const override { return minimum_capture_level_; }
+  int analog_level_maximum() const override { return maximum_capture_level_; }
+  bool stream_is_saturated() const override { return stream_is_saturated_; }
 
   int Configure();
 
@@ -73,6 +72,7 @@ class GainControlImpl : public GainControl {
 
   bool enabled_ = false;
 
+  const bool use_legacy_gain_applier_;
   Mode mode_;
   int minimum_capture_level_;
   int maximum_capture_level_;
@@ -83,7 +83,8 @@ class GainControlImpl : public GainControl {
   bool was_analog_level_set_;
   bool stream_is_saturated_;
 
-  std::vector<std::unique_ptr<GainController>> gain_controllers_;
+  std::vector<std::unique_ptr<MonoAgcState>> mono_agcs_;
+  std::vector<int> capture_levels_;
 
   absl::optional<size_t> num_proc_channels_;
   absl::optional<int> sample_rate_hz_;
@@ -133,21 +133,6 @@ void RunBitExactnessTest(int sample_rate_hz,
 // Chromium ARM and ARM64 boths have been identified. This is tracked in the
 // issue https://bugs.chromium.org/p/webrtc/issues/detail?id=5711.
 
-#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
-      defined(WEBRTC_ANDROID))
-TEST(GainControlBitExactnessTest,
-     Mono8kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
-#else
-TEST(GainControlBitExactnessTest,
-     DISABLED_Mono8kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
-#endif
-  const int kStreamAnalogLevelReference = 50;
-  const float kOutputReference[] = {-0.006622f, -0.002747f, 0.001587f};
-  RunBitExactnessTest(8000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
-                      true, 0, 100, kStreamAnalogLevelReference,
-                      kOutputReference);
-}
-
 #if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
       defined(WEBRTC_ANDROID))
 TEST(GainControlBitExactnessTest,
@@ -209,21 +194,6 @@ TEST(GainControlBitExactnessTest,
                       kOutputReference);
 }
 
-#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
-      defined(WEBRTC_ANDROID))
-TEST(GainControlBitExactnessTest,
-     Mono8kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
-#else
-TEST(GainControlBitExactnessTest,
-     DISABLED_Mono8kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
-#endif
-  const int kStreamAnalogLevelReference = 50;
-  const float kOutputReference[] = {-0.004028f, -0.001678f, 0.000946f};
-  RunBitExactnessTest(8000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
-                      true, 0, 100, kStreamAnalogLevelReference,
-                      kOutputReference);
-}
-
 #if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
       defined(WEBRTC_ANDROID))
 TEST(GainControlBitExactnessTest,
@@ -264,7 +234,7 @@ TEST(GainControlBitExactnessTest,
      DISABLED_Mono32kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
 #endif
   const int kStreamAnalogLevelReference = 50;
-  const float kOutputReference[] = {-0.006104f, -0.005524f, -0.004974f};
+  const float kOutputReference[] = {-0.006134f, -0.005524f, -0.005005f};
   RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
                       true, 0, 100, kStreamAnalogLevelReference,
                       kOutputReference);
@@ -279,27 +249,12 @@ TEST(GainControlBitExactnessTest,
      DISABLED_Mono48kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
 #endif
   const int kStreamAnalogLevelReference = 50;
-  const float kOutputReference[] = {-0.006104f, -0.005524f, -0.004974f};
+  const float kOutputReference[] = {-0.006134f, -0.005524f, -0.005005};
   RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
                       true, 0, 100, kStreamAnalogLevelReference,
                       kOutputReference);
 }
 
-#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
-      defined(WEBRTC_ANDROID))
-TEST(GainControlBitExactnessTest,
-     Mono8kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
-#else
-TEST(GainControlBitExactnessTest,
-     DISABLED_Mono8kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
-#endif
-  const int kStreamAnalogLevelReference = 50;
-  const float kOutputReference[] = {-0.011871f, -0.004944f, 0.002838f};
-  RunBitExactnessTest(8000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
-                      true, 0, 100, kStreamAnalogLevelReference,
-                      kOutputReference);
-}
-
 #if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
      defined(WEBRTC_ANDROID))
 TEST(GainControlBitExactnessTest,
@@ -324,8 +279,8 @@ TEST(GainControlBitExactnessTest,
      DISABLED_Stereo16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
 #endif
   const int kStreamAnalogLevelReference = 50;
-  const float kOutputReference[] = {-0.048950f, -0.028503f, -0.050354f,
-                                    -0.048950f, -0.028503f, -0.050354f};
+  const float kOutputReference[] = {-0.048896f, -0.028479f, -0.050345f,
+                                    -0.048896f, -0.028479f, -0.050345f};
   RunBitExactnessTest(16000, 2, GainControl::Mode::kFixedDigital, 10, 50, 5,
                       true, 0, 100, kStreamAnalogLevelReference,
                       kOutputReference);
@@ -340,7 +295,7 @@ TEST(GainControlBitExactnessTest,
      DISABLED_Mono32kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
 #endif
   const int kStreamAnalogLevelReference = 50;
-  const float kOutputReference[] = {-0.018188f, -0.016418f, -0.014862f};
+  const float kOutputReference[] = {-0.018158f, -0.016357f, -0.014832f};
   RunBitExactnessTest(32000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
                       true, 0, 100, kStreamAnalogLevelReference,
                       kOutputReference);
@@ -355,7 +310,7 @@ TEST(GainControlBitExactnessTest,
      DISABLED_Mono48kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
 #endif
   const int kStreamAnalogLevelReference = 50;
-  const float kOutputReference[] = {-0.018188f, -0.016418f, -0.014862f};
+  const float kOutputReference[] = {-0.018158f, -0.016357f, -0.014832f};
   RunBitExactnessTest(32000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
                       true, 0, 100, kStreamAnalogLevelReference,
                       kOutputReference);
@@ -1 +1 @@
-f85386d49e89027aa14f2aad36537a8a4e887a61
+4010b1fe15eda1b42968cdb3f9fed399e1aa7197
@@ -1 +1 @@
-734cc6174a5dac2fd87de267fe8d12519fe18321
+8d368435bbc80edab08205c6f21db1416e119119