diff --git a/webrtc/common_audio/resampler/sinc_resampler.cc b/webrtc/common_audio/resampler/sinc_resampler.cc index 373ef4c76c..81c789d00b 100644 --- a/webrtc/common_audio/resampler/sinc_resampler.cc +++ b/webrtc/common_audio/resampler/sinc_resampler.cc @@ -97,6 +97,7 @@ #include "webrtc/typedefs.h" namespace webrtc { + namespace { double SincScaleFactor(double io_ratio) { diff --git a/webrtc/common_audio/vad/vad_core_unittest.cc b/webrtc/common_audio/vad/vad_core_unittest.cc index 77db3d8ed2..00f5841477 100644 --- a/webrtc/common_audio/vad/vad_core_unittest.cc +++ b/webrtc/common_audio/vad/vad_core_unittest.cc @@ -83,7 +83,7 @@ TEST_F(VadTest, CalcVad) { // Construct a speech signal that will trigger the VAD in all modes. It is // known that (i * i) will wrap around, but that doesn't matter in this case. for (int16_t i = 0; i < kMaxFrameLength; ++i) { - speech[i] = (i * i); + speech[i] = static_cast<int16_t>(i * i); } for (size_t j = 0; j < kFrameLengthsSize; ++j) { if (ValidRatesAndFrameLengths(8000, kFrameLengths[j])) { diff --git a/webrtc/common_audio/vad/vad_filterbank_unittest.cc b/webrtc/common_audio/vad/vad_filterbank_unittest.cc index d274c4b131..4232cbcff4 100644 --- a/webrtc/common_audio/vad/vad_filterbank_unittest.cc +++ b/webrtc/common_audio/vad/vad_filterbank_unittest.cc @@ -39,7 +39,7 @@ TEST_F(VadTest, vad_filterbank) { // known that (i * i) will wrap around, but that doesn't matter in this case. int16_t speech[kMaxFrameLength]; for (int16_t i = 0; i < kMaxFrameLength; ++i) { - speech[i] = (i * i); + speech[i] = static_cast<int16_t>(i * i); } int frame_length_index = 0; diff --git a/webrtc/common_audio/vad/vad_sp_unittest.cc b/webrtc/common_audio/vad/vad_sp_unittest.cc index d893138ad3..9127bf3299 100644 --- a/webrtc/common_audio/vad/vad_sp_unittest.cc +++ b/webrtc/common_audio/vad/vad_sp_unittest.cc @@ -41,7 +41,7 @@ TEST_F(VadTest, vad_sp) { // Construct a speech signal that will trigger the VAD in all modes. 
It is // known that (i * i) will wrap around, but that doesn't matter in this case. for (int16_t i = 0; i < kMaxFrameLenSp; ++i) { - data_in[i] = (i * i); + data_in[i] = static_cast<int16_t>(i * i); } // Input values all zeros, expect all zeros out. WebRtcVad_Downsampling(zeros, data_out, state, kMaxFrameLenSp); diff --git a/webrtc/common_audio/vad/vad_unittest.cc b/webrtc/common_audio/vad/vad_unittest.cc index 2c3e803cb6..48eddbf935 100644 --- a/webrtc/common_audio/vad/vad_unittest.cc +++ b/webrtc/common_audio/vad/vad_unittest.cc @@ -66,7 +66,7 @@ TEST_F(VadTest, ApiTest) { // known that (i * i) will wrap around, but that doesn't matter in this case. int16_t speech[kMaxFrameLength]; for (int16_t i = 0; i < kMaxFrameLength; i++) { - speech[i] = (i * i); + speech[i] = static_cast<int16_t>(i * i); } // nullptr instance tests diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h index 68dd7dac50..34ff8aa000 100644 --- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h +++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h @@ -28,7 +28,7 @@ void WebRtcIlbcfix_CbMemEnergy( int16_t *energyW16, /* (o) Energy in the CB vectors */ int16_t *energyShifts, /* (o) Shift value of the energy */ int scale, /* (i) The scaling of all energy values */ - int16_t base_size /* (i) Index to where the energy values should be stored */ + int16_t base_size /* (i) Index to where energy values should be stored */ ); #endif diff --git a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c index ecdd68a57f..b313b58b8d 100644 --- a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c +++ b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c @@ -70,7 +70,8 @@ void WebRtcIlbcfix_DoThePlc( /* Maximum 60 samples are correlated, preserve as high accuracy as possible without getting overflow */ - max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual, (int16_t)iLBCdec_inst->blockl); + max 
= WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual, + (int16_t)iLBCdec_inst->blockl); scale3 = (WebRtcSpl_GetSizeInBits(max)<<1) - 25; if (scale3 < 0) { scale3 = 0; diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c index f0ae07e132..d0c59d6878 100644 --- a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c +++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c @@ -27,9 +27,9 @@ -int WebRtcIsacfix_DecodeImpl(int16_t *signal_out16, - IsacFixDecoderInstance *ISACdec_obj, - int16_t *current_framesamples) +int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16, + IsacFixDecoderInstance* ISACdec_obj, + int16_t* current_framesamples) { int k; int err; diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c index bdb807e10b..43a9e52127 100644 --- a/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c +++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c @@ -777,7 +777,7 @@ int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst, ISACFIX_SubStruct *ISAC_inst; /* number of samples (480 or 960), output from decoder */ /* that were actually used in the encoder/decoder (determined on the fly) */ - int16_t number_of_samples; + int16_t number_of_samples; int declen = 0; /* typecast pointer to real structure */ @@ -807,8 +807,8 @@ int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst, /* added for NetEq purposes (VAD/DTX related) */ *speechType=1; - declen = WebRtcIsacfix_DecodeImpl(decoded,&ISAC_inst->ISACdec_obj, &number_of_samples); - + declen = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj, + &number_of_samples); if (declen < 0) { /* Some error inside the decoder */ ISAC_inst->errorcode = -(int16_t)declen; @@ -818,14 +818,18 @@ int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst, /* error check */ - if (declen & 0x0001) { - if (len != declen 
&& len != declen + (((ISAC_inst->ISACdec_obj.bitstr_obj).stream[declen>>1]) & 0x00FF) ) { + if (declen & 1) { + if (len != declen && + len != declen + + ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) & 0xFF)) { ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH; memset(decoded, 0, sizeof(int16_t) * number_of_samples); return -1; } } else { - if (len != declen && len != declen + (((ISAC_inst->ISACdec_obj.bitstr_obj).stream[declen>>1]) >> 8) ) { + if (len != declen && + len != declen + + ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) >> 8)) { ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH; memset(decoded, 0, sizeof(int16_t) * number_of_samples); return -1; @@ -870,7 +874,7 @@ int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst, ISACFIX_SubStruct *ISAC_inst; /* twice the number of samples (480 or 960), output from decoder */ /* that were actually used in the encoder/decoder (determined on the fly) */ - int16_t number_of_samples; + int16_t number_of_samples; int declen = 0; int16_t dummy[FRAMESAMPLES/2]; @@ -901,8 +905,8 @@ int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst, /* added for NetEq purposes (VAD/DTX related) */ *speechType=1; - declen = WebRtcIsacfix_DecodeImpl(decoded,&ISAC_inst->ISACdec_obj, &number_of_samples); - + declen = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj, + &number_of_samples); if (declen < 0) { /* Some error inside the decoder */ ISAC_inst->errorcode = -(int16_t)declen; @@ -912,14 +916,18 @@ int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst, /* error check */ - if (declen & 0x0001) { - if (len != declen && len != declen + (((ISAC_inst->ISACdec_obj.bitstr_obj).stream[declen>>1]) & 0x00FF) ) { + if (declen & 1) { + if (len != declen && + len != declen + + ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) & 0xFF)) { ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH; memset(decoded, 0, sizeof(int16_t) * number_of_samples); return -1; } } else { - if (len != declen && len != declen + 
(((ISAC_inst->ISACdec_obj.bitstr_obj).stream[declen>>1]) >> 8) ) { + if (len != declen && + len != declen + + ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >>1]) >> 8)) { ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH; memset(decoded, 0, sizeof(int16_t) * number_of_samples); return -1; @@ -1319,7 +1327,8 @@ int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded, read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream); /* decode frame length, needed to get to the rateIndex in the bitstream */ - err = WebRtcIsacfix_DecodeFrameLen(&streamdata, rateIndex); + int16_t frameLength; + err = WebRtcIsacfix_DecodeFrameLen(&streamdata, &frameLength); if (err<0) // error check return err; diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c index 7fcb9e3b7b..13858d790c 100644 --- a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c +++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c @@ -218,7 +218,7 @@ void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef, int16_t lo_hi, int16_t *lat_outQ0) { - int ii,n,k,i,u; + int ii, n, k, i, u; int16_t sthQ15[MAX_AR_MODEL_ORDER]; int16_t cthQ15[MAX_AR_MODEL_ORDER]; int32_t tmp32; diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S index 4a0d99f3b1..945d6ee3a8 100644 --- a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S +++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S @@ -38,7 +38,7 @@ DEFINE_FUNCTION WebRtcIsacfix_FilterArLoop mov r4, #HALF_SUBFRAMELEN sub r4, #1 @ Outer loop counter = HALF_SUBFRAMELEN - 1 -HALF_SUBFRAME_LOOP: @ for(n = 0; n < HALF_SUBFRAMELEN - 1; n++) +HALF_SUBFRAME_LOOP: @ for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) ldr r9, [sp, #32] @ Restore the inner loop counter to order_coef ldrh r5, [r1] @ tmpAR = ar_f_Q0[n+1] @@ -46,7 +46,7 @@ HALF_SUBFRAME_LOOP: @ for(n = 
0; n < HALF_SUBFRAMELEN - 1; n++) add r2, r9, asl #1 @ Restore r2 to &cth_Q15[order_coef] add r3, r9, asl #1 @ Restore r3 to &sth_Q15[order_coef] -ORDER_COEF_LOOP: @ for(k = order_coef ; k > 0; k--) +ORDER_COEF_LOOP: @ for (k = order_coef; k > 0; k--) ldrh r7, [r3, #-2]! @ sth_Q15[k - 1] ldrh r6, [r2, #-2]! @ cth_Q15[k - 1] diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc b/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc index ab7c640eed..2628f1f432 100644 --- a/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc +++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc @@ -624,8 +624,8 @@ int main(int argc, char* argv[]) errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst); printf("\nError in encoder: %d.\n", errtype); } else { - if (fwrite(streamdata, sizeof(char), - stream_len, outbits) != (size_t)stream_len) { + if (fwrite(streamdata, sizeof(char), stream_len, outbits) != + (size_t)stream_len) { return -1; } } @@ -731,12 +731,12 @@ int main(int argc, char* argv[]) /* iSAC decoding */ if( lostFrame && framecnt > 0) { if (nbTest !=2) { - declen = WebRtcIsacfix_DecodePlc(ISAC_main_inst, - decoded, prevFrameSize ); + declen = + WebRtcIsacfix_DecodePlc(ISAC_main_inst, decoded, prevFrameSize); } else { #ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED - declen = WebRtcIsacfix_DecodePlcNb(ISAC_main_inst, decoded, - prevFrameSize ); + declen = WebRtcIsacfix_DecodePlcNb( + ISAC_main_inst, decoded, prevFrameSize); #else declen = -1; #endif @@ -755,7 +755,7 @@ int main(int argc, char* argv[]) decoded, speechType); /* Error check */ - if (err<0 || declen<0 || FL!=declen) { + if (err < 0 || declen < 0 || FL != declen) { errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst); printf("\nError in decode_B/or getFrameLen: %d.\n", errtype); } diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c index 6dbdb7eff8..71bd272f9c 100644 --- 
a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c +++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c @@ -556,12 +556,13 @@ int main(int argc, char* argv[]) { else declen = WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1); } else { - if (nbTest != 2) + if (nbTest != 2) { declen = WebRtcIsacfix_Decode(ISACFIX_main_inst, streamdata, stream_len, decoded, speechType); - else + } else { declen = WebRtcIsacfix_DecodeNb(ISACFIX_main_inst, streamdata, stream_len, decoded, speechType); + } } if (declen <= 0) { /* exit if returned with error */ @@ -582,7 +583,7 @@ int main(int argc, char* argv[]) { totalsmpls += declen; totalbits += 8 * stream_len; - kbps = ((double)FS) / ((double)cur_framesmpls) * 8.0 * stream_len / 1000.0; + kbps = (double)FS / (double)cur_framesmpls * 8.0 * stream_len / 1000.0; fy = fopen("bit_rate.dat", "a"); fprintf(fy, "Frame %i = %0.14f\n", framecnt, kbps); fclose(fy); diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c index 7a51a1e292..ac211e9a2c 100644 --- a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c +++ b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c @@ -750,7 +750,8 @@ int WebRtcIsac_Encode(ISACStruct* ISAC_main_inst, streamLenUB + garbageLen, &crc); #ifndef WEBRTC_ARCH_BIG_ENDIAN for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) { - encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] = crc >> (24 - k * 8); + encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] = + (uint8_t)(crc >> (24 - k * 8)); } #else memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc, LEN_CHECK_SUM_WORD8); diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc index 4eeeed078f..8584c76990 100644 --- a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc +++ 
b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc @@ -662,8 +662,9 @@ int main(int argc, char* argv[]) { if (stream_len < 0) { /* exit if returned with error */ errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst); - printf("\n\nError in encoder: %d.\n\n", errtype); + fprintf(stderr, "Error in encoder: %d.\n", errtype); cout << flush; + exit(0); } cur_framesmpls += samplesIn10Ms; /* exit encoder loop if the encoder returned a bitstream */ diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c b/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c index e0d0f412c8..214dccd021 100644 --- a/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c +++ b/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c @@ -350,6 +350,11 @@ int main(int argc, char* argv[]) { } rcuStreamLen = WebRtcIsac_GetRedPayload(ISAC_main_inst, payloadRCU); + if (rcuStreamLen < 0) { + fprintf(stderr, "\nError getting RED payload\n"); + getc(stdin); + exit(EXIT_FAILURE); + } get_arrival_time(cur_framesmpls, stream_len, bottleneck, &packetData, sampFreqKHz * 1000, sampFreqKHz * 1000); diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc index 88e084fe1d..9bf1ae38ce 100644 --- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc +++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc @@ -115,7 +115,7 @@ size_t AudioEncoderOpus::MaxEncodedBytes() const { // Calculate the number of bytes we expect the encoder to produce, // then multiply by two to give a wide margin for error. 
size_t bytes_per_millisecond = - static_cast<size_t>(bitrate_bps_ / (1000 * 8) + 1); + static_cast<size_t>(bitrate_bps_ / (1000 * 8) + 1); size_t approx_encoded_bytes = num_10ms_frames_per_packet_ * 10 * bytes_per_millisecond; return 2 * approx_encoded_bytes; diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.cc b/webrtc/modules/audio_coding/main/test/PCMFile.cc index 4f46098a7b..4b08f753cd 100644 --- a/webrtc/modules/audio_coding/main/test/PCMFile.cc +++ b/webrtc/modules/audio_coding/main/test/PCMFile.cc @@ -150,8 +150,7 @@ void PCMFile::Write10MsData(AudioFrame& audio_frame) { } } else { int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_]; - int k; - for (k = 0; k < audio_frame.samples_per_channel_; k++) { + for (int k = 0; k < audio_frame.samples_per_channel_; k++) { stereo_audio[k << 1] = audio_frame.data_[k]; stereo_audio[(k << 1) + 1] = audio_frame.data_[k]; } diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc index 99ff95a2ec..6a9b953f23 100644 --- a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc +++ b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc @@ -256,16 +256,14 @@ int AudioDecoderG722Stereo::DecodeInternal(const uint8_t* encoded, static_cast<int16_t>(encoded_len / 2), &decoded[decoded_len], &temp_type); if (ret == decoded_len) { - decoded_len += ret; + ret += decoded_len; // Return total number of samples. // Interleave output. - for (int k = decoded_len / 2; k < decoded_len; k++) { + for (int k = ret / 2; k < ret; k++) { int16_t temp = decoded[k]; - memmove(&decoded[2 * k - decoded_len + 2], - &decoded[2 * k - decoded_len + 1], - (decoded_len - k - 1) * sizeof(int16_t)); - decoded[2 * k - decoded_len + 1] = temp; + memmove(&decoded[2 * k - ret + 2], &decoded[2 * k - ret + 1], (ret - k - 1) * sizeof(int16_t)); + decoded[2 * k - ret + 1] = temp; } - ret = decoded_len; // Return total number of samples. 
} } *speech_type = ConvertSpeechType(temp_type); diff --git a/webrtc/modules/audio_coding/neteq/dsp_helper.cc b/webrtc/modules/audio_coding/neteq/dsp_helper.cc index 289e66d17c..3e5c61d87b 100644 --- a/webrtc/modules/audio_coding/neteq/dsp_helper.cc +++ b/webrtc/modules/audio_coding/neteq/dsp_helper.cc @@ -117,7 +117,7 @@ void DspHelper::PeakDetection(int16_t* data, int data_length, peak_index[i] = WebRtcSpl_MaxIndexW16(data, data_length - 1); if (i != num_peaks - 1) { - min_index = std::max(0, peak_index[i] - 2); + min_index = (peak_index[i] > 2) ? (peak_index[i] - 2) : 0; max_index = std::min(data_length - 1, peak_index[i] + 2); } @@ -238,7 +238,7 @@ void DspHelper::ParabolicFit(int16_t* signal_points, int fs_mult, int DspHelper::MinDistortion(const int16_t* signal, int min_lag, int max_lag, int length, int32_t* distortion_value) { - int best_index = -1; + int best_index = 0; int32_t min_distortion = WEBRTC_SPL_WORD32_MAX; for (int i = min_lag; i <= max_lag; i++) { int32_t sum_diff = 0; diff --git a/webrtc/modules/audio_coding/neteq/expand.cc b/webrtc/modules/audio_coding/neteq/expand.cc index 10f6a9f5bf..ae12e50461 100644 --- a/webrtc/modules/audio_coding/neteq/expand.cc +++ b/webrtc/modules/audio_coding/neteq/expand.cc @@ -441,8 +441,8 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { &audio_history[signal_length - correlation_length - start_index - correlation_lags], correlation_length + start_index + correlation_lags - 1); - correlation_scale = ((31 - WebRtcSpl_NormW32(signal_max * signal_max)) - + (31 - WebRtcSpl_NormW32(correlation_length))) - 31; + correlation_scale = (31 - WebRtcSpl_NormW32(signal_max * signal_max)) + + (31 - WebRtcSpl_NormW32(correlation_length)) - 31; correlation_scale = std::max(0, correlation_scale); // Calculate the correlation, store in |correlation_vector2|. 
diff --git a/webrtc/modules/audio_coding/neteq/packet_buffer.cc b/webrtc/modules/audio_coding/neteq/packet_buffer.cc index 5792b227ef..07fa339fd3 100644 --- a/webrtc/modules/audio_coding/neteq/packet_buffer.cc +++ b/webrtc/modules/audio_coding/neteq/packet_buffer.cc @@ -255,7 +255,7 @@ int PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database, continue; } int duration = - decoder->PacketDuration(packet->payload, packet->payload_length); + decoder->PacketDuration(packet->payload, packet->payload_length); if (duration >= 0) { last_duration = duration; // Save the most up-to-date (valid) duration. } diff --git a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc index 1aacb401b7..7e778b86e0 100644 --- a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc +++ b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc @@ -454,7 +454,7 @@ int main(int argc, char* argv[]) { printf("Packet size %d must be positive", packet_size); return -1; } - printf("Packet size: %i\n", packet_size); + printf("Packet size: %d\n", packet_size); // check for stereo if (argv[4][strlen(argv[4]) - 1] == '*') { @@ -1572,29 +1572,31 @@ int NetEQTest_encode(int coder, if (useVAD) { *vad = 0; + int sampleRate_10 = 10 * sampleRate / 1000; + int sampleRate_20 = 20 * sampleRate / 1000; + int sampleRate_30 = 30 * sampleRate / 1000; for (int k = 0; k < numChannels; k++) { tempLen = frameLen; tempdata = &indata[k * frameLen]; int localVad = 0; /* Partition the signal and test each chunk for VAD. All chunks must be VAD=0 to produce a total VAD=0. 
*/ - while (tempLen >= 10 * sampleRate / 1000) { - if ((tempLen % 30 * sampleRate / 1000) == - 0) { // tempLen is multiple of 30ms + while (tempLen >= sampleRate_10) { + if ((tempLen % sampleRate_30) == 0) { // tempLen is multiple of 30ms localVad |= WebRtcVad_Process(VAD_inst[k], sampleRate, tempdata, - 30 * sampleRate / 1000); - tempdata += 30 * sampleRate / 1000; - tempLen -= 30 * sampleRate / 1000; - } else if (tempLen >= 20 * sampleRate / 1000) { // tempLen >= 20ms + sampleRate_30); + tempdata += sampleRate_30; + tempLen -= sampleRate_30; + } else if (tempLen >= sampleRate_20) { // tempLen >= 20ms localVad |= WebRtcVad_Process(VAD_inst[k], sampleRate, tempdata, - 20 * sampleRate / 1000); - tempdata += 20 * sampleRate / 1000; - tempLen -= 20 * sampleRate / 1000; + sampleRate_20); + tempdata += sampleRate_20; + tempLen -= sampleRate_20; } else { // use 10ms localVad |= WebRtcVad_Process(VAD_inst[k], sampleRate, tempdata, - 10 * sampleRate / 1000); - tempdata += 10 * sampleRate / 1000; - tempLen -= 10 * sampleRate / 1000; + sampleRate_10); + tempdata += sampleRate_10; + tempLen -= sampleRate_10; } } diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc index b21fd6e200..0aef6f9161 100644 --- a/webrtc/modules/audio_device/android/audio_device_unittest.cc +++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc @@ -350,7 +350,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface { } int IndexToMilliseconds(double index) const { - return 10.0 * (index / frames_per_buffer_) + 0.5; + return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5); } private: diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc index 66ad6259a3..6925b61762 100644 --- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc +++ 
b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc @@ -120,7 +120,7 @@ complex<float> ConjugateDotProduct(const ComplexMatrix<float>& lhs, // Works for positive numbers only. int Round(float x) { - return std::floor(x + 0.5f); + return static_cast<int>(std::floor(x + 0.5f)); } // Calculates the sum of absolute values of a complex matrix. @@ -464,9 +464,9 @@ void NonlinearBeamformer::ApplyMaskFrequencySmoothing() { final_mask_[i] = kMaskFrequencySmoothAlpha * final_mask_[i] + (1 - kMaskFrequencySmoothAlpha) * final_mask_[i - 1]; } - for (int i = high_mean_end_bin_; i >= 0; --i) { - final_mask_[i] = kMaskFrequencySmoothAlpha * final_mask_[i] + - (1 - kMaskFrequencySmoothAlpha) * final_mask_[i + 1]; + for (int i = high_mean_end_bin_ + 1; i > 0; --i) { + final_mask_[i - 1] = kMaskFrequencySmoothAlpha * final_mask_[i - 1] + + (1 - kMaskFrequencySmoothAlpha) * final_mask_[i]; } } diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc index e4a8139c4c..dbb7e638b2 100644 --- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc +++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc @@ -31,7 +31,6 @@ namespace webrtc { namespace { -const int kErbResolution = 2; const int kWindowSizeMs = 2; const int kChunkSizeMs = 10; // Size provided by APM. 
const float kClipFreq = 200.0f; @@ -132,7 +131,7 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, float freqs_khz = kClipFreq / 1000.0f; int erb_index = static_cast<int>(ceilf( 11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f)); - start_freq_ = max(1, erb_index * kErbResolution); + start_freq_ = std::max(1, erb_index * erb_resolution); WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_, kbd_window_.get()); diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h index df47de5978..7f18be8c6e 100644 --- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h +++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h @@ -73,11 +73,11 @@ class IntelligibilityEnhancer { // All in frequency domain, receives input |in_block|, applies // intelligibility enhancement, and writes result to |out_block|. 
- virtual void ProcessAudioBlock(const std::complex<float>* const* in_block, - int in_channels, - int frames, - int out_channels, - std::complex<float>* const* out_block); + void ProcessAudioBlock(const std::complex<float>* const* in_block, + int in_channels, + int frames, + int out_channels, + std::complex<float>* const* out_block) override; private: IntelligibilityEnhancer* parent_; diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc index d67d200689..824b1676d8 100644 --- a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc +++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc @@ -137,9 +137,9 @@ void VarianceArray::InfiniteStep(const complex<float>* data, bool skip_fudge) { .real(); variance_[i] = conj_sum_[i] / (count_ - 1); // + fudge[fudge_index].real(); - if (skip_fudge && false) { - // variance_[i] -= fudge[fudge_index].real(); - } + // if (skip_fudge) { + // variance_[i] -= fudge[fudge_index].real(); + // } } array_mean_ += (variance_[i] - array_mean_) / (i + 1); } diff --git a/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc b/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc index 2f1888d28c..cef41e964a 100644 --- a/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc +++ b/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc @@ -33,9 +33,11 @@ using std::complex; namespace webrtc { +namespace { -using webrtc::RealFourier; -using webrtc::IntelligibilityEnhancer; +bool ValidateClearWindow(const char* flagname, int32_t value) { + return value > 0; +} DEFINE_int32(clear_type, webrtc::intelligibility::VarianceArray::kStepInfinite, "Variance algorithm for clear data.") @@ -44,6 +46,8 @@ DEFINE_double(clear_alpha, 0.9, "Variance decay factor for clear data."); DEFINE_int32(clear_window, 475, "Window size for windowed variance for clear data."); +const bool clear_window_dummy = 
+ google::RegisterFlagValidator(&FLAGS_clear_window, &ValidateClearWindow); DEFINE_int32(sample_rate, 16000, "Audio sample rate used in the input and output files."); @@ -137,6 +141,7 @@ void void_main(int argc, char* argv[]) { } } +} // namespace } // namespace webrtc int main(int argc, char* argv[]) {