diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc index afb060f46d..9411a9a47e 100644 --- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc +++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc @@ -583,17 +583,16 @@ void AudioConferenceMixerImpl::UpdateToMix( // There are already more active participants than should be // mixed. Only keep the ones with the highest energy. AudioFrameList::iterator replaceItem; - CalculateEnergy(*audioFrame); - uint32_t lowestEnergy = audioFrame->energy_; + uint32_t lowestEnergy = CalculateEnergy(*audioFrame); bool found_replace_item = false; for (AudioFrameList::iterator iter = activeList.begin(); iter != activeList.end(); ++iter) { - CalculateEnergy(**iter); - if((*iter)->energy_ < lowestEnergy) { + const uint32_t energy = CalculateEnergy(**iter); + if(energy < lowestEnergy) { replaceItem = iter; - lowestEnergy = (*iter)->energy_; + lowestEnergy = energy; found_replace_item = true; } } @@ -783,18 +782,6 @@ void AudioConferenceMixerImpl::ClearAudioFrameList( audioFrameList->clear(); } -void AudioConferenceMixerImpl::UpdateVADPositiveParticipants( - AudioFrameList* mixList) const { - WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, - "UpdateVADPositiveParticipants(mixList)"); - - for (AudioFrameList::const_iterator iter = mixList->begin(); - iter != mixList->end(); - ++iter) { - CalculateEnergy(**iter); - } -} - bool AudioConferenceMixerImpl::IsParticipantInList( const MixerParticipant& participant, const MixerParticipantList& participantList) const { diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h index 2466112769..0fab84ba81 100644 --- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h +++ 
b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h @@ -116,10 +116,6 @@ private: // Clears audioFrameList and reclaims all memory associated with it. void ClearAudioFrameList(AudioFrameList* audioFrameList) const; - // Update the list of MixerParticipants who have a positive VAD. mixList - // should be a list of AudioFrames - void UpdateVADPositiveParticipants(AudioFrameList* mixList) const; - // This function returns true if it finds the MixerParticipant in the // specified list of MixerParticipants. bool IsParticipantInList(const MixerParticipant& participant, diff --git a/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc b/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc index 9c5d3b939d..1e679af914 100644 --- a/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc +++ b/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc @@ -39,16 +39,16 @@ const size_t rampSize = sizeof(rampArray)/sizeof(rampArray[0]); } // namespace namespace webrtc { -void CalculateEnergy(AudioFrame& audioFrame) +uint32_t CalculateEnergy(const AudioFrame& audioFrame) { - audioFrame.energy_ = 0; + uint32_t energy = 0; for(size_t position = 0; position < audioFrame.samples_per_channel_; position++) { // TODO(andrew): this can easily overflow. 
- audioFrame.energy_ += audioFrame.data_[position] * - audioFrame.data_[position]; + energy += audioFrame.data_[position] * audioFrame.data_[position]; } + return energy; } void RampIn(AudioFrame& audioFrame) diff --git a/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h b/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h index 2da3a4d0d1..c136597fc3 100644 --- a/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h +++ b/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h @@ -11,11 +11,13 @@ #ifndef WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_AUDIO_FRAME_MANIPULATOR_H_ #define WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_AUDIO_FRAME_MANIPULATOR_H_ +#include "webrtc/typedefs.h" + namespace webrtc { class AudioFrame; -// Updates the audioFrame's energy (based on its samples). -void CalculateEnergy(AudioFrame& audioFrame); +// Returns the energy of |audioFrame| (based on its samples). +uint32_t CalculateEnergy(const AudioFrame& audioFrame); // Apply linear step function that ramps in/out the audio samples in audioFrame void RampIn(AudioFrame& audioFrame); diff --git a/webrtc/modules/include/module_common_types.h b/webrtc/modules/include/module_common_types.h index 89c5f1b49b..706eea3685 100644 --- a/webrtc/modules/include/module_common_types.h +++ b/webrtc/modules/include/module_common_types.h @@ -508,7 +508,7 @@ class AudioFrame { void UpdateFrame(int id, uint32_t timestamp, const int16_t* data, size_t samples_per_channel, int sample_rate_hz, SpeechType speech_type, VADActivity vad_activity, - size_t num_channels = 1, uint32_t energy = -1); + size_t num_channels = 1); AudioFrame& Append(const AudioFrame& rhs); @@ -535,11 +535,6 @@ class AudioFrame { size_t num_channels_; SpeechType speech_type_; VADActivity vad_activity_; - // Note that there is no guarantee that |energy_| is correct. Any user of this - // member must verify that the value is correct. - // TODO(henrike) Remove |energy_|. - // See https://code.google.com/p/webrtc/issues/detail?id=3315. 
- uint32_t energy_; bool interleaved_; private: @@ -563,7 +558,6 @@ inline void AudioFrame::Reset() { num_channels_ = 0; speech_type_ = kUndefined; vad_activity_ = kVadUnknown; - energy_ = 0xffffffff; interleaved_ = true; } @@ -574,8 +568,7 @@ inline void AudioFrame::UpdateFrame(int id, int sample_rate_hz, SpeechType speech_type, VADActivity vad_activity, - size_t num_channels, - uint32_t energy) { + size_t num_channels) { id_ = id; timestamp_ = timestamp; samples_per_channel_ = samples_per_channel; @@ -583,7 +576,6 @@ inline void AudioFrame::UpdateFrame(int id, speech_type_ = speech_type; vad_activity_ = vad_activity; num_channels_ = num_channels; - energy_ = energy; const size_t length = samples_per_channel * num_channels; assert(length <= kMaxDataSizeSamples); @@ -606,7 +598,6 @@ inline void AudioFrame::CopyFrom(const AudioFrame& src) { speech_type_ = src.speech_type_; vad_activity_ = src.vad_activity_; num_channels_ = src.num_channels_; - energy_ = src.energy_; interleaved_ = src.interleaved_; const size_t length = samples_per_channel_ * num_channels_; @@ -701,7 +692,6 @@ inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) { data_[i] = ClampToInt16(wrap_guard); } } - energy_ = 0xffffffff; return *this; } @@ -725,7 +715,6 @@ inline AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) { static_cast<int32_t>(data_[i]) - static_cast<int32_t>(rhs.data_[i]); data_[i] = ClampToInt16(wrap_guard); } - energy_ = 0xffffffff; return *this; }