Revert of Rewrote UpdateToMix in the audio mixer. (patchset #5 id:120001 of https://codereview.webrtc.org/2132563002/ )

Reason for revert:
Multiple definitions of webrtc::MockMixerParticipant::MockMixerParticipant() during linking of modules_unittests. Please investigate and resubmit.
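
For context: "multiple definitions ... during linking" is the classic result of two translation units in the same binary each providing an external-linkage definition of the same symbol. A minimal sketch of that failure mode (file names hypothetical; the real collision involves the mock defined in the unittest below plus another definition elsewhere in modules_unittests):

// sketch_a.cc -- stands in for one unittest defining the mock.
namespace webrtc {
class MockMixerParticipant {
 public:
  MockMixerParticipant();
};
MockMixerParticipant::MockMixerParticipant() {}  // External linkage.
}  // namespace webrtc

// sketch_b.cc -- a second unittest with its own copy of the same class.
namespace webrtc {
class MockMixerParticipant {
 public:
  MockMixerParticipant();
};
MockMixerParticipant::MockMixerParticipant() {}  // Second definition.
}  // namespace webrtc

// Linking sketch_a.o and sketch_b.o into one binary fails roughly with:
//   multiple definition of 'webrtc::MockMixerParticipant::MockMixerParticipant()'
// An unnamed namespace around either class (internal linkage), or a single
// shared definition, avoids the clash.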

Original issue's description:
> Rewrote UpdateToMix in the audio mixer.
>
> The new version is much shorter than the old one, and hopefully easier
> to read. This is part of the effort to rewrite the old mixer.
>
> Committed: https://crrev.com/2942e240f4a985752714dac18c141064c97696d4
> Cr-Commit-Position: refs/heads/master@{#13568}

TBR=ossu@webrtc.org,ivoc@webrtc.org,aleloi@webrtc.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review-Url: https://codereview.webrtc.org/2195633002
Cr-Commit-Position: refs/heads/master@{#13569}
terelius 2016-07-29 01:36:14 -07:00 committed by Commit bot
parent 2942e240f4
commit ea4c141ffa
4 changed files with 239 additions and 294 deletions


@@ -29,7 +29,7 @@ class MixerAudioSource {
kError // audio_frame will not be used.
};
struct AudioFrameWithMuted {
struct AudioFrameWithInfo {
AudioFrame* audio_frame;
AudioFrameInfo audio_frame_info;
};
@@ -40,8 +40,8 @@ class MixerAudioSource {
// different calls. The pointer must stay valid until the next
// mixing call or until this audio source is disconnected from the
// mixer.
virtual AudioFrameWithMuted GetAudioFrameWithMuted(int32_t id,
int sample_rate_hz) = 0;
virtual AudioFrameWithInfo GetAudioFrameWithMuted(int32_t id,
int sample_rate_hz) = 0;
// Returns true if the participant was mixed this mix iteration.
bool IsMixed() const;
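
For reference, a minimal sketch of a source implementing the interface as restored here. The class name and canned behavior are hypothetical, and any other pure-virtual members of MixerAudioSource are omitted for brevity:

class ExampleSource : public MixerAudioSource {  // Hypothetical.
 public:
  AudioFrameWithInfo GetAudioFrameWithMuted(int32_t id,
                                            int sample_rate_hz) override {
    // Per the contract above, the source keeps ownership and |frame_| must
    // stay valid until the next mixing call or until disconnection.
    frame_.sample_rate_hz_ = sample_rate_hz;
    return {&frame_, AudioFrameInfo::kNormal};  // An unmuted, usable frame.
  }

 private:
  AudioFrame frame_;
};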


@@ -11,7 +11,6 @@
#include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"
#include <algorithm>
#include <functional>
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
@@ -23,44 +22,16 @@
namespace webrtc {
namespace {
class SourceFrame {
public:
SourceFrame(MixerAudioSource* p,
AudioFrame* a,
bool m,
bool was_mixed_before)
: audio_source_(p),
audio_frame_(a),
muted_(m),
was_mixed_before_(was_mixed_before) {
if (!muted_) {
energy_ = CalculateEnergy(*a);
}
}
// a.shouldMixBefore(b) is used to select mixer participants.
bool shouldMixBefore(const SourceFrame& other) const {
if (muted_ != other.muted_) {
return other.muted_;
}
auto our_activity = audio_frame_->vad_activity_;
auto other_activity = other.audio_frame_->vad_activity_;
if (our_activity != other_activity) {
return our_activity == AudioFrame::kVadActive;
}
return energy_ > other.energy_;
}
MixerAudioSource* audio_source_;
AudioFrame* audio_frame_;
bool muted_;
uint32_t energy_;
bool was_mixed_before_;
struct AudioSourceWithFrame {
AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m)
: audio_source(p), audio_frame(a), muted(m) {}
MixerAudioSource* audio_source;
AudioFrame* audio_frame;
bool muted;
};
typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList;
// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
// These effects are applied to |frame| itself prior to mixing. Assumes that
// |mixed_frame| always has at least as many channels as |frame|. Supports
@@ -196,6 +167,7 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources;
RTC_DCHECK(thread_checker_.CalledOnValidThread());
AudioFrameList mixList;
AudioFrameList rampOutList;
AudioFrameList additionalFramesList;
std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
{
@@ -242,17 +214,20 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
}
}
mixList = UpdateToMix(remainingAudioSourcesAllowedToMix);
remainingAudioSourcesAllowedToMix -= mixList.size();
UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap,
&remainingAudioSourcesAllowedToMix);
GetAdditionalAudio(&additionalFramesList);
UpdateMixedStatus(mixedAudioSourcesMap);
}
// TODO(aleloi): it might be better to decide the number of channels
// with an API instead of dynamically.
// Find the max channels over all mixing lists.
const size_t num_mixed_channels =
std::max(MaxNumChannels(&mixList), MaxNumChannels(&additionalFramesList));
const size_t num_mixed_channels = std::max(
MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList),
MaxNumChannels(&rampOutList)));
audio_frame_for_mixing->UpdateFrame(
-1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech,
@@ -270,6 +245,7 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
{
CriticalSectionScoped cs(_crit.get());
MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList);
if (audio_frame_for_mixing->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
@@ -280,6 +256,10 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
LimitMixedAudio(audio_frame_for_mixing);
}
}
ClearAudioFrameList(&mixList);
ClearAudioFrameList(&rampOutList);
ClearAudioFrameList(&additionalFramesList);
return;
}
@@ -446,62 +426,177 @@ int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
return highestFreq;
}
AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
size_t maxAudioFrameCounter) const {
AudioFrameList result;
std::vector<SourceFrame> audioSourceMixingDataList;
void NewAudioConferenceMixerImpl::UpdateToMix(
AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerAudioSource*>* mixAudioSourceList,
size_t* maxAudioFrameCounter) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)",
*maxAudioFrameCounter);
const size_t mixListStartSize = mixList->size();
AudioFrameList activeList;
// Struct needed by the passive lists to keep track of which AudioFrame
// belongs to which MixerAudioSource.
AudioSourceWithFrameList passiveWasNotMixedList;
AudioSourceWithFrameList passiveWasMixedList;
for (MixerAudioSourceList::const_iterator audio_source =
audio_source_list_.begin();
audio_source != audio_source_list_.end(); ++audio_source) {
// Stop keeping track of passive audioSources if there are already
// enough audio sources available (they won't be mixed anyway).
bool mustAddToPassiveList =
(*maxAudioFrameCounter >
(activeList.size() + passiveWasMixedList.size() +
passiveWasNotMixedList.size()));
// Get audio source audio and put it in the struct vector.
for (MixerAudioSource* audio_source : audio_source_list_) {
auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
_id, static_cast<int>(_outputFrequency));
bool wasMixed = false;
wasMixed = (*audio_source)->_mixHistory->WasMixed();
auto audio_frame_info = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
auto audio_frame_with_info =
(*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency);
auto ret = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
if (ret == MixerAudioSource::AudioFrameInfo::kError) {
continue;
}
const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
if (audio_source_list_.size() != 1) {
// TODO(wu): Issue 3390, add support for multiple audio sources case.
audio_frame->ntp_time_ms_ = -1;
}
if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
// TODO(aleloi): this assert triggers in some test cases where SRTP is
// used which prevents NetEQ from making a VAD. Temporarily disable this
// assert until the problem is fixed on a higher level.
// RTC_DCHECK_NE(audio_frame->vad_activity_, AudioFrame::kVadUnknown);
if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrameWithMuted() from participant");
continue;
}
audioSourceMixingDataList.emplace_back(
audio_source, audio_source_audio_frame,
audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
audio_source->_mixHistory->WasMixed());
}
// Sort frames by sorting function.
std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
std::mem_fn(&SourceFrame::shouldMixBefore));
// Go through list in order and put things in mixList.
for (SourceFrame& p : audioSourceMixingDataList) {
// Filter muted.
if (p.muted_) {
p.audio_source_->_mixHistory->SetIsMixed(false);
continue;
"invalid VAD state from audio source");
}
// Add frame to result vector for mixing.
bool is_mixed = false;
if (maxAudioFrameCounter > 0) {
--maxAudioFrameCounter;
if (!p.was_mixed_before_) {
RampIn(*p.audio_frame_);
if (audio_frame->vad_activity_ == AudioFrame::kVadActive) {
if (!wasMixed && !muted) {
RampIn(*audio_frame);
}
result.emplace_back(p.audio_frame_, false);
is_mixed = true;
}
// Ramp out unmuted.
if (p.was_mixed_before_ && !is_mixed) {
RampOut(*p.audio_frame_);
result.emplace_back(p.audio_frame_, false);
}
if (activeList.size() >= *maxAudioFrameCounter) {
// There are already more active audio sources than should be
// mixed. Only keep the ones with the highest energy.
AudioFrameList::iterator replaceItem;
uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame);
p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
bool found_replace_item = false;
for (AudioFrameList::iterator iter = activeList.begin();
iter != activeList.end(); ++iter) {
const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
if (energy < lowestEnergy) {
replaceItem = iter;
lowestEnergy = energy;
found_replace_item = true;
}
}
if (found_replace_item) {
RTC_DCHECK(!muted); // Cannot replace with a muted frame.
FrameAndMuteInfo replaceFrame = *replaceItem;
bool replaceWasMixed = false;
std::map<int, MixerAudioSource*>::const_iterator it =
mixAudioSourceList->find(replaceFrame.frame->id_);
// When a frame is pushed to |activeList| it is also pushed
// to mixAudioSourceList with the frame's id. This means
// that the Find call above should never fail.
RTC_DCHECK(it != mixAudioSourceList->end());
replaceWasMixed = it->second->_mixHistory->WasMixed();
mixAudioSourceList->erase(replaceFrame.frame->id_);
activeList.erase(replaceItem);
activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
(*mixAudioSourceList)[audio_frame->id_] = *audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
if (replaceWasMixed) {
if (!replaceFrame.muted) {
RampOut(*replaceFrame.frame);
}
rampOutList->push_back(replaceFrame);
RTC_DCHECK_LE(
rampOutList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
} else {
if (wasMixed) {
if (!muted) {
RampOut(*audio_frame);
}
rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted));
RTC_DCHECK_LE(
rampOutList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
}
} else {
activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
(*mixAudioSourceList)[audio_frame->id_] = *audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
} else {
if (wasMixed) {
AudioSourceWithFrame* part_struct =
new AudioSourceWithFrame(*audio_source, audio_frame, muted);
passiveWasMixedList.push_back(part_struct);
} else if (mustAddToPassiveList) {
if (!muted) {
RampIn(*audio_frame);
}
AudioSourceWithFrame* part_struct =
new AudioSourceWithFrame(*audio_source, audio_frame, muted);
passiveWasNotMixedList.push_back(part_struct);
}
}
}
return result;
RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter);
// At this point it is known which audio sources should be mixed. Transfer
// this information to this function's output parameters.
for (AudioFrameList::const_iterator iter = activeList.begin();
iter != activeList.end(); ++iter) {
mixList->push_back(*iter);
}
activeList.clear();
// Always mix a constant number of AudioFrames. If there aren't enough
// active audio sources, mix passive ones, starting with those that were
// mixed last iteration.
for (AudioSourceWithFrameList::const_iterator iter =
passiveWasMixedList.begin();
iter != passiveWasMixedList.end(); ++iter) {
if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back(
FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
(*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
delete *iter;
}
// And finally the ones that have not been mixed for a while.
for (AudioSourceWithFrameList::const_iterator iter =
passiveWasNotMixedList.begin();
iter != passiveWasNotMixedList.end(); ++iter) {
if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back(
FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
(*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
RTC_DCHECK_LE(mixAudioSourceList->size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
}
delete *iter;
}
RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size());
*maxAudioFrameCounter += mixListStartSize - mixList->size();
}
void NewAudioConferenceMixerImpl::GetAdditionalAudio(
@@ -538,6 +633,38 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
}
}
void NewAudioConferenceMixerImpl::UpdateMixedStatus(
const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateMixedStatus(mixedAudioSourcesMap)");
RTC_DCHECK_LE(mixedAudioSourcesMap.size(),
static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
// Loop through all audio_sources. If they are in the mix map they
// were mixed.
for (MixerAudioSourceList::const_iterator audio_source =
audio_source_list_.begin();
audio_source != audio_source_list_.end(); ++audio_source) {
bool isMixed = false;
for (std::map<int, MixerAudioSource*>::const_iterator it =
mixedAudioSourcesMap.begin();
it != mixedAudioSourcesMap.end(); ++it) {
if (it->second == *audio_source) {
isMixed = true;
break;
}
}
(*audio_source)->_mixHistory->SetIsMixed(isMixed);
}
}
void NewAudioConferenceMixerImpl::ClearAudioFrameList(
AudioFrameList* audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"ClearAudioFrameList(audioFrameList)");
audioFrameList->clear();
}
bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
const MixerAudioSource& audio_source,
const MixerAudioSourceList& audioSourceList) const {


@@ -84,10 +84,17 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
int32_t SetOutputFrequency(const Frequency& frequency);
Frequency OutputFrequency() const;
// Compute what audio sources to mix from audio_source_list_. Ramp in
// and out. Update mixed status. maxAudioFrameCounter specifies how
// many participants are allowed to be mixed.
AudioFrameList UpdateToMix(size_t maxAudioFrameCounter) const;
// Fills mixList with the AudioFrame pointers that should be used when
// mixing.
// maxAudioFrameCounter is both input and output: it specifies how many
// more AudioFrames are allowed to be mixed.
// rampOutList contains AudioFrames corresponding to audio streams that
// used to be mixed but shouldn't be mixed any longer. These AudioFrames
// should be ramped out over this AudioFrame to avoid audio discontinuities.
void UpdateToMix(AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerAudioSource*>* mixAudioSourceList,
size_t* maxAudioFrameCounter) const;
// Return the lowest mixing frequency that can be used without having to
// downsample any audio.
@@ -98,6 +105,11 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
// Return the AudioFrames that should be mixed anonymously.
void GetAdditionalAudio(AudioFrameList* additionalFramesList) const;
// Update the NewMixHistory of all MixerAudioSources. mixedAudioSourcesList
// should contain a map of MixerAudioSources that have been mixed.
void UpdateMixedStatus(
const std::map<int, MixerAudioSource*>& mixedAudioSourcesList) const;
// Clears audioFrameList and reclaims all memory associated with it.
void ClearAudioFrameList(AudioFrameList* audioFrameList) const;
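
To make the in/out contract of maxAudioFrameCounter concrete, a hedged caller-side sketch (values illustrative; in the real code this call happens inside Mix()):

size_t remaining = kMaximumAmountOfMixedAudioSources;  // Say this is 3.
AudioFrameList mix_list;
AudioFrameList ramp_out_list;
std::map<int, MixerAudioSource*> mixed_sources;
UpdateToMix(&mix_list, &ramp_out_list, &mixed_sources, &remaining);
// If UpdateToMix added two frames to the initially empty mix_list, its
// final statement executed *maxAudioFrameCounter += 0 - 2, so |remaining|
// is now 1: one more participant may still be mixed this iteration.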


@@ -9,12 +9,9 @@
*/
#include <memory>
#include <utility>
#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer.h"
#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_mixer/audio_mixer.h"
#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
@@ -29,154 +26,30 @@ using testing::Return;
using webrtc::voe::AudioMixer;
namespace webrtc {
class MockMixerParticipant : public MixerParticipant {
public:
MockMixerParticipant()
: fake_audio_frame_info_(MixerParticipant::AudioFrameInfo::kNormal) {
ON_CALL(*this, GetAudioFrameWithMuted(_, _))
.WillByDefault(
Invoke(this, &MockMixerParticipant::FakeAudioFrameWithMuted));
}
MOCK_METHOD2(GetAudioFrameWithMuted,
AudioFrameInfo(const int32_t id, AudioFrame* audio_frame));
MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id));
AudioFrame* fake_frame() { return &fake_frame_; }
AudioFrameInfo fake_info() { return this->fake_audio_frame_info_; }
void set_fake_info(const AudioFrameInfo audio_frame_info) {
fake_audio_frame_info_ = audio_frame_info;
}
private:
AudioFrame fake_frame_;
AudioFrameInfo fake_audio_frame_info_;
AudioFrameInfo FakeAudioFrameWithMuted(const int32_t id,
AudioFrame* audio_frame) {
audio_frame->CopyFrom(*fake_frame());
return fake_info();
}
};
class MockMixerAudioSource : public MixerAudioSource {
public:
MockMixerAudioSource()
: fake_audio_frame_info_(MixerAudioSource::AudioFrameInfo::kNormal) {
MockMixerAudioSource() {
ON_CALL(*this, GetAudioFrameWithMuted(_, _))
.WillByDefault(
Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted));
}
MOCK_METHOD2(GetAudioFrameWithMuted,
AudioFrameWithMuted(const int32_t id, int sample_rate_hz));
AudioFrameWithInfo(const int32_t id, int sample_rate_hz));
MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id));
AudioFrame* fake_frame() { return &fake_frame_; }
AudioFrameInfo fake_info() { return fake_audio_frame_info_; }
void set_fake_info(const AudioFrameInfo audio_frame_info) {
fake_audio_frame_info_ = audio_frame_info;
}
private:
AudioFrame fake_frame_;
AudioFrameInfo fake_audio_frame_info_;
AudioFrameWithMuted FakeAudioFrameWithMuted(const int32_t id,
int sample_rate_hz) {
AudioFrameWithInfo FakeAudioFrameWithMuted(const int32_t id,
int sample_rate_hz) {
return {
fake_frame(), // audio_frame_pointer
fake_info(), // audio_frame_info
fake_frame(), // audio_frame_pointer
AudioFrameInfo::kNormal, // audio_frame_info
};
}
};
// Keeps two identical sets of participants and two mixers to test
// that the same participants are chosen for mixing.
class CompareWithOldMixerTest : public testing::Test, AudioMixerOutputReceiver {
protected:
constexpr static int kId = 1;
constexpr static int kSampleRateHz = 32000;
CompareWithOldMixerTest()
: old_mixer_(AudioConferenceMixer::Create(kId)),
new_mixer_(NewAudioConferenceMixer::Create(kId)) {}
~CompareWithOldMixerTest() { Reset(); }
// Mixes with both mixers and compares results: resulting frames and
// mix statuses.
void MixAndCompare() {
old_mixer_->Process();
new_mixer_->Mix(&new_mixer_frame_);
EXPECT_EQ(0, memcmp(old_mixer_frame_.data_, new_mixer_frame_.data_,
sizeof(old_mixer_frame_.data_)));
for (auto& participant_pair : participants_) {
EXPECT_EQ(participant_pair.first->IsMixed(),
participant_pair.second->IsMixed());
}
}
std::unique_ptr<AudioFrame> last_mixed_audio_old() {
std::unique_ptr<AudioFrame> result(new AudioFrame);
result->CopyFrom(old_mixer_frame_);
return result;
}
void Reset() {
old_mixer_.reset(AudioConferenceMixer::Create(kId));
new_mixer_.reset(NewAudioConferenceMixer::Create(kId));
for (auto& participant_pair : participants_) {
delete participant_pair.first;
delete participant_pair.second;
}
participants_.clear();
}
void ResetFrame(AudioFrame* audio_frame) {
audio_frame->sample_rate_hz_ = kSampleRateHz;
audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
audio_frame->vad_activity_ = AudioFrame::kVadActive;
audio_frame->num_channels_ = 1;
}
void AddParticipant(AudioFrame* audio_frame,
MixerParticipant::AudioFrameInfo audio_frame_info) {
auto old_participant = new MockMixerParticipant;
auto new_participant = new MockMixerAudioSource;
old_participant->fake_frame()->CopyFrom(*audio_frame);
new_participant->fake_frame()->CopyFrom(*audio_frame);
old_participant->set_fake_info(audio_frame_info);
MixerAudioSource::AudioFrameInfo new_audio_frame_info;
switch (audio_frame_info) {
case MixerParticipant::AudioFrameInfo::kNormal:
new_audio_frame_info = MixerAudioSource::AudioFrameInfo::kNormal;
break;
case MixerParticipant::AudioFrameInfo::kMuted:
new_audio_frame_info = MixerAudioSource::AudioFrameInfo::kMuted;
break;
default:
new_audio_frame_info = MixerAudioSource::AudioFrameInfo::kError;
}
new_participant->set_fake_info(new_audio_frame_info);
participants_.emplace_back(old_participant, new_participant);
}
void NewMixedAudio(const int32_t id,
const AudioFrame& generalAudioFrame,
const AudioFrame** uniqueAudioFrames,
const uint32_t size) override {
old_mixer_frame_.CopyFrom(generalAudioFrame);
}
AudioFrame old_mixer_frame_;
AudioFrame new_mixer_frame_;
std::vector<std::pair<MockMixerParticipant*, MockMixerAudioSource*>>
participants_;
std::unique_ptr<AudioConferenceMixer> old_mixer_;
std::unique_ptr<NewAudioConferenceMixer> new_mixer_;
};
class BothMixersTest : public testing::Test {
protected:
BothMixersTest() {
@@ -373,71 +246,4 @@ TEST_F(BothMixersTest, CompareSecondFrameAudio) {
sizeof(mixing_round_frame.data_)));
}
TEST_F(CompareWithOldMixerTest, TwoParticipantsNormalFrames) {
Reset();
AudioFrame first_frame, second_frame;
ResetFrame(&first_frame);
ResetFrame(&second_frame);
first_frame.id_ = 1;
second_frame.id_ = 2;
AddParticipant(&first_frame, MixerParticipant::AudioFrameInfo::kNormal);
AddParticipant(&second_frame, MixerParticipant::AudioFrameInfo::kNormal);
for (int i = 0; i < 3; ++i) {
MixAndCompare();
}
}
TEST_F(CompareWithOldMixerTest, ThreeParticipantsDifferentFrames) {
Reset();
AudioFrame first_frame, second_frame, third_frame;
ResetFrame(&first_frame);
ResetFrame(&second_frame);
ResetFrame(&third_frame);
first_frame.id_ = 1;
second_frame.id_ = 2;
third_frame.id_ = 3;
second_frame.vad_activity_ = AudioFrame::kVadPassive;
AddParticipant(&first_frame, MixerParticipant::AudioFrameInfo::kNormal);
AddParticipant(&second_frame, MixerParticipant::AudioFrameInfo::kMuted);
AddParticipant(&third_frame, MixerParticipant::AudioFrameInfo::kMuted);
for (int i = 0; i < 3; ++i) {
MixAndCompare();
}
}
TEST_F(CompareWithOldMixerTest, ManyParticipantsDifferentFrames) {
Reset();
constexpr int num_participants = 20;
AudioFrame audio_frames[num_participants];
for (int i = 0; i < num_participants; ++i) {
ResetFrame(&audio_frames[i]);
audio_frames[i].id_ = 1;
audio_frames[i].data_[10] = 100 * (i % 5);
audio_frames[i].data_[100] = 100 * (i % 5);
if (i % 2 == 0) {
audio_frames[i].vad_activity_ = AudioFrame::kVadPassive;
}
}
for (int i = 0; i < num_participants; ++i) {
if (i % 2 == 0) {
AddParticipant(&audio_frames[i],
MixerParticipant::AudioFrameInfo::kMuted);
} else {
AddParticipant(&audio_frames[i],
MixerParticipant::AudioFrameInfo::kNormal);
}
MixAndCompare();
}
}
} // namespace webrtc
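
Both mocks above rely on gMock's delegate-to-fake idiom: the constructor's ON_CALL(...).WillByDefault(Invoke(...)) routes any unstubbed call to a hand-written fake, so tests get realistic frames without per-test expectations. A minimal standalone sketch of the same idiom (types hypothetical):

#include "testing/gmock/include/gmock/gmock.h"

class Source {  // Hypothetical interface.
 public:
  virtual ~Source() {}
  virtual int GetLevel(int id) = 0;
};

class MockSource : public Source {
 public:
  MockSource() {
    // Unstubbed GetLevel calls fall through to the fake implementation,
    // the same pattern MockMixerAudioSource uses for GetAudioFrameWithMuted.
    ON_CALL(*this, GetLevel(testing::_))
        .WillByDefault(testing::Invoke(this, &MockSource::FakeGetLevel));
  }
  MOCK_METHOD1(GetLevel, int(int id));

 private:
  int FakeGetLevel(int id) { return 100; }  // Canned level for tests.
};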