Removals and renamings in the new audio mixer.

Removed the OutputMixer part of the new mixer and renamed the new
mixer from NewAudioConferenceMixer to AudioMixer.

NOTRY=True

Review-Url: https://codereview.webrtc.org/2249213005
Cr-Commit-Position: refs/heads/master@{#13883}
This commit is contained in:
aleloi 2016-08-24 02:20:54 -07:00 committed by Commit bot
parent 76f91cd08f
commit 5d167d6829
8 changed files with 112 additions and 706 deletions

View File

@ -7,55 +7,22 @@
# be found in the AUTHORS file in the root of the source tree.
config("audio_conference_mixer_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
include_dirs = [
"include",
"../../modules/include",
]
include_dirs = [ "../../modules/include" ]
}
source_set("audio_mixer") {
sources = [
"audio_mixer.cc",
"audio_mixer.h",
]
deps = [
":audio_conference_mixer",
"../../voice_engine:voice_engine",
]
if (is_win) {
defines = [ "WEBRTC_DRIFT_COMPENSATION_SUPPORTED" ]
cflags = [
# TODO(kjellander): Bug 261: fix this warning.
"/wd4373", # virtual function override.
]
}
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
if (is_clang) {
# Suppress warnings from Chrome's Clang plugins.
# See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
configs -= [ "//build/config/clang:find_bad_constructs" ]
}
}
source_set("audio_conference_mixer") {
sources = [
"audio_frame_manipulator.cc",
"audio_frame_manipulator.h",
"audio_mixer.h",
"audio_mixer_defines.h",
"new_audio_conference_mixer.h",
"new_audio_conference_mixer_impl.cc",
"new_audio_conference_mixer_impl.h",
"audio_mixer_impl.cc",
"audio_mixer_impl.h",
]
public = [
"audio_mixer_defines.h",
"new_audio_conference_mixer.h",
"audio_mixer.h",
]
configs += [ "../..:common_config" ]

View File

@ -1,413 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_mixer/audio_mixer.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
#include "webrtc/voice_engine/statistics.h"
#include "webrtc/voice_engine/utility.h"
namespace webrtc {
namespace voe {
// FileCallback: periodic playout-progress notification from the file player.
// Traced only; playback-duration notifications are not acted upon here.
void AudioMixer::PlayNotification(int32_t id, uint32_t durationMs) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::PlayNotification(id=%d, durationMs=%d)", id,
               durationMs);
  // Not implemented yet.
}
// FileCallback: periodic recording-progress notification from the file
// recorder. Traced only; no state is updated.
void AudioMixer::RecordNotification(int32_t id, uint32_t durationMs) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::RecordNotification(id=%d, durationMs=%d)", id,
               durationMs);
  // Not implemented yet.
}
// FileCallback: invoked when a played-out file reaches its end. The mixer
// does not drive file playout, so nothing needs to happen here.
void AudioMixer::PlayFileEnded(int32_t id) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::PlayFileEnded(id=%d)", id);
  // not needed
}
// FileCallback: invoked when the output-file recorder shuts down on its own
// (e.g. end of recording). Clears the recording flag under the file lock so
// StartRecordingPlayout()/StopRecordingPlayout() observe a consistent state.
void AudioMixer::RecordFileEnded(int32_t id) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::RecordFileEnded(id=%d)", id);
  // The recorder was created with _instanceId as its id, so the callback
  // must report the same id back.
  RTC_DCHECK_EQ(id, _instanceId);
  rtc::CritScope cs(&_fileCritSect);
  _outputFileRecording = false;
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::RecordFileEnded() =>"
               "output file recorder module is shutdown");
}
// Factory: allocates an AudioMixer and hands ownership to the caller via the
// |mixer| out-parameter. Always returns 0; the -1 error path existed only for
// an allocation-failure case that cannot occur (see note below).
int32_t AudioMixer::Create(AudioMixer*& mixer, uint32_t instanceId) {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
               "AudioMixer::Create(instanceId=%d)", instanceId);
  // Plain `new` throws std::bad_alloc on failure instead of returning NULL,
  // so the previous `mixer == NULL` branch was dead code and has been
  // removed. The int32_t return is kept for interface compatibility.
  mixer = new AudioMixer(instanceId);
  return 0;
}
// Private ctor (use Create()). Takes ownership of a heap-allocated
// NewAudioConferenceMixer held by reference; it is deleted in the dtor.
// Pan gains default to 1.0 (no attenuation on either channel).
AudioMixer::AudioMixer(uint32_t instanceId)
    : _mixerModule(*NewAudioConferenceMixer::Create(instanceId)),
      _audioLevel(),
      _instanceId(instanceId),
      _externalMediaCallbackPtr(NULL),
      _externalMedia(false),
      _panLeft(1.0f),
      _panRight(1.0f),
      // Updated lazily in DoOperationsOnCombinedSignal() when the mixed
      // frame's rate differs.
      _mixingFrequencyHz(8000),
      _outputFileRecording(false) {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::AudioMixer() - ctor");
}
// Counterpart to Create(): destroys the mixer and nulls out the caller's
// pointer. Safe to call with a NULL pointer (no-op apart from the store).
void AudioMixer::Destroy(AudioMixer*& mixer) {
  // `delete` on a null pointer is well-defined and does nothing, so no
  // explicit null check is required.
  delete mixer;
  mixer = NULL;
}
// Dtor: tears down in dependency order — external-media callback first,
// then the file recorder (callback unhooked before stopping), and finally
// the owned mixer module.
AudioMixer::~AudioMixer() {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::~AudioMixer() - dtor");
  if (_externalMedia) {
    DeRegisterExternalMediaProcessing();
  }
  {
    rtc::CritScope cs(&_fileCritSect);
    if (_outputFileRecorderPtr) {
      // Unregister the callback first so the recorder cannot call back into
      // this object while it is being destroyed.
      _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
      _outputFileRecorderPtr->StopRecording();
    }
  }
  // _mixerModule is a reference to a heap object created in the ctor.
  delete &_mixerModule;
}
// Stores a non-owning pointer to the engine's error/statistics reporter.
// Must be called before any method that reports via _engineStatisticsPtr
// (e.g. StartRecordingPlayout()). Always returns 0.
int32_t AudioMixer::SetEngineInformation(voe::Statistics& engineStatistics) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::SetEngineInformation()");
  _engineStatisticsPtr = &engineStatistics;
  return 0;
}
// Stores a non-owning pointer to the APM used by
// DoOperationsOnCombinedSignal() for reverse-stream processing.
// Always returns 0.
int32_t AudioMixer::SetAudioProcessingModule(
    AudioProcessing* audioProcessingModule) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::SetAudioProcessingModule("
               "audioProcessingModule=0x%x)",
               audioProcessingModule);
  _audioProcessingModulePtr = audioProcessingModule;
  return 0;
}
// VoEExternalMedia: installs a callback that gets the mixed signal in
// DoOperationsOnCombinedSignal(). Pointer and flag are updated together
// under _callbackCritSect. Always returns 0.
int AudioMixer::RegisterExternalMediaProcessing(
    VoEMediaProcess& proccess_object) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::RegisterExternalMediaProcessing()");
  rtc::CritScope cs(&_callbackCritSect);
  _externalMediaCallbackPtr = &proccess_object;
  _externalMedia = true;
  return 0;
}
// VoEExternalMedia: removes the external processing callback installed by
// RegisterExternalMediaProcessing(). Always returns 0.
int AudioMixer::DeRegisterExternalMediaProcessing() {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::DeRegisterExternalMediaProcessing()");
  rtc::CritScope cs(&_callbackCritSect);
  _externalMedia = false;
  _externalMediaCallbackPtr = NULL;
  return 0;
}
// Thin forwarder: adds/removes |audio_source| as a mixing candidate in the
// underlying mixer module. Returns the module's status code.
int32_t AudioMixer::SetMixabilityStatus(MixerAudioSource& audio_source,
                                        bool mixable) {
  return _mixerModule.SetMixabilityStatus(&audio_source, mixable);
}
// Thin forwarder: marks |audio_source| for anonymous (always-on) mixing in
// the underlying mixer module. Returns the module's status code.
int32_t AudioMixer::SetAnonymousMixabilityStatus(MixerAudioSource& audio_source,
                                                 bool mixable) {
  return _mixerModule.SetAnonymousMixabilityStatus(&audio_source, mixable);
}
// VoEVolumeControl: reports the coarse output level of the combined signal
// via the |level| out-parameter (voe::AudioLevel scale, int8_t range).
// Always returns 0.
int AudioMixer::GetSpeechOutputLevel(uint32_t& level) {
  int8_t currentLevel = _audioLevel.Level();
  level = static_cast<uint32_t>(currentLevel);
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
               "GetSpeechOutputLevel() => level=%u", level);
  return 0;
}
// VoEVolumeControl: reports the full-range (int16_t) output level of the
// combined signal via the |level| out-parameter. Always returns 0.
int AudioMixer::GetSpeechOutputLevelFullRange(uint32_t& level) {
  int16_t currentLevel = _audioLevel.LevelFullRange();
  level = static_cast<uint32_t>(currentLevel);
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
               "GetSpeechOutputLevelFullRange() => level=%u", level);
  return 0;
}
// VoEVolumeControl: stores the per-channel pan gains applied to the mixed
// signal in DoOperationsOnCombinedSignal(). Always returns 0.
int AudioMixer::SetOutputVolumePan(float left, float right) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::SetOutputVolumePan()");
  _panRight = right;
  _panLeft = left;
  return 0;
}
// VoEVolumeControl: returns the current pan gains through the |left| and
// |right| out-parameters. Always returns 0.
int AudioMixer::GetOutputVolumePan(float& left, float& right) {
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
               "GetOutputVolumePan() => left=%2.1f, right=%2.1f", _panLeft,
               _panRight);
  left = _panLeft;
  right = _panRight;
  return 0;
}
// VoEFile: starts recording the mixed playout signal to |fileName|.
// |codecInst| selects the file format: NULL => 16 kHz L16 PCM file;
// L16/PCMU/PCMA => WAV; anything else => compressed file. Returns 0 on
// success (including the already-recording no-op), -1 on error (reported
// via _engineStatisticsPtr).
int AudioMixer::StartRecordingPlayout(const char* fileName,
                                      const CodecInst* codecInst) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::StartRecordingPlayout(fileName=%s)", fileName);
  if (_outputFileRecording) {
    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                 "StartRecordingPlayout() is already recording");
    return 0;
  }
  FileFormats format;
  const uint32_t notificationTime(0);  // 0 => no periodic notifications.
  CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};
  // Only mono or stereo recordings are supported.
  if ((codecInst != NULL) &&
      ((codecInst->channels < 1) || (codecInst->channels > 2))) {
    _engineStatisticsPtr->SetLastError(
        VE_BAD_ARGUMENT, kTraceError,
        "StartRecordingPlayout() invalid compression");
    return (-1);
  }
  if (codecInst == NULL) {
    format = kFileFormatPcm16kHzFile;
    codecInst = &dummyCodec;
  } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0)) {
    format = kFileFormatWavFile;
  } else {
    format = kFileFormatCompressedFile;
  }
  rtc::CritScope cs(&_fileCritSect);
  // Detach the old recorder's callback before replacing it.
  if (_outputFileRecorderPtr) {
    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
  }
  _outputFileRecorderPtr =
      FileRecorder::CreateFileRecorder(_instanceId, (const FileFormats)format);
  if (_outputFileRecorderPtr == NULL) {
    _engineStatisticsPtr->SetLastError(
        VE_INVALID_ARGUMENT, kTraceError,
        "StartRecordingPlayout() fileRecorder format isnot correct");
    return -1;
  }
  if (_outputFileRecorderPtr->StartRecordingAudioFile(
          fileName, (const CodecInst&)*codecInst, notificationTime) != 0) {
    _engineStatisticsPtr->SetLastError(
        VE_BAD_FILE, kTraceError,
        "StartRecordingAudioFile() failed to start file recording");
    // Roll back: stop and drop the recorder so state stays consistent.
    _outputFileRecorderPtr->StopRecording();
    _outputFileRecorderPtr.reset();
    return -1;
  }
  // Register for end-of-recording callbacks (see RecordFileEnded()).
  _outputFileRecorderPtr->RegisterModuleFileCallback(this);
  _outputFileRecording = true;
  return 0;
}
// VoEFile: starts recording the mixed playout signal to a caller-supplied
// OutStream. Same format selection as the file-name overload, but only mono
// codecs are accepted here. Returns 0 on success (including the
// already-recording no-op), -1 on error (reported via _engineStatisticsPtr).
int AudioMixer::StartRecordingPlayout(OutStream* stream,
                                      const CodecInst* codecInst) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::StartRecordingPlayout()");
  if (_outputFileRecording) {
    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                 "StartRecordingPlayout() is already recording");
    return 0;
  }
  FileFormats format;
  const uint32_t notificationTime(0);  // 0 => no periodic notifications.
  CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};
  // NOTE(review): unlike the file-name overload, this path rejects stereo
  // (channels != 1) — presumably a stream-recording limitation; confirm.
  if (codecInst != NULL && codecInst->channels != 1) {
    _engineStatisticsPtr->SetLastError(
        VE_BAD_ARGUMENT, kTraceError,
        "StartRecordingPlayout() invalid compression");
    return (-1);
  }
  if (codecInst == NULL) {
    format = kFileFormatPcm16kHzFile;
    codecInst = &dummyCodec;
  } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0)) {
    format = kFileFormatWavFile;
  } else {
    format = kFileFormatCompressedFile;
  }
  rtc::CritScope cs(&_fileCritSect);
  // Detach the old recorder's callback before replacing it.
  if (_outputFileRecorderPtr) {
    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
  }
  _outputFileRecorderPtr =
      FileRecorder::CreateFileRecorder(_instanceId, (const FileFormats)format);
  if (_outputFileRecorderPtr == NULL) {
    _engineStatisticsPtr->SetLastError(
        VE_INVALID_ARGUMENT, kTraceError,
        "StartRecordingPlayout() fileRecorder format isnot correct");
    return -1;
  }
  if (_outputFileRecorderPtr->StartRecordingAudioFile(stream, *codecInst,
                                                      notificationTime) != 0) {
    _engineStatisticsPtr->SetLastError(
        VE_BAD_FILE, kTraceError,
        "StartRecordingAudioFile() failed to start file recording");
    // Roll back: stop and drop the recorder so state stays consistent.
    _outputFileRecorderPtr->StopRecording();
    _outputFileRecorderPtr.reset();
    return -1;
  }
  // Register for end-of-recording callbacks (see RecordFileEnded()).
  _outputFileRecorderPtr->RegisterModuleFileCallback(this);
  _outputFileRecording = true;
  return 0;
}
// VoEFile: stops an ongoing playout recording. Returns 0 on success, -1 if
// nothing was recording or the recorder failed to stop (reported via
// _engineStatisticsPtr).
int AudioMixer::StopRecordingPlayout() {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::StopRecordingPlayout()");
  // Fix: take _fileCritSect BEFORE reading _outputFileRecording. The
  // original read the flag unsynchronized, racing with RecordFileEnded(),
  // which clears it under the same lock.
  rtc::CritScope cs(&_fileCritSect);
  if (!_outputFileRecording) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                 "StopRecordingPlayout() file isnot recording");
    return -1;
  }
  // Fix: guard the recorder pointer. If the recorder already shut down
  // (flag stale), treat stop as a successful no-op instead of a null deref.
  if (!_outputFileRecorderPtr) {
    _outputFileRecording = false;
    return 0;
  }
  if (_outputFileRecorderPtr->StopRecording() != 0) {
    _engineStatisticsPtr->SetLastError(
        VE_STOP_RECORDING_FAILED, kTraceError,
        "StopRecording(), could not stop recording");
    return -1;
  }
  // Detach the callback before dropping the recorder.
  _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
  _outputFileRecorderPtr.reset();
  _outputFileRecording = false;
  return 0;
}
// Produces the mixed signal at the requested rate/channel count into
// |frame| by delegating to the mixer module. Always returns 0.
int AudioMixer::GetMixedAudio(int sample_rate_hz,
                              size_t num_channels,
                              AudioFrame* frame) {
  WEBRTC_TRACE(
      kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
      "AudioMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%" PRIuS ")",
      sample_rate_hz, num_channels);
  // --- Record playout if enabled
  {
    rtc::CritScope cs(&_fileCritSect);
    // NOTE(review): this records _audioFrame — the member frame from the
    // previous DoOperationsOnCombinedSignal() pass — not the frame mixed
    // below. Presumably intentional during the mixer rewrite; confirm.
    if (_outputFileRecording && _outputFileRecorderPtr)
      _outputFileRecorderPtr->RecordAudioToFile(_audioFrame);
  }
  _mixerModule.Mix(sample_rate_hz, num_channels, frame);
  return 0;
}
// Post-mix processing of the combined signal held in _audioFrame:
// tracks mixing-frequency changes, applies stereo panning, optionally feeds
// the frame to the APM reverse stream, invokes the external-media callback,
// and updates the output audio level. Always returns 0.
int32_t AudioMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm) {
  if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz) {
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "AudioMixer::DoOperationsOnCombinedSignal() => "
                 "mixing frequency = %d",
                 _audioFrame.sample_rate_hz_);
    _mixingFrequencyHz = _audioFrame.sample_rate_hz_;
  }
  // Scale left and/or right channel(s) if balance is active
  if (_panLeft != 1.0 || _panRight != 1.0) {
    if (_audioFrame.num_channels_ == 1) {
      // Panning requires two channels; upmix mono to stereo first.
      AudioFrameOperations::MonoToStereo(&_audioFrame);
    } else {
      // Pure stereo mode (we are receiving a stereo signal).
    }
    RTC_DCHECK_EQ(_audioFrame.num_channels_, static_cast<size_t>(2));
    AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
  }
  // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
  if (feed_data_to_apm) {
    if (_audioProcessingModulePtr->ProcessReverseStream(&_audioFrame) != 0) {
      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                   "AudioProcessingModule::ProcessReverseStream() => error");
      RTC_DCHECK(false);
    }
  }
  // --- External media processing
  {
    rtc::CritScope cs(&_callbackCritSect);
    if (_externalMedia) {
      const bool is_stereo = (_audioFrame.num_channels_ == 2);
      if (_externalMediaCallbackPtr) {
        _externalMediaCallbackPtr->Process(
            -1, kPlaybackAllChannelsMixed,
            reinterpret_cast<int16_t*>(_audioFrame.data_),
            _audioFrame.samples_per_channel_, _audioFrame.sample_rate_hz_,
            is_stereo);
      }
    }
  }
  // --- Measure audio level (0-9) for the combined signal
  _audioLevel.ComputeLevel(_audioFrame);
  return 0;
}
} // namespace voe
} // namespace webrtc

View File

@ -9,7 +9,7 @@
{
'targets': [
{
'target_name': 'new_audio_conference_mixer',
'target_name': 'audio_mixer',
'type': 'static_library',
'dependencies': [
'audio_processing',
@ -21,23 +21,10 @@
'sources': [
'audio_frame_manipulator.cc',
'audio_frame_manipulator.h',
'new_audio_conference_mixer.h',
'audio_mixer_defines.h',
'new_audio_conference_mixer_impl.cc',
'new_audio_conference_mixer_impl.h',
],
},
{
'target_name': 'audio_mixer',
'type': 'static_library',
'dependencies': [
'new_audio_conference_mixer',
'webrtc_utility',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
'sources': [
'audio_mixer.h',
'audio_mixer.cc',
'audio_mixer_defines.h',
'audio_mixer_impl.cc',
'audio_mixer_impl.h',
],
},
], # targets

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@ -13,108 +13,64 @@
#include <memory>
#include "webrtc/base/criticalsection.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_mixer/new_audio_conference_mixer.h"
#include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
#include "webrtc/modules/utility/include/file_recorder.h"
#include "webrtc/voice_engine/level_indicator.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
#include "webrtc/modules/include/module.h"
#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
class MixerAudioSource;
class AudioProcessing;
class FileWrapper;
class VoEMediaProcess;
namespace voe {
class Statistics;
// Note: this class is in the process of being rewritten and merged
// with AudioConferenceMixer. Expect inheritance chains to be changed,
// member functions removed or renamed.
class AudioMixer : public FileCallback {
class AudioMixer {
public:
static int32_t Create(AudioMixer*& mixer, uint32_t instanceId); // NOLINT
static const int kMaximumAmountOfMixedAudioSources = 3;
enum Frequency {
kNbInHz = 8000,
kWbInHz = 16000,
kSwbInHz = 32000,
kFbInHz = 48000,
kLowestPossible = -1,
kDefaultFrequency = kWbInHz
};
static void Destroy(AudioMixer*& mixer); // NOLINT
// Factory method. Constructor disabled.
static std::unique_ptr<AudioMixer> Create(int id);
virtual ~AudioMixer() {}
int32_t SetEngineInformation(Statistics& engineStatistics); // NOLINT
// Add/remove audio sources as candidates for mixing.
virtual int32_t SetMixabilityStatus(MixerAudioSource* audio_source,
bool mixable) = 0;
// Returns true if an audio source is a candidate for mixing.
virtual bool MixabilityStatus(const MixerAudioSource& audio_source) const = 0;
int32_t SetAudioProcessingModule(AudioProcessing* audioProcessingModule);
// Inform the mixer that the audio source should always be mixed and not
// count toward the number of mixed audio sources. Note that an audio source
// must have been added to the mixer (by calling SetMixabilityStatus())
// before this function can be successfully called.
virtual int32_t SetAnonymousMixabilityStatus(MixerAudioSource* audio_source,
bool mixable) = 0;
// VoEExternalMedia
int RegisterExternalMediaProcessing(VoEMediaProcess& // NOLINT
proccess_object);
// Performs mixing by asking registered audio sources for audio. The
// mixed result is placed in the provided AudioFrame. Can only be
// called from a single thread. The rate and channels arguments
// specify the rate and number of channels of the mix result.
virtual void Mix(int sample_rate,
size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) = 0;
int DeRegisterExternalMediaProcessing();
// Returns true if the audio source is mixed anonymously.
virtual bool AnonymousMixabilityStatus(
const MixerAudioSource& audio_source) const = 0;
int32_t DoOperationsOnCombinedSignal(bool feed_data_to_apm);
// Output level functions for VoEVolumeControl. Return value
// between 0 and 9 is returned by voe::AudioLevel.
virtual int GetOutputAudioLevel() = 0;
int32_t SetMixabilityStatus(MixerAudioSource& audio_source, // NOLINT
bool mixable);
// Return value between 0 and 0x7fff is returned by voe::AudioLevel.
virtual int GetOutputAudioLevelFullRange() = 0;
int32_t SetAnonymousMixabilityStatus(
MixerAudioSource& audio_source, // NOLINT
bool mixable);
int GetMixedAudio(int sample_rate_hz,
size_t num_channels,
AudioFrame* audioFrame);
// VoEVolumeControl
int GetSpeechOutputLevel(uint32_t& level); // NOLINT
int GetSpeechOutputLevelFullRange(uint32_t& level); // NOLINT
int SetOutputVolumePan(float left, float right);
int GetOutputVolumePan(float& left, float& right); // NOLINT
// VoEFile
int StartRecordingPlayout(const char* fileName, const CodecInst* codecInst);
int StartRecordingPlayout(OutStream* stream, const CodecInst* codecInst);
int StopRecordingPlayout();
virtual ~AudioMixer();
// For file recording
void PlayNotification(int32_t id, uint32_t durationMs);
void RecordNotification(int32_t id, uint32_t durationMs);
void PlayFileEnded(int32_t id);
void RecordFileEnded(int32_t id);
private:
explicit AudioMixer(uint32_t instanceId);
// uses
Statistics* _engineStatisticsPtr;
AudioProcessing* _audioProcessingModulePtr;
rtc::CriticalSection _callbackCritSect;
// protect the _outputFileRecorderPtr and _outputFileRecording
rtc::CriticalSection _fileCritSect;
NewAudioConferenceMixer& _mixerModule;
AudioFrame _audioFrame;
// Converts mixed audio to the audio processing rate.
PushResampler<int16_t> audioproc_resampler_;
AudioLevel _audioLevel; // measures audio level for the combined signal
int _instanceId;
VoEMediaProcess* _externalMediaCallbackPtr;
bool _externalMedia;
float _panLeft;
float _panRight;
int _mixingFrequencyHz;
std::unique_ptr<FileRecorder> _outputFileRecorderPtr;
bool _outputFileRecording;
protected:
AudioMixer() {}
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_H_

View File

@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_mixer/new_audio_conference_mixer_impl.h"
#include "webrtc/modules/audio_mixer/audio_mixer_impl.h"
#include <algorithm>
#include <functional>
@ -119,16 +119,16 @@ void NewMixHistory::ResetMixedStatus() {
is_mixed_ = false;
}
NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) {
NewAudioConferenceMixerImpl* mixer = new NewAudioConferenceMixerImpl(id);
std::unique_ptr<AudioMixer> AudioMixer::Create(int id) {
AudioMixerImpl* mixer = new AudioMixerImpl(id);
if (!mixer->Init()) {
delete mixer;
return NULL;
}
return mixer;
return std::unique_ptr<AudioMixer>(mixer);
}
NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
AudioMixerImpl::AudioMixerImpl(int id)
: id_(id),
output_frequency_(kDefaultFrequency),
sample_size_(0),
@ -140,9 +140,9 @@ NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
thread_checker_.DetachFromThread();
}
NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {}
AudioMixerImpl::~AudioMixerImpl() {}
bool NewAudioConferenceMixerImpl::Init() {
bool AudioMixerImpl::Init() {
crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
if (crit_.get() == NULL)
return false;
@ -183,9 +183,9 @@ bool NewAudioConferenceMixerImpl::Init() {
return true;
}
void NewAudioConferenceMixerImpl::Mix(int sample_rate,
size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) {
void AudioMixerImpl::Mix(int sample_rate,
size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) {
RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
RTC_DCHECK(thread_checker_.CalledOnValidThread());
AudioFrameList mixList;
@ -260,26 +260,23 @@ void NewAudioConferenceMixerImpl::Mix(int sample_rate,
return;
}
int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
const Frequency& frequency) {
int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) {
CriticalSectionScoped cs(crit_.get());
output_frequency_ = frequency;
sample_size_ =
static_cast<size_t>((output_frequency_ * kProcessPeriodicityInMs) / 1000);
static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000);
return 0;
}
NewAudioConferenceMixer::Frequency
NewAudioConferenceMixerImpl::OutputFrequency() const {
AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
CriticalSectionScoped cs(crit_.get());
return output_frequency_;
}
int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
MixerAudioSource* audio_source,
bool mixable) {
int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
bool mixable) {
if (!mixable) {
// Anonymous audio sources are in a separate list. Make sure that the
// audio source is in the _audioSourceList if it is being mixed.
@ -323,13 +320,13 @@ int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
return 0;
}
bool NewAudioConferenceMixerImpl::MixabilityStatus(
bool AudioMixerImpl::MixabilityStatus(
const MixerAudioSource& audio_source) const {
CriticalSectionScoped cs(cb_crit_.get());
return IsAudioSourceInList(audio_source, audio_source_list_);
}
int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
MixerAudioSource* audio_source,
bool anonymous) {
CriticalSectionScoped cs(cb_crit_.get());
@ -364,14 +361,13 @@ int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
: -1;
}
bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus(
bool AudioMixerImpl::AnonymousMixabilityStatus(
const MixerAudioSource& audio_source) const {
CriticalSectionScoped cs(cb_crit_.get());
return IsAudioSourceInList(audio_source, additional_audio_source_list_);
}
AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
size_t maxAudioFrameCounter) const {
AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const {
AudioFrameList result;
std::vector<SourceFrame> audioSourceMixingDataList;
@ -428,7 +424,7 @@ AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
return result;
}
void NewAudioConferenceMixerImpl::GetAdditionalAudio(
void AudioMixerImpl::GetAdditionalAudio(
AudioFrameList* additionalFramesList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
"GetAdditionalAudio(additionalFramesList)");
@ -462,7 +458,7 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
}
}
bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
bool AudioMixerImpl::IsAudioSourceInList(
const MixerAudioSource& audio_source,
const MixerAudioSourceList& audioSourceList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
@ -471,7 +467,7 @@ bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
&audio_source) != audioSourceList.end();
}
bool NewAudioConferenceMixerImpl::AddAudioSourceToList(
bool AudioMixerImpl::AddAudioSourceToList(
MixerAudioSource* audio_source,
MixerAudioSourceList* audioSourceList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
@ -482,7 +478,7 @@ bool NewAudioConferenceMixerImpl::AddAudioSourceToList(
return true;
}
bool NewAudioConferenceMixerImpl::RemoveAudioSourceFromList(
bool AudioMixerImpl::RemoveAudioSourceFromList(
MixerAudioSource* audio_source,
MixerAudioSourceList* audioSourceList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
@ -499,11 +495,10 @@ bool NewAudioConferenceMixerImpl::RemoveAudioSourceFromList(
}
}
int32_t NewAudioConferenceMixerImpl::MixFromList(
AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList,
int32_t id,
bool use_limiter) {
int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList,
int32_t id,
bool use_limiter) {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
"MixFromList(mixedAudio, audioFrameList)");
if (audioFrameList.empty())
@ -535,7 +530,7 @@ int32_t NewAudioConferenceMixerImpl::MixFromList(
}
// TODO(andrew): consolidate this function with MixFromList.
int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList(
int32_t AudioMixerImpl::MixAnonomouslyFromList(
AudioFrame* mixedAudio,
const AudioFrameList& audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
@ -553,8 +548,7 @@ int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList(
return 0;
}
bool NewAudioConferenceMixerImpl::LimitMixedAudio(
AudioFrame* mixedAudio) const {
bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
if (!use_limiter_) {
return true;
}
@ -583,14 +577,14 @@ bool NewAudioConferenceMixerImpl::LimitMixedAudio(
return true;
}
int NewAudioConferenceMixerImpl::GetOutputAudioLevel() {
int AudioMixerImpl::GetOutputAudioLevel() {
const int level = audio_level_.Level();
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
"GetAudioOutputLevel() => level=%d", level);
return level;
}
int NewAudioConferenceMixerImpl::GetOutputAudioLevelFullRange() {
int AudioMixerImpl::GetOutputAudioLevelFullRange() {
const int level = audio_level_.LevelFullRange();
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
"GetAudioOutputLevelFullRange() => level=%d", level);

View File

@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_MIXER_NEW_AUDIO_CONFERENCE_MIXER_IMPL_H_
#define WEBRTC_MODULES_AUDIO_MIXER_NEW_AUDIO_CONFERENCE_MIXER_IMPL_H_
#ifndef WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
#define WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
#include <list>
#include <map>
@ -18,7 +18,7 @@
#include "webrtc/base/thread_checker.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_mixer/new_audio_conference_mixer.h"
#include "webrtc/modules/audio_mixer/audio_mixer.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/voice_engine/level_indicator.h"
@ -57,19 +57,19 @@ class NewMixHistory {
bool is_mixed_;
};
class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
class AudioMixerImpl : public AudioMixer {
public:
// AudioProcessing only accepts 10 ms frames.
enum { kProcessPeriodicityInMs = 10 };
static const int kFrameDurationInMs = 10;
explicit NewAudioConferenceMixerImpl(int id);
explicit AudioMixerImpl(int id);
~NewAudioConferenceMixerImpl() override;
~AudioMixerImpl() override;
// Must be called after ctor.
bool Init();
// NewAudioConferenceMixer functions
// AudioMixer functions
int32_t SetMixabilityStatus(MixerAudioSource* audio_source,
bool mixable) override;
bool MixabilityStatus(const MixerAudioSource& audio_source) const override;
@ -164,4 +164,4 @@ class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_MIXER_NEW_AUDIO_CONFERENCE_MIXER_IMPL_H_
#endif // WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_

View File

@ -1,74 +0,0 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_MIXER_NEW_AUDIO_CONFERENCE_MIXER_H_
#define WEBRTC_MODULES_AUDIO_MIXER_NEW_AUDIO_CONFERENCE_MIXER_H_
#include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
#include "webrtc/modules/include/module.h"
#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
class MixerAudioSource;
// Abstract interface for the conference mixer: audio sources register as
// mixing candidates, and Mix() produces the combined signal. Created via the
// Create() factory; concrete implementation lives elsewhere.
class NewAudioConferenceMixer {
 public:
  // Upper bound on the number of (non-anonymous) sources mixed per frame.
  enum { kMaximumAmountOfMixedAudioSources = 3 };
  // Supported output sample rates; default is wideband (16 kHz).
  enum Frequency {
    kNbInHz = 8000,
    kWbInHz = 16000,
    kSwbInHz = 32000,
    kFbInHz = 48000,
    kLowestPossible = -1,
    kDefaultFrequency = kWbInHz
  };
  // Factory method. Constructor disabled.
  static NewAudioConferenceMixer* Create(int id);
  virtual ~NewAudioConferenceMixer() {}
  // Add/remove audio sources as candidates for mixing.
  virtual int32_t SetMixabilityStatus(MixerAudioSource* audio_source,
                                      bool mixable) = 0;
  // Returns true if an audio source is a candidate for mixing.
  virtual bool MixabilityStatus(const MixerAudioSource& audio_source) const = 0;
  // Inform the mixer that the audio source should always be mixed and not
  // count toward the number of mixed audio sources. Note that an audio source
  // must have been added to the mixer (by calling SetMixabilityStatus())
  // before this function can be successfully called.
  virtual int32_t SetAnonymousMixabilityStatus(MixerAudioSource* audio_source,
                                               bool mixable) = 0;
  // Performs mixing by asking registered audio sources for audio. The
  // mixed result is placed in the provided AudioFrame. Can only be
  // called from a single thread. The rate and channels arguments
  // specify the rate and number of channels of the mix result.
  virtual void Mix(int sample_rate,
                   size_t number_of_channels,
                   AudioFrame* audio_frame_for_mixing) = 0;
  // Returns true if the audio source is mixed anonymously.
  virtual bool AnonymousMixabilityStatus(
      const MixerAudioSource& audio_source) const = 0;
  // Output level functions for VoEVolumeControl. Return value
  // between 0 and 9 is returned by voe::AudioLevel.
  virtual int GetOutputAudioLevel() = 0;
  // Return value between 0 and 0x7fff is returned by voe::AudioLevel.
  virtual int GetOutputAudioLevelFullRange() = 0;

 protected:
  NewAudioConferenceMixer() {}
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_MIXER_NEW_AUDIO_CONFERENCE_MIXER_H_

View File

@ -13,7 +13,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
#include "webrtc/modules/audio_mixer/new_audio_conference_mixer.h"
#include "webrtc/modules/audio_mixer/audio_mixer.h"
using testing::_;
using testing::Exactly;
@ -85,8 +85,7 @@ void MixAndCompare(
RTC_DCHECK(frames.size() == frame_info.size());
RTC_DCHECK(frame_info.size() == expected_status.size());
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
std::vector<MockMixerAudioSource> participants(num_audio_sources);
for (int i = 0; i < num_audio_sources; i++) {
@ -112,13 +111,10 @@ void MixAndCompare(
TEST(AudioMixer, AnonymousAndNamed) {
// Should not matter even if partipants are more than
// kMaximumAmountOfMixedAudioSources.
constexpr int kNamed =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
constexpr int kAnonymous =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
constexpr int kNamed = AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
constexpr int kAnonymous = AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource named[kNamed];
MockMixerAudioSource anonymous[kAnonymous];
@ -165,10 +161,9 @@ TEST(AudioMixer, AnonymousAndNamed) {
TEST(AudioMixer, LargestEnergyVadActiveMixed) {
constexpr int kAudioSources =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 3;
AudioMixer::kMaximumAmountOfMixedAudioSources + 3;
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participants[kAudioSources];
@ -197,8 +192,7 @@ TEST(AudioMixer, LargestEnergyVadActiveMixed) {
for (int i = 0; i < kAudioSources; ++i) {
bool is_mixed = participants[i].IsMixed();
if (i == kAudioSources - 1 ||
i < kAudioSources - 1 -
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources) {
i < kAudioSources - 1 - AudioMixer::kMaximumAmountOfMixedAudioSources) {
EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i
<< " wrong.";
} else {
@ -209,8 +203,7 @@ TEST(AudioMixer, LargestEnergyVadActiveMixed) {
}
TEST(AudioMixer, ParticipantSampleRate) {
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
@ -225,8 +218,7 @@ TEST(AudioMixer, ParticipantSampleRate) {
}
TEST(AudioMixer, ParticipantNumberOfChannels) {
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
@ -243,8 +235,7 @@ TEST(AudioMixer, ParticipantNumberOfChannels) {
// Test that the volume is reported as zero when the mixer input
// comprises only zero values.
TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) {
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
@ -263,8 +254,7 @@ TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) {
// Test that the reported volume is maximal when the mixer
// input comprises frames with maximal values.
TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) {
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participant;
ResetFrame(participant.fake_frame());
@ -296,10 +286,9 @@ TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) {
// another participant with higher energy is added.
TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) {
constexpr int kAudioSources =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
std::unique_ptr<NewAudioConferenceMixer> mixer(
NewAudioConferenceMixer::Create(kId));
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId));
MockMixerAudioSource participants[kAudioSources];
for (int i = 0; i < kAudioSources; i++) {
@ -350,7 +339,7 @@ TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) {
TEST(AudioMixer, MutedShouldMixAfterUnmuted) {
constexpr int kAudioSources =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
std::vector<AudioFrame> frames(kAudioSources);
for (auto& frame : frames) {
@ -368,7 +357,7 @@ TEST(AudioMixer, MutedShouldMixAfterUnmuted) {
TEST(AudioMixer, PassiveShouldMixAfterNormal) {
constexpr int kAudioSources =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
std::vector<AudioFrame> frames(kAudioSources);
for (auto& frame : frames) {
@ -386,7 +375,7 @@ TEST(AudioMixer, PassiveShouldMixAfterNormal) {
TEST(AudioMixer, ActiveShouldMixBeforeLoud) {
constexpr int kAudioSources =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
std::vector<AudioFrame> frames(kAudioSources);
for (auto& frame : frames) {
@ -406,7 +395,7 @@ TEST(AudioMixer, ActiveShouldMixBeforeLoud) {
TEST(AudioMixer, UnmutedShouldMixBeforeLoud) {
constexpr int kAudioSources =
NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1;
AudioMixer::kMaximumAmountOfMixedAudioSources + 1;
std::vector<AudioFrame> frames(kAudioSources);
for (auto& frame : frames) {