Remove the [Un]RegisterVoiceProcessor() API.

BUG=webrtc:4690
R=pthatcher@webrtc.org

Review URL: https://codereview.webrtc.org/1361633002 .

Cr-Commit-Position: refs/heads/master@{#10027}
Fredrik Solenberg 2015-09-23 12:23:21 +02:00
parent 09677342ae
commit 7d173362d0
16 changed files with 11 additions and 567 deletions
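
For reference, the surface being deleted is the per-SSRC audio frame hook exposed through cricket::VoiceProcessor. The sketch below is reconstructed from the removed talk/media/base/voiceprocessor.h and channelmanager.h hunks in this commit (declarations only, not a drop-in header):

// Direction of the audio path a processor attaches to.
enum MediaProcessorDirection {
  MPD_INVALID = 0,
  MPD_RX = 1 << 0,                  // receive/playout side
  MPD_TX = 1 << 1,                  // send/recording side
  MPD_RX_AND_TX = MPD_RX | MPD_TX,
};

// Callback invoked for every audio frame; the frame may be modified in
// place but must keep its original size.
class VoiceProcessor : public sigslot::has_slots<> {
 public:
  virtual ~VoiceProcessor() {}
  virtual void OnFrame(uint32 ssrc,
                       MediaProcessorDirection direction,
                       AudioFrame* frame) = 0;
};

// Entry points removed from ChannelManager (calls were marshalled to the
// worker thread and forwarded to MediaEngineInterface):
//   bool RegisterVoiceProcessor(uint32 ssrc, VoiceProcessor* processor,
//                               MediaProcessorDirection direction);
//   bool UnregisterVoiceProcessor(uint32 ssrc, VoiceProcessor* processor,
//                                 MediaProcessorDirection direction);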

View File

@@ -480,7 +480,6 @@
'media/base/videoframefactory.cc',
'media/base/videoframefactory.h',
'media/base/videorenderer.h',
'media/base/voiceprocessor.h',
'media/base/yuvframegenerator.cc',
'media/base/yuvframegenerator.h',
'media/devices/deviceinfo.h',

View File

@@ -53,7 +53,6 @@
'sources': [
'media/base/fakecapturemanager.h',
'media/base/fakemediaengine.h',
'media/base/fakemediaprocessor.h',
'media/base/fakenetworkinterface.h',
'media/base/fakertp.h',
'media/base/fakevideocapturer.h',

View File

@@ -27,7 +27,6 @@
#include "talk/media/base/capturemanager.h"
#include "talk/media/base/fakemediaprocessor.h"
#include "talk/media/base/fakevideocapturer.h"
#include "talk/media/base/fakevideorenderer.h"
#include "webrtc/base/gunit.h"
@@ -77,7 +76,6 @@ class CaptureManagerTest : public ::testing::Test, public sigslot::has_slots<> {
}
protected:
cricket::FakeMediaProcessor media_processor_;
cricket::FakeVideoCapturer video_capturer_;
cricket::FakeVideoRenderer video_renderer_;

View File

@@ -771,9 +771,7 @@ class FakeVoiceEngine : public FakeBaseEngine {
public:
FakeVoiceEngine()
: output_volume_(-1),
delay_offset_(0),
rx_processor_(NULL),
tx_processor_(NULL) {
delay_offset_(0) {
// Add a fake audio codec. Note that the name must not be "" as there are
// sanity checks against that.
codecs_.push_back(AudioCodec(101, "fake_audio_codec", 0, 0, 1, 0));
@@ -838,32 +836,6 @@ class FakeVoiceEngine : public FakeBaseEngine {
bool StartAecDump(rtc::PlatformFile file) { return false; }
bool RegisterProcessor(uint32 ssrc, VoiceProcessor* voice_processor,
MediaProcessorDirection direction) {
if (direction == MPD_RX) {
rx_processor_ = voice_processor;
return true;
} else if (direction == MPD_TX) {
tx_processor_ = voice_processor;
return true;
}
return false;
}
bool UnregisterProcessor(uint32 ssrc, VoiceProcessor* voice_processor,
MediaProcessorDirection direction) {
bool unregistered = false;
if (direction & MPD_RX) {
rx_processor_ = NULL;
unregistered = true;
}
if (direction & MPD_TX) {
tx_processor_ = NULL;
unregistered = true;
}
return unregistered;
}
private:
std::vector<FakeVoiceMediaChannel*> channels_;
std::vector<AudioCodec> codecs_;
@@ -871,8 +843,6 @@ class FakeVoiceEngine : public FakeBaseEngine {
int delay_offset_;
std::string in_device_;
std::string out_device_;
VoiceProcessor* rx_processor_;
VoiceProcessor* tx_processor_;
AudioOptions options_;
friend class FakeMediaEngine;
@@ -1005,14 +975,6 @@ class FakeMediaEngine :
voice_.set_fail_create_channel(fail);
video_.set_fail_create_channel(fail);
}
bool voice_processor_registered(MediaProcessorDirection direction) const {
if (direction == MPD_RX) {
return voice_.rx_processor_ != NULL;
} else if (direction == MPD_TX) {
return voice_.tx_processor_ != NULL;
}
return false;
}
};
// CompositeMediaEngine with FakeVoiceEngine to expose SetAudioCodecs to

View File

@@ -25,37 +25,5 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_MEDIA_BASE_FAKEMEDIAPROCESSOR_H_
#define TALK_MEDIA_BASE_FAKEMEDIAPROCESSOR_H_
#include "talk/media/base/voiceprocessor.h"
namespace cricket {
class AudioFrame;
class FakeMediaProcessor : public VoiceProcessor {
public:
FakeMediaProcessor()
: voice_frame_count_(0) {
}
virtual ~FakeMediaProcessor() {}
virtual void OnFrame(uint32 ssrc,
MediaProcessorDirection direction,
AudioFrame* frame) {
++voice_frame_count_;
}
virtual void OnVoiceMute(uint32 ssrc, bool muted) {}
virtual void OnVideoMute(uint32 ssrc, bool muted) {}
int voice_frame_count() const { return voice_frame_count_; }
private:
// TODO(janahan): make this a map so that we can handle multiple ssrcs
int voice_frame_count_;
};
} // namespace cricket
#endif // TALK_MEDIA_BASE_FAKEMEDIAPROCESSOR_H_
// TODO(solenberg): Remove this file once Chromium's libjingle.gyp/.gn are
// updated.

View File

@@ -129,14 +129,6 @@ class MediaEngineInterface {
// Starts AEC dump using existing file.
virtual bool StartAecDump(rtc::PlatformFile file) = 0;
// Voice processors for effects.
virtual bool RegisterVoiceProcessor(uint32 ssrc,
VoiceProcessor* video_processor,
MediaProcessorDirection direction) = 0;
virtual bool UnregisterVoiceProcessor(uint32 ssrc,
VoiceProcessor* video_processor,
MediaProcessorDirection direction) = 0;
};
@@ -241,17 +233,6 @@ class CompositeMediaEngine : public MediaEngineInterface {
return voice_.StartAecDump(file);
}
virtual bool RegisterVoiceProcessor(uint32 ssrc,
VoiceProcessor* processor,
MediaProcessorDirection direction) {
return voice_.RegisterProcessor(ssrc, processor, direction);
}
virtual bool UnregisterVoiceProcessor(uint32 ssrc,
VoiceProcessor* processor,
MediaProcessorDirection direction) {
return voice_.UnregisterProcessor(ssrc, processor, direction);
}
protected:
VOICE voice_;
VIDEO video_;
@@ -286,12 +267,6 @@ class NullVoiceEngine {
}
void SetLogging(int min_sev, const char* filter) {}
bool StartAecDump(rtc::PlatformFile file) { return false; }
bool RegisterProcessor(uint32 ssrc,
VoiceProcessor* voice_processor,
MediaProcessorDirection direction) { return true; }
bool UnregisterProcessor(uint32 ssrc,
VoiceProcessor* voice_processor,
MediaProcessorDirection direction) { return true; }
private:
std::vector<AudioCodec> codecs_;

View File

@@ -28,7 +28,6 @@
#include <stdio.h>
#include <vector>
#include "talk/media/base/fakemediaprocessor.h"
#include "talk/media/base/fakevideocapturer.h"
#include "talk/media/base/fakevideorenderer.h"
#include "talk/media/base/testutils.h"

View File

@@ -25,33 +25,5 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TALK_MEDIA_BASE_VOICEPROCESSOR_H_
#define TALK_MEDIA_BASE_VOICEPROCESSOR_H_
#include "webrtc/base/basictypes.h"
#include "webrtc/base/sigslot.h"
namespace cricket {
class AudioFrame;
enum MediaProcessorDirection {
MPD_INVALID = 0,
MPD_RX = 1 << 0,
MPD_TX = 1 << 1,
MPD_RX_AND_TX = MPD_RX | MPD_TX,
};
class VoiceProcessor : public sigslot::has_slots<> {
public:
virtual ~VoiceProcessor() {}
// Contents of frame may be manipulated by the processor.
// The processed data is expected to be the same size as the
// original data.
virtual void OnFrame(uint32 ssrc,
MediaProcessorDirection direction,
AudioFrame* frame) = 0;
};
} // namespace cricket
#endif // TALK_MEDIA_BASE_VOICEPROCESSOR_H_
// TODO(solenberg): Remove this file once Chromium's libjingle.gyp/.gn are
// updated.

View File

@@ -34,7 +34,6 @@
#include "talk/media/base/codec.h"
#include "talk/media/base/rtputils.h"
#include "talk/media/base/voiceprocessor.h"
#include "talk/media/webrtc/fakewebrtccommon.h"
#include "talk/media/webrtc/webrtcvoe.h"
#include "webrtc/base/basictypes.h"
@@ -187,8 +186,7 @@ class FakeAudioProcessing : public webrtc::AudioProcessing {
class FakeWebRtcVoiceEngine
: public webrtc::VoEAudioProcessing,
public webrtc::VoEBase, public webrtc::VoECodec, public webrtc::VoEDtmf,
public webrtc::VoEHardware,
public webrtc::VoEExternalMedia, public webrtc::VoENetEqStats,
public webrtc::VoEHardware, public webrtc::VoENetEqStats,
public webrtc::VoENetwork, public webrtc::VoERTP_RTCP,
public webrtc::VoEVideoSync, public webrtc::VoEVolumeControl {
public:
@@ -215,7 +213,6 @@ class FakeWebRtcVoiceEngine
opus_dtx(false),
red(false),
nack(false),
media_processor_registered(false),
rx_agc_enabled(false),
rx_agc_mode(webrtc::kAgcDefault),
cn8_type(13),
@@ -246,7 +243,6 @@ class FakeWebRtcVoiceEngine
bool opus_dtx;
bool red;
bool nack;
bool media_processor_registered;
bool rx_agc_enabled;
webrtc::AgcModes rx_agc_mode;
webrtc::AgcConfig rx_agc_config;
@@ -294,8 +290,7 @@ class FakeWebRtcVoiceEngine
playout_fail_channel_(-1),
send_fail_channel_(-1),
recording_sample_rate_(-1),
playout_sample_rate_(-1),
media_processor_(NULL) {
playout_sample_rate_(-1) {
memset(&agc_config_, 0, sizeof(agc_config_));
}
~FakeWebRtcVoiceEngine() {
@@ -307,9 +302,6 @@ class FakeWebRtcVoiceEngine
}
}
bool IsExternalMediaProcessorRegistered() const {
return media_processor_ != NULL;
}
bool IsInited() const { return inited_; }
int GetLastChannel() const { return last_channel_; }
int GetChannelFromLocalSsrc(uint32 local_ssrc) const {
@@ -388,19 +380,6 @@ class FakeWebRtcVoiceEngine
void set_fail_create_channel(bool fail_create_channel) {
fail_create_channel_ = fail_create_channel;
}
void TriggerProcessPacket(MediaProcessorDirection direction) {
webrtc::ProcessingTypes pt =
(direction == cricket::MPD_TX) ?
webrtc::kRecordingPerChannel : webrtc::kPlaybackAllChannelsMixed;
if (media_processor_ != NULL) {
media_processor_->Process(0,
pt,
NULL,
0,
0,
true);
}
}
int AddChannel(const webrtc::Config& config) {
if (fail_create_channel_) {
return -1;
@@ -1101,31 +1080,6 @@ class FakeWebRtcVoiceEngine
return (dtmf_info_.dtmf_event_code == event_code &&
dtmf_info_.dtmf_length_ms == length_ms);
}
// webrtc::VoEExternalMedia
WEBRTC_FUNC(RegisterExternalMediaProcessing,
(int channel, webrtc::ProcessingTypes type,
webrtc::VoEMediaProcess& processObject)) {
WEBRTC_CHECK_CHANNEL(channel);
if (channels_[channel]->media_processor_registered) {
return -1;
}
channels_[channel]->media_processor_registered = true;
media_processor_ = &processObject;
return 0;
}
WEBRTC_FUNC(DeRegisterExternalMediaProcessing,
(int channel, webrtc::ProcessingTypes type)) {
WEBRTC_CHECK_CHANNEL(channel);
if (!channels_[channel]->media_processor_registered) {
return -1;
}
channels_[channel]->media_processor_registered = false;
media_processor_ = NULL;
return 0;
}
WEBRTC_STUB(GetAudioFrame, (int channel, int desired_sample_rate_hz,
webrtc::AudioFrame* frame));
WEBRTC_STUB(SetExternalMixing, (int channel, bool enable));
int GetNetEqCapacity() const {
auto ch = channels_.find(last_channel_);
ASSERT(ch != channels_.end());
@@ -1199,7 +1153,6 @@ class FakeWebRtcVoiceEngine
int recording_sample_rate_;
int playout_sample_rate_;
DtmfInfo dtmf_info_;
webrtc::VoEMediaProcess* media_processor_;
FakeAudioProcessing audio_processing_;
};

View File

@@ -96,7 +96,7 @@ class VoEWrapper {
VoEWrapper()
: engine_(webrtc::VoiceEngine::Create()), processing_(engine_),
base_(engine_), codec_(engine_), dtmf_(engine_),
hw_(engine_), media_(engine_), neteq_(engine_), network_(engine_),
hw_(engine_), neteq_(engine_), network_(engine_),
rtp_(engine_), sync_(engine_), volume_(engine_) {
}
VoEWrapper(webrtc::VoEAudioProcessing* processing,
@@ -104,7 +104,6 @@ class VoEWrapper {
webrtc::VoECodec* codec,
webrtc::VoEDtmf* dtmf,
webrtc::VoEHardware* hw,
webrtc::VoEExternalMedia* media,
webrtc::VoENetEqStats* neteq,
webrtc::VoENetwork* network,
webrtc::VoERTP_RTCP* rtp,
@@ -116,7 +115,6 @@ class VoEWrapper {
codec_(codec),
dtmf_(dtmf),
hw_(hw),
media_(media),
neteq_(neteq),
network_(network),
rtp_(rtp),
@@ -130,7 +128,6 @@ class VoEWrapper {
webrtc::VoECodec* codec() const { return codec_.get(); }
webrtc::VoEDtmf* dtmf() const { return dtmf_.get(); }
webrtc::VoEHardware* hw() const { return hw_.get(); }
webrtc::VoEExternalMedia* media() const { return media_.get(); }
webrtc::VoENetEqStats* neteq() const { return neteq_.get(); }
webrtc::VoENetwork* network() const { return network_.get(); }
webrtc::VoERTP_RTCP* rtp() const { return rtp_.get(); }
@@ -145,7 +142,6 @@ class VoEWrapper {
scoped_voe_ptr<webrtc::VoECodec> codec_;
scoped_voe_ptr<webrtc::VoEDtmf> dtmf_;
scoped_voe_ptr<webrtc::VoEHardware> hw_;
scoped_voe_ptr<webrtc::VoEExternalMedia> media_;
scoped_voe_ptr<webrtc::VoENetEqStats> neteq_;
scoped_voe_ptr<webrtc::VoENetwork> network_;
scoped_voe_ptr<webrtc::VoERTP_RTCP> rtp_;

View File

@@ -42,7 +42,6 @@
#include "talk/media/base/audiorenderer.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/streamparams.h"
#include "talk/media/base/voiceprocessor.h"
#include "talk/media/webrtc/webrtcvoe.h"
#include "webrtc/base/base64.h"
#include "webrtc/base/byteorder.h"
@@ -368,9 +367,7 @@ WebRtcVoiceEngine::WebRtcVoiceEngine()
tracing_(new VoETraceWrapper()),
adm_(NULL),
log_filter_(SeverityToFilter(kDefaultLogSeverity)),
is_dumping_aec_(false),
tx_processor_ssrc_(0),
rx_processor_ssrc_(0) {
is_dumping_aec_(false) {
Construct();
}
@@ -380,9 +377,7 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
tracing_(tracing),
adm_(NULL),
log_filter_(SeverityToFilter(kDefaultLogSeverity)),
is_dumping_aec_(false),
tx_processor_ssrc_(0),
rx_processor_ssrc_(0) {
is_dumping_aec_(false) {
Construct();
}
@@ -490,10 +485,6 @@ WebRtcVoiceEngine::~WebRtcVoiceEngine() {
adm_ = NULL;
}
// Test to see if the media processor was deregistered properly
RTC_DCHECK(SignalRxMediaFrame.is_empty());
RTC_DCHECK(SignalTxMediaFrame.is_empty());
tracing_->SetTraceCallback(NULL);
}
@@ -1277,31 +1268,6 @@ bool WebRtcVoiceEngine::FindChannelAndSsrc(
return false;
}
// This method will search through the WebRtcVoiceMediaChannels and
// obtain the voice engine's channel number.
bool WebRtcVoiceEngine::FindChannelNumFromSsrc(
uint32 ssrc, MediaProcessorDirection direction, int* channel_num) {
RTC_DCHECK(channel_num != NULL);
RTC_DCHECK(direction == MPD_RX || direction == MPD_TX);
*channel_num = -1;
// Find corresponding channel for ssrc.
for (const WebRtcVoiceMediaChannel* ch : channels_) {
RTC_DCHECK(ch != NULL);
if (direction & MPD_RX) {
*channel_num = ch->GetReceiveChannelNum(ssrc);
}
if (*channel_num == -1 && (direction & MPD_TX)) {
*channel_num = ch->GetSendChannelNum(ssrc);
}
if (*channel_num != -1) {
return true;
}
}
LOG(LS_WARNING) << "FindChannelFromSsrc. No Channel Found for Ssrc: " << ssrc;
return false;
}
void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel *channel) {
rtc::CritScope lock(&channels_cs_);
channels_.push_back(channel);
@@ -1370,158 +1336,6 @@ bool WebRtcVoiceEngine::StartAecDump(rtc::PlatformFile file) {
return true;
}
bool WebRtcVoiceEngine::RegisterProcessor(
uint32 ssrc,
VoiceProcessor* voice_processor,
MediaProcessorDirection direction) {
bool register_with_webrtc = false;
int channel_id = -1;
bool success = false;
uint32* processor_ssrc = NULL;
bool found_channel = FindChannelNumFromSsrc(ssrc, direction, &channel_id);
if (voice_processor == NULL || !found_channel) {
LOG(LS_WARNING) << "Media Processing Registration Failed. ssrc: " << ssrc
<< " foundChannel: " << found_channel;
return false;
}
webrtc::ProcessingTypes processing_type;
{
rtc::CritScope cs(&signal_media_critical_);
if (direction == MPD_RX) {
processing_type = webrtc::kPlaybackAllChannelsMixed;
if (SignalRxMediaFrame.is_empty()) {
register_with_webrtc = true;
processor_ssrc = &rx_processor_ssrc_;
}
SignalRxMediaFrame.connect(voice_processor,
&VoiceProcessor::OnFrame);
} else {
processing_type = webrtc::kRecordingPerChannel;
if (SignalTxMediaFrame.is_empty()) {
register_with_webrtc = true;
processor_ssrc = &tx_processor_ssrc_;
}
SignalTxMediaFrame.connect(voice_processor,
&VoiceProcessor::OnFrame);
}
}
if (register_with_webrtc) {
// TODO(janahan): when registering consider instantiating a
// a VoeMediaProcess object and not make the engine extend the interface.
if (voe()->media() && voe()->media()->
RegisterExternalMediaProcessing(channel_id,
processing_type,
*this) != -1) {
LOG(LS_INFO) << "Media Processing Registration Succeeded. channel:"
<< channel_id;
*processor_ssrc = ssrc;
success = true;
} else {
LOG_RTCERR2(RegisterExternalMediaProcessing,
channel_id,
processing_type);
success = false;
}
} else {
// If we don't have to register with the engine, we just needed to
// connect a new processor, set success to true;
success = true;
}
return success;
}
bool WebRtcVoiceEngine::UnregisterProcessorChannel(
MediaProcessorDirection channel_direction,
uint32 ssrc,
VoiceProcessor* voice_processor,
MediaProcessorDirection processor_direction) {
bool success = true;
FrameSignal* signal;
webrtc::ProcessingTypes processing_type;
uint32* processor_ssrc = NULL;
if (channel_direction == MPD_RX) {
signal = &SignalRxMediaFrame;
processing_type = webrtc::kPlaybackAllChannelsMixed;
processor_ssrc = &rx_processor_ssrc_;
} else {
signal = &SignalTxMediaFrame;
processing_type = webrtc::kRecordingPerChannel;
processor_ssrc = &tx_processor_ssrc_;
}
int deregister_id = -1;
{
rtc::CritScope cs(&signal_media_critical_);
if ((processor_direction & channel_direction) != 0 && !signal->is_empty()) {
signal->disconnect(voice_processor);
int channel_id = -1;
bool found_channel = FindChannelNumFromSsrc(ssrc,
channel_direction,
&channel_id);
if (signal->is_empty() && found_channel) {
deregister_id = channel_id;
}
}
}
if (deregister_id != -1) {
if (voe()->media() &&
voe()->media()->DeRegisterExternalMediaProcessing(deregister_id,
processing_type) != -1) {
*processor_ssrc = 0;
LOG(LS_INFO) << "Media Processing DeRegistration Succeeded. channel:"
<< deregister_id;
} else {
LOG_RTCERR2(DeRegisterExternalMediaProcessing,
deregister_id,
processing_type);
success = false;
}
}
return success;
}
bool WebRtcVoiceEngine::UnregisterProcessor(
uint32 ssrc,
VoiceProcessor* voice_processor,
MediaProcessorDirection direction) {
bool success = true;
if (voice_processor == NULL) {
LOG(LS_WARNING) << "Media Processing Deregistration Failed. ssrc: "
<< ssrc;
return false;
}
if (!UnregisterProcessorChannel(MPD_RX, ssrc, voice_processor, direction)) {
success = false;
}
if (!UnregisterProcessorChannel(MPD_TX, ssrc, voice_processor, direction)) {
success = false;
}
return success;
}
// Implementing method from WebRtc VoEMediaProcess interface
// Do not lock mux_channel_cs_ in this callback.
void WebRtcVoiceEngine::Process(int channel,
webrtc::ProcessingTypes type,
int16_t audio10ms[],
size_t length,
int sampling_freq,
bool is_stereo) {
rtc::CritScope cs(&signal_media_critical_);
AudioFrame frame(audio10ms, length, sampling_freq, is_stereo);
if (type == webrtc::kPlaybackAllChannelsMixed) {
SignalRxMediaFrame(rx_processor_ssrc_, MPD_RX, &frame);
} else if (type == webrtc::kRecordingPerChannel) {
SignalTxMediaFrame(tx_processor_ssrc_, MPD_TX, &frame);
} else {
LOG(LS_WARNING) << "Media Processing invoked unexpectedly."
<< " channel: " << channel << " type: " << type
<< " tx_ssrc: " << tx_processor_ssrc_
<< " rx_ssrc: " << rx_processor_ssrc_;
}
}
void WebRtcVoiceEngine::StartAecDump(const std::string& filename) {
if (!is_dumping_aec_) {
// Start dumping AEC when we are not dumping.

View File

@@ -53,15 +53,13 @@ class AudioDeviceModule;
class AudioRenderer;
class VoETraceWrapper;
class VoEWrapper;
class VoiceProcessor;
class WebRtcVoiceMediaChannel;
// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
// It uses the WebRtc VoiceEngine library for audio handling.
class WebRtcVoiceEngine
: public webrtc::VoiceEngineObserver,
public webrtc::TraceCallback,
public webrtc::VoEMediaProcess {
public webrtc::TraceCallback {
friend class WebRtcVoiceMediaChannel;
public:
@@ -93,21 +91,6 @@ class WebRtcVoiceEngine
void SetLogging(int min_sev, const char* filter);
bool RegisterProcessor(uint32 ssrc,
VoiceProcessor* voice_processor,
MediaProcessorDirection direction);
bool UnregisterProcessor(uint32 ssrc,
VoiceProcessor* voice_processor,
MediaProcessorDirection direction);
// Method from webrtc::VoEMediaProcess
void Process(int channel,
webrtc::ProcessingTypes type,
int16_t audio10ms[],
size_t length,
int sampling_freq,
bool is_stereo) override;
// For tracking WebRtc channels. Needed because we have to pause them
// all when switching devices.
// May only be called by WebRtcVoiceMediaChannel.
@@ -135,8 +118,6 @@ class WebRtcVoiceEngine
private:
typedef std::vector<WebRtcVoiceMediaChannel*> ChannelList;
typedef sigslot::
signal3<uint32, MediaProcessorDirection, AudioFrame*> FrameSignal;
void Construct();
void ConstructCodecs();
@@ -176,25 +157,11 @@ class WebRtcVoiceEngine
bool FindChannelAndSsrc(int channel_num,
WebRtcVoiceMediaChannel** channel,
uint32* ssrc) const;
bool FindChannelNumFromSsrc(uint32 ssrc,
MediaProcessorDirection direction,
int* channel_num);
bool UnregisterProcessorChannel(MediaProcessorDirection channel_direction,
uint32 ssrc,
VoiceProcessor* voice_processor,
MediaProcessorDirection processor_direction);
void StartAecDump(const std::string& filename);
void StopAecDump();
int CreateVoiceChannel(VoEWrapper* voe);
// When a voice processor registers with the engine, it is connected
// to either the Rx or Tx signals, based on the direction parameter.
// SignalXXMediaFrame will be invoked for every audio packet.
FrameSignal SignalRxMediaFrame;
FrameSignal SignalTxMediaFrame;
static const int kDefaultLogSeverity = rtc::LS_WARNING;
// The primary instance of WebRtc VoiceEngine.
@@ -225,16 +192,6 @@ class WebRtcVoiceEngine
AudioOptions options_;
AudioOptions option_overrides_;
// When the media processor registers with the engine, the ssrc is cached
// here so that a look up need not be made when the callback is invoked.
// This is necessary because the lookup results in mux_channels_cs lock being
// held and if a remote participant leaves the hangout at the same time
// we hit a deadlock.
uint32 tx_processor_ssrc_;
uint32 rx_processor_ssrc_;
rtc::CriticalSection signal_media_critical_;
// Cache received extended_filter_aec, delay_agnostic_aec and experimental_ns
// values, and apply them in case they are missing in the audio options. We
// need to do this because SetExtraOptions() will revert to defaults for

View File

@@ -30,7 +30,6 @@
#include "webrtc/call.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/fakemediaengine.h"
#include "talk/media/base/fakemediaprocessor.h"
#include "talk/media/base/fakenetworkinterface.h"
#include "talk/media/base/fakertp.h"
#include "talk/media/webrtc/fakewebrtccall.h"
@@ -69,7 +68,6 @@ class FakeVoEWrapper : public cricket::VoEWrapper {
engine, // codec
engine, // dtmf
engine, // hw
engine, // media
engine, // neteq
engine, // network
engine, // rtp
@@ -2603,86 +2601,6 @@ TEST_F(WebRtcVoiceEngineTestFake, TestSetPlayoutError) {
EXPECT_FALSE(channel_->SetPlayout(true));
}
// Test that the Registering/Unregistering with the
// webrtcvoiceengine works as expected
TEST_F(WebRtcVoiceEngineTestFake, RegisterVoiceProcessor) {
EXPECT_TRUE(SetupEngine());
EXPECT_TRUE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(kSsrc2)));
cricket::FakeMediaProcessor vp_1;
cricket::FakeMediaProcessor vp_2;
EXPECT_FALSE(engine_.RegisterProcessor(kSsrc2, &vp_1, cricket::MPD_TX));
EXPECT_TRUE(engine_.RegisterProcessor(kSsrc2, &vp_1, cricket::MPD_RX));
EXPECT_TRUE(engine_.RegisterProcessor(kSsrc2, &vp_2, cricket::MPD_RX));
voe_.TriggerProcessPacket(cricket::MPD_RX);
voe_.TriggerProcessPacket(cricket::MPD_TX);
EXPECT_TRUE(voe_.IsExternalMediaProcessorRegistered());
EXPECT_EQ(1, vp_1.voice_frame_count());
EXPECT_EQ(1, vp_2.voice_frame_count());
EXPECT_TRUE(engine_.UnregisterProcessor(kSsrc2,
&vp_2,
cricket::MPD_RX));
voe_.TriggerProcessPacket(cricket::MPD_RX);
EXPECT_TRUE(voe_.IsExternalMediaProcessorRegistered());
EXPECT_EQ(1, vp_2.voice_frame_count());
EXPECT_EQ(2, vp_1.voice_frame_count());
EXPECT_TRUE(engine_.UnregisterProcessor(kSsrc2,
&vp_1,
cricket::MPD_RX));
voe_.TriggerProcessPacket(cricket::MPD_RX);
EXPECT_FALSE(voe_.IsExternalMediaProcessorRegistered());
EXPECT_EQ(2, vp_1.voice_frame_count());
EXPECT_FALSE(engine_.RegisterProcessor(kSsrc1, &vp_1, cricket::MPD_RX));
EXPECT_TRUE(engine_.RegisterProcessor(kSsrc1, &vp_1, cricket::MPD_TX));
voe_.TriggerProcessPacket(cricket::MPD_RX);
voe_.TriggerProcessPacket(cricket::MPD_TX);
EXPECT_TRUE(voe_.IsExternalMediaProcessorRegistered());
EXPECT_EQ(3, vp_1.voice_frame_count());
EXPECT_TRUE(engine_.UnregisterProcessor(kSsrc1,
&vp_1,
cricket::MPD_RX_AND_TX));
voe_.TriggerProcessPacket(cricket::MPD_TX);
EXPECT_FALSE(voe_.IsExternalMediaProcessorRegistered());
EXPECT_EQ(3, vp_1.voice_frame_count());
EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc2));
EXPECT_FALSE(engine_.RegisterProcessor(kSsrc2, &vp_1, cricket::MPD_RX));
EXPECT_FALSE(voe_.IsExternalMediaProcessorRegistered());
// Test that we can register a processor on the receive channel on SSRC 0.
// This tests the 1:1 case when the receive SSRC is unknown.
EXPECT_TRUE(engine_.RegisterProcessor(0, &vp_1, cricket::MPD_RX));
voe_.TriggerProcessPacket(cricket::MPD_RX);
EXPECT_TRUE(voe_.IsExternalMediaProcessorRegistered());
EXPECT_EQ(4, vp_1.voice_frame_count());
EXPECT_TRUE(engine_.UnregisterProcessor(0,
&vp_1,
cricket::MPD_RX));
// The following tests test that FindChannelNumFromSsrc is doing
// what we expect.
// pick an invalid ssrc and make sure we can't register
EXPECT_FALSE(engine_.RegisterProcessor(99,
&vp_1,
cricket::MPD_RX));
EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
EXPECT_TRUE(engine_.RegisterProcessor(1,
&vp_1,
cricket::MPD_RX));
EXPECT_TRUE(engine_.UnregisterProcessor(1,
&vp_1,
cricket::MPD_RX));
EXPECT_FALSE(engine_.RegisterProcessor(1,
&vp_1,
cricket::MPD_TX));
EXPECT_TRUE(channel_->RemoveRecvStream(1));
}
TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
EXPECT_TRUE(SetupEngine());

View File

@@ -744,24 +744,6 @@ void ChannelManager::GetSupportedFormats_w(
*out_formats = *formats;
}
bool ChannelManager::RegisterVoiceProcessor(
uint32 ssrc,
VoiceProcessor* processor,
MediaProcessorDirection direction) {
return initialized_ && worker_thread_->Invoke<bool>(
Bind(&MediaEngineInterface::RegisterVoiceProcessor, media_engine_.get(),
ssrc, processor, direction));
}
bool ChannelManager::UnregisterVoiceProcessor(
uint32 ssrc,
VoiceProcessor* processor,
MediaProcessorDirection direction) {
return initialized_ && worker_thread_->Invoke<bool>(
Bind(&MediaEngineInterface::UnregisterVoiceProcessor,
media_engine_.get(), ssrc, processor, direction));
}
// The following are done in the new "CaptureManager" style that
// all local video capturers, processors, and managers should move
// to.

View File

@@ -48,7 +48,6 @@ namespace cricket {
const int kDefaultAudioDelayOffset = 0;
class VoiceChannel;
class VoiceProcessor;
// ChannelManager allows the MediaEngine to run on a separate thread, and takes
// care of marshalling calls between threads. It also creates and keeps track of
@@ -174,13 +173,6 @@ class ChannelManager : public rtc::MessageHandler,
// Gets capturer's supported formats in a thread safe manner
std::vector<cricket::VideoFormat> GetSupportedFormats(
VideoCapturer* capturer) const;
// The channel manager handles the Tx and Rx side for Voice processing.
bool RegisterVoiceProcessor(uint32 ssrc,
VoiceProcessor* processor,
MediaProcessorDirection direction);
bool UnregisterVoiceProcessor(uint32 ssrc,
VoiceProcessor* processor,
MediaProcessorDirection direction);
// The following are done in the new "CaptureManager" style that
// all local video capturers, processors, and managers should move to.
// TODO(pthatcher): Make methods nicer by having start return a handle that

View File

@@ -28,7 +28,6 @@
#include "talk/app/webrtc/mediacontroller.h"
#include "talk/media/base/fakecapturemanager.h"
#include "talk/media/base/fakemediaengine.h"
#include "talk/media/base/fakemediaprocessor.h"
#include "talk/media/base/testutils.h"
#include "talk/media/devices/fakedevicemanager.h"
#include "talk/media/webrtc/fakewebrtccall.h"
@@ -529,45 +528,6 @@ TEST_F(ChannelManagerTest, SetLogging) {
EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
}
// Test that the Video/Voice Processors register and unregister
TEST_F(ChannelManagerTest, RegisterProcessors) {
cricket::FakeMediaProcessor fmp;
EXPECT_TRUE(cm_->Init());
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
EXPECT_TRUE(cm_->RegisterVoiceProcessor(1,
&fmp,
cricket::MPD_RX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
EXPECT_TRUE(fme_->voice_processor_registered(cricket::MPD_RX));
EXPECT_TRUE(cm_->UnregisterVoiceProcessor(1,
&fmp,
cricket::MPD_RX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
EXPECT_TRUE(cm_->RegisterVoiceProcessor(1,
&fmp,
cricket::MPD_TX));
EXPECT_TRUE(fme_->voice_processor_registered(cricket::MPD_TX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
EXPECT_TRUE(cm_->UnregisterVoiceProcessor(1,
&fmp,
cricket::MPD_TX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
}
TEST_F(ChannelManagerTest, SetVideoRtxEnabled) {
std::vector<VideoCodec> codecs;
const VideoCodec rtx_codec(96, "rtx", 0, 0, 0, 0);