Support for unmixed remote audio into tracks.

BUG=chromium:121673
R=solenberg@webrtc.org

Review URL: https://codereview.webrtc.org/1505253004 .

Cr-Commit-Position: refs/heads/master@{#10995}
This commit is contained in:
Tommi 2015-12-12 01:37:01 +01:00
parent f67c548576
commit f888bb58da
31 changed files with 526 additions and 56 deletions

View File

@ -167,6 +167,8 @@ class AudioSourceInterface : public MediaSourceInterface {
// TODO(xians): Makes all the interface pure virtual after Chrome has their
// implementations.
// Sets the volume to the source. |volume| is in the range of [0, 10].
// TODO(tommi): This method should be on the track and ideally volume should
// be applied in the track in a way that does not affect clones of the track.
virtual void SetVolume(double volume) {}
// Registers/unregisters observer to the audio source.

View File

@ -29,6 +29,7 @@
#define TALK_APP_WEBRTC_MEDIASTREAMPROVIDER_H_
#include "webrtc/base/basictypes.h"
#include "webrtc/base/scoped_ptr.h"
namespace cricket {
@ -42,6 +43,8 @@ struct VideoOptions;
namespace webrtc {
class AudioSinkInterface;
// TODO(deadbeef): Change the key from an ssrc to a "sender_id" or
// "receiver_id" string, which will be the MSID in the short term and MID in
// the long term.
@ -67,6 +70,13 @@ class AudioProviderInterface {
// |volume| is in the range of [0, 10].
virtual void SetAudioPlayoutVolume(uint32_t ssrc, double volume) = 0;
// Allows for setting a direct audio sink for an incoming audio source.
// Only one audio sink is supported per ssrc and ownership of the sink is
// passed to the provider.
virtual void SetRawAudioSink(
uint32_t ssrc,
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) = 0;
protected:
virtual ~AudioProviderInterface() {}
};

View File

@ -39,6 +39,7 @@
#include "talk/app/webrtc/mediastreamproxy.h"
#include "talk/app/webrtc/mediastreamtrackproxy.h"
#include "talk/app/webrtc/remoteaudiosource.h"
#include "talk/app/webrtc/remoteaudiotrack.h"
#include "talk/app/webrtc/remotevideocapturer.h"
#include "talk/app/webrtc/rtpreceiver.h"
#include "talk/app/webrtc/rtpsender.h"
@ -448,10 +449,12 @@ class RemoteMediaStreamFactory {
MediaStream::Create(stream_label));
}
AudioTrackInterface* AddAudioTrack(webrtc::MediaStreamInterface* stream,
AudioTrackInterface* AddAudioTrack(uint32_t ssrc,
AudioProviderInterface* provider,
webrtc::MediaStreamInterface* stream,
const std::string& track_id) {
return AddTrack<AudioTrackInterface, AudioTrack, AudioTrackProxy>(
stream, track_id, RemoteAudioSource::Create().get());
return AddTrack<AudioTrackInterface, RemoteAudioTrack, AudioTrackProxy>(
stream, track_id, RemoteAudioSource::Create(ssrc, provider));
}
VideoTrackInterface* AddVideoTrack(webrtc::MediaStreamInterface* stream,
@ -467,7 +470,7 @@ class RemoteMediaStreamFactory {
template <typename TI, typename T, typename TP, typename S>
TI* AddTrack(MediaStreamInterface* stream,
const std::string& track_id,
S* source) {
const S& source) {
rtc::scoped_refptr<TI> track(
TP::Create(signaling_thread_, T::Create(track_id, source)));
track->set_state(webrtc::MediaStreamTrackInterface::kLive);
@ -1583,8 +1586,8 @@ void PeerConnection::OnRemoteTrackSeen(const std::string& stream_label,
MediaStreamInterface* stream = remote_streams_->find(stream_label);
if (media_type == cricket::MEDIA_TYPE_AUDIO) {
AudioTrackInterface* audio_track =
remote_stream_factory_->AddAudioTrack(stream, track_id);
AudioTrackInterface* audio_track = remote_stream_factory_->AddAudioTrack(
ssrc, session_.get(), stream, track_id);
CreateAudioReceiver(stream, audio_track, ssrc);
} else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
VideoTrackInterface* video_track =

View File

@ -29,44 +29,143 @@
#include <algorithm>
#include <functional>
#include <utility>
#include "talk/app/webrtc/mediastreamprovider.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/thread.h"
namespace webrtc {
rtc::scoped_refptr<RemoteAudioSource> RemoteAudioSource::Create() {
return new rtc::RefCountedObject<RemoteAudioSource>();
// One-shot helper used to marshal a notification onto the main thread.
// Holds a ref-counted pointer to the source so it stays alive until the
// posted message is handled; deletes itself after dispatching.
class RemoteAudioSource::MessageHandler : public rtc::MessageHandler {
public:
explicit MessageHandler(RemoteAudioSource* source) : source_(source) {}
private:
~MessageHandler() override {}
void OnMessage(rtc::Message* msg) override {
source_->OnMessage(msg);
delete this;  // Fire-and-forget: one message per instance.
}
// Ref keeps the source alive while the message is in flight.
const rtc::scoped_refptr<RemoteAudioSource> source_;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MessageHandler);
};
// Audio sink adapter whose ownership is handed to the audio provider (see
// Initialize()). Forwards incoming audio to the source; its destruction —
// which the provider triggers when it goes away — signals provider teardown.
class RemoteAudioSource::Sink : public AudioSinkInterface {
public:
explicit Sink(RemoteAudioSource* source) : source_(source) {}
// The provider deletes the sink; use that as the "provider gone" signal.
~Sink() override { source_->OnAudioProviderGone(); }
private:
void OnData(const AudioSinkInterface::Data& audio) override {
if (source_)
source_->OnData(audio);
}
// Ref-counted so the source outlives the provider's use of this sink.
const rtc::scoped_refptr<RemoteAudioSource> source_;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Sink);
};
// Factory: constructs the source and then runs Initialize(). Two-phase
// because Initialize() hands out a ref-counted pointer to |this| (via Sink),
// which requires the object to be fully constructed first.
rtc::scoped_refptr<RemoteAudioSource> RemoteAudioSource::Create(
uint32_t ssrc,
AudioProviderInterface* provider) {
rtc::scoped_refptr<RemoteAudioSource> ret(
new rtc::RefCountedObject<RemoteAudioSource>());
ret->Initialize(ssrc, provider);
return ret;
}
RemoteAudioSource::RemoteAudioSource() {
RemoteAudioSource::RemoteAudioSource()
: main_thread_(rtc::Thread::Current()),
state_(MediaSourceInterface::kLive) {
RTC_DCHECK(main_thread_);
}
RemoteAudioSource::~RemoteAudioSource() {
ASSERT(audio_observers_.empty());
RTC_DCHECK(main_thread_->IsCurrent());
RTC_DCHECK(audio_observers_.empty());
RTC_DCHECK(sinks_.empty());
}
MediaSourceInterface::SourceState RemoteAudioSource::state() const {
return MediaSourceInterface::kLive;
}
void RemoteAudioSource::SetVolume(double volume) {
ASSERT(volume >= 0 && volume <= 10);
for (AudioObserverList::iterator it = audio_observers_.begin();
it != audio_observers_.end(); ++it) {
(*it)->OnSetVolume(volume);
// Post-construction setup: hands a Sink to |provider| so this source
// receives unmixed audio for |ssrc| and is notified when the provider goes
// away. Called from Create(); main thread only.
void RemoteAudioSource::Initialize(uint32_t ssrc,
                                   AudioProviderInterface* provider) {
  RTC_DCHECK(main_thread_->IsCurrent());
  // To make sure we always get notified when the provider goes out of scope,
  // we register for callbacks here and not on demand in AddSink.
  if (provider) {  // May be null in tests.
    // A temporary is already an rvalue, so wrapping it in std::move is
    // redundant (and flagged as a pessimizing move by newer compilers).
    provider->SetRawAudioSink(
        ssrc, rtc::scoped_ptr<AudioSinkInterface>(new Sink(this)));
  }
}
// Returns the current state: kLive from construction until the provider
// goes away, then kEnded (see OnMessage()). Main thread only.
MediaSourceInterface::SourceState RemoteAudioSource::state() const {
RTC_DCHECK(main_thread_->IsCurrent());
return state_;
}
// Fans a volume change out to all registered audio observers.
// |volume| is in the range [0, 10].
// NOTE(review): unlike the other methods there is no main-thread DCHECK
// here and |audio_observers_| is not locked — presumably main-thread only;
// confirm against callers.
void RemoteAudioSource::SetVolume(double volume) {
RTC_DCHECK(volume >= 0 && volume <= 10);
for (auto* observer : audio_observers_)
observer->OnSetVolume(volume);
}
void RemoteAudioSource::RegisterAudioObserver(AudioObserver* observer) {
ASSERT(observer != NULL);
ASSERT(std::find(audio_observers_.begin(), audio_observers_.end(),
observer) == audio_observers_.end());
RTC_DCHECK(observer != NULL);
RTC_DCHECK(std::find(audio_observers_.begin(), audio_observers_.end(),
observer) == audio_observers_.end());
audio_observers_.push_back(observer);
}
void RemoteAudioSource::UnregisterAudioObserver(AudioObserver* observer) {
ASSERT(observer != NULL);
RTC_DCHECK(observer != NULL);
audio_observers_.remove(observer);
}
// Registers a track sink that will receive raw audio via OnData().
// Main thread only. Rejected (with an error log) once the source has ended.
void RemoteAudioSource::AddSink(AudioTrackSinkInterface* sink) {
RTC_DCHECK(main_thread_->IsCurrent());
RTC_DCHECK(sink);
if (state_ != MediaSourceInterface::kLive) {
LOG(LS_ERROR) << "Can't register sink as the source isn't live.";
return;
}
// |sinks_| is also read on the audio callback thread (OnData), hence the lock.
rtc::CritScope lock(&sink_lock_);
RTC_DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end());
sinks_.push_back(sink);
}
// Unregisters a previously added track sink. Main thread only; locked
// against the audio callback thread (OnData).
void RemoteAudioSource::RemoveSink(AudioTrackSinkInterface* sink) {
RTC_DCHECK(main_thread_->IsCurrent());
RTC_DCHECK(sink);
rtc::CritScope lock(&sink_lock_);
sinks_.remove(sink);
}
// Fans one frame of raw audio out to all registered track sinks.
void RemoteAudioSource::OnData(const AudioSinkInterface::Data& audio) {
// Called on the externally-owned audio callback thread, via/from webrtc.
rtc::CritScope lock(&sink_lock_);
for (auto* sink : sinks_) {
// The literal 16 is bits per sample (Data carries int16_t samples).
sink->OnData(audio.data, 16, audio.sample_rate, audio.channels,
audio.samples_per_channel);
}
}
// Invoked from Sink's destructor when the provider deletes the sink.
void RemoteAudioSource::OnAudioProviderGone() {
// Called when the data provider is deleted. It may be the worker thread
// in libjingle or may be a different worker thread.
// Marshal onto the main thread; MessageHandler keeps |this| alive and
// forwards to OnMessage().
main_thread_->Post(new MessageHandler(this));
}
// Runs on the main thread after the audio provider went away: drops all
// track sinks and transitions the source to kEnded, notifying observers.
void RemoteAudioSource::OnMessage(rtc::Message* msg) {
  RTC_DCHECK(main_thread_->IsCurrent());
  {
    // |sinks_| is accessed under |sink_lock_| everywhere else (AddSink,
    // RemoveSink and the audio-thread OnData), so clear it under the same
    // lock rather than racing a late OnData() callback.
    rtc::CritScope lock(&sink_lock_);
    sinks_.clear();
  }
  state_ = MediaSourceInterface::kEnded;
  FireOnChanged();
}
} // namespace webrtc

View File

@ -29,36 +29,65 @@
#define TALK_APP_WEBRTC_REMOTEAUDIOSOURCE_H_
#include <list>
#include <string>
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/notifier.h"
#include "talk/media/base/audiorenderer.h"
#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/criticalsection.h"
namespace rtc {
struct Message;
class Thread;
} // namespace rtc
namespace webrtc {
using webrtc::AudioSourceInterface;
class AudioProviderInterface;
// This class implements the audio source used by the remote audio track.
class RemoteAudioSource : public Notifier<AudioSourceInterface> {
public:
// Creates an instance of RemoteAudioSource.
static rtc::scoped_refptr<RemoteAudioSource> Create();
protected:
RemoteAudioSource();
virtual ~RemoteAudioSource();
private:
typedef std::list<AudioObserver*> AudioObserverList;
static rtc::scoped_refptr<RemoteAudioSource> Create(
uint32_t ssrc,
AudioProviderInterface* provider);
// MediaSourceInterface implementation.
MediaSourceInterface::SourceState state() const override;
void AddSink(AudioTrackSinkInterface* sink);
void RemoveSink(AudioTrackSinkInterface* sink);
protected:
RemoteAudioSource();
~RemoteAudioSource() override;
// Post-construction initialization where we can do things like save a
// reference to ourselves (we need to be fully constructed first).
void Initialize(uint32_t ssrc, AudioProviderInterface* provider);
private:
typedef std::list<AudioObserver*> AudioObserverList;
// AudioSourceInterface implementation.
void SetVolume(double volume) override;
void RegisterAudioObserver(AudioObserver* observer) override;
void UnregisterAudioObserver(AudioObserver* observer) override;
class Sink;
void OnData(const AudioSinkInterface::Data& audio);
void OnAudioProviderGone();
class MessageHandler;
void OnMessage(rtc::Message* msg);
AudioObserverList audio_observers_;
rtc::CriticalSection sink_lock_;
std::list<AudioTrackSinkInterface*> sinks_;
rtc::Thread* const main_thread_;
SourceState state_;
};
} // namespace webrtc

View File

@ -26,3 +26,70 @@
*/
#include "talk/app/webrtc/remoteaudiotrack.h"
#include "talk/app/webrtc/remoteaudiosource.h"
using rtc::scoped_refptr;
namespace webrtc {
// static
// Factory for RemoteAudioTrack; wraps construction in a RefCountedObject.
scoped_refptr<RemoteAudioTrack> RemoteAudioTrack::Create(
const std::string& id,
const scoped_refptr<RemoteAudioSource>& source) {
return new rtc::RefCountedObject<RemoteAudioTrack>(id, source);
}
// Observes |source| and mirrors its state into the track state:
// a live/muted source maps to kLive, an ended source to kEnded, anything
// else leaves the track in kInitializing.
RemoteAudioTrack::RemoteAudioTrack(
const std::string& label,
const scoped_refptr<RemoteAudioSource>& source)
: MediaStreamTrack<AudioTrackInterface>(label), audio_source_(source) {
audio_source_->RegisterObserver(this);
TrackState new_state = kInitializing;
switch (audio_source_->state()) {
case MediaSourceInterface::kLive:
case MediaSourceInterface::kMuted:
new_state = kLive;
break;
case MediaSourceInterface::kEnded:
new_state = kEnded;
break;
case MediaSourceInterface::kInitializing:
default:
// Keep kInitializing.
break;
}
set_state(new_state);
}
RemoteAudioTrack::~RemoteAudioTrack() {
// Transition to ended before detaching so observers see the final state.
set_state(MediaStreamTrackInterface::kEnded);
audio_source_->UnregisterObserver(this);
}
// Returns the track kind; always the audio kind for this class.
std::string RemoteAudioTrack::kind() const {
return MediaStreamTrackInterface::kAudioKind;
}
// Exposes the underlying remote source as a non-owning pointer.
AudioSourceInterface* RemoteAudioTrack::GetSource() const {
return audio_source_.get();
}
// Sinks are forwarded to the source, which owns the sink list.
void RemoteAudioTrack::AddSink(AudioTrackSinkInterface* sink) {
audio_source_->AddSink(sink);
}
// Mirrors AddSink: delegates sink removal to the source.
void RemoteAudioTrack::RemoveSink(AudioTrackSinkInterface* sink) {
audio_source_->RemoveSink(sink);
}
// Signal-level reporting is not supported for remote tracks; |level| is
// left untouched and false is returned.
bool RemoteAudioTrack::GetSignalLevel(int* level) {
return false;
}
// ObserverInterface: source state changed; end the track when the source
// has ended (the only transition this track reacts to).
void RemoteAudioTrack::OnChanged() {
if (audio_source_->state() == MediaSourceInterface::kEnded)
set_state(MediaStreamTrackInterface::kEnded);
}
} // namespace webrtc

View File

@ -28,4 +28,50 @@
#ifndef TALK_APP_WEBRTC_REMOTEAUDIOTRACK_H_
#define TALK_APP_WEBRTC_REMOTEAUDIOTRACK_H_
#include <string>
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/mediastreamtrack.h"
#include "talk/app/webrtc/notifier.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
namespace webrtc {
class RemoteAudioSource;
class RemoteAudioTrack : public MediaStreamTrack<AudioTrackInterface>,
public ObserverInterface {
protected:
// Protected ctor to force use of factory method.
RemoteAudioTrack(const std::string& label,
const rtc::scoped_refptr<RemoteAudioSource>& source);
~RemoteAudioTrack() override;
public:
static rtc::scoped_refptr<RemoteAudioTrack> Create(
const std::string& id,
const rtc::scoped_refptr<RemoteAudioSource>& source);
private:
// MediaStreamTrack implementation.
std::string kind() const override;
// AudioTrackInterface implementation.
AudioSourceInterface* GetSource() const override;
void AddSink(AudioTrackSinkInterface* sink) override;
void RemoveSink(AudioTrackSinkInterface* sink) override;
bool GetSignalLevel(int* level) override;
// ObserverInterface implementation.
void OnChanged() override;
private:
const rtc::scoped_refptr<RemoteAudioSource> audio_source_;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RemoteAudioTrack);
};
} // namespace webrtc
#endif // TALK_APP_WEBRTC_REMOTEAUDIOTRACK_H_

View File

@ -68,10 +68,10 @@ class AudioRtpReceiver : public ObserverInterface,
private:
void Reconfigure();
std::string id_;
rtc::scoped_refptr<AudioTrackInterface> track_;
uint32_t ssrc_;
AudioProviderInterface* provider_;
const std::string id_;
const rtc::scoped_refptr<AudioTrackInterface> track_;
const uint32_t ssrc_;
AudioProviderInterface* provider_; // Set to null in Stop().
bool cached_track_enabled_;
};

View File

@ -26,10 +26,12 @@
*/
#include <string>
#include <utility>
#include "talk/app/webrtc/audiotrack.h"
#include "talk/app/webrtc/mediastream.h"
#include "talk/app/webrtc/remoteaudiosource.h"
#include "talk/app/webrtc/remoteaudiotrack.h"
#include "talk/app/webrtc/rtpreceiver.h"
#include "talk/app/webrtc/rtpsender.h"
#include "talk/app/webrtc/streamcollection.h"
@ -57,7 +59,8 @@ namespace webrtc {
// Helper class to test RtpSender/RtpReceiver.
class MockAudioProvider : public AudioProviderInterface {
public:
virtual ~MockAudioProvider() {}
~MockAudioProvider() override {}
MOCK_METHOD2(SetAudioPlayout,
void(uint32_t ssrc,
bool enable));
@ -67,6 +70,14 @@ class MockAudioProvider : public AudioProviderInterface {
const cricket::AudioOptions& options,
cricket::AudioRenderer* renderer));
MOCK_METHOD2(SetAudioPlayoutVolume, void(uint32_t ssrc, double volume));
// Test mock: stores the sink so the test controls its lifetime.
void SetRawAudioSink(uint32_t,
rtc::scoped_ptr<AudioSinkInterface> sink) override {
sink_ = std::move(sink);
}
private:
rtc::scoped_ptr<AudioSinkInterface> sink_;
};
// Helper class to test RtpSender/RtpReceiver.
@ -151,8 +162,8 @@ class RtpSenderReceiverTest : public testing::Test {
}
void CreateAudioRtpReceiver() {
audio_track_ =
AudioTrack::Create(kAudioTrackId, RemoteAudioSource::Create().get());
audio_track_ = RemoteAudioTrack::Create(
kAudioTrackId, RemoteAudioSource::Create(kAudioSsrc, NULL));
EXPECT_TRUE(stream_->AddTrack(audio_track_));
EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true));
audio_rtp_receiver_ = new AudioRtpReceiver(stream_->GetAudioTracks()[0],

View File

@ -30,8 +30,9 @@
#include <limits.h>
#include <algorithm>
#include <vector>
#include <set>
#include <utility>
#include <vector>
#include "talk/app/webrtc/jsepicecandidate.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
@ -44,6 +45,7 @@
#include "talk/session/media/channel.h"
#include "talk/session/media/channelmanager.h"
#include "talk/session/media/mediasession.h"
#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/helpers.h"
@ -1318,6 +1320,15 @@ void WebRtcSession::SetAudioPlayoutVolume(uint32_t ssrc, double volume) {
}
}
// AudioProviderInterface: forwards the raw audio sink to the voice channel.
// If no voice channel exists the sink is dropped (and thereby deleted).
void WebRtcSession::SetRawAudioSink(uint32_t ssrc,
rtc::scoped_ptr<AudioSinkInterface> sink) {
ASSERT(signaling_thread()->IsCurrent());
if (!voice_channel_)
return;
voice_channel_->SetRawAudioSink(ssrc, std::move(sink));
}
bool WebRtcSession::SetCaptureDevice(uint32_t ssrc,
cricket::VideoCapturer* camera) {
ASSERT(signaling_thread()->IsCurrent());

View File

@ -38,11 +38,11 @@
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/statstypes.h"
#include "talk/media/base/mediachannel.h"
#include "webrtc/p2p/base/transportcontroller.h"
#include "talk/session/media/mediasession.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/base/sslidentity.h"
#include "webrtc/base/thread.h"
#include "webrtc/p2p/base/transportcontroller.h"
namespace cricket {
@ -250,6 +250,8 @@ class WebRtcSession : public AudioProviderInterface,
const cricket::AudioOptions& options,
cricket::AudioRenderer* renderer) override;
void SetAudioPlayoutVolume(uint32_t ssrc, double volume) override;
void SetRawAudioSink(uint32_t ssrc,
rtc::scoped_ptr<AudioSinkInterface> sink) override;
// Implements VideoMediaProviderInterface.
bool SetCaptureDevice(uint32_t ssrc, cricket::VideoCapturer* camera) override;

View File

@ -38,9 +38,10 @@
#include "talk/media/base/mediaengine.h"
#include "talk/media/base/rtputils.h"
#include "talk/media/base/streamparams.h"
#include "webrtc/p2p/base/sessiondescription.h"
#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/p2p/base/sessiondescription.h"
namespace cricket {
@ -346,6 +347,12 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
virtual bool GetStats(VoiceMediaInfo* info) { return false; }
// Test fake: stores the sink so tests can verify ownership transfer.
virtual void SetRawAudioSink(
uint32_t ssrc,
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
sink_ = std::move(sink);
}
private:
class VoiceChannelAudioSink : public AudioRenderer::Sink {
public:
@ -418,6 +425,7 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
int time_since_last_typing_;
AudioOptions options_;
std::map<uint32_t, VoiceChannelAudioSink*> local_renderers_;
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink_;
};
// A helper function to compare the FakeVoiceMediaChannel::DtmfInfo.

View File

@ -31,6 +31,7 @@
#include <string>
#include <vector>
#include "talk/media/base/audiorenderer.h"
#include "talk/media/base/codec.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/streamparams.h"
@ -51,9 +52,12 @@ class RateLimiter;
class Timing;
}
namespace webrtc {
class AudioSinkInterface;
}
namespace cricket {
class AudioRenderer;
struct RtpHeader;
class ScreencastId;
struct VideoFormat;
@ -1028,6 +1032,10 @@ class VoiceMediaChannel : public MediaChannel {
virtual bool InsertDtmf(uint32_t ssrc, int event, int duration) = 0;
// Gets quality stats for the channel.
virtual bool GetStats(VoiceMediaInfo* info) = 0;
virtual void SetRawAudioSink(
uint32_t ssrc,
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) = 0;
};
struct VideoSendParameters : RtpSendParameters<VideoCodec, VideoOptions> {

View File

@ -28,10 +28,12 @@
#include "talk/media/webrtc/fakewebrtccall.h"
#include <algorithm>
#include <utility>
#include "talk/media/base/rtputils.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/gunit.h"
#include "webrtc/audio/audio_sink.h"
namespace cricket {
FakeAudioSendStream::FakeAudioSendStream(
@ -90,6 +92,11 @@ webrtc::AudioReceiveStream::Stats FakeAudioReceiveStream::GetStats() const {
return stats_;
}
// Test fake: just stores the sink (no audio actually flows).
void FakeAudioReceiveStream::SetSink(
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
sink_ = std::move(sink);
}
FakeVideoSendStream::FakeVideoSendStream(
const webrtc::VideoSendStream::Config& config,
const webrtc::VideoEncoderConfig& encoder_config)

View File

@ -106,10 +106,12 @@ class FakeAudioReceiveStream final : public webrtc::AudioReceiveStream {
// webrtc::AudioReceiveStream implementation.
webrtc::AudioReceiveStream::Stats GetStats() const override;
void SetSink(rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) override;
webrtc::AudioReceiveStream::Config config_;
webrtc::AudioReceiveStream::Stats stats_;
int received_packets_;
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink_;
};
class FakeVideoSendStream final : public webrtc::VideoSendStream,

View File

@ -44,6 +44,7 @@
#include "talk/media/base/streamparams.h"
#include "talk/media/webrtc/webrtcmediaengine.h"
#include "talk/media/webrtc/webrtcvoe.h"
#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/arraysize.h"
#include "webrtc/base/base64.h"
#include "webrtc/base/byteorder.h"
@ -1248,6 +1249,11 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
return config_.voe_channel_id;
}
// Hands |sink| to the underlying webrtc::AudioReceiveStream. Worker thread.
void SetRawAudioSink(rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
stream_->SetSink(std::move(sink));
}
private:
void RecreateAudioReceiveStream(bool use_combined_bwe,
const std::vector<webrtc::RtpExtension>& extensions) {
@ -2032,6 +2038,7 @@ bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32_t ssrc) {
// Clean up and delete the receive stream+channel.
LOG(LS_INFO) << "Removing audio receive stream " << ssrc
<< " with VoiceEngine channel #" << channel << ".";
it->second->SetRawAudioSink(nullptr);
delete it->second;
recv_streams_.erase(it);
return DeleteVoEChannel(channel);
@ -2408,6 +2415,19 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
return true;
}
// VoiceMediaChannel: attaches |sink| to the receive stream identified by
// |ssrc|. If no receive stream exists for |ssrc| the sink is dropped (and
// thereby deleted). Worker thread only.
void WebRtcVoiceMediaChannel::SetRawAudioSink(
    uint32_t ssrc,
    rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
  LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetRawAudioSink";
  const auto it = recv_streams_.find(ssrc);
  if (it == recv_streams_.end()) {
    // Trailing space added so the ssrc doesn't run into the message text.
    LOG(LS_WARNING) << "SetRawAudioSink: no recv stream " << ssrc;
    return;
  }
  it->second->SetRawAudioSink(std::move(sink));
}
int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
unsigned int ulevel = 0;
int ret = engine()->voe()->volume()->GetSpeechOutputLevel(channel, ulevel);

View File

@ -196,6 +196,10 @@ class WebRtcVoiceMediaChannel final : public VoiceMediaChannel,
void OnReadyToSend(bool ready) override {}
bool GetStats(VoiceMediaInfo* info) override;
void SetRawAudioSink(
uint32_t ssrc,
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) override;
// implements Transport interface
bool SendRtp(const uint8_t* data,
size_t len,

View File

@ -30,6 +30,7 @@
#include "talk/media/base/constants.h"
#include "talk/media/base/rtputils.h"
#include "talk/session/media/channelmanager.h"
#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/byteorder.h"
@ -40,9 +41,18 @@
#include "webrtc/p2p/base/transportchannel.h"
namespace cricket {
using rtc::Bind;
namespace {
// Worker-thread trampoline for VoiceChannel::SetRawAudioSink. Bind cannot
// carry move-only types like scoped_ptr, so the caller passes a pointer to
// its stack-local scoped_ptr and we move out of it here — safe because the
// invoke is synchronous. Returns true to satisfy InvokeOnWorker's signature.
bool SetRawAudioSink_w(VoiceMediaChannel* channel,
uint32_t ssrc,
rtc::scoped_ptr<webrtc::AudioSinkInterface>* sink) {
channel->SetRawAudioSink(ssrc, std::move(*sink));
return true;
}
} // namespace
enum {
MSG_EARLYMEDIATIMEOUT = 1,
MSG_SCREENCASTWINDOWEVENT,
@ -1376,6 +1386,15 @@ bool VoiceChannel::SetOutputVolume(uint32_t ssrc, double volume) {
media_channel(), ssrc, volume));
}
// Installs a raw audio sink for |ssrc| on the media channel, hopping to the
// worker thread.
void VoiceChannel::SetRawAudioSink(
uint32_t ssrc,
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
// We need to work around Bind's lack of support for scoped_ptr and ownership
// passing, so we invoke our own little routine that gets a pointer to our
// local variable. This is OK since we're synchronously invoking.
InvokeOnWorker(Bind(&SetRawAudioSink_w, media_channel(), ssrc, &sink));
}
bool VoiceChannel::GetStats(VoiceMediaInfo* stats) {
return InvokeOnWorker(Bind(&VoiceMediaChannel::GetStats,
media_channel(), stats));

View File

@ -38,19 +38,24 @@
#include "talk/media/base/mediaengine.h"
#include "talk/media/base/streamparams.h"
#include "talk/media/base/videocapturer.h"
#include "webrtc/p2p/base/transportcontroller.h"
#include "webrtc/p2p/client/socketmonitor.h"
#include "talk/session/media/audiomonitor.h"
#include "talk/session/media/bundlefilter.h"
#include "talk/session/media/mediamonitor.h"
#include "talk/session/media/mediasession.h"
#include "talk/session/media/rtcpmuxfilter.h"
#include "talk/session/media/srtpfilter.h"
#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/asyncudpsocket.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/network.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/base/window.h"
#include "webrtc/p2p/base/transportcontroller.h"
#include "webrtc/p2p/client/socketmonitor.h"
namespace webrtc {
class AudioSinkInterface;
} // namespace webrtc
namespace cricket {
@ -367,6 +372,9 @@ class VoiceChannel : public BaseChannel {
// event 0-9, *, #, A-D.
bool InsertDtmf(uint32_t ssrc, int event_code, int duration);
bool SetOutputVolume(uint32_t ssrc, double volume);
void SetRawAudioSink(uint32_t ssrc,
rtc::scoped_ptr<webrtc::AudioSinkInterface> sink);
// Get statistics about the current media session.
bool GetStats(VoiceMediaInfo* stats);

View File

@ -34,7 +34,6 @@
#include "talk/media/base/screencastid.h"
#include "talk/media/base/testutils.h"
#include "talk/session/media/channel.h"
#include "webrtc/p2p/base/faketransportcontroller.h"
#include "webrtc/base/arraysize.h"
#include "webrtc/base/fileutils.h"
#include "webrtc/base/gunit.h"
@ -45,6 +44,7 @@
#include "webrtc/base/ssladapter.h"
#include "webrtc/base/sslidentity.h"
#include "webrtc/base/window.h"
#include "webrtc/p2p/base/faketransportcontroller.h"
#define MAYBE_SKIP_TEST(feature) \
if (!(rtc::SSLStreamAdapter::feature())) { \

View File

@ -14,6 +14,7 @@ source_set("audio") {
"audio_receive_stream.h",
"audio_send_stream.cc",
"audio_send_stream.h",
"audio_sink.h",
"audio_state.cc",
"audio_state.h",
"conversion.h",
@ -31,7 +32,7 @@ source_set("audio") {
deps = [
"..:webrtc_common",
"../voice_engine",
"../system_wrappers",
"../voice_engine",
]
}

View File

@ -11,7 +11,9 @@
#include "webrtc/audio/audio_receive_stream.h"
#include <string>
#include <utility>
#include "webrtc/audio/audio_sink.h"
#include "webrtc/audio/audio_state.h"
#include "webrtc/audio/conversion.h"
#include "webrtc/base/checks.h"
@ -201,6 +203,11 @@ webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const {
return stats;
}
// webrtc::AudioReceiveStream: forwards the sink to the voice-engine channel
// via its proxy. Construction thread only.
void AudioReceiveStream::SetSink(rtc::scoped_ptr<AudioSinkInterface> sink) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
channel_proxy_->SetSink(std::move(sink));
}
const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
return config_;

View File

@ -24,6 +24,7 @@ class ChannelProxy;
} // namespace voe
namespace internal {
class AudioReceiveStream final : public webrtc::AudioReceiveStream {
public:
AudioReceiveStream(RemoteBitrateEstimator* remote_bitrate_estimator,
@ -43,6 +44,8 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream {
// webrtc::AudioReceiveStream implementation.
webrtc::AudioReceiveStream::Stats GetStats() const override;
void SetSink(rtc::scoped_ptr<AudioSinkInterface> sink) override;
const webrtc::AudioReceiveStream::Config& config() const;
private:

53
webrtc/audio/audio_sink.h Normal file
View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_AUDIO_AUDIO_SINK_H_
#define WEBRTC_AUDIO_AUDIO_SINK_H_
#if defined(WEBRTC_POSIX) && !defined(__STDC_FORMAT_MACROS)
// Avoid conflict with format_macros.h.
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <stddef.h>
namespace webrtc {
// Represents a simple push audio sink.
// Implementations receive unmixed 16-bit PCM via OnData(); Data holds a
// non-owning pointer into the caller's buffer, valid only for the duration
// of the call.
class AudioSinkInterface {
public:
virtual ~AudioSinkInterface() {}
// Plain-value descriptor of one chunk of audio.
struct Data {
Data(int16_t* data,
size_t samples_per_channel,
int sample_rate,
int channels,
uint32_t timestamp)
: data(data),
samples_per_channel(samples_per_channel),
sample_rate(sample_rate),
channels(channels),
timestamp(timestamp) {}
int16_t* data; // The actual 16bit audio data (not owned).
size_t samples_per_channel; // Number of frames in the buffer.
int sample_rate; // Sample rate in Hz.
int channels; // Number of channels in the audio data.
uint32_t timestamp; // The RTP timestamp of the first sample.
};
virtual void OnData(const Data& audio) = 0;
};
} // namespace webrtc
#endif // WEBRTC_AUDIO_AUDIO_SINK_H_

View File

@ -18,6 +18,7 @@
'audio/audio_receive_stream.h',
'audio/audio_send_stream.cc',
'audio/audio_send_stream.h',
'audio/audio_sink.h',
'audio/audio_state.cc',
'audio/audio_state.h',
'audio/conversion.h',

View File

@ -15,6 +15,7 @@
#include <string>
#include <vector>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/config.h"
#include "webrtc/stream.h"
#include "webrtc/transport.h"
@ -23,6 +24,7 @@
namespace webrtc {
class AudioDecoder;
class AudioSinkInterface;
// WORK IN PROGRESS
// This class is under development and is not yet intended for for use outside
@ -100,6 +102,16 @@ class AudioReceiveStream : public ReceiveStream {
};
virtual Stats GetStats() const = 0;
// Sets an audio sink that receives unmixed audio from the receive stream.
// Ownership of the sink is passed to the stream and can be used by the
// caller to do lifetime management (i.e. when the sink's dtor is called).
// Only one sink can be set; passing a null sink clears an existing one.
// NOTE: Audio must still somehow be pulled through AudioTransport for audio
// to stream through this sink. In practice, this happens if mixed audio
// is being pulled+rendered and/or if audio is being pulled for the purposes
// of feeding to the AEC.
virtual void SetSink(rtc::scoped_ptr<AudioSinkInterface> sink) = 0;
};
} // namespace webrtc

View File

@ -73,6 +73,8 @@
#else // WEBRTC_WIN
#include <inttypes.h>
#if !defined(PRId64)
#define PRId64 "I64d"
#endif

View File

@ -11,6 +11,7 @@
#include "webrtc/voice_engine/channel.h"
#include <algorithm>
#include <utility>
#include "webrtc/base/checks.h"
#include "webrtc/base/format_macros.h"
@ -560,6 +561,21 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
}
}
{
// Pass the audio buffers to an optional sink callback, before applying
// scaling/panning, as that applies to the mix operation.
// External recipients of the audio (e.g. via AudioTrack), will do their
// own mixing/dynamic processing.
CriticalSectionScoped cs(&_callbackCritSect);
if (audio_sink_) {
AudioSinkInterface::Data data(
&audioFrame->data_[0],
audioFrame->samples_per_channel_, audioFrame->sample_rate_hz_,
audioFrame->num_channels_, audioFrame->timestamp_);
audio_sink_->OnData(data);
}
}
float output_gain = 1.0f;
float left_pan = 1.0f;
float right_pan = 1.0f;
@ -608,13 +624,10 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
const bool isStereo = (audioFrame->num_channels_ == 2);
if (_outputExternalMediaCallbackPtr)
{
_outputExternalMediaCallbackPtr->Process(
_channelId,
kPlaybackPerChannel,
(int16_t*)audioFrame->data_,
audioFrame->samples_per_channel_,
audioFrame->sample_rate_hz_,
isStereo);
_outputExternalMediaCallbackPtr->Process(
_channelId, kPlaybackPerChannel, (int16_t*)audioFrame->data_,
audioFrame->samples_per_channel_, audioFrame->sample_rate_hz_,
isStereo);
}
}
@ -1172,6 +1185,11 @@ Channel::UpdateLocalTimeStamp()
return 0;
}
// Installs (or clears, when |sink| is null) the raw audio sink. The sink is
// invoked from GetAudioFrame() under |_callbackCritSect|, so take the same
// lock here.
void Channel::SetSink(rtc::scoped_ptr<AudioSinkInterface> sink) {
CriticalSectionScoped cs(&_callbackCritSect);
audio_sink_ = std::move(sink);
}
int32_t
Channel::StartPlayout()
{

View File

@ -11,6 +11,7 @@
#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_H_
#define WEBRTC_VOICE_ENGINE_CHANNEL_H_
#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
@ -192,6 +193,8 @@ public:
CriticalSectionWrapper* callbackCritSect);
int32_t UpdateLocalTimeStamp();
void SetSink(rtc::scoped_ptr<AudioSinkInterface> sink);
// API methods
// VoEBase
@ -508,6 +511,7 @@ private:
TelephoneEventHandler* telephone_event_handler_;
rtc::scoped_ptr<RtpRtcp> _rtpRtcpModule;
rtc::scoped_ptr<AudioCodingModule> audio_coding_;
rtc::scoped_ptr<AudioSinkInterface> audio_sink_;
AudioLevel _outputAudioLevel;
bool _externalTransport;
AudioFrame _audioFrame;

View File

@ -10,6 +10,9 @@
#include "webrtc/voice_engine/channel_proxy.h"
#include <utility>
#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/checks.h"
#include "webrtc/voice_engine/channel.h"
@ -22,6 +25,8 @@ ChannelProxy::ChannelProxy(const ChannelOwner& channel_owner) :
RTC_CHECK(channel_owner_.channel());
}
ChannelProxy::~ChannelProxy() {}
void ChannelProxy::SetRTCPStatus(bool enable) {
channel()->SetRTCPStatus(enable);
}
@ -134,6 +139,11 @@ bool ChannelProxy::SendTelephoneEventOutband(uint8_t event,
channel()->SendTelephoneEventOutband(event, duration_ms, 10, false) == 0;
}
// Forwards the sink to the owned voice-engine Channel. Thread-checked.
void ChannelProxy::SetSink(rtc::scoped_ptr<AudioSinkInterface> sink) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
channel()->SetSink(std::move(sink));
}
Channel* ChannelProxy::channel() const {
RTC_DCHECK(channel_owner_.channel());
return channel_owner_.channel();

View File

@ -20,6 +20,7 @@
namespace webrtc {
class AudioSinkInterface;
class PacketRouter;
class RtpPacketSender;
class TransportFeedbackObserver;
@ -39,7 +40,7 @@ class ChannelProxy {
public:
ChannelProxy();
explicit ChannelProxy(const ChannelOwner& channel_owner);
virtual ~ChannelProxy() {}
virtual ~ChannelProxy();
virtual void SetRTCPStatus(bool enable);
virtual void SetLocalSSRC(uint32_t ssrc);
@ -64,6 +65,8 @@ class ChannelProxy {
virtual bool SetSendTelephoneEventPayloadType(int payload_type);
virtual bool SendTelephoneEventOutband(uint8_t event, uint32_t duration_ms);
virtual void SetSink(rtc::scoped_ptr<AudioSinkInterface> sink);
private:
Channel* channel() const;