From 49a6a27bf02c07e0d1a988e93cffcb5f6705dd96 Mon Sep 17 00:00:00 2001
From: "buildbot@webrtc.org" <buildbot@webrtc.org>
Date: Wed, 21 May 2014 00:24:54 +0000
Subject: [PATCH] (Auto)update libjingle 67555838-> 67643194

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6206 4adac7df-926f-26a2-2b94-8c16560cd09d
---
 talk/session/media/currentspeakermonitor.cc | 16 +++++++++++++---
 talk/session/media/currentspeakermonitor.h  |  8 ++++++--
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/talk/session/media/currentspeakermonitor.cc b/talk/session/media/currentspeakermonitor.cc
index 0009699b5e..492a717ccf 100644
--- a/talk/session/media/currentspeakermonitor.cc
+++ b/talk/session/media/currentspeakermonitor.cc
@@ -59,6 +59,8 @@ void CurrentSpeakerMonitor::Start() {
       this, &CurrentSpeakerMonitor::OnAudioMonitor);
   audio_source_context_->SignalMediaStreamsUpdate.connect(
       this, &CurrentSpeakerMonitor::OnMediaStreamsUpdate);
+  audio_source_context_->SignalMediaStreamsReset.connect(
+      this, &CurrentSpeakerMonitor::OnMediaStreamsReset);
 
   started_ = true;
 }
@@ -190,20 +192,28 @@ void CurrentSpeakerMonitor::OnAudioMonitor(
 }
 
 void CurrentSpeakerMonitor::OnMediaStreamsUpdate(
-    AudioSourceContext* audio_source_context, Session* session,
+    AudioSourceContext* audio_source_context, BaseSession* session,
     const MediaStreams& added, const MediaStreams& removed) {
+  if (audio_source_context == audio_source_context_ && session == session_) {
   // Update the speaking state map based on added and removed streams.
   for (std::vector<cricket::StreamParams>::const_iterator
-       it = removed.video().begin(); it != removed.video().end(); ++it) {
+       it = removed.audio().begin(); it != removed.audio().end(); ++it) {
     ssrc_to_speaking_state_map_.erase(it->first_ssrc());
   }
   for (std::vector<cricket::StreamParams>::const_iterator
-       it = added.video().begin(); it != added.video().end(); ++it) {
+       it = added.audio().begin(); it != added.audio().end(); ++it) {
     ssrc_to_speaking_state_map_[it->first_ssrc()] = SS_NOT_SPEAKING;
   }
+  }
 }
 
+void CurrentSpeakerMonitor::OnMediaStreamsReset(
+    AudioSourceContext* audio_source_context, BaseSession* session) {
+  if (audio_source_context == audio_source_context_ && session == session_) {
+    ssrc_to_speaking_state_map_.clear();
+  }
+}
+
 }  // namespace cricket
diff --git a/talk/session/media/currentspeakermonitor.h b/talk/session/media/currentspeakermonitor.h
index 8ef64b35f6..ff829e22f6 100644
--- a/talk/session/media/currentspeakermonitor.h
+++ b/talk/session/media/currentspeakermonitor.h
@@ -48,7 +48,9 @@ class AudioSourceContext {
  public:
   sigslot::signal2<AudioSourceContext*, const AudioInfo&> SignalAudioMonitor;
-  sigslot::signal4<AudioSourceContext*, Session*, const MediaStreams&,
+  sigslot::signal2<AudioSourceContext*, BaseSession*>
+      SignalMediaStreamsReset;
+  sigslot::signal4<AudioSourceContext*, BaseSession*, const MediaStreams&,
                    const MediaStreams&> SignalMediaStreamsUpdate;
 };
 
@@ -85,9 +87,11 @@ class CurrentSpeakerMonitor : public sigslot::has_slots<> {
 private:
  void OnAudioMonitor(AudioSourceContext* call, const AudioInfo& info);
  void OnMediaStreamsUpdate(AudioSourceContext* call,
-                           Session* session,
+                           BaseSession* session,
                            const MediaStreams& added,
                            const MediaStreams& removed);
+ void OnMediaStreamsReset(AudioSourceContext* audio_source_context,
+                          BaseSession* session);
 
  // These are states that a participant will pass through so that we gradually
  // recognize that they have started and stopped speaking. This avoids