New rtc::VideoSinkInterface.

The plan is that this interface should be used by all classes which receive a stream of video frames, and replace the two generic classes webrtc::VideoRendererInterface and cricket::VideoRenderer.

And the list goes on; there are a dozen different classes which act as video frame sinks.

At some point, we will likely add some method to handle sink properties, e.g., maximum useful width and height. But hopefully this can be done while keeping the interface very simple.

BUG=webrtc:5426
R=perkj@webrtc.org, pthatcher@webrtc.org

Committed: https://crrev.com/a862d4563fbc26e52bed442de784094ae1dfe5ee
Cr-Commit-Position: refs/heads/master@{#11396}

Review URL: https://codereview.webrtc.org/1594973006

Cr-Commit-Position: refs/heads/master@{#11414}
This commit is contained in:
nisse 2016-01-28 04:47:08 -08:00 committed by Commit bot
parent 533a4e4882
commit e73afbaf17
19 changed files with 166 additions and 120 deletions

View File

@ -40,6 +40,7 @@
#include "webrtc/base/basictypes.h"
#include "webrtc/base/refcount.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/media/base/videosinkinterface.h"
namespace cricket {
@ -116,12 +117,17 @@ class MediaStreamTrackInterface : public rtc::RefCountInterface,
};
// Interface for rendering VideoFrames from a VideoTrack
class VideoRendererInterface {
class VideoRendererInterface
: public rtc::VideoSinkInterface<cricket::VideoFrame> {
public:
// |frame| may have pending rotation. For clients which can't apply rotation,
// |frame|->GetCopyWithRotationApplied() will return a frame that has the
// rotation applied.
virtual void RenderFrame(const cricket::VideoFrame* frame) = 0;
// Intended to replace RenderFrame.
void OnFrame(const cricket::VideoFrame& frame) override {
RenderFrame(&frame);
}
protected:
// The destructor is protected to prevent deletion via the interface.

View File

@ -103,8 +103,9 @@ class FakeVideoSource : public Notifier<VideoSourceInterface> {
virtual cricket::VideoCapturer* GetVideoCapturer() { return &fake_capturer_; }
virtual void Stop() {}
virtual void Restart() {}
virtual void AddSink(cricket::VideoRenderer* output) {}
virtual void RemoveSink(cricket::VideoRenderer* output) {}
virtual void AddSink(rtc::VideoSinkInterface<cricket::VideoFrame>* output) {}
virtual void RemoveSink(
rtc::VideoSinkInterface<cricket::VideoFrame>* output) {}
virtual SourceState state() const { return state_; }
virtual bool remote() const { return remote_; }
virtual const cricket::VideoOptions* options() const { return &options_; }

View File

@ -436,19 +436,21 @@ void VideoSource::Restart() {
SetState(kEnded);
return;
}
for(cricket::VideoRenderer* sink : sinks_) {
channel_manager_->AddVideoRenderer(video_capturer_.get(), sink);
for (auto* sink : sinks_) {
channel_manager_->AddVideoSink(video_capturer_.get(), sink);
}
}
void VideoSource::AddSink(cricket::VideoRenderer* output) {
void VideoSource::AddSink(
rtc::VideoSinkInterface<cricket::VideoFrame>* output) {
sinks_.push_back(output);
channel_manager_->AddVideoRenderer(video_capturer_.get(), output);
channel_manager_->AddVideoSink(video_capturer_.get(), output);
}
void VideoSource::RemoveSink(cricket::VideoRenderer* output) {
void VideoSource::RemoveSink(
rtc::VideoSinkInterface<cricket::VideoFrame>* output) {
sinks_.remove(output);
channel_manager_->RemoveVideoRenderer(video_capturer_.get(), output);
channel_manager_->RemoveVideoSink(video_capturer_.get(), output);
}
// OnStateChange listens to the ChannelManager::SignalVideoCaptureStateChange.

View File

@ -38,6 +38,7 @@
#include "talk/media/base/videocommon.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/media/base/videosinkinterface.h"
// VideoSource implements VideoSourceInterface. It owns a
// cricket::VideoCapturer and make sure the camera is started at a resolution
@ -84,8 +85,8 @@ class VideoSource : public Notifier<VideoSourceInterface>,
// |output| will be served video frames as long as the underlying capturer
// is running video frames.
virtual void AddSink(cricket::VideoRenderer* output);
virtual void RemoveSink(cricket::VideoRenderer* output);
virtual void AddSink(rtc::VideoSinkInterface<cricket::VideoFrame>* output);
virtual void RemoveSink(rtc::VideoSinkInterface<cricket::VideoFrame>* output);
protected:
VideoSource(cricket::ChannelManager* channel_manager,
@ -103,7 +104,7 @@ class VideoSource : public Notifier<VideoSourceInterface>,
rtc::scoped_ptr<cricket::VideoCapturer> video_capturer_;
rtc::scoped_ptr<cricket::VideoRenderer> frame_input_;
std::list<cricket::VideoRenderer*> sinks_;
std::list<rtc::VideoSinkInterface<cricket::VideoFrame>*> sinks_;
cricket::VideoFormat format_;
cricket::VideoOptions options_;

View File

@ -30,6 +30,7 @@
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/media/base/mediachannel.h"
#include "talk/media/base/videorenderer.h"
namespace webrtc {
@ -48,9 +49,27 @@ class VideoSourceInterface : public MediaSourceInterface {
virtual void Stop() = 0;
virtual void Restart() = 0;
// TODO(nisse): Delete these backwards compatibility wrappers after
// chrome is updated. Needed now because chrome's MockVideoSource
// defines a method with this signature (and marked with override).
// In addition, we need a dummy default implementation for the new
// AddSink/RemoveSink methods below, because they're unimplemented
// in the same class. This is ugly, but for MockVideoSource it
// doesn't really matter what these methods do, because they're not
// used.
virtual void AddSink(cricket::VideoRenderer* output) {
AddSink(static_cast<rtc::VideoSinkInterface<cricket::VideoFrame>*>(output));
}
virtual void RemoveSink(cricket::VideoRenderer* output) {
RemoveSink(
static_cast<rtc::VideoSinkInterface<cricket::VideoFrame>*>(output));
}
// Adds |output| to the source to receive frames.
virtual void AddSink(cricket::VideoRenderer* output) = 0;
virtual void RemoveSink(cricket::VideoRenderer* output) = 0;
// TODO(nisse): Delete dummy default implementation.
virtual void AddSink(rtc::VideoSinkInterface<cricket::VideoFrame>* output){};
virtual void RemoveSink(
rtc::VideoSinkInterface<cricket::VideoFrame>* output){};
virtual const cricket::VideoOptions* options() const = 0;
virtual cricket::VideoRenderer* FrameInput() = 0;

View File

@ -42,8 +42,8 @@ BEGIN_PROXY_MAP(VideoSource)
PROXY_METHOD0(cricket::VideoCapturer*, GetVideoCapturer)
PROXY_METHOD0(void, Stop)
PROXY_METHOD0(void, Restart)
PROXY_METHOD1(void, AddSink, cricket::VideoRenderer*)
PROXY_METHOD1(void, RemoveSink, cricket::VideoRenderer*)
PROXY_METHOD1(void, AddSink, rtc::VideoSinkInterface<cricket::VideoFrame>*)
PROXY_METHOD1(void, RemoveSink, rtc::VideoSinkInterface<cricket::VideoFrame>*)
PROXY_CONSTMETHOD0(const cricket::VideoOptions*, options)
PROXY_METHOD0(cricket::VideoRenderer*, FrameInput)
PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)

View File

@ -30,7 +30,6 @@
#include <algorithm>
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/videorenderer.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
@ -287,32 +286,32 @@ bool CaptureManager::RestartVideoCapture(
return true;
}
bool CaptureManager::AddVideoRenderer(VideoCapturer* video_capturer,
VideoRenderer* video_renderer) {
void CaptureManager::AddVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!video_capturer || !video_renderer) {
return false;
// TODO(nisse): Do we really need to tolerate NULL inputs?
if (!video_capturer || !sink) {
return;
}
CaptureRenderAdapter* adapter = GetAdapter(video_capturer);
if (!adapter) {
return false;
return;
}
adapter->AddRenderer(video_renderer);
return true;
adapter->AddSink(sink);
}
bool CaptureManager::RemoveVideoRenderer(VideoCapturer* video_capturer,
VideoRenderer* video_renderer) {
void CaptureManager::RemoveVideoSink(
VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!video_capturer || !video_renderer) {
return false;
if (!video_capturer || !sink) {
return;
}
CaptureRenderAdapter* adapter = GetAdapter(video_capturer);
if (!adapter) {
return false;
return;
}
adapter->RemoveRenderer(video_renderer);
return true;
adapter->RemoveSink(sink);
}
bool CaptureManager::IsCapturerRegistered(VideoCapturer* video_capturer) const {

View File

@ -79,10 +79,10 @@ class CaptureManager : public sigslot::has_slots<> {
const VideoFormat& desired_format,
RestartOptions options);
virtual bool AddVideoRenderer(VideoCapturer* video_capturer,
VideoRenderer* video_renderer);
virtual bool RemoveVideoRenderer(VideoCapturer* video_capturer,
VideoRenderer* video_renderer);
virtual void AddVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink);
virtual void RemoveVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink);
sigslot::repeater2<VideoCapturer*, CaptureState> SignalCapturerStateChange;

View File

@ -89,12 +89,6 @@ class CaptureManagerTest : public ::testing::Test, public sigslot::has_slots<> {
};
// Incorrect use cases.
TEST_F(CaptureManagerTest, InvalidCallOrder) {
// Capturer must be registered before any of these calls.
EXPECT_FALSE(capture_manager_.AddVideoRenderer(&video_capturer_,
&video_renderer_));
}
TEST_F(CaptureManagerTest, InvalidAddingRemoving) {
EXPECT_FALSE(capture_manager_.StopVideoCapture(&video_capturer_,
cricket::VideoFormat()));
@ -102,7 +96,8 @@ TEST_F(CaptureManagerTest, InvalidAddingRemoving) {
format_vga_));
EXPECT_EQ_WAIT(cricket::CS_RUNNING, capture_state(), kMsCallbackWait);
EXPECT_EQ(1, callback_count());
EXPECT_FALSE(capture_manager_.AddVideoRenderer(&video_capturer_, NULL));
// NULL argument currently allowed, and does nothing.
capture_manager_.AddVideoSink(&video_capturer_, NULL);
EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_, format_vga_));
}
@ -112,8 +107,7 @@ TEST_F(CaptureManagerTest, KeepFirstResolutionHigh) {
format_vga_));
EXPECT_EQ_WAIT(cricket::CS_RUNNING, capture_state(), kMsCallbackWait);
EXPECT_EQ(1, callback_count());
EXPECT_TRUE(capture_manager_.AddVideoRenderer(&video_capturer_,
&video_renderer_));
capture_manager_.AddVideoSink(&video_capturer_, &video_renderer_);
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(1, NumFramesRendered());
// Renderer should be fed frames with the resolution of format_vga_.
@ -142,8 +136,7 @@ TEST_F(CaptureManagerTest, KeepFirstResolutionLow) {
format_qvga_));
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_vga_));
EXPECT_TRUE(capture_manager_.AddVideoRenderer(&video_capturer_,
&video_renderer_));
capture_manager_.AddVideoSink(&video_capturer_, &video_renderer_);
EXPECT_EQ_WAIT(1, callback_count(), kMsCallbackWait);
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(1, NumFramesRendered());
@ -164,8 +157,7 @@ TEST_F(CaptureManagerTest, MultipleStartStops) {
format_qvga_));
EXPECT_EQ_WAIT(cricket::CS_RUNNING, capture_state(), kMsCallbackWait);
EXPECT_EQ(1, callback_count());
EXPECT_TRUE(capture_manager_.AddVideoRenderer(&video_capturer_,
&video_renderer_));
capture_manager_.AddVideoSink(&video_capturer_, &video_renderer_);
// Ensure that a frame can be captured when two start calls have been made.
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(1, NumFramesRendered());
@ -189,8 +181,7 @@ TEST_F(CaptureManagerTest, MultipleStartStops) {
TEST_F(CaptureManagerTest, TestForceRestart) {
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_qvga_));
EXPECT_TRUE(capture_manager_.AddVideoRenderer(&video_capturer_,
&video_renderer_));
capture_manager_.AddVideoSink(&video_capturer_, &video_renderer_);
EXPECT_EQ_WAIT(1, callback_count(), kMsCallbackWait);
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(1, NumFramesRendered());
@ -209,8 +200,7 @@ TEST_F(CaptureManagerTest, TestForceRestart) {
TEST_F(CaptureManagerTest, TestRequestRestart) {
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_vga_));
EXPECT_TRUE(capture_manager_.AddVideoRenderer(&video_capturer_,
&video_renderer_));
capture_manager_.AddVideoSink(&video_capturer_, &video_renderer_);
EXPECT_EQ_WAIT(1, callback_count(), kMsCallbackWait);
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(1, NumFramesRendered());

View File

@ -43,7 +43,7 @@ CaptureRenderAdapter::~CaptureRenderAdapter() {
// outstanding calls to OnVideoFrame will be done when this is done, and no
// more calls will be serviced by this.
// We do this explicitly instead of just letting the has_slots<> destructor
// take care of it because we need to do this *before* video_renderers_ is
// take care of it because we need to do this *before* sinks_ is
// cleared by the destructor; otherwise we could mess with it while
// OnVideoFrame is running.
// We *don't* take capture_crit_ here since it could deadlock with the lock
@ -61,32 +61,23 @@ CaptureRenderAdapter* CaptureRenderAdapter::Create(
return return_value;
}
void CaptureRenderAdapter::AddRenderer(VideoRenderer* video_renderer) {
RTC_DCHECK(video_renderer);
void CaptureRenderAdapter::AddSink(rtc::VideoSinkInterface<VideoFrame>* sink) {
RTC_DCHECK(sink);
rtc::CritScope cs(&capture_crit_);
// This implements set semantics, the same renderer can only be
// added once.
// TODO(nisse): Is this really needed?
if (std::find(video_renderers_.begin(), video_renderers_.end(),
video_renderer) == video_renderers_.end())
video_renderers_.push_back(video_renderer);
if (std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end())
sinks_.push_back(sink);
}
void CaptureRenderAdapter::RemoveRenderer(VideoRenderer* video_renderer) {
RTC_DCHECK(video_renderer);
void CaptureRenderAdapter::RemoveSink(
rtc::VideoSinkInterface<VideoFrame>* sink) {
RTC_DCHECK(sink);
rtc::CritScope cs(&capture_crit_);
// TODO(nisse): Switch to using std::list, and use its remove
// method. And similarly in VideoTrackRenderers, which this class
// mostly duplicates.
for (VideoRenderers::iterator iter = video_renderers_.begin();
iter != video_renderers_.end(); ++iter) {
if (video_renderer == *iter) {
video_renderers_.erase(iter);
break;
}
}
sinks_.erase(std::remove(sinks_.begin(), sinks_.end(), sink), sinks_.end());
}
void CaptureRenderAdapter::Init() {
@ -98,12 +89,12 @@ void CaptureRenderAdapter::Init() {
void CaptureRenderAdapter::OnVideoFrame(VideoCapturer* capturer,
const VideoFrame* video_frame) {
rtc::CritScope cs(&capture_crit_);
if (video_renderers_.empty()) {
if (sinks_.empty()) {
return;
}
for (auto* renderer : video_renderers_)
renderer->RenderFrame(video_frame);
for (auto* sink : sinks_)
sink->OnFrame(*video_frame);
}
} // namespace cricket

View File

@ -39,34 +39,32 @@
#include "talk/media/base/videocapturer.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/media/base/videosinkinterface.h"
namespace cricket {
class VideoCapturer;
class VideoProcessor;
class VideoRenderer;
class CaptureRenderAdapter : public sigslot::has_slots<> {
public:
static CaptureRenderAdapter* Create(VideoCapturer* video_capturer);
~CaptureRenderAdapter();
void AddRenderer(VideoRenderer* video_renderer);
void RemoveRenderer(VideoRenderer* video_renderer);
void AddSink(rtc::VideoSinkInterface<VideoFrame>* sink);
void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink);
VideoCapturer* video_capturer() { return video_capturer_; }
private:
// Just pointers since ownership is not handed over to this class.
typedef std::vector<VideoRenderer*> VideoRenderers;
explicit CaptureRenderAdapter(VideoCapturer* video_capturer);
void Init();
// Callback for frames received from the capturer.
void OnVideoFrame(VideoCapturer* capturer, const VideoFrame* video_frame);
VideoRenderers video_renderers_;
// Just pointers since ownership is not handed over to this class.
std::vector<rtc::VideoSinkInterface<VideoFrame>*> sinks_;
VideoCapturer* video_capturer_;
// Critical section synchronizing the capture thread.
rtc::CriticalSection capture_crit_;

View File

@ -37,14 +37,10 @@ class FakeCaptureManager : public CaptureManager {
FakeCaptureManager() {}
~FakeCaptureManager() {}
virtual bool AddVideoRenderer(VideoCapturer* video_capturer,
VideoRenderer* video_renderer) {
return true;
}
virtual bool RemoveVideoRenderer(VideoCapturer* video_capturer,
VideoRenderer* video_renderer) {
return true;
}
void AddVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink) override {}
void RemoveVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink) override {}
};
} // namespace cricket

View File

@ -28,16 +28,23 @@
#ifndef TALK_MEDIA_BASE_VIDEORENDERER_H_
#define TALK_MEDIA_BASE_VIDEORENDERER_H_
#include "webrtc/media/base/videosinkinterface.h"
namespace cricket {
class VideoFrame;
// Abstract interface for rendering VideoFrames.
class VideoRenderer {
class VideoRenderer : public rtc::VideoSinkInterface<VideoFrame> {
public:
virtual ~VideoRenderer() {}
// Called when a new frame is available for display.
virtual bool RenderFrame(const VideoFrame *frame) = 0;
// Intended to replace RenderFrame.
void OnFrame(const cricket::VideoFrame& frame) override {
// Unused return value
RenderFrame(&frame);
}
};
} // namespace cricket

View File

@ -1254,7 +1254,7 @@ bool WebRtcVideoChannel2::SetRenderer(uint32_t ssrc, VideoRenderer* renderer) {
return false;
}
it->second->SetRenderer(renderer);
it->second->SetSink(renderer);
return true;
}
@ -2214,7 +2214,7 @@ WebRtcVideoChannel2::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream(
config_(config),
external_decoder_factory_(external_decoder_factory),
disable_prerenderer_smoothing_(disable_prerenderer_smoothing),
renderer_(NULL),
sink_(NULL),
last_width_(-1),
last_height_(-1),
first_frame_timestamp_(-1),
@ -2407,7 +2407,7 @@ void WebRtcVideoChannel2::WebRtcVideoReceiveStream::ClearDecoders(
void WebRtcVideoChannel2::WebRtcVideoReceiveStream::RenderFrame(
const webrtc::VideoFrame& frame,
int time_to_render_ms) {
rtc::CritScope crit(&renderer_lock_);
rtc::CritScope crit(&sink_lock_);
if (first_frame_timestamp_ < 0)
first_frame_timestamp_ = frame.timestamp();
@ -2419,8 +2419,8 @@ void WebRtcVideoChannel2::WebRtcVideoReceiveStream::RenderFrame(
if (frame.ntp_time_ms() > 0)
estimated_remote_start_ntp_time_ms_ = frame.ntp_time_ms() - elapsed_time_ms;
if (renderer_ == NULL) {
LOG(LS_WARNING) << "VideoReceiveStream not connected to a VideoRenderer.";
if (sink_ == NULL) {
LOG(LS_WARNING) << "VideoReceiveStream not connected to a VideoSink.";
return;
}
@ -2430,7 +2430,7 @@ void WebRtcVideoChannel2::WebRtcVideoReceiveStream::RenderFrame(
const WebRtcVideoFrame render_frame(
frame.video_frame_buffer(),
frame.render_time_ms() * rtc::kNumNanosecsPerMillisec, frame.rotation());
renderer_->RenderFrame(&render_frame);
sink_->OnFrame(render_frame);
}
bool WebRtcVideoChannel2::WebRtcVideoReceiveStream::IsTextureSupported() const {
@ -2446,10 +2446,10 @@ bool WebRtcVideoChannel2::WebRtcVideoReceiveStream::IsDefaultStream() const {
return default_stream_;
}
void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetRenderer(
cricket::VideoRenderer* renderer) {
rtc::CritScope crit(&renderer_lock_);
renderer_ = renderer;
void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetSink(
rtc::VideoSinkInterface<cricket::VideoFrame>* sink) {
rtc::CritScope crit(&sink_lock_);
sink_ = sink;
}
std::string
@ -2483,7 +2483,7 @@ WebRtcVideoChannel2::WebRtcVideoReceiveStream::GetVideoReceiverInfo() {
info.framerate_output = stats.render_frame_rate;
{
rtc::CritScope frame_cs(&renderer_lock_);
rtc::CritScope frame_cs(&sink_lock_);
info.frame_width = last_width_;
info.frame_height = last_height_;
info.capture_start_ntp_time_ms = estimated_remote_start_ntp_time_ms_;

View File

@ -40,6 +40,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/media/base/videosinkinterface.h"
#include "webrtc/call.h"
#include "webrtc/transport.h"
#include "webrtc/video_frame.h"
@ -412,7 +413,7 @@ class WebRtcVideoChannel2 : public rtc::MessageHandler,
bool SmoothsRenderedFrames() const override;
bool IsDefaultStream() const;
void SetRenderer(cricket::VideoRenderer* renderer);
void SetSink(rtc::VideoSinkInterface<cricket::VideoFrame>* sink);
VideoReceiverInfo GetVideoReceiverInfo();
@ -450,18 +451,18 @@ class WebRtcVideoChannel2 : public rtc::MessageHandler,
const bool disable_prerenderer_smoothing_;
rtc::CriticalSection renderer_lock_;
cricket::VideoRenderer* renderer_ GUARDED_BY(renderer_lock_);
int last_width_ GUARDED_BY(renderer_lock_);
int last_height_ GUARDED_BY(renderer_lock_);
rtc::CriticalSection sink_lock_;
rtc::VideoSinkInterface<cricket::VideoFrame>* sink_ GUARDED_BY(sink_lock_);
int last_width_ GUARDED_BY(sink_lock_);
int last_height_ GUARDED_BY(sink_lock_);
// Expands remote RTP timestamps to int64_t to be able to estimate how long
// the stream has been running.
rtc::TimestampWrapAroundHandler timestamp_wraparound_handler_
GUARDED_BY(renderer_lock_);
int64_t first_frame_timestamp_ GUARDED_BY(renderer_lock_);
GUARDED_BY(sink_lock_);
int64_t first_frame_timestamp_ GUARDED_BY(sink_lock_);
// Start NTP time is estimated as current remote NTP time (estimated from
// RTCP) minus the elapsed time, as soon as remote NTP time is available.
int64_t estimated_remote_start_ntp_time_ms_ GUARDED_BY(renderer_lock_);
int64_t estimated_remote_start_ntp_time_ms_ GUARDED_BY(sink_lock_);
};
void Construct(webrtc::Call* call, WebRtcVideoEngine2* engine);

View File

@ -500,18 +500,20 @@ bool ChannelManager::RestartVideoCapture(
video_capturer, previous_format, desired_format, options));
}
bool ChannelManager::AddVideoRenderer(
VideoCapturer* capturer, VideoRenderer* renderer) {
return initialized_ && worker_thread_->Invoke<bool>(
Bind(&CaptureManager::AddVideoRenderer,
capture_manager_.get(), capturer, renderer));
void ChannelManager::AddVideoSink(
VideoCapturer* capturer, rtc::VideoSinkInterface<VideoFrame>* sink) {
if (initialized_)
worker_thread_->Invoke<void>(
Bind(&CaptureManager::AddVideoSink,
capture_manager_.get(), capturer, sink));
}
bool ChannelManager::RemoveVideoRenderer(
VideoCapturer* capturer, VideoRenderer* renderer) {
return initialized_ && worker_thread_->Invoke<bool>(
Bind(&CaptureManager::RemoveVideoRenderer,
capture_manager_.get(), capturer, renderer));
void ChannelManager::RemoveVideoSink(
VideoCapturer* capturer, rtc::VideoSinkInterface<VideoFrame>* sink) {
if (initialized_)
worker_thread_->Invoke<void>(
Bind(&CaptureManager::RemoveVideoSink,
capture_manager_.get(), capturer, sink));
}
bool ChannelManager::IsScreencastRunning() const {

View File

@ -156,8 +156,10 @@ class ChannelManager : public rtc::MessageHandler,
const VideoFormat& desired_format,
CaptureManager::RestartOptions options);
bool AddVideoRenderer(VideoCapturer* capturer, VideoRenderer* renderer);
bool RemoveVideoRenderer(VideoCapturer* capturer, VideoRenderer* renderer);
virtual void AddVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink);
virtual void RemoveVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink);
bool IsScreencastRunning() const;
// The operations below occur on the main thread.

4
webrtc/media/OWNERS Normal file
View File

@ -0,0 +1,4 @@
mflodman@webrtc.org
pbos@webrtc.org
pthatcher@webrtc.org
solenberg@webrtc.org

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MEDIA_BASE_VIDEOSINKINTERFACE_H_
#define WEBRTC_MEDIA_BASE_VIDEOSINKINTERFACE_H_
namespace rtc {
// Generic interface for any class that receives a stream of video frames.
// Intended to replace the earlier per-layer sink abstractions
// (webrtc::VideoRendererInterface and cricket::VideoRenderer).
// Templated on the frame type so it can be reused across frame classes.
template <typename VideoFrameT>
class VideoSinkInterface {
 public:
  // Delivers one video frame to the sink. Called once per frame by the
  // source feeding this sink.
  virtual void OnFrame(const VideoFrameT& frame) = 0;

 protected:
  // Protected, non-virtual destructor: sinks are not owned (or deleted)
  // through this interface pointer.
  ~VideoSinkInterface() {}
};
} // namespace rtc
#endif // WEBRTC_MEDIA_BASE_VIDEOSINKINTERFACE_H_