diff --git a/talk/app/webrtc/peerconnectioninterface_unittest.cc b/talk/app/webrtc/peerconnectioninterface_unittest.cc
index 4f3f18520a..e47e5b17f8 100644
--- a/talk/app/webrtc/peerconnectioninterface_unittest.cc
+++ b/talk/app/webrtc/peerconnectioninterface_unittest.cc
@@ -179,6 +179,7 @@ class MockPeerConnectionObserver : public PeerConnectionObserver {
     EXPECT_EQ(pc_->ice_gathering_state(), new_state);
   }
   virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {
+    EXPECT_NE(PeerConnectionInterface::kIceGatheringNew, pc_->ice_gathering_state());
diff --git a/talk/base/asyncsocket.h b/talk/base/asyncsocket.h
index 97859a7527..2854558adc 100644
--- a/talk/base/asyncsocket.h
+++ b/talk/base/asyncsocket.h
@@ -27,6 +27,7 @@
 #ifndef TALK_BASE_ASYNCSOCKET_H_
 #define TALK_BASE_ASYNCSOCKET_H_
 
+#ifndef __native_client__
 #include "talk/base/common.h"
 #include "talk/base/sigslot.h"
@@ -138,4 +139,5 @@ class AsyncSocketAdapter : public AsyncSocket, public sigslot::has_slots<> {
 
 }  // namespace talk_base
 
+#endif  // __native_client__
 #endif  // TALK_BASE_ASYNCSOCKET_H_
diff --git a/talk/base/fileutils.cc b/talk/base/fileutils.cc
index bbe1c36e91..d73997afe7 100644
--- a/talk/base/fileutils.cc
+++ b/talk/base/fileutils.cc
@@ -297,7 +297,6 @@ bool CreateUniqueFile(Pathname& path, bool create_empty) {
   return true;
 }
 
-#ifdef HAS_PLATFORM_FILE
 // Taken from Chromium's base/platform_file_*.cc.
 // TODO(grunell): Remove when Chromium has started to use AEC in each source.
 // http://crbug.com/264611.
@@ -321,6 +320,5 @@ bool ClosePlatformFile(PlatformFile file) {
   return close(file);
 #endif
 }
-#endif  // HAS_PLATFORM_FILE
 
 }  // namespace talk_base
diff --git a/talk/base/fileutils.h b/talk/base/fileutils.h
index 3d68af2cb9..fba0d000b0 100644
--- a/talk/base/fileutils.h
+++ b/talk/base/fileutils.h
@@ -458,18 +458,14 @@ bool CreateUniqueFile(Pathname& path, bool create_empty);
 // TODO(grunell): Remove when Chromium has started to use AEC in each source.
 // http://crbug.com/264611.
 #if defined(WIN32)
-#define HAS_PLATFORM_FILE 1
 typedef HANDLE PlatformFile;
 const PlatformFile kInvalidPlatformFileValue = INVALID_HANDLE_VALUE;
-#elif defined(POSIX) && !defined(__native_client__)
-#define HAS_PLATFORM_FILE 1
+#elif defined(POSIX)
 typedef int PlatformFile;
 const PlatformFile kInvalidPlatformFileValue = -1;
 #endif
 
-#ifdef HAS_PLATFORM_FILE
 FILE* FdopenPlatformFileForWriting(PlatformFile file);
 bool ClosePlatformFile(PlatformFile file);
-#endif
 
 }  // namespace talk_base
diff --git a/talk/base/socket.h b/talk/base/socket.h
index 47f55225de..56e3ebceea 100644
--- a/talk/base/socket.h
+++ b/talk/base/socket.h
@@ -2,32 +2,40 @@
  * libjingle
  * Copyright 2004--2005, Google Inc.
  *
- * Redistribution and use in source and binary forms, with or without 
+ * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
- * 1. Redistributions of source code must retain the above copyright notice, 
+ * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products 
+ * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
 #ifndef TALK_BASE_SOCKET_H__
 #define TALK_BASE_SOCKET_H__
 
+#if defined(__native_client__)
+namespace talk_base {
+// These should never be defined or instantiated.
+class Socket;
+class AsyncSocket;
+}  // namespace talk_base
+#else
+
 #include
 
 #ifdef POSIX
@@ -199,4 +207,5 @@ class Socket {
 
 }  // namespace talk_base
 
+#endif  // !__native_client__
 #endif  // TALK_BASE_SOCKET_H__
diff --git a/talk/libjingle.gyp b/talk/libjingle.gyp
index e77e48a3e3..38a165be80 100755
--- a/talk/libjingle.gyp
+++ b/talk/libjingle.gyp
@@ -509,6 +509,8 @@
         'xmpp/pubsub_task.h',
         'xmpp/pubsubclient.cc',
         'xmpp/pubsubclient.h',
+        'xmpp/pubsubstateclient.cc',
+        'xmpp/pubsubstateclient.h',
         'xmpp/pubsubtasks.cc',
         'xmpp/pubsubtasks.h',
         'xmpp/receivetask.cc',
diff --git a/talk/media/base/videoadapter.cc b/talk/media/base/videoadapter.cc
index 3cd6cac96c..588f1950e1 100644
--- a/talk/media/base/videoadapter.cc
+++ b/talk/media/base/videoadapter.cc
@@ -36,9 +36,9 @@ namespace cricket {
 
 // TODO(fbarchard): Make downgrades settable
 static const int kMaxCpuDowngrades = 2;  // Downgrade at most 2 times for CPU.
-// The number of cpu samples to require before adapting. This value depends on
-// the cpu monitor sampling frequency being 2000ms.
-static const int kCpuLoadMinSamples = 3;
+// The number of milliseconds of data to require before acting on cpu sampling
+// information.
+static const size_t kCpuLoadMinSampleTime = 5000;
 // The amount of weight to give to each new cpu load sample. The lower the
 // value, the slower we'll adapt to changing cpu conditions.
 static const float kCpuLoadWeightCoefficient = 0.4f;
@@ -165,8 +165,8 @@ VideoAdapter::VideoAdapter()
       frames_(0),
       adapted_frames_(0),
       adaption_changes_(0),
-      previous_width_(0),
-      previous_height_(0),
+      previous_width(0),
+      previous_height(0),
       black_output_(false),
       is_black_(false),
       interval_next_frame_(0) {
@@ -240,7 +240,7 @@ int VideoAdapter::GetOutputNumPixels() const {
 // TODO(fbarchard): Add AdaptFrameRate function that only drops frames but
 // not resolution.
 bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
-                              VideoFrame** out_frame) {
+                              const VideoFrame** out_frame) {
   talk_base::CritScope cs(&critical_section_);
   if (!in_frame || !out_frame) {
     return false;
   }
@@ -306,8 +306,8 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
   // resolution changes as well. Consider dropping the statistics into their
   // own class which could be queried publically.
   bool changed = false;
-  if (previous_width_ && (previous_width_ != (*out_frame)->GetWidth() ||
-      previous_height_ != (*out_frame)->GetHeight())) {
+  if (previous_width && (previous_width != (*out_frame)->GetWidth() ||
+      previous_height != (*out_frame)->GetHeight())) {
     show = true;
     ++adaption_changes_;
     changed = true;
@@ -325,8 +325,8 @@ bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
                  << "x" << (*out_frame)->GetHeight()
                  << " Changed: " << (changed ? "true" : "false");
   }
-  previous_width_ = (*out_frame)->GetWidth();
-  previous_height_ = (*out_frame)->GetHeight();
+  previous_width = (*out_frame)->GetWidth();
+  previous_height = (*out_frame)->GetHeight();
 
   return true;
 }
@@ -382,8 +382,7 @@ CoordinatedVideoAdapter::CoordinatedVideoAdapter()
       view_adaptation_(true),
       view_switch_(false),
      cpu_downgrade_count_(0),
-      cpu_load_min_samples_(kCpuLoadMinSamples),
-      cpu_load_num_samples_(0),
+      cpu_adapt_wait_time_(0),
      high_system_threshold_(kHighSystemCpuThreshold),
      low_system_threshold_(kLowSystemCpuThreshold),
      process_threshold_(kProcessCpuThreshold),
@@ -553,18 +552,22 @@ void CoordinatedVideoAdapter::OnCpuLoadUpdated(
   // we'll still calculate this information, in case smoothing is later enabled.
   system_load_average_ = kCpuLoadWeightCoefficient * system_load +
       (1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
-  ++cpu_load_num_samples_;
   if (cpu_smoothing_) {
     system_load = system_load_average_;
   }
+  // If we haven't started taking samples yet, wait until we have at least
+  // the correct number of samples per the wait time.
+  if (cpu_adapt_wait_time_ == 0) {
+    cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
+  }
   AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
                                         process_load, system_load);
   // Make sure we're not adapting too quickly.
   if (request != KEEP) {
-    if (cpu_load_num_samples_ < cpu_load_min_samples_) {
+    if (talk_base::TimeIsLater(talk_base::Time(),
+                               cpu_adapt_wait_time_)) {
       LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
-                      << (cpu_load_min_samples_ - cpu_load_num_samples_)
-                      << " more samples";
+                      << talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms";
       request = KEEP;
     }
   }
@@ -685,7 +688,7 @@ bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
   if (changed) {
     // When any adaptation occurs, historic CPU load levels are no longer
     // accurate. Clear out our state so we can re-learn at the new normal.
-    cpu_load_num_samples_ = 0;
+    cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
     system_load_average_ = kCpuLoadInitialAverage;
   }
diff --git a/talk/media/base/videoadapter.h b/talk/media/base/videoadapter.h
index 272df72de6..38a8c9d63a 100644
--- a/talk/media/base/videoadapter.h
+++ b/talk/media/base/videoadapter.h
@@ -62,7 +62,7 @@ class VideoAdapter {
   // successfully. Return false otherwise.
   // output_frame_ is owned by the VideoAdapter that has the best knowledge on
   // the output frame.
-  bool AdaptFrame(const VideoFrame* in_frame, VideoFrame** out_frame);
+  bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame);
 
   void set_scale_third(bool enable) {
     LOG(LS_INFO) << "Video Adapter third scaling is now "
@@ -90,8 +90,8 @@ class VideoAdapter {
   int frames_;             // Number of input frames.
   int adapted_frames_;     // Number of frames scaled.
   int adaption_changes_;   // Number of changes in scale factor.
-  size_t previous_width_;  // Previous adapter output width.
-  size_t previous_height_;  // Previous adapter output height.
+  size_t previous_width;  // Previous adapter output width.
+  size_t previous_height;  // Previous adapter output height.
   bool black_output_;  // Flag to tell if we need to black output_frame_.
   bool is_black_;  // Flag to tell if output_frame_ is currently black.
   int64 interval_next_frame_;
@@ -149,15 +149,14 @@ class CoordinatedVideoAdapter
 
   // When the video is decreased, set the waiting time for CPU adaptation to
   // decrease video again.
-  void set_cpu_load_min_samples(int cpu_load_min_samples) {
-    if (cpu_load_min_samples_ != cpu_load_min_samples) {
-      LOG(LS_INFO) << "VAdapt Change Cpu Adapt Min Samples from: "
-                   << cpu_load_min_samples_ << " to "
-                   << cpu_load_min_samples;
-      cpu_load_min_samples_ = cpu_load_min_samples;
+  void set_cpu_adapt_wait_time(uint32 cpu_adapt_wait_time) {
+    if (cpu_adapt_wait_time_ != static_cast<int>(cpu_adapt_wait_time)) {
+      LOG(LS_INFO) << "VAdapt Change Cpu Adapt Wait Time from: "
+                   << cpu_adapt_wait_time_ << " to "
+                   << cpu_adapt_wait_time;
+      cpu_adapt_wait_time_ = static_cast<int>(cpu_adapt_wait_time);
     }
   }
-  int cpu_load_min_samples() const { return cpu_load_min_samples_; }
   // CPU system load high threshold for reducing resolution. e.g. 0.85f
   void set_high_system_threshold(float high_system_threshold) {
     ASSERT(high_system_threshold <= 1.0f);
@@ -221,8 +220,7 @@ class CoordinatedVideoAdapter
   bool view_adaptation_;  // True if view adaptation is enabled.
   bool view_switch_;  // True if view switch is enabled.
   int cpu_downgrade_count_;
-  int cpu_load_min_samples_;
-  int cpu_load_num_samples_;
+  int cpu_adapt_wait_time_;
   // cpu system load thresholds relative to max cpus.
   float high_system_threshold_;
   float low_system_threshold_;
diff --git a/talk/media/base/videocapturer.cc b/talk/media/base/videocapturer.cc
index b2f41dcfb4..355cc64dd8 100644
--- a/talk/media/base/videocapturer.cc
+++ b/talk/media/base/videocapturer.cc
@@ -475,25 +475,14 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
                   << desired_width << " x " << desired_height;
     return;
   }
-
-  VideoFrame* adapted_frame = &i420_frame;
-  if (!SignalAdaptFrame.is_empty() && !IsScreencast()) {
-    VideoFrame* out_frame = NULL;
-    SignalAdaptFrame(this, adapted_frame, &out_frame);
-    if (!out_frame) {
-      return;  // VideoAdapter dropped the frame.
-    }
-    adapted_frame = out_frame;
-  }
-
-  if (!muted_ && !ApplyProcessors(adapted_frame)) {
+  if (!muted_ && !ApplyProcessors(&i420_frame)) {
     // Processor dropped the frame.
     return;
   }
   if (muted_) {
-    adapted_frame->SetToBlack();
+    i420_frame.SetToBlack();
   }
-  SignalVideoFrame(this, adapted_frame);
+  SignalVideoFrame(this, &i420_frame);
 #endif  // VIDEO_FRAME_NAME
 }
diff --git a/talk/media/base/videocapturer.h b/talk/media/base/videocapturer.h
index 15c016fd11..933fc82500 100644
--- a/talk/media/base/videocapturer.h
+++ b/talk/media/base/videocapturer.h
@@ -255,14 +255,7 @@ class VideoCapturer
   // Signal the captured frame to downstream.
   sigslot::signal2<VideoCapturer*, const CapturedFrame*> SignalFrameCaptured;
-  // A VideoAdapter should be hooked up to SignalAdaptFrame which will be
-  // called before forwarding the frame to SignalVideoFrame. The parameters
-  // are this capturer instance, the input video frame and output frame
-  // pointer, respectively.
-  sigslot::signal3<VideoCapturer*, const VideoFrame*, VideoFrame**>
-      SignalAdaptFrame;
-  // Signal the captured and possibly adapted frame to downstream consumers
-  // such as the encoder.
+  // Signal the captured frame converted to I420 to downstream.
   sigslot::signal2<VideoCapturer*, const VideoFrame*> SignalVideoFrame;
diff --git a/talk/media/webrtc/webrtcvideoengine.cc b/talk/media/webrtc/webrtcvideoengine.cc
index 8c5cda9eb9..ca0ed414c8 100644
--- a/talk/media/webrtc/webrtcvideoengine.cc
+++ b/talk/media/webrtc/webrtcvideoengine.cc
@@ -583,12 +583,13 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
       external_capture_(external_capture),
       capturer_updated_(false),
       interval_(0),
+      video_adapter_(new CoordinatedVideoAdapter),
       cpu_monitor_(cpu_monitor) {
-    overuse_observer_.reset(new WebRtcOveruseObserver(&video_adapter_));
-    SignalCpuAdaptationUnable.repeat(video_adapter_.SignalCpuAdaptationUnable);
+    overuse_observer_.reset(new WebRtcOveruseObserver(video_adapter_.get()));
+    SignalCpuAdaptationUnable.repeat(video_adapter_->SignalCpuAdaptationUnable);
     if (cpu_monitor) {
       cpu_monitor->SignalUpdate.connect(
-          &video_adapter_, &CoordinatedVideoAdapter::OnCpuLoadUpdated);
+          video_adapter_.get(), &CoordinatedVideoAdapter::OnCpuLoadUpdated);
     }
   }
@@ -598,7 +599,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
   bool sending() const { return sending_; }
   void set_muted(bool on) {
     // TODO(asapersson): add support.
-    // video_adapter_.SetBlackOutput(on);
+    // video_adapter_->SetBlackOutput(on);
     muted_ = on;
   }
   bool muted() {return muted_; }
@@ -613,7 +614,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
     if (video_format_ != cricket::VideoFormat()) {
       interval_ = video_format_.interval;
     }
-    video_adapter_.OnOutputFormatRequest(video_format_);
+    video_adapter_->OnOutputFormatRequest(video_format_);
   }
   void set_interval(int64 interval) {
     if (video_format() == cricket::VideoFormat()) {
@@ -626,13 +627,17 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
     VideoFormat format(codec.width, codec.height,
                        VideoFormat::FpsToInterval(codec.maxFramerate),
                        FOURCC_I420);
-    if (video_adapter_.output_format().IsSize0x0()) {
-      video_adapter_.SetOutputFormat(format);
+    if (video_adapter_->output_format().IsSize0x0()) {
+      video_adapter_->SetOutputFormat(format);
     }
   }
+  bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame) {
+    *out_frame = NULL;
+    return video_adapter_->AdaptFrame(in_frame, out_frame);
+  }
   int CurrentAdaptReason() const {
-    return video_adapter_.adapt_reason();
+    return video_adapter_->adapt_reason();
   }
   webrtc::CpuOveruseObserver* overuse_observer() {
     return overuse_observer_.get();
@@ -669,51 +674,40 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
       // be zero, and all frames may be dropped.
      // Consider fixing this by having video_adapter keep a pointer to the
      // video capturer.
-      video_adapter_.SetInputFormat(*capture_format);
+      video_adapter_->SetInputFormat(*capture_format);
     }
-    // TODO(thorcarpenter): When the adapter supports "only frame dropping"
-    // mode, also hook it up to screencast capturers.
-    video_capturer->SignalAdaptFrame.connect(
-        this, &WebRtcVideoChannelSendInfo::AdaptFrame);
   }
 }
 
-  CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; }
-
-  void AdaptFrame(VideoCapturer* capturer, const VideoFrame* input,
-                  VideoFrame** adapted) {
-    video_adapter_.AdaptFrame(input, adapted);
-  }
-
   void ApplyCpuOptions(const VideoOptions& options) {
     bool cpu_adapt, cpu_smoothing, adapt_third;
     float low, med, high;
     if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
-      video_adapter_.set_cpu_adaptation(cpu_adapt);
+      video_adapter_->set_cpu_adaptation(cpu_adapt);
     }
     if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
-      video_adapter_.set_cpu_smoothing(cpu_smoothing);
+      video_adapter_->set_cpu_smoothing(cpu_smoothing);
     }
     if (options.process_adaptation_threshhold.Get(&med)) {
-      video_adapter_.set_process_threshold(med);
+      video_adapter_->set_process_threshold(med);
     }
     if (options.system_low_adaptation_threshhold.Get(&low)) {
-      video_adapter_.set_low_system_threshold(low);
+      video_adapter_->set_low_system_threshold(low);
     }
     if (options.system_high_adaptation_threshhold.Get(&high)) {
-      video_adapter_.set_high_system_threshold(high);
+      video_adapter_->set_high_system_threshold(high);
     }
     if (options.video_adapt_third.Get(&adapt_third)) {
-      video_adapter_.set_scale_third(adapt_third);
+      video_adapter_->set_scale_third(adapt_third);
     }
   }
 
   void SetCpuOveruseDetection(bool enable) {
     if (cpu_monitor_ && enable) {
-      cpu_monitor_->SignalUpdate.disconnect(&video_adapter_);
+      cpu_monitor_->SignalUpdate.disconnect(video_adapter_.get());
     }
     overuse_observer_->Enable(enable);
-    video_adapter_.set_cpu_adaptation(enable);
+    video_adapter_->set_cpu_adaptation(enable);
   }
 
   void ProcessFrame(const VideoFrame& original_frame, bool mute,
@@ -767,7 +761,7 @@ class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
 
   int64 interval_;
 
-  CoordinatedVideoAdapter video_adapter_;
+  talk_base::scoped_ptr<CoordinatedVideoAdapter> video_adapter_;
   talk_base::CpuMonitor* cpu_monitor_;
   talk_base::scoped_ptr<WebRtcOveruseObserver> overuse_observer_;
 };
@@ -2860,16 +2854,7 @@ bool WebRtcVideoMediaChannel::GetRenderer(uint32 ssrc,
   return true;
 }
 
-bool WebRtcVideoMediaChannel::GetVideoAdapter(
-    uint32 ssrc, CoordinatedVideoAdapter** video_adapter) {
-  SendChannelMap::iterator it = send_channels_.find(ssrc);
-  if (it == send_channels_.end()) {
-    return false;
-  }
-  *video_adapter = it->second->video_adapter();
-  return true;
-}
-
+// TODO(zhurunz): Add unittests to test this function.
 void WebRtcVideoMediaChannel::SendFrame(VideoCapturer* capturer,
                                         const VideoFrame* frame) {
   // If the |capturer| is registered to any send channel, then send the frame
diff --git a/talk/media/webrtc/webrtcvideoengine.h b/talk/media/webrtc/webrtcvideoengine.h
index fa1b24881c..d4949c473c 100644
--- a/talk/media/webrtc/webrtcvideoengine.h
+++ b/talk/media/webrtc/webrtcvideoengine.h
@@ -60,13 +60,12 @@ class CpuMonitor;
 
 namespace cricket {
 
-class CoordinatedVideoAdapter;
-class ViETraceWrapper;
-class ViEWrapper;
 class VideoCapturer;
 class VideoFrame;
 class VideoProcessor;
 class VideoRenderer;
+class ViETraceWrapper;
+class ViEWrapper;
 class VoiceMediaChannel;
 class WebRtcDecoderObserver;
 class WebRtcEncoderObserver;
@@ -228,6 +227,10 @@ class WebRtcVideoEngine : public sigslot::has_slots<>,
   int local_renderer_h_;
   VideoRenderer* local_renderer_;
 
+  // Critical section to protect the media processor register/unregister
+  // while processing a frame
+  talk_base::CriticalSection signal_media_critical_;
+
   talk_base::scoped_ptr<talk_base::CpuMonitor> cpu_monitor_;
 };
@@ -286,11 +289,12 @@ class WebRtcVideoMediaChannel : public talk_base::MessageHandler,
   // Public functions for use by tests and other specialized code.
   uint32 send_ssrc() const { return 0; }
   bool GetRenderer(uint32 ssrc, VideoRenderer** renderer);
-  bool GetVideoAdapter(uint32 ssrc, CoordinatedVideoAdapter** video_adapter);
   void SendFrame(VideoCapturer* capturer, const VideoFrame* frame);
   bool SendFrame(WebRtcVideoChannelSendInfo* channel_info,
                  const VideoFrame* frame, bool is_screencast);
+  void AdaptAndSendFrame(VideoCapturer* capturer, const VideoFrame* frame);
+
   // Thunk functions for use with HybridVideoEngine
   void OnLocalFrame(VideoCapturer* capturer, const VideoFrame* frame) {
     SendFrame(0u, frame, capturer->IsScreencast());
diff --git a/talk/media/webrtc/webrtcvideoengine_unittest.cc b/talk/media/webrtc/webrtcvideoengine_unittest.cc
index 386ec0c524..e331188b59 100644
--- a/talk/media/webrtc/webrtcvideoengine_unittest.cc
+++ b/talk/media/webrtc/webrtcvideoengine_unittest.cc
@@ -36,7 +36,6 @@
 #include "talk/media/base/fakevideorenderer.h"
 #include "talk/media/base/mediachannel.h"
 #include "talk/media/base/testutils.h"
-#include "talk/media/base/videoadapter.h"
 #include "talk/media/base/videoengine_unittest.h"
 #include "talk/media/webrtc/fakewebrtcvideocapturemodule.h"
 #include "talk/media/webrtc/fakewebrtcvideoengine.h"
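
The videoadapter.cc change above replaces the sample-count gate (kCpuLoadMinSamples) with a wall-clock gate: once a CPU load sample arrives, adapt requests are held back until kCpuLoadMinSampleTime (5000 ms) has elapsed, and the window is re-armed whenever the output resolution changes. Below is a minimal standalone sketch of that gating idea, using std::chrono in place of the talk_base::TimeAfter/TimeIsLater/TimeUntil helpers; the CpuAdaptGate class and its members are illustrative names only, not part of the patch.

#include <chrono>
#include <cstdio>

// Illustrative stand-in for the time-based CPU adaptation gate introduced in
// CoordinatedVideoAdapter. All names here are hypothetical; the patch itself
// stores an int deadline and uses the talk_base time helpers.
class CpuAdaptGate {
 public:
  explicit CpuAdaptGate(std::chrono::milliseconds min_sample_time)
      : min_sample_time_(min_sample_time), armed_(false) {}

  // Call on every CPU load sample. Returns true when an adapt request may be
  // acted on, false while we are still inside the wait window.
  bool MayAdapt() {
    const auto now = std::chrono::steady_clock::now();
    if (!armed_) {
      // First sample since construction or Reset(): start the wait window.
      deadline_ = now + min_sample_time_;
      armed_ = true;
    }
    return now >= deadline_;
  }

  // Call after the output resolution actually changes: old load data is
  // stale, so restart the wait window from now (mirrors re-arming
  // cpu_adapt_wait_time_ in AdaptToMinimumFormat).
  void Reset() {
    deadline_ = std::chrono::steady_clock::now() + min_sample_time_;
    armed_ = true;
  }

 private:
  std::chrono::milliseconds min_sample_time_;
  std::chrono::steady_clock::time_point deadline_;
  bool armed_;
};

int main() {
  CpuAdaptGate gate(std::chrono::milliseconds(5000));
  // Right after the first sample the window has just opened, so an overload
  // report would be ignored for now.
  std::printf("may adapt right away: %s\n", gate.MayAdapt() ? "yes" : "no");
  return 0;
}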