From d36dd499c8f253cbcf37364c2a070c2e8c7100e9 Mon Sep 17 00:00:00 2001
From: kjellander
Date: Sat, 8 Oct 2016 22:21:35 -0700
Subject: [PATCH] Revert of Make cricket::VideoFrame inherit webrtc::VideoFrame.
 (patchset #9 id:160001 of https://codereview.webrtc.org/2315663002/ )

Reason for revert:
Breaks compile for Chromium builds:
https://build.chromium.org/p/chromium.webrtc.fyi/builders/Linux%20Builder/builds/10761
https://build.chromium.org/p/chromium.webrtc.fyi/builders/Mac%20Builder/builds/18142

FAILED: obj/remoting/protocol/protocol/webrtc_video_renderer_adapter.o
../../remoting/protocol/webrtc_video_renderer_adapter.cc:110:52: error: no member named 'transport_frame_id' in 'cricket::VideoFrame'
      weak_factory_.GetWeakPtr(), frame.transport_frame_id(),
                                        ~~~~~ ^
1 error generated.

Please run chromium trybots as described at
https://webrtc.org/contributing/#tryjobs-on-chromium-trybots before relanding.

Original issue's description:
> Make cricket::VideoFrame inherit webrtc::VideoFrame. Delete
> all methods but a few constructors. And similarly for the
> subclass cricket::WebRtcVideoFrame.
>
> TBR=tkchin@webrtc.org # Added an include line
> BUG=webrtc:5682
>
> Committed: https://crrev.com/dda6ec008a0fc8d52e118814fb779032e8931968
> Cr-Commit-Position: refs/heads/master@{#14576}

TBR=perkj@webrtc.org,pthatcher@webrtc.org,pthatcher@chromium.org,tkchin@webrtc.org,nisse@webrtc.org
NOTRY=True
NOPRESUBMIT=True
BUG=webrtc:5682

Review-Url: https://codereview.webrtc.org/2402853002
Cr-Commit-Position: refs/heads/master@{#14583}
---
 webrtc/media/BUILD.gn                         |    4 +
 webrtc/media/base/videobroadcaster.cc         |    3 +-
 .../media/base/videobroadcaster_unittest.cc   |    4 +-
 webrtc/media/base/videoframe.cc               |  171 ++
 webrtc/media/base/videoframe.h                |   57 +-
 webrtc/media/base/videoframe_unittest.h       | 1369 +++++++++++++++++
 webrtc/media/engine/webrtcvideocapturer.cc    |    2 +-
 webrtc/media/engine/webrtcvideoengine2.cc     |    8 +-
 webrtc/media/engine/webrtcvideoengine2.h      |    2 +-
 webrtc/media/engine/webrtcvideoframe.cc       |  154 ++
 webrtc/media/engine/webrtcvideoframe.h        |  104 +-
 .../media/engine/webrtcvideoframe_unittest.cc |  157 ++
 webrtc/media/media.gyp                        |    2 +
 .../Classes/avfoundationvideocapturer.mm      |    1 -
 14 files changed, 2000 insertions(+), 38 deletions(-)
 create mode 100644 webrtc/media/base/videoframe.cc
 create mode 100644 webrtc/media/base/videoframe_unittest.h
 create mode 100644 webrtc/media/engine/webrtcvideoframe.cc
 create mode 100644 webrtc/media/engine/webrtcvideoframe_unittest.cc

diff --git a/webrtc/media/BUILD.gn b/webrtc/media/BUILD.gn
index bfe605db9f..122c404455 100644
--- a/webrtc/media/BUILD.gn
+++ b/webrtc/media/BUILD.gn
@@ -81,6 +81,7 @@ rtc_static_library("rtc_media") {
     "base/videocapturerfactory.h",
     "base/videocommon.cc",
     "base/videocommon.h",
+    "base/videoframe.cc",
     "base/videoframe.h",
     "base/videosourcebase.cc",
     "base/videosourcebase.h",
@@ -101,6 +102,7 @@ rtc_static_library("rtc_media") {
     "engine/webrtcvideoencoderfactory.h",
     "engine/webrtcvideoengine2.cc",
     "engine/webrtcvideoengine2.h",
+    "engine/webrtcvideoframe.cc",
     "engine/webrtcvideoframe.h",
     "engine/webrtcvoe.h",
     "engine/webrtcvoiceengine.cc",
@@ -304,12 +306,14 @@ if (rtc_include_tests) {
       "base/videocapturer_unittest.cc",
       "base/videocommon_unittest.cc",
       "base/videoengine_unittest.h",
+      "base/videoframe_unittest.h",
       "engine/nullwebrtcvideoengine_unittest.cc",
       "engine/payload_type_mapper_unittest.cc",
       "engine/simulcast_unittest.cc",
       "engine/webrtcmediaengine_unittest.cc",
       "engine/webrtcvideocapturer_unittest.cc",
       "engine/webrtcvideoengine2_unittest.cc",
"engine/webrtcvideoframe_unittest.cc", "engine/webrtcvoiceengine_unittest.cc", "sctp/sctpdataengine_unittest.cc", ] diff --git a/webrtc/media/base/videobroadcaster.cc b/webrtc/media/base/videobroadcaster.cc index 653829639c..d3bc7a09ce 100644 --- a/webrtc/media/base/videobroadcaster.cc +++ b/webrtc/media/base/videobroadcaster.cc @@ -13,7 +13,6 @@ #include #include "webrtc/base/checks.h" -#include "webrtc/base/logging.h" namespace rtc { @@ -65,7 +64,7 @@ void VideoBroadcaster::OnFrame(const cricket::VideoFrame& frame) { if (sink_pair.wants.black_frames) { sink_pair.sink->OnFrame(cricket::WebRtcVideoFrame( GetBlackFrameBuffer(frame.width(), frame.height()), frame.rotation(), - frame.timestamp_us())); + frame.timestamp_us(), frame.transport_frame_id())); } else { sink_pair.sink->OnFrame(frame); } diff --git a/webrtc/media/base/videobroadcaster_unittest.cc b/webrtc/media/base/videobroadcaster_unittest.cc index 593c8e5151..48b4ffec71 100644 --- a/webrtc/media/base/videobroadcaster_unittest.cc +++ b/webrtc/media/base/videobroadcaster_unittest.cc @@ -140,7 +140,7 @@ TEST(VideoBroadcasterTest, SinkWantsBlackFrames) { buffer->InitializeData(); cricket::WebRtcVideoFrame frame1(buffer, webrtc::kVideoRotation_0, - 10 /* timestamp_us */); + 10 /* timestamp_us */, 0 /* frame_id */); broadcaster.OnFrame(frame1); EXPECT_TRUE(sink1.black_frame()); EXPECT_EQ(10, sink1.timestamp_us()); @@ -154,7 +154,7 @@ TEST(VideoBroadcasterTest, SinkWantsBlackFrames) { broadcaster.AddOrUpdateSink(&sink2, wants2); cricket::WebRtcVideoFrame frame2(buffer, webrtc::kVideoRotation_0, - 30 /* timestamp_us */); + 30 /* timestamp_us */, 0 /* frame_id */); broadcaster.OnFrame(frame2); EXPECT_FALSE(sink1.black_frame()); EXPECT_EQ(30, sink1.timestamp_us()); diff --git a/webrtc/media/base/videoframe.cc b/webrtc/media/base/videoframe.cc new file mode 100644 index 0000000000..4ce0dfd90c --- /dev/null +++ b/webrtc/media/base/videoframe.cc @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/media/base/videoframe.h" + +#include + +#include "webrtc/base/arraysize.h" +#include "webrtc/base/checks.h" +#include "webrtc/base/logging.h" +#include "webrtc/media/base/videocommon.h" + +namespace cricket { + +static const size_t kMaxSampleSize = 1000000000u; +// Returns whether a sample is valid. +bool VideoFrame::Validate(uint32_t fourcc, + int w, + int h, + const uint8_t* sample, + size_t sample_size) { + if (h < 0) { + h = -h; + } + // 16384 is maximum resolution for VP8 codec. 
+ if (w < 1 || w > 16384 || h < 1 || h > 16384) { + LOG(LS_ERROR) << "Invalid dimensions: " << w << "x" << h; + return false; + } + uint32_t format = CanonicalFourCC(fourcc); + int expected_bpp = 8; + switch (format) { + case FOURCC_I400: + case FOURCC_RGGB: + case FOURCC_BGGR: + case FOURCC_GRBG: + case FOURCC_GBRG: + expected_bpp = 8; + break; + case FOURCC_I420: + case FOURCC_I411: + case FOURCC_YU12: + case FOURCC_YV12: + case FOURCC_M420: + case FOURCC_NV21: + case FOURCC_NV12: + expected_bpp = 12; + break; + case FOURCC_I422: + case FOURCC_YV16: + case FOURCC_YUY2: + case FOURCC_UYVY: + case FOURCC_RGBP: + case FOURCC_RGBO: + case FOURCC_R444: + expected_bpp = 16; + break; + case FOURCC_I444: + case FOURCC_YV24: + case FOURCC_24BG: + case FOURCC_RAW: + expected_bpp = 24; + break; + + case FOURCC_ABGR: + case FOURCC_BGRA: + case FOURCC_ARGB: + expected_bpp = 32; + break; + + case FOURCC_MJPG: + case FOURCC_H264: + expected_bpp = 0; + break; + default: + expected_bpp = 8; // Expect format is at least 8 bits per pixel. + break; + } + size_t expected_size = (w * expected_bpp + 7) / 8 * h; + // For compressed formats, expect 4 bits per 16 x 16 macro. I420 would be + // 6 bits, but grey can be 4 bits. + if (expected_bpp == 0) { + expected_size = ((w + 15) / 16) * ((h + 15) / 16) * 4 / 8; + } + if (sample == NULL) { + LOG(LS_ERROR) << "NULL sample pointer." + << " format: " << GetFourccName(format) + << " bpp: " << expected_bpp + << " size: " << w << "x" << h + << " expected: " << expected_size + << " " << sample_size; + return false; + } + // TODO(fbarchard): Make function to dump information about frames. + uint8_t four_samples[4] = {0, 0, 0, 0}; + for (size_t i = 0; i < arraysize(four_samples) && i < sample_size; ++i) { + four_samples[i] = sample[i]; + } + if (sample_size < expected_size) { + LOG(LS_ERROR) << "Size field is too small." + << " format: " << GetFourccName(format) + << " bpp: " << expected_bpp + << " size: " << w << "x" << h + << " " << sample_size + << " expected: " << expected_size + << " sample[0..3]: " << static_cast(four_samples[0]) + << ", " << static_cast(four_samples[1]) + << ", " << static_cast(four_samples[2]) + << ", " << static_cast(four_samples[3]); + return false; + } + if (sample_size > kMaxSampleSize) { + LOG(LS_WARNING) << "Size field is invalid." + << " format: " << GetFourccName(format) + << " bpp: " << expected_bpp + << " size: " << w << "x" << h + << " " << sample_size + << " expected: " << 2 * expected_size + << " sample[0..3]: " << static_cast(four_samples[0]) + << ", " << static_cast(four_samples[1]) + << ", " << static_cast(four_samples[2]) + << ", " << static_cast(four_samples[3]); + return false; + } + // Show large size warning once every 100 frames. + // TODO(fbarchard): Make frame counter atomic for thread safety. + static int large_warn100 = 0; + size_t large_expected_size = expected_size * 2; + if (expected_bpp >= 8 && + (sample_size > large_expected_size || sample_size > kMaxSampleSize) && + large_warn100 % 100 == 0) { + ++large_warn100; + LOG(LS_WARNING) << "Size field is too large." + << " format: " << GetFourccName(format) + << " bpp: " << expected_bpp + << " size: " << w << "x" << h + << " bytes: " << sample_size + << " expected: " << large_expected_size + << " sample[0..3]: " << static_cast(four_samples[0]) + << ", " << static_cast(four_samples[1]) + << ", " << static_cast(four_samples[2]) + << ", " << static_cast(four_samples[3]); + } + + // TODO(fbarchard): Add duplicate pixel check. 
+ // TODO(fbarchard): Use frame counter atomic for thread safety. + static bool valid_once = true; + if (valid_once) { + valid_once = false; + LOG(LS_INFO) << "Validate frame passed." + << " format: " << GetFourccName(format) + << " bpp: " << expected_bpp + << " size: " << w << "x" << h + << " bytes: " << sample_size + << " expected: " << expected_size + << " sample[0..3]: " << static_cast(four_samples[0]) + << ", " << static_cast(four_samples[1]) + << ", " << static_cast(four_samples[2]) + << ", " << static_cast(four_samples[3]); + } + return true; +} + +} // namespace cricket diff --git a/webrtc/media/base/videoframe.h b/webrtc/media/base/videoframe.h index d1ae3fef0d..6aa434d79d 100644 --- a/webrtc/media/base/videoframe.h +++ b/webrtc/media/base/videoframe.h @@ -8,24 +8,57 @@ * be found in the AUTHORS file in the root of the source tree. */ -// TODO(nisse): Deprecated, replace cricket::VideoFrame with -// webrtc::VideoFrame everywhere, then delete this file. See -// https://bugs.chromium.org/p/webrtc/issues/detail?id=5682. - #ifndef WEBRTC_MEDIA_BASE_VIDEOFRAME_H_ #define WEBRTC_MEDIA_BASE_VIDEOFRAME_H_ -#include "webrtc/video_frame.h" +#include "webrtc/base/basictypes.h" +#include "webrtc/base/stream.h" +#include "webrtc/common_video/include/video_frame_buffer.h" +#include "webrtc/common_video/rotation.h" namespace cricket { -class VideoFrame : public webrtc::VideoFrame { - protected: - VideoFrame() : webrtc::VideoFrame() {} - VideoFrame(const rtc::scoped_refptr& buffer, - webrtc::VideoRotation rotation, - int64_t timestamp_us) - : webrtc::VideoFrame(buffer, rotation, timestamp_us) {} +// Represents a YUV420 (a.k.a. I420) video frame. + +// TODO(nisse): This class duplicates webrtc::VideoFrame. There's +// ongoing work to merge the classes. See +// https://bugs.chromium.org/p/webrtc/issues/detail?id=5682. +class VideoFrame { + public: + VideoFrame() {} + virtual ~VideoFrame() {} + + // Basic accessors. + // Note this is the width and height without rotation applied. + virtual int width() const = 0; + virtual int height() const = 0; + + // Returns the underlying video frame buffer. This function is ok to call + // multiple times, but the returned object will refer to the same memory. + virtual const rtc::scoped_refptr& + video_frame_buffer() const = 0; + + // Frame ID. Normally RTP timestamp when the frame was received using RTP. + virtual uint32_t transport_frame_id() const = 0; + + // System monotonic clock, same timebase as rtc::TimeMicros(). + virtual int64_t timestamp_us() const = 0; + virtual void set_timestamp_us(int64_t time_us) = 0; + + // Indicates the rotation angle in degrees. + virtual webrtc::VideoRotation rotation() const = 0; + + // Tests if sample is valid. Returns true if valid. + + // TODO(nisse): Deprecated. Should be deleted in the cricket::VideoFrame and + // webrtc::VideoFrame merge. Validation of sample_size possibly moved to + // libyuv::ConvertToI420. As an initial step, demote this method to protected + // status. Used only by WebRtcVideoFrame::Reset. + static bool Validate(uint32_t fourcc, + int w, + int h, + const uint8_t* sample, + size_t sample_size); }; } // namespace cricket diff --git a/webrtc/media/base/videoframe_unittest.h b/webrtc/media/base/videoframe_unittest.h new file mode 100644 index 0000000000..42936bbefe --- /dev/null +++ b/webrtc/media/base/videoframe_unittest.h @@ -0,0 +1,1369 @@ +/* + * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ +#define WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ + +#include +#include +#include + +#include "libyuv/convert.h" +#include "libyuv/convert_from.h" +#include "libyuv/planar_functions.h" +#include "libyuv/rotate.h" +#include "webrtc/base/gunit.h" +#include "webrtc/base/pathutils.h" +#include "webrtc/base/stream.h" +#include "webrtc/base/stringutils.h" +#include "webrtc/common_video/rotation.h" +#include "webrtc/media/base/testutils.h" +#include "webrtc/media/base/videocommon.h" +#include "webrtc/media/base/videoframe.h" +#include "webrtc/test/testsupport/fileutils.h" + +#if defined(_MSC_VER) +#define ALIGN16(var) __declspec(align(16)) var +#else +#define ALIGN16(var) var __attribute__((aligned(16))) +#endif + +#define kImageFilename "media/faces.1280x720_P420" +#define kYuvExtension "yuv" +#define kJpeg420Filename "media/faces_I420" +#define kJpeg422Filename "media/faces_I422" +#define kJpeg444Filename "media/faces_I444" +#define kJpeg411Filename "media/faces_I411" +#define kJpeg400Filename "media/faces_I400" +#define kJpegExtension "jpg" + +// Generic test class for testing various video frame implementations. +template +class VideoFrameTest : public testing::Test { + public: + VideoFrameTest() : repeat_(1) {} + + protected: + static const int kWidth = 1280; + static const int kHeight = 720; + static const int kAlignment = 16; + static const int kMinWidthAll = 1; // Constants for ConstructYUY2AllSizes. + static const int kMinHeightAll = 1; + static const int kMaxWidthAll = 17; + static const int kMaxHeightAll = 23; + + // Load a video frame from disk. + bool LoadFrameNoRepeat(T* frame) { + int save_repeat = repeat_; // This LoadFrame disables repeat. + repeat_ = 1; + bool success = LoadFrame(LoadSample(kImageFilename, kYuvExtension).get(), + cricket::FOURCC_I420, + kWidth, kHeight, frame); + repeat_ = save_repeat; + return success; + } + + bool LoadFrame(const std::string& filename, + uint32_t format, + int32_t width, + int32_t height, + T* frame) { + return LoadFrame(filename, format, width, height, width, abs(height), + webrtc::kVideoRotation_0, frame); + } + bool LoadFrame(const std::string& filename, + uint32_t format, + int32_t width, + int32_t height, + int dw, + int dh, + webrtc::VideoRotation rotation, + T* frame) { + std::unique_ptr ms(LoadSample(filename)); + return LoadFrame(ms.get(), format, width, height, dw, dh, rotation, frame); + } + // Load a video frame from a memory stream. + bool LoadFrame(rtc::MemoryStream* ms, + uint32_t format, + int32_t width, + int32_t height, + T* frame) { + return LoadFrame(ms, format, width, height, width, abs(height), + webrtc::kVideoRotation_0, frame); + } + bool LoadFrame(rtc::MemoryStream* ms, + uint32_t format, + int32_t width, + int32_t height, + int dw, + int dh, + webrtc::VideoRotation rotation, + T* frame) { + if (!ms) { + return false; + } + size_t data_size; + bool ret = ms->GetSize(&data_size); + EXPECT_TRUE(ret); + if (ret) { + ret = LoadFrame(reinterpret_cast(ms->GetBuffer()), data_size, + format, width, height, dw, dh, rotation, frame); + } + return ret; + } + // Load a frame from a raw buffer. 
+ bool LoadFrame(uint8_t* sample, + size_t sample_size, + uint32_t format, + int32_t width, + int32_t height, + T* frame) { + return LoadFrame(sample, sample_size, format, width, height, width, + abs(height), webrtc::kVideoRotation_0, frame); + } + bool LoadFrame(uint8_t* sample, + size_t sample_size, + uint32_t format, + int32_t width, + int32_t height, + int dw, + int dh, + webrtc::VideoRotation rotation, + T* frame) { + bool ret = false; + for (int i = 0; i < repeat_; ++i) { + ret = frame->Init(format, width, height, dw, dh, + sample, sample_size, 0, rotation); + } + return ret; + } + + std::unique_ptr LoadSample(const std::string& filename, + const std::string& extension) { + rtc::Pathname path(webrtc::test::ResourcePath(filename, extension)); + std::unique_ptr fs( + rtc::Filesystem::OpenFile(path, "rb")); + if (!fs.get()) { + LOG(LS_ERROR) << "Could not open test file path: " << path.pathname() + << " from current dir " + << rtc::Filesystem::GetCurrentDirectory().pathname(); + return NULL; + } + + char buf[4096]; + std::unique_ptr ms( + new rtc::MemoryStream()); + rtc::StreamResult res = Flow(fs.get(), buf, sizeof(buf), ms.get()); + if (res != rtc::SR_SUCCESS) { + LOG(LS_ERROR) << "Could not load test file path: " << path.pathname(); + return NULL; + } + + return ms; + } + + bool DumpSample(const std::string& filename, const void* buffer, int size) { + rtc::Pathname path(filename); + std::unique_ptr fs( + rtc::Filesystem::OpenFile(path, "wb")); + if (!fs.get()) { + return false; + } + + return (fs->Write(buffer, size, NULL, NULL) == rtc::SR_SUCCESS); + } + + // Create a test image in the desired color space. + // The image is a checkerboard pattern with 63x63 squares, which allows + // I420 chroma artifacts to easily be seen on the square boundaries. + // The pattern is { { green, orange }, { blue, purple } } + // There is also a gradient within each square to ensure that the luma + // values are handled properly. + std::unique_ptr CreateYuv422Sample(uint32_t fourcc, + uint32_t width, + uint32_t height) { + int y1_pos, y2_pos, u_pos, v_pos; + if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { + return NULL; + } + + std::unique_ptr ms( + new rtc::MemoryStream); + int awidth = (width + 1) & ~1; + int size = awidth * 2 * height; + if (!ms->ReserveSize(size)) { + return NULL; + } + for (uint32_t y = 0; y < height; ++y) { + for (int x = 0; x < awidth; x += 2) { + uint8_t quad[4]; + quad[y1_pos] = (x % 63 + y % 63) + 64; + quad[y2_pos] = ((x + 1) % 63 + y % 63) + 64; + quad[u_pos] = ((x / 63) & 1) ? 192 : 64; + quad[v_pos] = ((y / 63) & 1) ? 192 : 64; + ms->Write(quad, sizeof(quad), NULL, NULL); + } + } + return ms; + } + + // Create a test image for YUV 420 formats with 12 bits per pixel. + std::unique_ptr CreateYuvSample(uint32_t width, + uint32_t height, + uint32_t bpp) { + std::unique_ptr ms( + new rtc::MemoryStream); + if (!ms->ReserveSize(width * height * bpp / 8)) { + return NULL; + } + + for (uint32_t i = 0; i < width * height * bpp / 8; ++i) { + uint8_t value = ((i / 63) & 1) ? 
192 : 64; + ms->Write(&value, sizeof(value), NULL, NULL); + } + return ms; + } + + std::unique_ptr CreateRgbSample(uint32_t fourcc, + uint32_t width, + uint32_t height) { + int r_pos, g_pos, b_pos, bytes; + if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { + return NULL; + } + + std::unique_ptr ms( + new rtc::MemoryStream); + if (!ms->ReserveSize(width * height * bytes)) { + return NULL; + } + + for (uint32_t y = 0; y < height; ++y) { + for (uint32_t x = 0; x < width; ++x) { + uint8_t rgb[4] = {255, 255, 255, 255}; + rgb[r_pos] = ((x / 63) & 1) ? 224 : 32; + rgb[g_pos] = (x % 63 + y % 63) + 96; + rgb[b_pos] = ((y / 63) & 1) ? 224 : 32; + ms->Write(rgb, bytes, NULL, NULL); + } + } + return ms; + } + + // Simple conversion routines to verify the optimized VideoFrame routines. + // Converts from the specified colorspace to I420. + std::unique_ptr ConvertYuv422(const rtc::MemoryStream* ms, + uint32_t fourcc, + uint32_t width, + uint32_t height) { + int y1_pos, y2_pos, u_pos, v_pos; + if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { + return nullptr; + } + + rtc::scoped_refptr buffer( + new rtc::RefCountedObject(width, height)); + + buffer->SetToBlack(); + + const uint8_t* start = reinterpret_cast(ms->GetBuffer()); + int awidth = (width + 1) & ~1; + int stride_y = buffer->StrideY(); + int stride_u = buffer->StrideU(); + int stride_v = buffer->StrideV(); + uint8_t* plane_y = buffer->MutableDataY(); + uint8_t* plane_u = buffer->MutableDataU(); + uint8_t* plane_v = buffer->MutableDataV(); + for (uint32_t y = 0; y < height; ++y) { + for (uint32_t x = 0; x < width; x += 2) { + const uint8_t* quad1 = start + (y * awidth + x) * 2; + plane_y[stride_y * y + x] = quad1[y1_pos]; + if ((x + 1) < width) { + plane_y[stride_y * y + x + 1] = quad1[y2_pos]; + } + if ((y & 1) == 0) { + const uint8_t* quad2 = quad1 + awidth * 2; + if ((y + 1) >= height) { + quad2 = quad1; + } + plane_u[stride_u * (y / 2) + x / 2] = + (quad1[u_pos] + quad2[u_pos] + 1) / 2; + plane_v[stride_v * (y / 2) + x / 2] = + (quad1[v_pos] + quad2[v_pos] + 1) / 2; + } + } + } + return std::unique_ptr(new T(buffer, 0, webrtc::kVideoRotation_0)); + } + + // Convert RGB to 420. + // A negative height inverts the image. + std::unique_ptr ConvertRgb(const rtc::MemoryStream* ms, + uint32_t fourcc, + int32_t width, + int32_t height) { + int r_pos, g_pos, b_pos, bytes; + if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { + return nullptr; + } + int pitch = width * bytes; + const uint8_t* start = reinterpret_cast(ms->GetBuffer()); + if (height < 0) { + height = -height; + start = start + pitch * (height - 1); + pitch = -pitch; + } + rtc::scoped_refptr buffer( + new rtc::RefCountedObject(width, height)); + + buffer->SetToBlack(); + + int stride_y = buffer->StrideY(); + int stride_u = buffer->StrideU(); + int stride_v = buffer->StrideV(); + uint8_t* plane_y = buffer->MutableDataY(); + uint8_t* plane_u = buffer->MutableDataU(); + uint8_t* plane_v = buffer->MutableDataV(); + for (int32_t y = 0; y < height; y += 2) { + for (int32_t x = 0; x < width; x += 2) { + const uint8_t* rgb[4]; + uint8_t yuv[4][3]; + rgb[0] = start + y * pitch + x * bytes; + rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0); + rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0); + rgb[3] = rgb[2] + ((x + 1) < width ? 
bytes : 0); + for (size_t i = 0; i < 4; ++i) { + ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos], + &yuv[i][0], &yuv[i][1], &yuv[i][2]); + } + plane_y[stride_y * y + x] = yuv[0][0]; + if ((x + 1) < width) { + plane_y[stride_y * y + x + 1] = yuv[1][0]; + } + if ((y + 1) < height) { + plane_y[stride_y * (y + 1) + x] = yuv[2][0]; + if ((x + 1) < width) { + plane_y[stride_y * (y + 1) + x + 1] = yuv[3][0]; + } + } + plane_u[stride_u * (y / 2) + x / 2] = + (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4; + plane_v[stride_v * (y / 2) + x / 2] = + (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4; + } + } + return std::unique_ptr(new T(buffer, 0, webrtc::kVideoRotation_0)); + } + + // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia. + void ConvertRgbPixel(uint8_t r, + uint8_t g, + uint8_t b, + uint8_t* y, + uint8_t* u, + uint8_t* v) { + *y = static_cast(.257 * r + .504 * g + .098 * b) + 16; + *u = static_cast(-.148 * r - .291 * g + .439 * b) + 128; + *v = static_cast(.439 * r - .368 * g - .071 * b) + 128; + } + + bool GetYuv422Packing(uint32_t fourcc, + int* y1_pos, + int* y2_pos, + int* u_pos, + int* v_pos) { + if (fourcc == cricket::FOURCC_YUY2) { + *y1_pos = 0; *u_pos = 1; *y2_pos = 2; *v_pos = 3; + } else if (fourcc == cricket::FOURCC_UYVY) { + *u_pos = 0; *y1_pos = 1; *v_pos = 2; *y2_pos = 3; + } else { + return false; + } + return true; + } + + bool GetRgbPacking(uint32_t fourcc, + int* r_pos, + int* g_pos, + int* b_pos, + int* bytes) { + if (fourcc == cricket::FOURCC_RAW) { + *r_pos = 0; *g_pos = 1; *b_pos = 2; *bytes = 3; // RGB in memory. + } else if (fourcc == cricket::FOURCC_24BG) { + *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 3; // BGR in memory. + } else if (fourcc == cricket::FOURCC_ABGR) { + *r_pos = 0; *g_pos = 1; *b_pos = 2; *bytes = 4; // RGBA in memory. + } else if (fourcc == cricket::FOURCC_BGRA) { + *r_pos = 1; *g_pos = 2; *b_pos = 3; *bytes = 4; // ARGB in memory. + } else if (fourcc == cricket::FOURCC_ARGB) { + *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 4; // BGRA in memory. + } else { + return false; + } + return true; + } + + // Comparison functions for testing. 
+ static bool IsNull(const cricket::VideoFrame& frame) { + return !frame.video_frame_buffer(); + } + + static bool IsSize(const cricket::VideoFrame& frame, + int width, + int height) { + return !IsNull(frame) && frame.video_frame_buffer()->StrideY() >= width && + frame.video_frame_buffer()->StrideU() >= width / 2 && + frame.video_frame_buffer()->StrideV() >= width / 2 && + frame.width() == width && frame.height() == height; + } + + static bool IsPlaneEqual(const std::string& name, + const uint8_t* plane1, + uint32_t pitch1, + const uint8_t* plane2, + uint32_t pitch2, + uint32_t width, + uint32_t height, + int max_error) { + const uint8_t* r1 = plane1; + const uint8_t* r2 = plane2; + for (uint32_t y = 0; y < height; ++y) { + for (uint32_t x = 0; x < width; ++x) { + if (abs(static_cast(r1[x] - r2[x])) > max_error) { + LOG(LS_INFO) << "IsPlaneEqual(" << name << "): pixel[" + << x << "," << y << "] differs: " + << static_cast(r1[x]) << " vs " + << static_cast(r2[x]); + return false; + } + } + r1 += pitch1; + r2 += pitch2; + } + return true; + } + + static bool IsEqual(const cricket::VideoFrame& frame, + int width, + int height, + const uint8_t* y, + uint32_t ypitch, + const uint8_t* u, + uint32_t upitch, + const uint8_t* v, + uint32_t vpitch, + int max_error) { + return IsSize(frame, width, height) && + IsPlaneEqual("y", frame.video_frame_buffer()->DataY(), + frame.video_frame_buffer()->StrideY(), y, ypitch, + static_cast(width), + static_cast(height), max_error) && + IsPlaneEqual("u", frame.video_frame_buffer()->DataU(), + frame.video_frame_buffer()->StrideU(), u, upitch, + static_cast((width + 1) / 2), + static_cast((height + 1) / 2), max_error) && + IsPlaneEqual("v", frame.video_frame_buffer()->DataV(), + frame.video_frame_buffer()->StrideV(), v, vpitch, + static_cast((width + 1) / 2), + static_cast((height + 1) / 2), max_error); + } + + static bool IsEqual(const cricket::VideoFrame& frame1, + const cricket::VideoFrame& frame2, + int max_error) { + return frame1.timestamp_us() == frame2.timestamp_us() && + IsEqual(frame1, + frame2.width(), frame2.height(), + frame2.video_frame_buffer()->DataY(), + frame2.video_frame_buffer()->StrideY(), + frame2.video_frame_buffer()->DataU(), + frame2.video_frame_buffer()->StrideU(), + frame2.video_frame_buffer()->DataV(), + frame2.video_frame_buffer()->StrideV(), max_error); + } + + static bool IsEqual( + const cricket::VideoFrame& frame1, + const rtc::scoped_refptr& buffer, + int max_error) { + return IsEqual(frame1, buffer->width(), buffer->height(), + buffer->DataY(), buffer->StrideY(), + buffer->DataU(), buffer->StrideU(), + buffer->DataV(), buffer->StrideV(), + max_error); + } + + static bool IsEqualWithCrop(const cricket::VideoFrame& frame1, + const cricket::VideoFrame& frame2, + int hcrop, int vcrop, int max_error) { + return frame1.width() <= frame2.width() && + frame1.height() <= frame2.height() && + frame1.timestamp_us() == frame2.timestamp_us() && + IsEqual(frame1, + frame2.width() - hcrop * 2, + frame2.height() - vcrop * 2, + frame2.video_frame_buffer()->DataY() + + vcrop * frame2.video_frame_buffer()->StrideY() + + hcrop, + frame2.video_frame_buffer()->StrideY(), + frame2.video_frame_buffer()->DataU() + + vcrop * frame2.video_frame_buffer()->StrideU() / 2 + + hcrop / 2, + frame2.video_frame_buffer()->StrideU(), + frame2.video_frame_buffer()->DataV() + + vcrop * frame2.video_frame_buffer()->StrideV() / 2 + + hcrop / 2, + frame2.video_frame_buffer()->StrideV(), + max_error); + } + + static bool IsBlack(const cricket::VideoFrame& frame) { + 
return !IsNull(frame) && + *frame.video_frame_buffer()->DataY() <= 16 && + *frame.video_frame_buffer()->DataU() == 128 && + *frame.video_frame_buffer()->DataV() == 128; + } + + //////////////////////// + // Construction tests // + //////////////////////// + + // Test constructing an image from a I420 buffer. + void ConstructI420() { + T frame; + EXPECT_TRUE(IsNull(frame)); + std::unique_ptr ms( + CreateYuvSample(kWidth, kHeight, 12)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420, + kWidth, kHeight, &frame)); + + const uint8_t* y = reinterpret_cast(ms.get()->GetBuffer()); + const uint8_t* u = y + kWidth * kHeight; + const uint8_t* v = u + kWidth * kHeight / 4; + EXPECT_TRUE(IsEqual(frame, kWidth, kHeight, y, kWidth, u, kWidth / 2, v, + kWidth / 2, 0)); + } + + // Test constructing an image from a YV12 buffer. + void ConstructYV12() { + T frame; + std::unique_ptr ms( + CreateYuvSample(kWidth, kHeight, 12)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YV12, + kWidth, kHeight, &frame)); + + const uint8_t* y = reinterpret_cast(ms.get()->GetBuffer()); + const uint8_t* v = y + kWidth * kHeight; + const uint8_t* u = v + kWidth * kHeight / 4; + EXPECT_TRUE(IsEqual(frame, kWidth, kHeight, y, kWidth, u, kWidth / 2, v, + kWidth / 2, 0)); + } + + // Test constructing an image from a I422 buffer. + void ConstructI422() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + size_t buf_size = kWidth * kHeight * 2; + std::unique_ptr buf(new uint8_t[buf_size + kAlignment]); + uint8_t* y = ALIGNP(buf.get(), kAlignment); + uint8_t* u = y + kWidth * kHeight; + uint8_t* v = u + (kWidth / 2) * kHeight; + EXPECT_EQ(0, libyuv::I420ToI422(frame1.video_frame_buffer()->DataY(), + frame1.video_frame_buffer()->StrideY(), + frame1.video_frame_buffer()->DataU(), + frame1.video_frame_buffer()->StrideU(), + frame1.video_frame_buffer()->DataV(), + frame1.video_frame_buffer()->StrideV(), + y, kWidth, + u, kWidth / 2, + v, kWidth / 2, + kWidth, kHeight)); + EXPECT_TRUE(LoadFrame(y, buf_size, cricket::FOURCC_I422, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(frame1, frame2, 1)); + } + + // Test constructing an image from a YUY2 buffer. + void ConstructYuy2() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + size_t buf_size = kWidth * kHeight * 2; + std::unique_ptr buf(new uint8_t[buf_size + kAlignment]); + uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment); + EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.video_frame_buffer()->DataY(), + frame1.video_frame_buffer()->StrideY(), + frame1.video_frame_buffer()->DataU(), + frame1.video_frame_buffer()->StrideU(), + frame1.video_frame_buffer()->DataV(), + frame1.video_frame_buffer()->StrideV(), + yuy2, kWidth * 2, + kWidth, kHeight)); + EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(frame1, frame2, 0)); + } + + // Test constructing an image from a YUY2 buffer with buffer unaligned. 
+ void ConstructYuy2Unaligned() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + size_t buf_size = kWidth * kHeight * 2; + std::unique_ptr buf(new uint8_t[buf_size + kAlignment + 1]); + uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment) + 1; + EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.video_frame_buffer()->DataY(), + frame1.video_frame_buffer()->StrideY(), + frame1.video_frame_buffer()->DataU(), + frame1.video_frame_buffer()->StrideU(), + frame1.video_frame_buffer()->DataV(), + frame1.video_frame_buffer()->StrideV(), + yuy2, kWidth * 2, + kWidth, kHeight)); + EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(frame1, frame2, 0)); + } + + // Test constructing an image from a wide YUY2 buffer. + // Normal is 1280x720. Wide is 12800x72 + void ConstructYuy2Wide() { + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth * 10, kHeight / 10)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, + kWidth * 10, kHeight / 10); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, + kWidth * 10, kHeight / 10, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 0)); + } + + // Test constructing an image from a UYVY buffer. + void ConstructUyvy() { + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertYuv422(ms.get(), cricket::FOURCC_UYVY, + kWidth, kHeight); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 0)); + } + + // Test constructing an image from a random buffer. + // We are merely verifying that the code succeeds and is free of crashes. + void ConstructM420() { + T frame; + std::unique_ptr ms( + CreateYuvSample(kWidth, kHeight, 12)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_M420, + kWidth, kHeight, &frame)); + } + + void ConstructNV21() { + T frame; + std::unique_ptr ms( + CreateYuvSample(kWidth, kHeight, 12)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_NV21, + kWidth, kHeight, &frame)); + } + + void ConstructNV12() { + T frame; + std::unique_ptr ms( + CreateYuvSample(kWidth, kHeight, 12)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_NV12, + kWidth, kHeight, &frame)); + } + + // Test constructing an image from a ABGR buffer + // Due to rounding, some pixels may differ slightly from the VideoFrame impl. + void ConstructABGR() { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_ABGR, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ABGR, + kWidth, kHeight); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ABGR, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); + } + + // Test constructing an image from a ARGB buffer + // Due to rounding, some pixels may differ slightly from the VideoFrame impl. 
+ void ConstructARGB() { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, + kWidth, kHeight); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); + } + + // Test constructing an image from a wide ARGB buffer + // Normal is 1280x720. Wide is 12800x72 + void ConstructARGBWide() { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_ARGB, kWidth * 10, kHeight / 10)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, + kWidth * 10, kHeight / 10); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, + kWidth * 10, kHeight / 10, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); + } + + // Test constructing an image from an BGRA buffer. + // Due to rounding, some pixels may differ slightly from the VideoFrame impl. + void ConstructBGRA() { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_BGRA, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_BGRA, + kWidth, kHeight); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_BGRA, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); + } + + // Test constructing an image from a 24BG buffer. + // Due to rounding, some pixels may differ slightly from the VideoFrame impl. + void Construct24BG() { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_24BG, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_24BG, + kWidth, kHeight); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_24BG, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); + } + + // Test constructing an image from a raw RGB buffer. + // Due to rounding, some pixels may differ slightly from the VideoFrame impl. 
+ void ConstructRaw() { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_RAW, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_RAW, + kWidth, kHeight); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_RAW, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); + } + +// Macro to help test different rotations +#define TEST_MIRROR(FOURCC, BPP) \ + void Construct##FOURCC##Mirror() { \ + T frame1, frame2; \ + rtc::scoped_refptr res_buffer; \ + std::unique_ptr ms( \ + CreateYuvSample(kWidth, kHeight, BPP)); \ + ASSERT_TRUE(ms.get() != NULL); \ + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, \ + -kHeight, kWidth, kHeight, \ + webrtc::kVideoRotation_180, &frame1)); \ + size_t data_size; \ + bool ret = ms->GetSize(&data_size); \ + EXPECT_TRUE(ret); \ + EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ + kHeight, \ + reinterpret_cast(ms->GetBuffer()), \ + data_size, 0, webrtc::kVideoRotation_0)); \ + int width_rotate = frame1.width(); \ + int height_rotate = frame1.height(); \ + res_buffer = webrtc::I420Buffer::Create(width_rotate, height_rotate); \ + libyuv::I420Mirror(frame2.video_frame_buffer()->DataY(), \ + frame2.video_frame_buffer()->StrideY(), \ + frame2.video_frame_buffer()->DataU(), \ + frame2.video_frame_buffer()->StrideU(), \ + frame2.video_frame_buffer()->DataV(), \ + frame2.video_frame_buffer()->StrideV(), \ + res_buffer->MutableDataY(), res_buffer->StrideY(), \ + res_buffer->MutableDataU(), res_buffer->StrideU(), \ + res_buffer->MutableDataV(), res_buffer->StrideV(), \ + kWidth, kHeight); \ + EXPECT_TRUE(IsEqual(frame1, res_buffer, 0)); \ + } + + TEST_MIRROR(I420, 420) + +// Macro to help test different rotations +#define TEST_ROTATE(FOURCC, BPP, ROTATE) \ + void Construct##FOURCC##Rotate##ROTATE() { \ + T frame1, frame2; \ + rtc::scoped_refptr res_buffer; \ + std::unique_ptr ms( \ + CreateYuvSample(kWidth, kHeight, BPP)); \ + ASSERT_TRUE(ms.get() != NULL); \ + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, kHeight, \ + kWidth, kHeight, webrtc::kVideoRotation_##ROTATE, \ + &frame1)); \ + size_t data_size; \ + bool ret = ms->GetSize(&data_size); \ + EXPECT_TRUE(ret); \ + EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ + kHeight, \ + reinterpret_cast(ms->GetBuffer()), \ + data_size, 0, webrtc::kVideoRotation_0)); \ + int width_rotate = frame1.width(); \ + int height_rotate = frame1.height(); \ + res_buffer = webrtc::I420Buffer::Create(width_rotate, height_rotate); \ + libyuv::I420Rotate(frame2.video_frame_buffer()->DataY(), \ + frame2.video_frame_buffer()->StrideY(), \ + frame2.video_frame_buffer()->DataU(), \ + frame2.video_frame_buffer()->StrideU(), \ + frame2.video_frame_buffer()->DataV(), \ + frame2.video_frame_buffer()->StrideV(), \ + res_buffer->MutableDataY(), res_buffer->StrideY(), \ + res_buffer->MutableDataU(), res_buffer->StrideU(), \ + res_buffer->MutableDataV(), res_buffer->StrideV(), \ + kWidth, kHeight, libyuv::kRotate##ROTATE); \ + EXPECT_TRUE(IsEqual(frame1, res_buffer, 0)); \ + } + + // Test constructing an image with rotation. 
+ TEST_ROTATE(I420, 12, 0) + TEST_ROTATE(I420, 12, 90) + TEST_ROTATE(I420, 12, 180) + TEST_ROTATE(I420, 12, 270) + TEST_ROTATE(YV12, 12, 0) + TEST_ROTATE(YV12, 12, 90) + TEST_ROTATE(YV12, 12, 180) + TEST_ROTATE(YV12, 12, 270) + TEST_ROTATE(NV12, 12, 0) + TEST_ROTATE(NV12, 12, 90) + TEST_ROTATE(NV12, 12, 180) + TEST_ROTATE(NV12, 12, 270) + TEST_ROTATE(NV21, 12, 0) + TEST_ROTATE(NV21, 12, 90) + TEST_ROTATE(NV21, 12, 180) + TEST_ROTATE(NV21, 12, 270) + TEST_ROTATE(UYVY, 16, 0) + TEST_ROTATE(UYVY, 16, 90) + TEST_ROTATE(UYVY, 16, 180) + TEST_ROTATE(UYVY, 16, 270) + TEST_ROTATE(YUY2, 16, 0) + TEST_ROTATE(YUY2, 16, 90) + TEST_ROTATE(YUY2, 16, 180) + TEST_ROTATE(YUY2, 16, 270) + + // Test constructing an image from a UYVY buffer rotated 90 degrees. + void ConstructUyvyRotate90() { + T frame2; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, kWidth, kHeight, + kWidth, kHeight, webrtc::kVideoRotation_90, &frame2)); + } + + // Test constructing an image from a UYVY buffer rotated 180 degrees. + void ConstructUyvyRotate180() { + T frame2; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, kWidth, kHeight, + kWidth, kHeight, webrtc::kVideoRotation_180, + &frame2)); + } + + // Test constructing an image from a UYVY buffer rotated 270 degrees. + void ConstructUyvyRotate270() { + T frame2; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, kWidth, kHeight, + kWidth, kHeight, webrtc::kVideoRotation_270, + &frame2)); + } + + // Test constructing an image from a YUY2 buffer rotated 90 degrees. + void ConstructYuy2Rotate90() { + T frame2; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, + kWidth, kHeight, webrtc::kVideoRotation_90, &frame2)); + } + + // Test constructing an image from a YUY2 buffer rotated 180 degrees. + void ConstructYuy2Rotate180() { + T frame2; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, + kWidth, kHeight, webrtc::kVideoRotation_180, + &frame2)); + } + + // Test constructing an image from a YUY2 buffer rotated 270 degrees. + void ConstructYuy2Rotate270() { + T frame2; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, + kWidth, kHeight, webrtc::kVideoRotation_270, + &frame2)); + } + + // Test 1 pixel edge case image I420 buffer. + void ConstructI4201Pixel() { + T frame; + uint8_t pixel[3] = {1, 2, 3}; + for (int i = 0; i < repeat_; ++i) { + EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 1, 1, 1, 1, pixel, + sizeof(pixel), 0, webrtc::kVideoRotation_0)); + } + const uint8_t* y = pixel; + const uint8_t* u = y + 1; + const uint8_t* v = u + 1; + EXPECT_TRUE(IsEqual(frame, 1, 1, y, 1, u, 1, v, 1, 0)); + } + + // Test 5 pixel edge case image. 
+ void ConstructI4205Pixel() { + T frame; + uint8_t pixels5x5[5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2]; + memset(pixels5x5, 1, 5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2); + for (int i = 0; i < repeat_; ++i) { + EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 5, 5, 5, 5, pixels5x5, + sizeof(pixels5x5), 0, + webrtc::kVideoRotation_0)); + } + EXPECT_EQ(5, frame.width()); + EXPECT_EQ(5, frame.height()); + EXPECT_EQ(5, frame.video_frame_buffer()->StrideY()); + EXPECT_EQ(3, frame.video_frame_buffer()->StrideU()); + EXPECT_EQ(3, frame.video_frame_buffer()->StrideV()); + } + + // Test constructing an image from an I420 buffer with horizontal cropping. + void ConstructI420CropHorizontal() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + ASSERT_TRUE(LoadFrame(kImageFilename, kYuvExtension, + cricket::FOURCC_I420, kWidth, kHeight, + kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, + &frame2)); + EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, kWidth / 8, 0, 0)); + } + + // Test constructing an image from a YUY2 buffer with horizontal cropping. + void ConstructYuy2CropHorizontal() { + T frame1, frame2; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, + &frame1)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, + kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, + &frame2)); + EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, kWidth / 8, 0, 0)); + } + + // Test constructing an image from an ARGB buffer with horizontal cropping. + void ConstructARGBCropHorizontal() { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, + kWidth, kHeight); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, + kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, + &frame2)); + EXPECT_TRUE(IsEqualWithCrop(frame2, *frame1, kWidth / 8, 0, 2)); + } + + // Test constructing an image from an I420 buffer, cropping top and bottom. + void ConstructI420CropVertical() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + ASSERT_TRUE(LoadFrame(LoadSample(kImageFilename, kYuvExtension).get(), + cricket::FOURCC_I420, kWidth, kHeight, + kWidth, kHeight * 3 / 4, webrtc::kVideoRotation_0, + &frame2)); + EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, 0, kHeight / 8, 0)); + } + + // Test constructing an image from I420 synonymous formats. + void ConstructI420Aliases() { + T frame1, frame2, frame3; + ASSERT_TRUE(LoadFrame(LoadSample(kImageFilename, kYuvExtension), + cricket::FOURCC_I420, kWidth, kHeight, + &frame1)); + ASSERT_TRUE(LoadFrame(kImageFilename, kYuvExtension, + cricket::FOURCC_IYUV, kWidth, kHeight, + &frame2)); + ASSERT_TRUE(LoadFrame(kImageFilename, kYuvExtension, + cricket::FOURCC_YU12, kWidth, kHeight, + &frame3)); + EXPECT_TRUE(IsEqual(frame1, frame2, 0)); + EXPECT_TRUE(IsEqual(frame1, frame3, 0)); + } + + // Test constructing an image from an I420 MJPG buffer. + void ConstructMjpgI420() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + ASSERT_TRUE(LoadFrame(kJpeg420Filename, kJpegExtension, + cricket::FOURCC_MJPG, kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(frame1, frame2, 32)); + } + + // Test constructing an image from an I422 MJPG buffer. 
+ void ConstructMjpgI422() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + ASSERT_TRUE(LoadFrame(LoadSample(kJpeg422Filename, kJpegExtension).get(), + cricket::FOURCC_MJPG, kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(frame1, frame2, 32)); + } + + // Test constructing an image from an I444 MJPG buffer. + void ConstructMjpgI444() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + ASSERT_TRUE(LoadFrame(LoadSample(kJpeg444Filename, kJpegExtension), + cricket::FOURCC_MJPG, kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(frame1, frame2, 32)); + } + + // Test constructing an image from an I444 MJPG buffer. + void ConstructMjpgI411() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + ASSERT_TRUE(LoadFrame(kJpeg411Filename, kJpegExtension, + cricket::FOURCC_MJPG, kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsEqual(frame1, frame2, 32)); + } + + // Test constructing an image from an I400 MJPG buffer. + // TODO(fbarchard): Stronger compare on chroma. Compare agaisnt a grey image. + void ConstructMjpgI400() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + ASSERT_TRUE(LoadFrame(kJpeg400Filename, kJpegExtension, + cricket::FOURCC_MJPG, kWidth, kHeight, &frame2)); + EXPECT_TRUE(IsPlaneEqual("y", frame1.video_frame_buffer()->DataY(), + frame1.video_frame_buffer()->StrideY(), + frame2.video_frame_buffer()->DataY(), + frame2.video_frame_buffer()->StrideY(), + kWidth, kHeight, 32)); + EXPECT_TRUE(IsEqual(frame1, frame2, 128)); + } + + // Test constructing an image from an I420 MJPG buffer. + void ValidateFrame(const char* name, + const char* extension, + uint32_t fourcc, + int data_adjust, + int size_adjust, + bool expected_result) { + T frame; + std::unique_ptr ms(LoadSample(name, extension)); + ASSERT_TRUE(ms.get() != NULL); + const uint8_t* sample = + reinterpret_cast(ms.get()->GetBuffer()); + size_t sample_size; + ms->GetSize(&sample_size); + // Optional adjust size to test invalid size. + size_t data_size = sample_size + data_adjust; + + // Allocate a buffer with end page aligned. + const int kPadToHeapSized = 16 * 1024 * 1024; + std::unique_ptr page_buffer( + new uint8_t[((data_size + kPadToHeapSized + 4095) & ~4095)]); + uint8_t* data_ptr = page_buffer.get(); + if (!data_ptr) { + LOG(LS_WARNING) << "Failed to allocate memory for ValidateFrame test."; + EXPECT_FALSE(expected_result); // NULL is okay if failure was expected. + return; + } + data_ptr += kPadToHeapSized + (-(static_cast(data_size)) & 4095); + memcpy(data_ptr, sample, std::min(data_size, sample_size)); + for (int i = 0; i < repeat_; ++i) { + EXPECT_EQ(expected_result, frame.Validate(fourcc, kWidth, kHeight, + data_ptr, + sample_size + size_adjust)); + } + } + + // Test validate for I420 MJPG buffer. + void ValidateMjpgI420() { + ValidateFrame(kJpeg420Filename, kJpegExtension, + cricket::FOURCC_MJPG, 0, 0, true); + } + + // Test validate for I422 MJPG buffer. + void ValidateMjpgI422() { + ValidateFrame(kJpeg422Filename, kJpegExtension, + cricket::FOURCC_MJPG, 0, 0, true); + } + + // Test validate for I444 MJPG buffer. + void ValidateMjpgI444() { + ValidateFrame(kJpeg444Filename, kJpegExtension, + cricket::FOURCC_MJPG, 0, 0, true); + } + + // Test validate for I411 MJPG buffer. + void ValidateMjpgI411() { + ValidateFrame(kJpeg411Filename, kJpegExtension, + cricket::FOURCC_MJPG, 0, 0, true); + } + + // Test validate for I400 MJPG buffer. 
+ void ValidateMjpgI400() { + ValidateFrame(kJpeg400Filename, kJpegExtension, + cricket::FOURCC_MJPG, 0, 0, true); + } + + // Test validate for I420 buffer. + void ValidateI420() { + ValidateFrame(kImageFilename, kYuvExtension, + cricket::FOURCC_I420, 0, 0, true); + } + + // Test validate for I420 buffer where size is too small + void ValidateI420SmallSize() { + ValidateFrame(kImageFilename, kYuvExtension, + cricket::FOURCC_I420, 0, -16384, false); + } + + // Test validate for I420 buffer where size is too large (16 MB) + // Will produce warning but pass. + void ValidateI420LargeSize() { + ValidateFrame(kImageFilename, kYuvExtension, + cricket::FOURCC_I420, 16000000, 16000000, + true); + } + + // Test validate for I420 buffer where size is 1 GB (not reasonable). + void ValidateI420HugeSize() { +#ifndef WIN32 // TODO(fbarchard): Reenable when fixing bug 9603762. + ValidateFrame(kImageFilename, kYuvExtension, + cricket::FOURCC_I420, 1000000000u, + 1000000000u, false); +#endif + } + + // The following test that Validate crashes if the size is greater than the + // actual buffer size. + // TODO(fbarchard): Consider moving a filter into the capturer/plugin. +#if defined(_MSC_VER) && !defined(NDEBUG) + int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep) { + if (code == EXCEPTION_ACCESS_VIOLATION) { + LOG(LS_INFO) << "Caught EXCEPTION_ACCESS_VIOLATION as expected."; + return EXCEPTION_EXECUTE_HANDLER; + } else { + LOG(LS_INFO) << "Did not catch EXCEPTION_ACCESS_VIOLATION. Unexpected."; + return EXCEPTION_CONTINUE_SEARCH; + } + } + + // Test validate fails for truncated MJPG data buffer. If ValidateFrame + // crashes the exception handler will return and unittest passes with OK. + void ValidateMjpgI420InvalidSize() { + __try { + ValidateFrame(kJpeg420Filename, kJpegExtension, + cricket::FOURCC_MJPG, -16384, 0, false); + FAIL() << "Validate was expected to cause EXCEPTION_ACCESS_VIOLATION."; + } __except(ExceptionFilter(GetExceptionCode(), GetExceptionInformation())) { + return; // Successfully crashed in ValidateFrame. + } + } + + // Test validate fails for truncated I420 buffer. + void ValidateI420InvalidSize() { + __try { + ValidateFrame(kImageFilename, kYuvExtension, + cricket::FOURCC_I420, -16384, 0, false); + FAIL() << "Validate was expected to cause EXCEPTION_ACCESS_VIOLATION."; + } __except(ExceptionFilter(GetExceptionCode(), GetExceptionInformation())) { + return; // Successfully crashed in ValidateFrame. + } + } +#endif + + // Test constructing an image from a YUY2 buffer (and synonymous formats). + void ConstructYuy2Aliases() { + T frame1, frame2, frame3, frame4; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, + &frame1)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUVS, + kWidth, kHeight, &frame3)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUYV, + kWidth, kHeight, &frame4)); + EXPECT_TRUE(IsEqual(frame1, frame2, 0)); + EXPECT_TRUE(IsEqual(frame1, frame3, 0)); + EXPECT_TRUE(IsEqual(frame1, frame4, 0)); + } + + // Test constructing an image from a UYVY buffer (and synonymous formats). 
+ void ConstructUyvyAliases() { + T frame1, frame2, frame3, frame4; + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_UYVY, kWidth, kHeight, + &frame1)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, + kWidth, kHeight, &frame2)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_2VUY, + kWidth, kHeight, &frame3)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_HDYC, + kWidth, kHeight, &frame4)); + EXPECT_TRUE(IsEqual(frame1, frame2, 0)); + EXPECT_TRUE(IsEqual(frame1, frame3, 0)); + EXPECT_TRUE(IsEqual(frame1, frame4, 0)); + } + + // Test creating a copy. + void ConstructCopy() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + for (int i = 0; i < repeat_; ++i) { + EXPECT_TRUE(frame2.Init(frame1)); + } + EXPECT_TRUE(IsEqual(frame1, frame2, 0)); + } + + // Test creating a copy and check that it just increments the refcount. + void ConstructCopyIsRef() { + T frame1, frame2; + ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); + for (int i = 0; i < repeat_; ++i) { + EXPECT_TRUE(frame2.Init(frame1)); + } + EXPECT_TRUE(IsEqual(frame1, frame2, 0)); + EXPECT_EQ(frame1.video_frame_buffer(), frame2.video_frame_buffer()); + } + + // Test constructing an image from a YUY2 buffer with a range of sizes. + // Only tests that conversion does not crash or corrupt heap. + void ConstructYuy2AllSizes() { + T frame1, frame2; + for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { + for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { + std::unique_ptr ms( + CreateYuv422Sample(cricket::FOURCC_YUY2, width, height)); + ASSERT_TRUE(ms.get() != NULL); + EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, width, height, + &frame1)); + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, + width, height, &frame2)); + EXPECT_TRUE(IsEqual(frame1, frame2, 0)); + } + } + } + + // Test constructing an image from a ARGB buffer with a range of sizes. + // Only tests that conversion does not crash or corrupt heap. + void ConstructARGBAllSizes() { + for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { + for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_ARGB, width, height)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, + width, height); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, + width, height, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 64)); + } + } + // Test a practical window size for screencasting usecase. + const int kOddWidth = 1228; + const int kOddHeight = 260; + for (int j = 0; j < 2; ++j) { + for (int i = 0; i < 2; ++i) { + std::unique_ptr ms( + CreateRgbSample(cricket::FOURCC_ARGB, kOddWidth + i, kOddHeight + j)); + ASSERT_TRUE(ms.get() != NULL); + std::unique_ptr frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, + kOddWidth + i, kOddHeight + j); + ASSERT_TRUE(frame1); + T frame2; + EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, + kOddWidth + i, kOddHeight + j, &frame2)); + EXPECT_TRUE(IsEqual(*frame1, frame2, 64)); + } + } + } + + ////////////////////// + // Conversion tests // + ////////////////////// + + // Test converting from I420 to I422. 
+  void ConvertToI422Buffer() {
+    T frame1, frame2;
+    size_t out_size = kWidth * kHeight * 2;
+    std::unique_ptr<uint8_t[]> buf(new uint8_t[out_size + kAlignment]);
+    uint8_t* y = ALIGNP(buf.get(), kAlignment);
+    uint8_t* u = y + kWidth * kHeight;
+    uint8_t* v = u + (kWidth / 2) * kHeight;
+    ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
+    for (int i = 0; i < repeat_; ++i) {
+      EXPECT_EQ(0, libyuv::I420ToI422(frame1.video_frame_buffer()->DataY(),
+                                      frame1.video_frame_buffer()->StrideY(),
+                                      frame1.video_frame_buffer()->DataU(),
+                                      frame1.video_frame_buffer()->StrideU(),
+                                      frame1.video_frame_buffer()->DataV(),
+                                      frame1.video_frame_buffer()->StrideV(),
+                                      y, kWidth,
+                                      u, kWidth / 2,
+                                      v, kWidth / 2,
+                                      kWidth, kHeight));
+    }
+    EXPECT_TRUE(frame2.Init(cricket::FOURCC_I422, kWidth, kHeight, kWidth,
+                            kHeight, y, out_size, 1, 1, 0,
+                            webrtc::kVideoRotation_0));
+    EXPECT_TRUE(IsEqual(frame1, frame2, 1));
+  }
+
+  int repeat_;
+};
+
+#endif  // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_
diff --git a/webrtc/media/engine/webrtcvideocapturer.cc b/webrtc/media/engine/webrtcvideocapturer.cc
index 7fe47d92eb..2b1b62bedd 100644
--- a/webrtc/media/engine/webrtcvideocapturer.cc
+++ b/webrtc/media/engine/webrtcvideocapturer.cc
@@ -355,7 +355,7 @@ void WebRtcVideoCapturer::OnIncomingCapturedFrame(
 
   OnFrame(cricket::WebRtcVideoFrame(
               sample.video_frame_buffer(), sample.rotation(),
-              sample.render_time_ms() * rtc::kNumMicrosecsPerMillisec),
+              sample.render_time_ms() * rtc::kNumMicrosecsPerMillisec, 0),
           sample.width(), sample.height());
 }
 
diff --git a/webrtc/media/engine/webrtcvideoengine2.cc b/webrtc/media/engine/webrtcvideoengine2.cc
index 11cecacc0e..4e22a487a2 100644
--- a/webrtc/media/engine/webrtcvideoengine2.cc
+++ b/webrtc/media/engine/webrtcvideoengine2.cc
@@ -2510,10 +2510,10 @@ void WebRtcVideoChannel2::WebRtcVideoReceiveStream::OnFrame(
     return;
   }
 
-  sink_->OnFrame(
-      WebRtcVideoFrame(frame.video_frame_buffer(), frame.rotation(),
-                       frame.render_time_ms() * rtc::kNumNanosecsPerMicrosec,
-                       frame.timestamp()));
+  WebRtcVideoFrame render_frame(
+      frame.video_frame_buffer(), frame.rotation(),
+      frame.render_time_ms() * rtc::kNumNanosecsPerMicrosec, frame.timestamp());
+  sink_->OnFrame(render_frame);
 }
 
 bool WebRtcVideoChannel2::WebRtcVideoReceiveStream::IsDefaultStream() const {
diff --git a/webrtc/media/engine/webrtcvideoengine2.h b/webrtc/media/engine/webrtcvideoengine2.h
index ec999e167a..4255b2d00e 100644
--- a/webrtc/media/engine/webrtcvideoengine2.h
+++ b/webrtc/media/engine/webrtcvideoengine2.h
@@ -26,11 +26,11 @@
 #include "webrtc/media/base/videosourceinterface.h"
 #include "webrtc/call.h"
 #include "webrtc/media/base/mediaengine.h"
-#include "webrtc/media/base/videoframe.h"
 #include "webrtc/media/engine/webrtcvideochannelfactory.h"
 #include "webrtc/media/engine/webrtcvideodecoderfactory.h"
 #include "webrtc/media/engine/webrtcvideoencoderfactory.h"
 #include "webrtc/transport.h"
+#include "webrtc/video_frame.h"
 #include "webrtc/video_receive_stream.h"
 #include "webrtc/video_send_stream.h"
 
diff --git a/webrtc/media/engine/webrtcvideoframe.cc b/webrtc/media/engine/webrtcvideoframe.cc
new file mode 100644
index 0000000000..c478c0a74d
--- /dev/null
+++ b/webrtc/media/engine/webrtcvideoframe.cc
@@ -0,0 +1,154 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/media/engine/webrtcvideoframe.h"
+
+#include "libyuv/convert.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/media/base/videocapturer.h"
+#include "webrtc/media/base/videocommon.h"
+#include "webrtc/video_frame.h"
+
+namespace cricket {
+
+WebRtcVideoFrame::WebRtcVideoFrame()
+    : timestamp_us_(0), rotation_(webrtc::kVideoRotation_0) {}
+
+WebRtcVideoFrame::WebRtcVideoFrame(
+    const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
+    webrtc::VideoRotation rotation,
+    int64_t timestamp_us,
+    uint32_t transport_frame_id)
+    : video_frame_buffer_(buffer),
+      timestamp_us_(timestamp_us),
+      transport_frame_id_(transport_frame_id),
+      rotation_(rotation) {}
+
+WebRtcVideoFrame::WebRtcVideoFrame(
+    const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
+    webrtc::VideoRotation rotation,
+    int64_t timestamp_us)
+    : WebRtcVideoFrame(buffer, rotation, timestamp_us, 0) {}
+
+WebRtcVideoFrame::WebRtcVideoFrame(
+    const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
+    int64_t time_stamp_ns,
+    webrtc::VideoRotation rotation)
+    : WebRtcVideoFrame(buffer,
+                       rotation,
+                       time_stamp_ns / rtc::kNumNanosecsPerMicrosec,
+                       0) {}
+
+WebRtcVideoFrame::~WebRtcVideoFrame() {}
+
+bool WebRtcVideoFrame::Init(uint32_t format,
+                            int w,
+                            int h,
+                            int dw,
+                            int dh,
+                            uint8_t* sample,
+                            size_t sample_size,
+                            int64_t time_stamp_ns,
+                            webrtc::VideoRotation rotation) {
+  return Reset(format, w, h, dw, dh, sample, sample_size,
+               time_stamp_ns / rtc::kNumNanosecsPerMicrosec, rotation,
+               true /*apply_rotation*/);
+}
+
+int WebRtcVideoFrame::width() const {
+  return video_frame_buffer_ ? video_frame_buffer_->width() : 0;
+}
+
+int WebRtcVideoFrame::height() const {
+  return video_frame_buffer_ ? video_frame_buffer_->height() : 0;
+}
+
+const rtc::scoped_refptr<webrtc::VideoFrameBuffer>&
+WebRtcVideoFrame::video_frame_buffer() const {
+  return video_frame_buffer_;
+}
+
+uint32_t WebRtcVideoFrame::transport_frame_id() const {
+  return transport_frame_id_;
+}
+
+int64_t WebRtcVideoFrame::timestamp_us() const {
+  return timestamp_us_;
+}
+
+void WebRtcVideoFrame::set_timestamp_us(int64_t time_us) {
+  timestamp_us_ = time_us;
+}
+
+webrtc::VideoRotation WebRtcVideoFrame::rotation() const {
+  return rotation_;
+}
+
+bool WebRtcVideoFrame::Reset(uint32_t format,
+                             int w,
+                             int h,
+                             int dw,
+                             int dh,
+                             uint8_t* sample,
+                             size_t sample_size,
+                             int64_t timestamp_us,
+                             webrtc::VideoRotation rotation,
+                             bool apply_rotation) {
+  if (!Validate(format, w, h, sample, sample_size)) {
+    return false;
+  }
+  // Translate aliases to standard enums (e.g., IYUV -> I420).
+  format = CanonicalFourCC(format);
+
+  // Set up a new buffer.
+  // TODO(fbarchard): Support lazy allocation.
+  int new_width = dw;
+  int new_height = dh;
+  // If the frame is rotated, swap width and height.
+  if (apply_rotation && (rotation == 90 || rotation == 270)) {
+    new_width = dh;
+    new_height = dw;
+  }
+
+  rtc::scoped_refptr<webrtc::I420Buffer> buffer =
+      webrtc::I420Buffer::Create(new_width, new_height);
+  video_frame_buffer_ = buffer;
+  rotation_ = apply_rotation ? webrtc::kVideoRotation_0 : rotation;
+
+  int horiz_crop = ((w - dw) / 2) & ~1;
+  // ARGB on Windows has negative height.
+  // The sample's layout in memory is normal, so just correct the crop.
+  int vert_crop = ((abs(h) - dh) / 2) & ~1;
+  // Conversion functions expect negative height to flip the image.
+  int idh = (h < 0) ? -dh : dh;
+  int r = libyuv::ConvertToI420(
+      sample, sample_size,
+      buffer->MutableDataY(), buffer->StrideY(),
+      buffer->MutableDataU(), buffer->StrideU(),
+      buffer->MutableDataV(), buffer->StrideV(),
+      horiz_crop, vert_crop, w, h, dw, idh,
+      static_cast<libyuv::RotationMode>(
+          apply_rotation ? rotation : webrtc::kVideoRotation_0),
+      format);
+  if (r) {
+    LOG(LS_ERROR) << "Error parsing format: " << GetFourccName(format)
+                  << " return code : " << r;
+    return false;
+  }
+  timestamp_us_ = timestamp_us;
+  return true;
+}
+
+void WebRtcVideoFrame::InitToEmptyBuffer(int w, int h) {
+  video_frame_buffer_ = webrtc::I420Buffer::Create(w, h);
+  rotation_ = webrtc::kVideoRotation_0;
+}
+
+}  // namespace cricket
diff --git a/webrtc/media/engine/webrtcvideoframe.h b/webrtc/media/engine/webrtcvideoframe.h
index 3c36b005d6..79ad571236 100644
--- a/webrtc/media/engine/webrtcvideoframe.h
+++ b/webrtc/media/engine/webrtcvideoframe.h
@@ -8,10 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-// TODO(nisse): Deprecated, replace cricket::WebRtcVideoFrame with
-// webrtc::VideoFrame everywhere, then delete this file. See
-// https://bugs.chromium.org/p/webrtc/issues/detail?id=5682.
-
 #ifndef WEBRTC_MEDIA_ENGINE_WEBRTCVIDEOFRAME_H_
 #define WEBRTC_MEDIA_ENGINE_WEBRTCVIDEOFRAME_H_
 
@@ -26,22 +22,100 @@
 
 namespace cricket {
 
+// TODO(nisse): This class will be deleted when the cricket::VideoFrame and
+// webrtc::VideoFrame classes are merged. See
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5682. Try to use only the
+// preferred constructor, and the non-deprecated methods of the VideoFrame base
+// class.
 class WebRtcVideoFrame : public VideoFrame {
  public:
-  WebRtcVideoFrame() : VideoFrame() {}
-  WebRtcVideoFrame(const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
-                   webrtc::VideoRotation rotation,
-                   int64_t timestamp_us)
-      : VideoFrame(buffer, rotation, timestamp_us) {}
+  // TODO(nisse): Deprecated. Using the default constructor violates the
+  // reasonable assumption that video_frame_buffer() returns a valid buffer.
+  WebRtcVideoFrame();
+
+  // Preferred constructor.
   WebRtcVideoFrame(const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
                    webrtc::VideoRotation rotation,
                    int64_t timestamp_us,
-                   uint32_t transport_frame_id)
-      : VideoFrame(buffer, rotation, timestamp_us) {
-    // For now, transport_frame_id and rtp timestamp are the same.
-    // TODO(nisse): Must be handled differently for QUIC.
-    set_timestamp(transport_frame_id);
-  }
+                   uint32_t transport_frame_id);
+
+  // Alternative constructor for callers that do not know or care about the
+  // transport_frame_id; it is set to zero.
+  WebRtcVideoFrame(const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
+                   webrtc::VideoRotation rotation,
+                   int64_t timestamp_us);
+
+  // TODO(nisse): Deprecated, delete as soon as all callers have switched to the
+  // above constructor with microsecond timestamp.
+  WebRtcVideoFrame(const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
+                   int64_t timestamp_ns,
+                   webrtc::VideoRotation rotation);
+
+  ~WebRtcVideoFrame();
+
+  // TODO(nisse): Init (and its helpers Reset and Validate) are used
+  // only by the LoadFrame function used in the VideoFrame unittests.
+  // Rewrite tests, and delete this function.
+
+  // Creates a frame from a raw sample with FourCC "format" and size "w" x "h".
+  // "h" can be negative indicating a vertically flipped image.
+  // "dh" is destination height if cropping is desired and is always positive.
+  // Returns "true" if successful.
+  bool Init(uint32_t format,
+            int w,
+            int h,
+            int dw,
+            int dh,
+            uint8_t* sample,
+            size_t sample_size,
+            int64_t timestamp_ns,
+            webrtc::VideoRotation rotation);
+
+  void InitToEmptyBuffer(int w, int h);
+
+  int width() const override;
+  int height() const override;
+
+  const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& video_frame_buffer()
+      const override;
+
+  uint32_t transport_frame_id() const override;
+
+  int64_t timestamp_us() const override;
+  void set_timestamp_us(int64_t time_us) override;
+
+  webrtc::VideoRotation rotation() const override;
+
+ protected:
+  // Creates a frame from a raw sample with FourCC |format| and size |w| x |h|.
+  // |h| can be negative indicating a vertically flipped image.
+  // |dw| is destination width; can be less than |w| if cropping is desired.
+  // |dh| is destination height, like |dw|, but must be a positive number.
+  // Returns true if successful.
+  bool Reset(uint32_t format,
+             int w,
+             int h,
+             int dw,
+             int dh,
+             uint8_t* sample,
+             size_t sample_size,
+             int64_t timestamp_us,
+             webrtc::VideoRotation rotation,
+             bool apply_rotation);
+
+ private:
+  // Tests mutate |rotation_|, so the base test class is a friend.
+  friend class WebRtcVideoFrameTest;
+
+  // An opaque reference counted handle that stores the pixel data.
+  rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
+  int64_t timestamp_us_;
+  uint32_t transport_frame_id_;
+  webrtc::VideoRotation rotation_;
+
+  // This is mutable as the calculation is expensive but, once calculated, it
+  // remains const.
+  mutable std::unique_ptr<VideoFrame> rotated_frame_;
 };
 
 }  // namespace cricket
diff --git a/webrtc/media/engine/webrtcvideoframe_unittest.cc b/webrtc/media/engine/webrtcvideoframe_unittest.cc
new file mode 100644
index 0000000000..2385e44ab2
--- /dev/null
+++ b/webrtc/media/engine/webrtcvideoframe_unittest.cc
@@ -0,0 +1,157 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include
+
+#include
+
+#include "webrtc/media/base/videoframe_unittest.h"
+#include "webrtc/media/engine/webrtcvideoframe.h"
+#include "webrtc/test/fake_texture_frame.h"
+
+namespace cricket {
+
+class WebRtcVideoFrameTest : public VideoFrameTest<WebRtcVideoFrame> {
+ public:
+  WebRtcVideoFrameTest() {}
+
+  void SetFrameRotation(WebRtcVideoFrame* frame,
+                        webrtc::VideoRotation rotation) {
+    frame->rotation_ = rotation;
+  }
+};
+
+#define TEST_WEBRTCVIDEOFRAME(X) \
+  TEST_F(WebRtcVideoFrameTest, X) { VideoFrameTest<WebRtcVideoFrame>::X(); }
+
+TEST_WEBRTCVIDEOFRAME(ConstructI420)
+TEST_WEBRTCVIDEOFRAME(ConstructI422)
+TEST_WEBRTCVIDEOFRAME(ConstructYuy2)
+TEST_WEBRTCVIDEOFRAME(ConstructYuy2Unaligned)
+TEST_WEBRTCVIDEOFRAME(ConstructYuy2Wide)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12)
+TEST_WEBRTCVIDEOFRAME(ConstructUyvy)
+TEST_WEBRTCVIDEOFRAME(ConstructM420)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12)
+TEST_WEBRTCVIDEOFRAME(ConstructABGR)
+TEST_WEBRTCVIDEOFRAME(ConstructARGB)
+TEST_WEBRTCVIDEOFRAME(ConstructARGBWide)
+TEST_WEBRTCVIDEOFRAME(ConstructBGRA)
+TEST_WEBRTCVIDEOFRAME(Construct24BG)
+TEST_WEBRTCVIDEOFRAME(ConstructRaw)
+
+TEST_WEBRTCVIDEOFRAME(ConstructI420Mirror)
+TEST_WEBRTCVIDEOFRAME(ConstructI420Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructI420Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructI420Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructI420Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructUYVYRotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructUYVYRotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructUYVYRotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructUYVYRotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructYUY2Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructYUY2Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructYUY2Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructYUY2Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructI4201Pixel)
+TEST_WEBRTCVIDEOFRAME(ConstructI4205Pixel)
+// TODO(juberti): WebRtcVideoFrame does not support horizontal crop.
+// Re-evaluate once it supports 3 independent planes, since we might want to
+// just Init normally and then crop by adjusting pointers.
+// TEST_WEBRTCVIDEOFRAME(ConstructI420CropHorizontal)
+TEST_WEBRTCVIDEOFRAME(ConstructI420CropVertical)
+// TODO(juberti): WebRtcVideoFrame is not currently refcounted.
+// TEST_WEBRTCVIDEOFRAME(ConstructCopy)
+// TEST_WEBRTCVIDEOFRAME(ConstructCopyIsRef)
+// TODO(fbarchard): Implement Jpeg
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI420)
+TEST_WEBRTCVIDEOFRAME(ConstructMjpgI422)
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI444)
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI411)
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI400)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI420)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI422)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI444)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI411)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI400)
+TEST_WEBRTCVIDEOFRAME(ValidateI420)
+TEST_WEBRTCVIDEOFRAME(ValidateI420SmallSize)
+TEST_WEBRTCVIDEOFRAME(ValidateI420LargeSize)
+TEST_WEBRTCVIDEOFRAME(ValidateI420HugeSize)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI420InvalidSize)
+// TEST_WEBRTCVIDEOFRAME(ValidateI420InvalidSize)
+
+// TODO(fbarchard): WebRtcVideoFrame does not support odd sizes.
+// Re-evaluate once WebRTC switches to libyuv
+// TEST_WEBRTCVIDEOFRAME(ConstructYuy2AllSizes)
+// TEST_WEBRTCVIDEOFRAME(ConstructARGBAllSizes)
+// TEST_WEBRTCVIDEOFRAME(ConvertToI422Buffer)
+// TEST_WEBRTCVIDEOFRAME(ConstructARGBBlackWhitePixel)
+
+TEST_F(WebRtcVideoFrameTest, TextureInitialValues) {
+  webrtc::test::FakeNativeHandle* dummy_handle =
+      new webrtc::test::FakeNativeHandle();
+  webrtc::NativeHandleBuffer* buffer =
+      new rtc::RefCountedObject<webrtc::test::FakeNativeHandleBuffer>(
+          dummy_handle, 640, 480);
+
+  WebRtcVideoFrame frame(buffer, webrtc::kVideoRotation_0, 20);
+  EXPECT_EQ(dummy_handle, frame.video_frame_buffer()->native_handle());
+  EXPECT_EQ(640, frame.width());
+  EXPECT_EQ(480, frame.height());
+  EXPECT_EQ(20, frame.timestamp_us());
+  frame.set_timestamp_us(40);
+  EXPECT_EQ(40, frame.timestamp_us());
+}
+
+TEST_F(WebRtcVideoFrameTest, ApplyRotationToFrame) {
+  WebRtcVideoFrame applied0;
+  EXPECT_TRUE(IsNull(applied0));
+  EXPECT_TRUE(LoadFrame(CreateYuvSample(kWidth, kHeight, 12).get(), FOURCC_I420,
+                        kWidth, kHeight, &applied0));
+
+  // Claim that this frame needs to be rotated by 90 degrees.
+  SetFrameRotation(&applied0, webrtc::kVideoRotation_90);
+  EXPECT_EQ(applied0.rotation(), webrtc::kVideoRotation_90);
+
+  // Apply the rotation to frame 1. The output should differ from frame 1.
+  WebRtcVideoFrame applied90(
+      webrtc::I420Buffer::Rotate(applied0.video_frame_buffer(),
+                                 applied0.rotation()),
+      webrtc::kVideoRotation_0, applied0.timestamp_us());
+
+  EXPECT_EQ(applied90.rotation(), webrtc::kVideoRotation_0);
+  EXPECT_FALSE(IsEqual(applied0, applied90, 0));
+
+  // Claim that frame 2 needs to be rotated by another 270 degrees. The output
+  // of rotating frame 2 should be the same as frame 1.
+ SetFrameRotation(&applied90, webrtc::kVideoRotation_270); + WebRtcVideoFrame applied360( + webrtc::I420Buffer::Rotate(applied90.video_frame_buffer(), + applied90.rotation()), + webrtc::kVideoRotation_0, applied90.timestamp_us()); + EXPECT_EQ(applied360.rotation(), webrtc::kVideoRotation_0); + EXPECT_TRUE(IsEqual(applied0, applied360, 0)); +} + +} // namespace cricket diff --git a/webrtc/media/media.gyp b/webrtc/media/media.gyp index c18e3113a7..4325b151cc 100644 --- a/webrtc/media/media.gyp +++ b/webrtc/media/media.gyp @@ -60,6 +60,7 @@ 'base/videocapturerfactory.h', 'base/videocommon.cc', 'base/videocommon.h', + 'base/videoframe.cc', 'base/videoframe.h', 'base/videosourcebase.cc', 'base/videosourcebase.h', @@ -81,6 +82,7 @@ 'engine/webrtcvideoencoderfactory.h', 'engine/webrtcvideoengine2.cc', 'engine/webrtcvideoengine2.h', + 'engine/webrtcvideoframe.cc', 'engine/webrtcvideoframe.h', 'engine/webrtcvoe.h', 'engine/webrtcvoiceengine.cc', diff --git a/webrtc/sdk/objc/Framework/Classes/avfoundationvideocapturer.mm b/webrtc/sdk/objc/Framework/Classes/avfoundationvideocapturer.mm index 47180934b2..f104b9dc46 100644 --- a/webrtc/sdk/objc/Framework/Classes/avfoundationvideocapturer.mm +++ b/webrtc/sdk/objc/Framework/Classes/avfoundationvideocapturer.mm @@ -26,7 +26,6 @@ #include "webrtc/base/bind.h" #include "webrtc/base/checks.h" -#include "webrtc/base/logging.h" #include "webrtc/base/thread.h" #include "webrtc/common_video/include/corevideo_frame_buffer.h" #include "webrtc/common_video/rotation.h"
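
For reviewers skimming the revert, here is a small usage sketch of the restored cricket::WebRtcVideoFrame API. It is not part of this CL: the function name, the sink parameter, and the literal values are illustrative assumptions, while the four-argument constructor, the zero transport_frame_id convention, and the header paths mirror the call sites updated in the patch above.

// Usage sketch (illustrative only): wrap a pixel buffer in the restored
// cricket::WebRtcVideoFrame and hand it to a sink, the way the updated call
// sites in videobroadcaster.cc and webrtcvideocapturer.cc do.
#include "webrtc/media/base/videosinkinterface.h"
#include "webrtc/media/engine/webrtcvideoframe.h"

// Hypothetical helper, not part of the patch.
void DeliverBufferToSink(
    const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
    int64_t timestamp_us,
    rtc::VideoSinkInterface<cricket::VideoFrame>* sink) {
  // Callers that do not know or care about the transport frame id pass 0,
  // matching the call sites updated in this revert.
  sink->OnFrame(cricket::WebRtcVideoFrame(buffer, webrtc::kVideoRotation_0,
                                          timestamp_us,
                                          0 /* transport_frame_id */));
}

A real caller, such as VideoBroadcaster::OnFrame in this patch, takes the buffer, rotation, and timestamp from the incoming frame rather than hard-coding them.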