diff --git a/api/video/encoded_image.h b/api/video/encoded_image.h
index 717489f0e1..fb3288f2f9 100644
--- a/api/video/encoded_image.h
+++ b/api/video/encoded_image.h
@@ -62,9 +62,8 @@ class RTC_EXPORT EncodedImage {
   const webrtc::ColorSpace* ColorSpace() const {
     return color_space_ ? &*color_space_ : nullptr;
   }
-  void SetColorSpace(const webrtc::ColorSpace* color_space) {
-    color_space_ =
-        color_space ? absl::make_optional(*color_space) : absl::nullopt;
+  void SetColorSpace(const absl::optional<webrtc::ColorSpace>& color_space) {
+    color_space_ = color_space;
   }
 
   size_t size() const { return size_; }
diff --git a/api/video/video_frame.cc b/api/video/video_frame.cc
index c91aeda124..4075bf7c37 100644
--- a/api/video/video_frame.cc
+++ b/api/video/video_frame.cc
@@ -61,7 +61,7 @@ VideoFrame::Builder& VideoFrame::Builder::set_rotation(VideoRotation rotation) {
 }
 
 VideoFrame::Builder& VideoFrame::Builder::set_color_space(
-    const ColorSpace& color_space) {
+    const absl::optional<ColorSpace>& color_space) {
   color_space_ = color_space;
   return *this;
 }
diff --git a/api/video/video_frame.h b/api/video/video_frame.h
index f83345ac8f..aa377c2762 100644
--- a/api/video/video_frame.h
+++ b/api/video/video_frame.h
@@ -47,7 +47,7 @@ class RTC_EXPORT VideoFrame {
     Builder& set_timestamp_rtp(uint32_t timestamp_rtp);
     Builder& set_ntp_time_ms(int64_t ntp_time_ms);
     Builder& set_rotation(VideoRotation rotation);
-    Builder& set_color_space(const ColorSpace& color_space);
+    Builder& set_color_space(const absl::optional<ColorSpace>& color_space);
     Builder& set_color_space(const ColorSpace* color_space);
     Builder& set_id(uint16_t id);
     Builder& set_partial_frame_description(
@@ -140,12 +140,9 @@ class RTC_EXPORT VideoFrame {
   void set_rotation(VideoRotation rotation) { rotation_ = rotation; }
 
   // Get color space when available.
-  const ColorSpace* color_space() const {
-    return color_space_ ? &*color_space_ : nullptr;
-  }
-  void set_color_space(ColorSpace* color_space) {
-    color_space_ =
-        color_space ? absl::make_optional(*color_space) : absl::nullopt;
+  const absl::optional<ColorSpace>& color_space() const { return color_space_; }
+  void set_color_space(const absl::optional<ColorSpace>& color_space) {
+    color_space_ = color_space;
   }
 
   const PartialFrameDescription* partial_frame_description() const {
diff --git a/call/rtp_payload_params_unittest.cc b/call/rtp_payload_params_unittest.cc
index 5343cf974b..c46d9756fd 100644
--- a/call/rtp_payload_params_unittest.cc
+++ b/call/rtp_payload_params_unittest.cc
@@ -126,7 +126,7 @@ TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) {
   ColorSpace color_space(
       ColorSpace::PrimaryID::kSMPTE170M, ColorSpace::TransferID::kSMPTE170M,
       ColorSpace::MatrixID::kSMPTE170M, ColorSpace::RangeID::kFull);
-  encoded_image.SetColorSpace(&color_space);
+  encoded_image.SetColorSpace(color_space);
   header = params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
 
   EXPECT_EQ(kVideoRotation_90, header.rotation);
diff --git a/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc b/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
index 8fa2d496f5..f93c463033 100644
--- a/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
+++ b/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
@@ -123,7 +123,7 @@ TEST_F(TestH264Impl, MAYBE_EncodedColorSpaceEqualsInputColorSpace) {
   VideoFrame input_frame_w_color_space =
       VideoFrame::Builder()
           .set_video_frame_buffer(input_frame->video_frame_buffer())
-          .set_color_space(&color_space)
+          .set_color_space(color_space)
           .build();
 
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
@@ -141,7 +141,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedColorSpaceEqualsEncodedColorSpace) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // Add color space to encoded frame.
   ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
-  encoded_frame.SetColorSpace(&color_space);
+  encoded_frame.SetColorSpace(color_space);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
index 000fc3a9be..d7258c9c28 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -283,11 +283,12 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int LibvpxVp8Decoder::ReturnFrame(const vpx_image_t* img,
-                                  uint32_t timestamp,
-                                  int64_t ntp_time_ms,
-                                  int qp,
-                                  const ColorSpace* explicit_color_space) {
+int LibvpxVp8Decoder::ReturnFrame(
+    const vpx_image_t* img,
+    uint32_t timestamp,
+    int64_t ntp_time_ms,
+    int qp,
+    const webrtc::ColorSpace* explicit_color_space) {
   if (img == NULL) {
     // Decoder OK and NULL image => No show frame
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
index 0db4c3b221..a6ddfefb0b 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
@@ -54,7 +54,7 @@ class LibvpxVp8Decoder : public VideoDecoder {
                   uint32_t timeStamp,
                   int64_t ntp_time_ms,
                   int qp,
-                  const ColorSpace* explicit_color_space);
+                  const webrtc::ColorSpace* explicit_color_space);
 
   const bool use_postproc_arm_;
   I420BufferPool buffer_pool_;
diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 12e13a900b..06d9448cb4 100644
--- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -193,7 +193,7 @@ TEST_F(TestVp8Impl, EncodedColorSpaceEqualsInputColorSpace) {
   VideoFrame input_frame_w_color_space =
       VideoFrame::Builder()
           .set_video_frame_buffer(input_frame->video_frame_buffer())
-          .set_color_space(&color_space)
+          .set_color_space(color_space)
           .build();
 
   EncodeAndWaitForFrame(input_frame_w_color_space, &encoded_frame,
@@ -229,7 +229,7 @@ TEST_F(TestVp8Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
 
   // Encoded frame with explicit color space information.
   ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
-  encoded_frame.SetColorSpace(&color_space);
+  encoded_frame.SetColorSpace(color_space);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
diff --git a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
index d03e196894..7b5f07b844 100644
--- a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
@@ -185,7 +185,7 @@ TEST_F(TestVp9Impl, EncodedColorSpaceEqualsInputColorSpace) {
   VideoFrame input_frame_w_hdr =
       VideoFrame::Builder()
          .set_video_frame_buffer(input_frame->video_frame_buffer())
-          .set_color_space(&color_space)
+          .set_color_space(color_space)
          .build();
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             encoder_->Encode(input_frame_w_hdr, nullptr, nullptr));
@@ -215,7 +215,7 @@ TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
 
   // Encoded frame with explicit color space information.
   ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/true);
-  encoded_frame.SetColorSpace(&color_space);
+  encoded_frame.SetColorSpace(color_space);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index d86f78f1c1..3bf03fa4b7 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -1480,11 +1480,12 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img,
-                                uint32_t timestamp,
-                                int64_t ntp_time_ms,
-                                int qp,
-                                const ColorSpace* explicit_color_space) {
+int VP9DecoderImpl::ReturnFrame(
+    const vpx_image_t* img,
+    uint32_t timestamp,
+    int64_t ntp_time_ms,
+    int qp,
+    const webrtc::ColorSpace* explicit_color_space) {
   if (img == nullptr) {
     // Decoder OK and nullptr image => No show frame.
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1533,7 +1534,7 @@ int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img,
           .set_ntp_time_ms(ntp_time_ms)
           .set_rotation(webrtc::kVideoRotation_0);
   if (explicit_color_space) {
-    builder.set_color_space(explicit_color_space);
+    builder.set_color_space(*explicit_color_space);
   } else {
     builder.set_color_space(
         ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth));
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.h b/modules/video_coding/codecs/vp9/vp9_impl.h
index a2dab26010..bb87dbf0d0 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -177,7 +177,7 @@ class VP9DecoderImpl : public VP9Decoder {
                   uint32_t timestamp,
                   int64_t ntp_time_ms,
                   int qp,
-                  const ColorSpace* explicit_color_space);
+                  const webrtc::ColorSpace* explicit_color_space);
 
   // Memory pool used to share buffers between libvpx and webrtc.
   Vp9FrameBufferPool frame_buffer_pool_;
diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc
index 4f5f9d476d..1e0b647b23 100644
--- a/modules/video_coding/frame_object.cc
+++ b/modules/video_coding/frame_object.cc
@@ -73,9 +73,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
   // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
   // (HEVC)).
   rotation_ = last_packet->video_header.rotation;
-  SetColorSpace(last_packet->video_header.color_space
-                    ? &last_packet->video_header.color_space.value()
-                    : nullptr);
+  SetColorSpace(last_packet->video_header.color_space);
   _rotation_set = true;
   content_type_ = last_packet->video_header.content_type;
   if (last_packet->video_header.video_timing.flags !=
diff --git a/style-guide.md b/style-guide.md
index 391c45b644..2a35fdc5d1 100644
--- a/style-guide.md
+++ b/style-guide.md
@@ -77,18 +77,6 @@ instead of | use
 
 See [the source](api/array_view.h) for more detailed docs.
 
-### `absl::optional` as function argument
-
-`absl::optional<T>` is generally a good choice when you want to pass a
-possibly missing `T` to a function—provided of course that `T`
-is a type that it makes sense to pass by value.
-
-However, when you want to avoid pass-by-value, generally **do not pass
-`const absl::optional<T>&`; use `const T*` instead.** `const
-absl::optional<T>&` forces the caller to store the `T` in an
-`absl::optional<T>`; `const T*`, on the other hand, makes no
-assumptions about how the `T` is stored.
-
 ### sigslot
 
 sigslot is a lightweight library that adds a signal/slot language
diff --git a/test/frame_generator_capturer.cc b/test/frame_generator_capturer.cc
index d086b60891..49ce40a440 100644
--- a/test/frame_generator_capturer.cc
+++ b/test/frame_generator_capturer.cc
@@ -150,7 +150,7 @@ void FrameGeneratorCapturer::InsertFrame() {
     frame->set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
    frame->set_rotation(fake_rotation_);
    if (fake_color_space_) {
-      frame->set_color_space(&fake_color_space_.value());
+      frame->set_color_space(fake_color_space_);
    }
    if (first_frame_capture_time_ == -1) {
      first_frame_capture_time_ = frame->ntp_time_ms();
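
For readers skimming the patch, the caller-side pattern it converges on looks roughly like the sketch below. The helper name `TagFrameColorSpace` and its includes are illustrative assumptions, not code from this change; only the `set_color_space(const absl::optional<ColorSpace>&)` overload it calls comes from the diff above.

```cpp
#include "absl/types/optional.h"
#include "api/video/color_space.h"
#include "api/video/video_frame.h"

// Hypothetical helper: forwards a possibly-missing ColorSpace as an
// absl::optional instead of unwrapping it into a raw pointer.
void TagFrameColorSpace(
    webrtc::VideoFrame& frame,
    const absl::optional<webrtc::ColorSpace>& color_space) {
  // Old pattern (before this patch):
  //   frame.set_color_space(color_space ? &*color_space : nullptr);
  frame.set_color_space(color_space);
}
```

Passing the optional straight through removes the `&*optional` / `nullptr` unwrapping that the pointer-based setters previously forced on every call site, which is also why the corresponding style-guide advice is deleted in this change.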