diff --git a/BUILD.gn b/BUILD.gn
index f5b34591d2..ae2f7166fa 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -452,6 +452,7 @@ rtc_source_set("webrtc_common") {
     "api/video:video_bitrate_allocation",
     "api/video:video_frame",
     "rtc_base:checks",
+    "rtc_base:deprecation",
     "//third_party/abseil-cpp/absl/strings",
   ]
 }
diff --git a/api/fec_controller.h b/api/fec_controller.h
index 59e86ccedb..6cc46dd674 100644
--- a/api/fec_controller.h
+++ b/api/fec_controller.h
@@ -74,8 +74,9 @@ class FecController {
                              int64_t round_trip_time_ms) = 0;
 
   // Informs of encoded output.
-  virtual void UpdateWithEncodedData(size_t encoded_image_length,
-                                     FrameType encoded_image_frametype) = 0;
+  virtual void UpdateWithEncodedData(
+      size_t encoded_image_length,
+      VideoFrameType encoded_image_frametype) = 0;
 
   // Returns whether this FEC Controller needs Loss Vector Mask as input.
   virtual bool UseLossVectorMask() = 0;
diff --git a/api/test/mock_video_encoder.h b/api/test/mock_video_encoder.h
index 62f17bab88..15e39142a1 100644
--- a/api/test/mock_video_encoder.h
+++ b/api/test/mock_video_encoder.h
@@ -40,7 +40,7 @@ class MockVideoEncoder : public VideoEncoder {
   MOCK_METHOD3(Encode,
                int32_t(const VideoFrame& inputImage,
                        const CodecSpecificInfo* codecSpecificInfo,
-                       const std::vector<FrameType>* frame_types));
+                       const std::vector<VideoFrameType>* frame_types));
   MOCK_METHOD1(RegisterEncodeCompleteCallback,
                int32_t(EncodedImageCallback* callback));
   MOCK_METHOD0(Release, int32_t());
diff --git a/api/test/videocodec_test_stats.h b/api/test/videocodec_test_stats.h
index 5de015f6f4..c9eada36bf 100644
--- a/api/test/videocodec_test_stats.h
+++ b/api/test/videocodec_test_stats.h
@@ -43,7 +43,7 @@ class VideoCodecTestStats {
     size_t encode_time_us = 0;
     size_t target_bitrate_kbps = 0;
     size_t length_bytes = 0;
-    webrtc::FrameType frame_type = kVideoFrameDelta;
+    webrtc::VideoFrameType frame_type = kVideoFrameDelta;
 
     // Layering.
     size_t spatial_idx = 0;
diff --git a/api/video/encoded_image.h b/api/video/encoded_image.h
index 1d3bd46ef5..804e06bba0 100644
--- a/api/video/encoded_image.h
+++ b/api/video/encoded_image.h
@@ -115,7 +115,7 @@ class RTC_EXPORT EncodedImage {
   // NTP time of the capture time in local timebase in milliseconds.
int64_t ntp_time_ms_ = 0; int64_t capture_time_ms_ = 0; - FrameType _frameType = kVideoFrameDelta; + VideoFrameType _frameType = kVideoFrameDelta; VideoRotation rotation_ = kVideoRotation_0; VideoContentType content_type_ = VideoContentType::UNSPECIFIED; bool _completeFrame = false; diff --git a/api/video_codecs/test/video_encoder_software_fallback_wrapper_unittest.cc b/api/video_codecs/test/video_encoder_software_fallback_wrapper_unittest.cc index 9adcd77bb9..ec861dde3c 100644 --- a/api/video_codecs/test/video_encoder_software_fallback_wrapper_unittest.cc +++ b/api/video_codecs/test/video_encoder_software_fallback_wrapper_unittest.cc @@ -95,7 +95,7 @@ class VideoEncoderSoftwareFallbackWrapperTest : public ::testing::Test { } int32_t Encode(const VideoFrame& frame, const CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types) override { + const std::vector* frame_types) override { ++encode_count_; if (encode_complete_callback_ && encode_return_code_ == WEBRTC_VIDEO_CODEC_OK) { @@ -181,7 +181,7 @@ void VideoEncoderSoftwareFallbackWrapperTest::EncodeFrame(int expected_ret) { rtc::scoped_refptr buffer = I420Buffer::Create(codec_.width, codec_.height); I420Buffer::SetBlack(buffer); - std::vector types(1, kVideoFrameKey); + std::vector types(1, kVideoFrameKey); frame_ = absl::make_unique(VideoFrame::Builder() @@ -293,7 +293,7 @@ TEST_F(VideoEncoderSoftwareFallbackWrapperTest, EXPECT_EQ(&callback2, fake_encoder_->encode_complete_callback_); // Encoding a frame using the fallback should arrive at the new callback. - std::vector types(1, kVideoFrameKey); + std::vector types(1, kVideoFrameKey); frame_->set_timestamp(frame_->timestamp() + 1000); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, fallback_wrapper_->Encode(*frame_, &types)); diff --git a/api/video_codecs/video_encoder.cc b/api/video_codecs/video_encoder.cc index c28c181c80..a8b6f427c6 100644 --- a/api/video_codecs/video_encoder.cc +++ b/api/video_codecs/video_encoder.cc @@ -104,13 +104,13 @@ VideoEncoder::EncoderInfo::~EncoderInfo() = default; // Implementations of the interface must implement one or the other of these two // methods. int32_t VideoEncoder::Encode(const VideoFrame& frame, - const std::vector* frame_types) { + const std::vector* frame_types) { return Encode(frame, nullptr, frame_types); } int32_t VideoEncoder::Encode(const VideoFrame& frame, const CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types) { + const std::vector* frame_types) { return Encode(frame, frame_types); } diff --git a/api/video_codecs/video_encoder.h b/api/video_codecs/video_encoder.h index 59dc55fb30..dc68928492 100644 --- a/api/video_codecs/video_encoder.h +++ b/api/video_codecs/video_encoder.h @@ -242,12 +242,12 @@ class RTC_EXPORT VideoEncoder { // WEBRTC_VIDEO_CODEC_MEMORY // WEBRTC_VIDEO_CODEC_ERROR virtual int32_t Encode(const VideoFrame& frame, - const std::vector* frame_types); + const std::vector* frame_types); // TODO(bugs.webrtc.org/10379): Deprecated. Delete, and make above method pure // virtual, as soon as downstream applications are updated. virtual int32_t Encode(const VideoFrame& frame, const CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types); + const std::vector* frame_types); // Inform the encoder about the new target bit rate. 
// diff --git a/api/video_codecs/video_encoder_software_fallback_wrapper.cc b/api/video_codecs/video_encoder_software_fallback_wrapper.cc index c52262feee..4360980ba6 100644 --- a/api/video_codecs/video_encoder_software_fallback_wrapper.cc +++ b/api/video_codecs/video_encoder_software_fallback_wrapper.cc @@ -88,7 +88,7 @@ class VideoEncoderSoftwareFallbackWrapper final : public VideoEncoder { int32_t Release() override; int32_t Encode(const VideoFrame& frame, - const std::vector* frame_types) override; + const std::vector* frame_types) override; int32_t SetRateAllocation(const VideoBitrateAllocation& bitrate_allocation, uint32_t framerate) override; EncoderInfo GetEncoderInfo() const override; @@ -252,7 +252,7 @@ int32_t VideoEncoderSoftwareFallbackWrapper::Release() { int32_t VideoEncoderSoftwareFallbackWrapper::Encode( const VideoFrame& frame, - const std::vector* frame_types) { + const std::vector* frame_types) { if (use_fallback_encoder_) return fallback_encoder_->Encode(frame, frame_types); int32_t ret = encoder_->Encode(frame, frame_types); diff --git a/audio/channel_send.cc b/audio/channel_send.cc index 196911aadd..5951b6b942 100644 --- a/audio/channel_send.cc +++ b/audio/channel_send.cc @@ -55,7 +55,7 @@ constexpr int64_t kMaxRetransmissionWindowMs = 1000; constexpr int64_t kMinRetransmissionWindowMs = 30; MediaTransportEncodedAudioFrame::FrameType -MediaTransportFrameTypeForWebrtcFrameType(webrtc::FrameType frame_type) { +MediaTransportFrameTypeForWebrtcFrameType(webrtc::AudioFrameType frame_type) { switch (frame_type) { case kAudioFrameSpeech: return MediaTransportEncodedAudioFrame::FrameType::kSpeech; @@ -184,7 +184,7 @@ class ChannelSend class ProcessAndEncodeAudioTask; // From AudioPacketizationCallback in the ACM - int32_t SendData(FrameType frameType, + int32_t SendData(AudioFrameType frameType, uint8_t payloadType, uint32_t timeStamp, const uint8_t* payloadData, @@ -196,13 +196,13 @@ class ChannelSend int SetSendRtpHeaderExtension(bool enable, RTPExtensionType type, int id); - int32_t SendRtpAudio(FrameType frameType, + int32_t SendRtpAudio(AudioFrameType frameType, uint8_t payloadType, uint32_t timeStamp, rtc::ArrayView payload, const RTPFragmentationHeader* fragmentation); - int32_t SendMediaTransportAudio(FrameType frameType, + int32_t SendMediaTransportAudio(AudioFrameType frameType, uint8_t payloadType, uint32_t timeStamp, rtc::ArrayView payload, @@ -492,7 +492,7 @@ class ChannelSend::ProcessAndEncodeAudioTask : public rtc::QueuedTask { ChannelSend* const channel_; }; -int32_t ChannelSend::SendData(FrameType frameType, +int32_t ChannelSend::SendData(AudioFrameType frameType, uint8_t payloadType, uint32_t timeStamp, const uint8_t* payloadData, @@ -516,7 +516,7 @@ int32_t ChannelSend::SendData(FrameType frameType, } } -int32_t ChannelSend::SendRtpAudio(FrameType frameType, +int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, uint8_t payloadType, uint32_t timeStamp, rtc::ArrayView payload, @@ -589,7 +589,7 @@ int32_t ChannelSend::SendRtpAudio(FrameType frameType, } int32_t ChannelSend::SendMediaTransportAudio( - FrameType frameType, + AudioFrameType frameType, uint8_t payloadType, uint32_t timeStamp, rtc::ArrayView payload, diff --git a/call/rtp_payload_params_unittest.cc b/call/rtp_payload_params_unittest.cc index 149bd729f5..d96d268411 100644 --- a/call/rtp_payload_params_unittest.cc +++ b/call/rtp_payload_params_unittest.cc @@ -347,7 +347,7 @@ class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test { void ConvertAndCheck(int temporal_index, int64_t 
shared_frame_id,
-                       FrameType frame_type,
+                       VideoFrameType frame_type,
                        LayerSync layer_sync,
                        const std::set<int64_t>& expected_deps,
                        uint16_t width = 0,
diff --git a/common_types.h b/common_types.h
index 2b877d6e5e..2dc1783096 100644
--- a/common_types.h
+++ b/common_types.h
@@ -25,7 +25,9 @@ namespace webrtc {
 
-enum FrameType {
+// TODO(bugs.webrtc.org/6883): This type should be split into separate types for
+// audio and video, and then moved out of this file.
+enum FrameTypeDeprecated {
   kEmptyFrame = 0,
   kAudioFrameSpeech = 1,
   kAudioFrameCN = 2,
@@ -33,6 +35,12 @@
   kVideoFrameDelta = 4,
 };
 
+// Can't use RTC_DEPRECATED until Chromium is updated.
+typedef FrameTypeDeprecated FrameType;
+
+using AudioFrameType = FrameTypeDeprecated;
+using VideoFrameType = FrameTypeDeprecated;
+
 // Statistics for RTCP packet types.
 struct RtcpPacketTypeCounter {
   RtcpPacketTypeCounter()
diff --git a/media/engine/encoder_simulcast_proxy.cc b/media/engine/encoder_simulcast_proxy.cc
index dd35bdbea9..e87e1031b5 100644
--- a/media/engine/encoder_simulcast_proxy.cc
+++ b/media/engine/encoder_simulcast_proxy.cc
@@ -43,8 +43,9 @@ int EncoderSimulcastProxy::InitEncode(const VideoCodec* inst,
   return ret;
 }
 
-int EncoderSimulcastProxy::Encode(const VideoFrame& input_image,
-                                  const std::vector<FrameType>* frame_types) {
+int EncoderSimulcastProxy::Encode(
+    const VideoFrame& input_image,
+    const std::vector<VideoFrameType>* frame_types) {
   return encoder_->Encode(input_image, frame_types);
 }
diff --git a/media/engine/encoder_simulcast_proxy.h b/media/engine/encoder_simulcast_proxy.h
index ce408ac1ba..2574fa9429 100644
--- a/media/engine/encoder_simulcast_proxy.h
+++ b/media/engine/encoder_simulcast_proxy.h
@@ -46,7 +46,7 @@ class EncoderSimulcastProxy : public VideoEncoder {
                  int number_of_cores,
                  size_t max_payload_size) override;
   int Encode(const VideoFrame& input_image,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
   int SetRateAllocation(const VideoBitrateAllocation& bitrate,
                         uint32_t new_framerate) override;
diff --git a/media/engine/encoder_simulcast_proxy_unittest.cc b/media/engine/encoder_simulcast_proxy_unittest.cc
index 15fdaaf9af..62e215d320 100644
--- a/media/engine/encoder_simulcast_proxy_unittest.cc
+++ b/media/engine/encoder_simulcast_proxy_unittest.cc
@@ -48,7 +48,7 @@ class MockEncoder : public VideoEncoder {
       Encode,
      int32_t(const VideoFrame& inputImage,
              const CodecSpecificInfo* codecSpecificInfo,
-              const std::vector<FrameType>* frame_types) /* override */);
+              const std::vector<VideoFrameType>* frame_types) /* override */);
 
   MOCK_CONST_METHOD0(GetEncoderInfo, VideoEncoder::EncoderInfo(void));
 };
diff --git a/media/engine/fake_webrtc_video_engine.cc b/media/engine/fake_webrtc_video_engine.cc
index 32ce1fa154..f275fd3d13 100644
--- a/media/engine/fake_webrtc_video_engine.cc
+++ b/media/engine/fake_webrtc_video_engine.cc
@@ -151,7 +151,7 @@ int32_t FakeWebRtcVideoEncoder::InitEncode(
 int32_t FakeWebRtcVideoEncoder::Encode(
     const webrtc::VideoFrame& inputImage,
     const webrtc::CodecSpecificInfo* codecSpecificInfo,
-    const std::vector<webrtc::FrameType>* frame_types) {
+    const std::vector<webrtc::VideoFrameType>* frame_types) {
   rtc::CritScope lock(&crit_);
   ++num_frames_encoded_;
   init_encode_event_.Set();
diff --git a/media/engine/fake_webrtc_video_engine.h b/media/engine/fake_webrtc_video_engine.h
index 6d06923a9a..08c7bb9fcd 100644
--- a/media/engine/fake_webrtc_video_engine.h
+++ b/media/engine/fake_webrtc_video_engine.h
@@ -88,9 +88,10 @@ class FakeWebRtcVideoEncoder : public
webrtc::VideoEncoder { int32_t InitEncode(const webrtc::VideoCodec* codecSettings, int32_t numberOfCores, size_t maxPayloadSize) override; - int32_t Encode(const webrtc::VideoFrame& inputImage, - const webrtc::CodecSpecificInfo* codecSpecificInfo, - const std::vector* frame_types) override; + int32_t Encode( + const webrtc::VideoFrame& inputImage, + const webrtc::CodecSpecificInfo* codecSpecificInfo, + const std::vector* frame_types) override; int32_t RegisterEncodeCompleteCallback( webrtc::EncodedImageCallback* callback) override; int32_t Release() override; diff --git a/media/engine/simulcast_encoder_adapter.cc b/media/engine/simulcast_encoder_adapter.cc index fc18aa4091..1595cfe17a 100644 --- a/media/engine/simulcast_encoder_adapter.cc +++ b/media/engine/simulcast_encoder_adapter.cc @@ -338,7 +338,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst, int SimulcastEncoderAdapter::Encode( const VideoFrame& input_image, - const std::vector* frame_types) { + const std::vector* frame_types) { RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_); if (!Initialized()) { @@ -375,7 +375,7 @@ int SimulcastEncoderAdapter::Encode( continue; } - std::vector stream_frame_types; + std::vector stream_frame_types; if (send_key_frame) { stream_frame_types.push_back(kVideoFrameKey); streaminfos_[stream_idx].key_frame_request = false; diff --git a/media/engine/simulcast_encoder_adapter.h b/media/engine/simulcast_encoder_adapter.h index a62e8794b3..039ab62a1a 100644 --- a/media/engine/simulcast_encoder_adapter.h +++ b/media/engine/simulcast_encoder_adapter.h @@ -45,7 +45,7 @@ class SimulcastEncoderAdapter : public VideoEncoder { int number_of_cores, size_t max_payload_size) override; int Encode(const VideoFrame& input_image, - const std::vector* frame_types) override; + const std::vector* frame_types) override; int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override; int SetRateAllocation(const VideoBitrateAllocation& bitrate, uint32_t new_framerate) override; diff --git a/media/engine/simulcast_encoder_adapter_unittest.cc b/media/engine/simulcast_encoder_adapter_unittest.cc index 2d575ec7e8..147fe8b3bf 100644 --- a/media/engine/simulcast_encoder_adapter_unittest.cc +++ b/media/engine/simulcast_encoder_adapter_unittest.cc @@ -198,7 +198,7 @@ class MockVideoEncoder : public VideoEncoder { Encode, int32_t(const VideoFrame& inputImage, const CodecSpecificInfo* codecSpecificInfo, - const std::vector* frame_types) /* override */); + const std::vector* frame_types) /* override */); int32_t RegisterEncodeCompleteCallback( EncodedImageCallback* callback) /* override */ { @@ -556,7 +556,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) { .set_timestamp_ms(1000) .set_rotation(kVideoRotation_180) .build(); - std::vector frame_types; + std::vector frame_types; // Encode with three streams. EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200)); @@ -890,7 +890,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, // frame and can't otherwise be modified/resized. 
for (MockVideoEncoder* encoder : helper_->factory()->encoders()) EXPECT_CALL(*encoder, Encode(::testing::Ref(input_frame), _, _)).Times(1); - std::vector frame_types(3, kVideoFrameKey); + std::vector frame_types(3, kVideoFrameKey); EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types)); } @@ -916,7 +916,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) { .set_timestamp_us(0) .set_rotation(kVideoRotation_0) .build(); - std::vector frame_types(3, kVideoFrameKey); + std::vector frame_types(3, kVideoFrameKey); EXPECT_EQ(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE, adapter_->Encode(input_frame, &frame_types)); } @@ -1031,7 +1031,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ActivatesCorrectStreamsInInitEncode) { EXPECT_CALL(*original_encoders[1], Encode(_, _, _)).Times(0); EXPECT_CALL(*original_encoders[2], Encode(_, _, _)).Times(0); - std::vector frame_types; + std::vector frame_types; frame_types.resize(3, kVideoFrameKey); EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types)); } diff --git a/modules/audio_coding/acm2/acm_receiver_unittest.cc b/modules/audio_coding/acm2/acm_receiver_unittest.cc index e5a7684d34..7667b718dc 100644 --- a/modules/audio_coding/acm2/acm_receiver_unittest.cc +++ b/modules/audio_coding/acm2/acm_receiver_unittest.cc @@ -103,7 +103,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback, return num_10ms_frames; } - int SendData(FrameType frame_type, + int SendData(AudioFrameType frame_type, uint8_t payload_type, uint32_t timestamp, const uint8_t* payload_data, @@ -139,7 +139,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback, uint32_t timestamp_; bool packet_sent_; // Set when SendData is called reset when inserting audio. uint32_t last_packet_send_timestamp_; - FrameType last_frame_type_; + AudioFrameType last_frame_type_; }; #if defined(WEBRTC_ANDROID) diff --git a/modules/audio_coding/acm2/acm_send_test.cc b/modules/audio_coding/acm2/acm_send_test.cc index b6110b692f..4c34e41695 100644 --- a/modules/audio_coding/acm2/acm_send_test.cc +++ b/modules/audio_coding/acm2/acm_send_test.cc @@ -123,7 +123,7 @@ std::unique_ptr AcmSendTestOldApi::NextPacket() { // This method receives the callback from ACM when a new packet is produced. int32_t AcmSendTestOldApi::SendData( - FrameType frame_type, + AudioFrameType frame_type, uint8_t payload_type, uint32_t timestamp, const uint8_t* payload_data, diff --git a/modules/audio_coding/acm2/acm_send_test.h b/modules/audio_coding/acm2/acm_send_test.h index 24d230b823..744d0157de 100644 --- a/modules/audio_coding/acm2/acm_send_test.h +++ b/modules/audio_coding/acm2/acm_send_test.h @@ -50,7 +50,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback, std::unique_ptr NextPacket() override; // Inherited from AudioPacketizationCallback. - int32_t SendData(FrameType frame_type, + int32_t SendData(AudioFrameType frame_type, uint8_t payload_type, uint32_t timestamp, const uint8_t* payload_data, @@ -75,7 +75,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback, bool codec_registered_; int test_duration_ms_; // The following member variables are set whenever SendData() is called. 
- FrameType frame_type_; + AudioFrameType frame_type_; int payload_type_; uint32_t timestamp_; uint16_t sequence_number_; diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc index 1547b37198..a4b64b1251 100644 --- a/modules/audio_coding/acm2/audio_coding_module.cc +++ b/modules/audio_coding/acm2/audio_coding_module.cc @@ -393,7 +393,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) { RTPFragmentationHeader my_fragmentation; ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation); - FrameType frame_type; + AudioFrameType frame_type; if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) { frame_type = kEmptyFrame; encoded_info.payload_type = previous_pltype; diff --git a/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/acm2/audio_coding_module_unittest.cc index 4ee9addc69..797b9b1cd2 100644 --- a/modules/audio_coding/acm2/audio_coding_module_unittest.cc +++ b/modules/audio_coding/acm2/audio_coding_module_unittest.cc @@ -104,7 +104,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback { last_payload_type_(-1), last_timestamp_(0) {} - int32_t SendData(FrameType frame_type, + int32_t SendData(AudioFrameType frame_type, uint8_t payload_type, uint32_t timestamp, const uint8_t* payload_data, @@ -129,7 +129,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback { return rtc::checked_cast(last_payload_vec_.size()); } - FrameType last_frame_type() const { + AudioFrameType last_frame_type() const { rtc::CritScope lock(&crit_sect_); return last_frame_type_; } @@ -151,7 +151,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback { private: int num_calls_ RTC_GUARDED_BY(crit_sect_); - FrameType last_frame_type_ RTC_GUARDED_BY(crit_sect_); + AudioFrameType last_frame_type_ RTC_GUARDED_BY(crit_sect_); int last_payload_type_ RTC_GUARDED_BY(crit_sect_); uint32_t last_timestamp_ RTC_GUARDED_BY(crit_sect_); std::vector last_payload_vec_ RTC_GUARDED_BY(crit_sect_); @@ -430,7 +430,7 @@ class AudioCodingModuleTestWithComfortNoiseOldApi // that is contain comfort noise. 
const struct { int ix; - FrameType type; + AudioFrameType type; } expectation[] = { {2, kAudioFrameCN}, {5, kEmptyFrame}, {8, kEmptyFrame}, {11, kAudioFrameCN}, {14, kEmptyFrame}, {17, kEmptyFrame}, diff --git a/modules/audio_coding/include/audio_coding_module.h b/modules/audio_coding/include/audio_coding_module.h index 7e5bf1b549..0621473b68 100644 --- a/modules/audio_coding/include/audio_coding_module.h +++ b/modules/audio_coding/include/audio_coding_module.h @@ -40,7 +40,7 @@ class AudioPacketizationCallback { public: virtual ~AudioPacketizationCallback() {} - virtual int32_t SendData(FrameType frame_type, + virtual int32_t SendData(AudioFrameType frame_type, uint8_t payload_type, uint32_t timestamp, const uint8_t* payload_data, @@ -53,7 +53,7 @@ class ACMVADCallback { public: virtual ~ACMVADCallback() {} - virtual int32_t InFrameType(FrameType frame_type) = 0; + virtual int32_t InFrameType(AudioFrameType frame_type) = 0; }; class AudioCodingModule { diff --git a/modules/audio_coding/neteq/tools/rtp_encode.cc b/modules/audio_coding/neteq/tools/rtp_encode.cc index 14c6e58a9d..443dfd81e9 100644 --- a/modules/audio_coding/neteq/tools/rtp_encode.cc +++ b/modules/audio_coding/neteq/tools/rtp_encode.cc @@ -107,7 +107,7 @@ class Packetizer : public AudioPacketizationCallback { ssrc_(ssrc), timestamp_rate_hz_(timestamp_rate_hz) {} - int32_t SendData(FrameType frame_type, + int32_t SendData(AudioFrameType frame_type, uint8_t payload_type, uint32_t timestamp, const uint8_t* payload_data, diff --git a/modules/audio_coding/test/Channel.cc b/modules/audio_coding/test/Channel.cc index adfc0d52d1..d54faa7c07 100644 --- a/modules/audio_coding/test/Channel.cc +++ b/modules/audio_coding/test/Channel.cc @@ -18,7 +18,7 @@ namespace webrtc { -int32_t Channel::SendData(FrameType frameType, +int32_t Channel::SendData(AudioFrameType frameType, uint8_t payloadType, uint32_t timeStamp, const uint8_t* payloadData, diff --git a/modules/audio_coding/test/Channel.h b/modules/audio_coding/test/Channel.h index 4d7f0b7e12..6a55b065b5 100644 --- a/modules/audio_coding/test/Channel.h +++ b/modules/audio_coding/test/Channel.h @@ -47,7 +47,7 @@ class Channel : public AudioPacketizationCallback { Channel(int16_t chID = -1); ~Channel() override; - int32_t SendData(FrameType frameType, + int32_t SendData(AudioFrameType frameType, uint8_t payloadType, uint32_t timeStamp, const uint8_t* payloadData, diff --git a/modules/audio_coding/test/EncodeDecodeTest.cc b/modules/audio_coding/test/EncodeDecodeTest.cc index 28ee8aaf64..c961fe591a 100644 --- a/modules/audio_coding/test/EncodeDecodeTest.cc +++ b/modules/audio_coding/test/EncodeDecodeTest.cc @@ -33,8 +33,10 @@ TestPacketization::~TestPacketization() { } int32_t TestPacketization::SendData( - const FrameType /* frameType */, const uint8_t payloadType, - const uint32_t timeStamp, const uint8_t* payloadData, + const AudioFrameType /* frameType */, + const uint8_t payloadType, + const uint32_t timeStamp, + const uint8_t* payloadData, const size_t payloadSize, const RTPFragmentationHeader* /* fragmentation */) { _rtpStream->Write(payloadType, timeStamp, _seqNo++, payloadData, payloadSize, diff --git a/modules/audio_coding/test/EncodeDecodeTest.h b/modules/audio_coding/test/EncodeDecodeTest.h index cdfc706eeb..6dc7bc9a15 100644 --- a/modules/audio_coding/test/EncodeDecodeTest.h +++ b/modules/audio_coding/test/EncodeDecodeTest.h @@ -28,7 +28,7 @@ class TestPacketization : public AudioPacketizationCallback { public: TestPacketization(RTPStream *rtpStream, uint16_t frequency); 
~TestPacketization(); - int32_t SendData(const FrameType frameType, + int32_t SendData(const AudioFrameType frameType, const uint8_t payloadType, const uint32_t timeStamp, const uint8_t* payloadData, diff --git a/modules/audio_coding/test/TestAllCodecs.cc b/modules/audio_coding/test/TestAllCodecs.cc index 81b83c048f..52518ac8a5 100644 --- a/modules/audio_coding/test/TestAllCodecs.cc +++ b/modules/audio_coding/test/TestAllCodecs.cc @@ -60,7 +60,7 @@ void TestPack::RegisterReceiverACM(AudioCodingModule* acm) { return; } -int32_t TestPack::SendData(FrameType frame_type, +int32_t TestPack::SendData(AudioFrameType frame_type, uint8_t payload_type, uint32_t timestamp, const uint8_t* payload_data, diff --git a/modules/audio_coding/test/TestAllCodecs.h b/modules/audio_coding/test/TestAllCodecs.h index 3125efeb0b..d8a7711358 100644 --- a/modules/audio_coding/test/TestAllCodecs.h +++ b/modules/audio_coding/test/TestAllCodecs.h @@ -25,7 +25,7 @@ class TestPack : public AudioPacketizationCallback { void RegisterReceiverACM(AudioCodingModule* acm); - int32_t SendData(FrameType frame_type, + int32_t SendData(AudioFrameType frame_type, uint8_t payload_type, uint32_t timestamp, const uint8_t* payload_data, diff --git a/modules/audio_coding/test/TestStereo.cc b/modules/audio_coding/test/TestStereo.cc index 2c71f46813..2fa56de1c6 100644 --- a/modules/audio_coding/test/TestStereo.cc +++ b/modules/audio_coding/test/TestStereo.cc @@ -40,7 +40,7 @@ void TestPackStereo::RegisterReceiverACM(AudioCodingModule* acm) { return; } -int32_t TestPackStereo::SendData(const FrameType frame_type, +int32_t TestPackStereo::SendData(const AudioFrameType frame_type, const uint8_t payload_type, const uint32_t timestamp, const uint8_t* payload_data, diff --git a/modules/audio_coding/test/TestStereo.h b/modules/audio_coding/test/TestStereo.h index da10bf15dd..9a44a10c39 100644 --- a/modules/audio_coding/test/TestStereo.h +++ b/modules/audio_coding/test/TestStereo.h @@ -31,7 +31,7 @@ class TestPackStereo : public AudioPacketizationCallback { void RegisterReceiverACM(AudioCodingModule* acm); - int32_t SendData(const FrameType frame_type, + int32_t SendData(const AudioFrameType frame_type, const uint8_t payload_type, const uint32_t timestamp, const uint8_t* payload_data, diff --git a/modules/audio_coding/test/TestVADDTX.cc b/modules/audio_coding/test/TestVADDTX.cc index 7c04b228c6..b22e97eeef 100644 --- a/modules/audio_coding/test/TestVADDTX.cc +++ b/modules/audio_coding/test/TestVADDTX.cc @@ -33,7 +33,7 @@ ActivityMonitor::ActivityMonitor() { ResetStatistics(); } -int32_t ActivityMonitor::InFrameType(FrameType frame_type) { +int32_t ActivityMonitor::InFrameType(AudioFrameType frame_type) { counter_[frame_type]++; return 0; } diff --git a/modules/audio_coding/test/TestVADDTX.h b/modules/audio_coding/test/TestVADDTX.h index f2358e7799..36d5f95255 100644 --- a/modules/audio_coding/test/TestVADDTX.h +++ b/modules/audio_coding/test/TestVADDTX.h @@ -25,7 +25,7 @@ namespace webrtc { class ActivityMonitor : public ACMVADCallback { public: ActivityMonitor(); - int32_t InFrameType(FrameType frame_type); + int32_t InFrameType(AudioFrameType frame_type); void PrintStatistics(); void ResetStatistics(); void GetStatistics(uint32_t* stats); diff --git a/modules/include/module_common_types.h b/modules/include/module_common_types.h index 26122b16c6..ff4fb72381 100644 --- a/modules/include/module_common_types.h +++ b/modules/include/module_common_types.h @@ -30,7 +30,8 @@ struct WebRtcRTPHeader { RTPVideoHeader video; RTPHeader header; - FrameType 
frameType; + // Used for video only. + VideoFrameType frameType; // NTP time of the capture time in local timebase in milliseconds. int64_t ntp_time_ms; }; diff --git a/modules/rtp_rtcp/source/rtp_format.cc b/modules/rtp_rtcp/source/rtp_format.cc index 0010d90750..7375a63d50 100644 --- a/modules/rtp_rtcp/source/rtp_format.cc +++ b/modules/rtp_rtcp/source/rtp_format.cc @@ -29,7 +29,7 @@ std::unique_ptr RtpPacketizer::Create( PayloadSizeLimits limits, // Codec-specific details. const RTPVideoHeader& rtp_video_header, - FrameType frame_type, + VideoFrameType frame_type, const RTPFragmentationHeader* fragmentation) { switch (type) { case kVideoCodecH264: { diff --git a/modules/rtp_rtcp/source/rtp_format.h b/modules/rtp_rtcp/source/rtp_format.h index 71c7dc5e0e..c32283b72b 100644 --- a/modules/rtp_rtcp/source/rtp_format.h +++ b/modules/rtp_rtcp/source/rtp_format.h @@ -39,7 +39,7 @@ class RtpPacketizer { PayloadSizeLimits limits, // Codec-specific details. const RTPVideoHeader& rtp_video_header, - FrameType frame_type, + VideoFrameType frame_type, const RTPFragmentationHeader* fragmentation); virtual ~RtpPacketizer() = default; @@ -71,7 +71,7 @@ class RtpDepacketizer { const uint8_t* payload; size_t payload_length; - FrameType frame_type; + VideoFrameType frame_type; }; static RtpDepacketizer* Create(VideoCodecType type); diff --git a/modules/rtp_rtcp/source/rtp_format_video_generic.cc b/modules/rtp_rtcp/source/rtp_format_video_generic.cc index 92aada4688..7af8121472 100644 --- a/modules/rtp_rtcp/source/rtp_format_video_generic.cc +++ b/modules/rtp_rtcp/source/rtp_format_video_generic.cc @@ -26,7 +26,7 @@ RtpPacketizerGeneric::RtpPacketizerGeneric( rtc::ArrayView payload, PayloadSizeLimits limits, const RTPVideoHeader& rtp_video_header, - FrameType frame_type) + VideoFrameType frame_type) : remaining_payload_(payload) { BuildHeader(rtp_video_header, frame_type); @@ -72,7 +72,7 @@ bool RtpPacketizerGeneric::NextPacket(RtpPacketToSend* packet) { } void RtpPacketizerGeneric::BuildHeader(const RTPVideoHeader& rtp_video_header, - FrameType frame_type) { + VideoFrameType frame_type) { header_size_ = kGenericHeaderLength; header_[0] = RtpFormatVideoGeneric::kFirstPacketBit; if (frame_type == kVideoFrameKey) { diff --git a/modules/rtp_rtcp/source/rtp_format_video_generic.h b/modules/rtp_rtcp/source/rtp_format_video_generic.h index 982e35a3c9..069f20d4f8 100644 --- a/modules/rtp_rtcp/source/rtp_format_video_generic.h +++ b/modules/rtp_rtcp/source/rtp_format_video_generic.h @@ -38,7 +38,7 @@ class RtpPacketizerGeneric : public RtpPacketizer { RtpPacketizerGeneric(rtc::ArrayView payload, PayloadSizeLimits limits, const RTPVideoHeader& rtp_video_header, - FrameType frametype); + VideoFrameType frametype); ~RtpPacketizerGeneric() override; @@ -52,7 +52,7 @@ class RtpPacketizerGeneric : public RtpPacketizer { private: // Fills header_ and header_size_ members. 
void BuildHeader(const RTPVideoHeader& rtp_video_header, - FrameType frame_type); + VideoFrameType frame_type); uint8_t header_[3]; size_t header_size_; diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.cc b/modules/rtp_rtcp/source/rtp_sender_audio.cc index c049530a96..2f0060354a 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio.cc +++ b/modules/rtp_rtcp/source/rtp_sender_audio.cc @@ -30,7 +30,7 @@ namespace webrtc { namespace { -const char* FrameTypeToString(FrameType frame_type) { +const char* FrameTypeToString(AudioFrameType frame_type) { switch (frame_type) { case kEmptyFrame: return "empty"; @@ -88,7 +88,7 @@ int32_t RTPSenderAudio::RegisterAudioPayload(absl::string_view payload_name, return 0; } -bool RTPSenderAudio::MarkerBit(FrameType frame_type, int8_t payload_type) { +bool RTPSenderAudio::MarkerBit(AudioFrameType frame_type, int8_t payload_type) { rtc::CritScope cs(&send_audio_critsect_); // for audio true for first packet in a speech burst bool marker_bit = false; @@ -131,7 +131,7 @@ bool RTPSenderAudio::MarkerBit(FrameType frame_type, int8_t payload_type) { return marker_bit; } -bool RTPSenderAudio::SendAudio(FrameType frame_type, +bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, int8_t payload_type, uint32_t rtp_timestamp, const uint8_t* payload_data, diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.h b/modules/rtp_rtcp/source/rtp_sender_audio.h index fa5894367d..362dd49be1 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio.h +++ b/modules/rtp_rtcp/source/rtp_sender_audio.h @@ -39,7 +39,7 @@ class RTPSenderAudio { size_t channels, uint32_t rate); - bool SendAudio(FrameType frame_type, + bool SendAudio(AudioFrameType frame_type, int8_t payload_type, uint32_t capture_timestamp, const uint8_t* payload_data, @@ -60,7 +60,7 @@ class RTPSenderAudio { uint16_t duration, bool marker_bit); // set on first packet in talk burst - bool MarkerBit(FrameType frame_type, int8_t payload_type); + bool MarkerBit(AudioFrameType frame_type, int8_t payload_type); private: bool LogAndSendToNetwork(std::unique_ptr packet, diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc index 8b835bdc8f..456b478ff2 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video.cc @@ -54,7 +54,7 @@ void BuildRedPayload(const RtpPacketToSend& media_packet, void AddRtpHeaderExtensions(const RTPVideoHeader& video_header, const absl::optional& playout_delay, - FrameType frame_type, + VideoFrameType frame_type, bool set_video_rotation, bool set_color_space, bool set_frame_marking, @@ -167,7 +167,7 @@ bool IsBaseLayer(const RTPVideoHeader& video_header) { return true; } -const char* FrameTypeToString(FrameType frame_type) { +const char* FrameTypeToString(VideoFrameType frame_type) { switch (frame_type) { case kEmptyFrame: return "empty"; @@ -421,7 +421,7 @@ absl::optional RTPSenderVideo::FlexfecSsrc() const { return absl::nullopt; } -bool RTPSenderVideo::SendVideo(FrameType frame_type, +bool RTPSenderVideo::SendVideo(VideoFrameType frame_type, int8_t payload_type, uint32_t rtp_timestamp, int64_t capture_time_ms, diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h index 9772b867aa..afdca1e264 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video.h +++ b/modules/rtp_rtcp/source/rtp_sender_video.h @@ -59,7 +59,7 @@ class RTPSenderVideo { const WebRtcKeyValueConfig& field_trials); virtual ~RTPSenderVideo(); - bool SendVideo(FrameType frame_type, + bool 
SendVideo(VideoFrameType frame_type, int8_t payload_type, uint32_t capture_timestamp, int64_t capture_time_ms, diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc index 09a5d02b93..1800b9ed68 100644 --- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc +++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc @@ -64,7 +64,7 @@ int NumberOfThreads(int width, int height, int number_of_cores) { return 1; } -FrameType ConvertToVideoFrameType(EVideoFrameType type) { +VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) { switch (type) { case videoFrameTypeIDR: return kVideoFrameKey; @@ -381,9 +381,10 @@ int32_t H264EncoderImpl::SetRateAllocation( return WEBRTC_VIDEO_CODEC_OK; } -int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame, - const CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types) { +int32_t H264EncoderImpl::Encode( + const VideoFrame& input_frame, + const CodecSpecificInfo* codec_specific_info, + const std::vector* frame_types) { if (encoders_.empty()) { ReportError(); return WEBRTC_VIDEO_CODEC_UNINITIALIZED; diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.h b/modules/video_coding/codecs/h264/h264_encoder_impl.h index 75a875894d..36a7f021bb 100644 --- a/modules/video_coding/codecs/h264/h264_encoder_impl.h +++ b/modules/video_coding/codecs/h264/h264_encoder_impl.h @@ -68,7 +68,7 @@ class H264EncoderImpl : public H264Encoder { // passed to the encode complete callback. int32_t Encode(const VideoFrame& frame, const CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types) override; + const std::vector* frame_types) override; EncoderInfo GetEncoderInfo() const override; diff --git a/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h b/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h index 0dd1930914..de010c914e 100644 --- a/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h +++ b/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h @@ -43,7 +43,7 @@ class MultiplexEncoderAdapter : public VideoEncoder { int number_of_cores, size_t max_payload_size) override; int Encode(const VideoFrame& input_image, - const std::vector* frame_types) override; + const std::vector* frame_types) override; int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override; int SetRateAllocation(const VideoBitrateAllocation& bitrate, uint32_t new_framerate) override; diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc index dcba67e1c6..e3eceacb58 100644 --- a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc +++ b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc @@ -115,11 +115,13 @@ MultiplexImageComponentHeader UnpackFrameHeader(const uint8_t* buffer) { ByteReader::ReadBigEndian(buffer + offset); offset += sizeof(uint32_t); + // TODO(nisse): This makes the wire format depend on the numeric values of the + // VideoCodecType and VideoFrameType enum constants. 
frame_header.codec_type = static_cast( ByteReader::ReadBigEndian(buffer + offset)); offset += sizeof(uint8_t); - frame_header.frame_type = static_cast( + frame_header.frame_type = static_cast( ByteReader::ReadBigEndian(buffer + offset)); offset += sizeof(uint8_t); @@ -181,8 +183,8 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease( // key frame so as to decode the whole image without previous frame data. // Thus only when all components are key frames, we can mark the combined // frame as key frame. - if (frame_header.frame_type == FrameType::kVideoFrameDelta) { - combined_image._frameType = FrameType::kVideoFrameDelta; + if (frame_header.frame_type == VideoFrameType::kVideoFrameDelta) { + combined_image._frameType = VideoFrameType::kVideoFrameDelta; } frame_headers.push_back(frame_header); diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h index 9d9be265ba..d3505e444b 100644 --- a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h +++ b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h @@ -67,7 +67,7 @@ struct MultiplexImageComponentHeader { VideoCodecType codec_type; // Indicated the underlying frame is a key frame or delta frame. - FrameType frame_type; + VideoFrameType frame_type; }; const int kMultiplexImageComponentHeaderSize = sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) + diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc index 4b27b181ff..6e3c5e2851 100644 --- a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc +++ b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc @@ -138,12 +138,12 @@ int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst, int MultiplexEncoderAdapter::Encode( const VideoFrame& input_image, - const std::vector* frame_types) { + const std::vector* frame_types) { if (!encoded_complete_callback_) { return WEBRTC_VIDEO_CODEC_UNINITIALIZED; } - std::vector adjusted_frame_types; + std::vector adjusted_frame_types; if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) { adjusted_frame_types.push_back(kVideoFrameKey); } else { diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc index ac637388df..7458006cb0 100644 --- a/modules/video_coding/codecs/test/videoprocessor.cc +++ b/modules/video_coding/codecs/test/videoprocessor.cc @@ -285,9 +285,9 @@ void VideoProcessor::ProcessFrame() { } // Encode. - const std::vector frame_types = - (frame_number == 0) ? std::vector{kVideoFrameKey} - : std::vector{kVideoFrameDelta}; + const std::vector frame_types = + (frame_number == 0) ? 
std::vector{kVideoFrameKey} + : std::vector{kVideoFrameDelta}; const int encode_return_code = encoder_->Encode(input_frame, &frame_types); for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) { FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i); diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc index 1bf42eeece..94b079fef4 100644 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc @@ -737,7 +737,7 @@ size_t LibvpxVp8Encoder::SteadyStateSize(int sid, int tid) { int LibvpxVp8Encoder::Encode(const VideoFrame& frame, const CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types) { + const std::vector* frame_types) { RTC_DCHECK_EQ(frame.width(), codec_.width); RTC_DCHECK_EQ(frame.height(), codec_.height); diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h index 5a2205b880..271055907d 100644 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h @@ -47,7 +47,7 @@ class LibvpxVp8Encoder : public VideoEncoder { int Encode(const VideoFrame& input_image, const CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types) override; + const std::vector* frame_types) override; int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override; diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc index ec687dfab4..771471f415 100644 --- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc +++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc @@ -74,11 +74,11 @@ class TestVp8Impl : public VideoCodecUnitTest { EncodedImage* encoded_frame, CodecSpecificInfo* codec_specific_info, bool keyframe = false) { - std::vector frame_types; + std::vector frame_types; if (keyframe) { - frame_types.emplace_back(FrameType::kVideoFrameKey); + frame_types.emplace_back(VideoFrameType::kVideoFrameKey); } else { - frame_types.emplace_back(FrameType::kVideoFrameDelta); + frame_types.emplace_back(VideoFrameType::kVideoFrameDelta); } EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, &frame_types)); @@ -484,7 +484,7 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) { .Times(2) .WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK)); - auto delta_frame = std::vector{kVideoFrameDelta}; + auto delta_frame = std::vector{kVideoFrameDelta}; encoder.Encode(*NextInputFrame(), nullptr, &delta_frame); } diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc index a342b39d22..177c839fe1 100644 --- a/modules/video_coding/codecs/vp9/vp9_impl.cc +++ b/modules/video_coding/codecs/vp9/vp9_impl.cc @@ -714,7 +714,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) { int VP9EncoderImpl::Encode(const VideoFrame& input_image, const CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types) { + const std::vector* frame_types) { if (!inited_) { return WEBRTC_VIDEO_CODEC_UNINITIALIZED; } diff --git a/modules/video_coding/codecs/vp9/vp9_impl.h b/modules/video_coding/codecs/vp9/vp9_impl.h index 1e9979fdd8..62aeeb577d 100644 --- a/modules/video_coding/codecs/vp9/vp9_impl.h +++ b/modules/video_coding/codecs/vp9/vp9_impl.h @@ -45,7 +45,7 @@ class VP9EncoderImpl : public VP9Encoder { int Encode(const VideoFrame& input_image, const 
CodecSpecificInfo* codec_specific_info, - const std::vector* frame_types) override; + const std::vector* frame_types) override; int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override; diff --git a/modules/video_coding/encoded_frame.h b/modules/video_coding/encoded_frame.h index eeaea1532e..25c2f9b459 100644 --- a/modules/video_coding/encoded_frame.h +++ b/modules/video_coding/encoded_frame.h @@ -67,7 +67,7 @@ class VCMEncodedFrame : protected EncodedImage { /** * Get frame type */ - webrtc::FrameType FrameType() const { return _frameType; } + webrtc::VideoFrameType FrameType() const { return _frameType; } /** * Get frame rotation */ diff --git a/modules/video_coding/fec_controller_default.cc b/modules/video_coding/fec_controller_default.cc index 3bceecc509..4502f2cd5d 100644 --- a/modules/video_coding/fec_controller_default.cc +++ b/modules/video_coding/fec_controller_default.cc @@ -177,7 +177,7 @@ void FecControllerDefault::SetProtectionMethod(bool enable_fec, } void FecControllerDefault::UpdateWithEncodedData( const size_t encoded_image_length, - const FrameType encoded_image_frametype) { + const VideoFrameType encoded_image_frametype) { const size_t encoded_length = encoded_image_length; CritScope lock(&crit_sect_); if (encoded_length > 0) { diff --git a/modules/video_coding/fec_controller_default.h b/modules/video_coding/fec_controller_default.h index 1db39a403b..f4bbf22bac 100644 --- a/modules/video_coding/fec_controller_default.h +++ b/modules/video_coding/fec_controller_default.h @@ -44,8 +44,9 @@ class FecControllerDefault : public FecController { uint8_t fraction_lost, std::vector loss_mask_vector, int64_t round_trip_time_ms) override; - void UpdateWithEncodedData(const size_t encoded_image_length, - const FrameType encoded_image_frametype) override; + void UpdateWithEncodedData( + const size_t encoded_image_length, + const VideoFrameType encoded_image_frametype) override; bool UseLossVectorMask() override; float GetProtectionOverheadRateThreshold(); diff --git a/modules/video_coding/frame_buffer.cc b/modules/video_coding/frame_buffer.cc index c4ba8203e6..a4c92adfc8 100644 --- a/modules/video_coding/frame_buffer.cc +++ b/modules/video_coding/frame_buffer.cc @@ -29,7 +29,7 @@ VCMFrameBuffer::VCMFrameBuffer() VCMFrameBuffer::~VCMFrameBuffer() {} -webrtc::FrameType VCMFrameBuffer::FrameType() const { +webrtc::VideoFrameType VCMFrameBuffer::FrameType() const { return _sessionInfo.FrameType(); } diff --git a/modules/video_coding/frame_buffer.h b/modules/video_coding/frame_buffer.h index 18f40fcfa7..4b5ef7fe1f 100644 --- a/modules/video_coding/frame_buffer.h +++ b/modules/video_coding/frame_buffer.h @@ -70,7 +70,7 @@ class VCMFrameBuffer : public VCMEncodedFrame { int64_t LatestPacketTimeMs() const; - webrtc::FrameType FrameType() const; + webrtc::VideoFrameType FrameType() const; private: void SetState(VCMFrameBufferStateEnum state); // Set state of frame diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc index 268adfc7c2..884204e34e 100644 --- a/modules/video_coding/frame_object.cc +++ b/modules/video_coding/frame_object.cc @@ -121,7 +121,7 @@ int RtpFrameObject::times_nacked() const { return times_nacked_; } -FrameType RtpFrameObject::frame_type() const { +VideoFrameType RtpFrameObject::frame_type() const { return frame_type_; } diff --git a/modules/video_coding/frame_object.h b/modules/video_coding/frame_object.h index 8b9ad9203c..c39a896e5a 100644 --- a/modules/video_coding/frame_object.h +++ 
b/modules/video_coding/frame_object.h @@ -36,7 +36,7 @@ class RtpFrameObject : public EncodedFrame { uint16_t first_seq_num() const; uint16_t last_seq_num() const; int times_nacked() const; - enum FrameType frame_type() const; + VideoFrameType frame_type() const; VideoCodecType codec_type() const; int64_t ReceivedTime() const override; int64_t RenderTime() const override; @@ -49,7 +49,7 @@ class RtpFrameObject : public EncodedFrame { void AllocateBitstreamBuffer(size_t frame_size); rtc::scoped_refptr packet_buffer_; - enum FrameType frame_type_; + VideoFrameType frame_type_; VideoCodecType codec_type_; uint16_t first_seq_num_; uint16_t last_seq_num_; diff --git a/modules/video_coding/jitter_buffer_unittest.cc b/modules/video_coding/jitter_buffer_unittest.cc index 2651565ae0..2863efb85b 100644 --- a/modules/video_coding/jitter_buffer_unittest.cc +++ b/modules/video_coding/jitter_buffer_unittest.cc @@ -362,7 +362,7 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam, return jitter_buffer_->InsertPacket(packet, &retransmitted); } - VCMFrameBufferEnum InsertFrame(FrameType frame_type) { + VCMFrameBufferEnum InsertFrame(VideoFrameType frame_type) { stream_generator_->GenerateFrame( frame_type, (frame_type != kEmptyFrame) ? 1 : 0, (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds()); @@ -371,7 +371,7 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam, return ret; } - VCMFrameBufferEnum InsertFrames(int num_frames, FrameType frame_type) { + VCMFrameBufferEnum InsertFrames(int num_frames, VideoFrameType frame_type) { VCMFrameBufferEnum ret_for_all = kNoError; for (int i = 0; i < num_frames; ++i) { VCMFrameBufferEnum ret = InsertFrame(frame_type); diff --git a/modules/video_coding/packet.cc b/modules/video_coding/packet.cc index b50e97576d..1113a6def6 100644 --- a/modules/video_coding/packet.cc +++ b/modules/video_coding/packet.cc @@ -46,7 +46,7 @@ VCMPacket::VCMPacket(const uint8_t* ptr, size_t size, const RTPHeader& rtp_header, const RTPVideoHeader& videoHeader, - FrameType frame_type, + VideoFrameType frame_type, int64_t ntp_time_ms) : payloadType(rtp_header.payloadType), timestamp(rtp_header.timestamp), diff --git a/modules/video_coding/packet.h b/modules/video_coding/packet.h index 944aed53dd..835bfdfdf4 100644 --- a/modules/video_coding/packet.h +++ b/modules/video_coding/packet.h @@ -32,7 +32,7 @@ class VCMPacket { size_t size, const RTPHeader& rtp_header, const RTPVideoHeader& video_header, - FrameType frame_type, + VideoFrameType frame_type, int64_t ntp_time_ms); ~VCMPacket(); @@ -58,7 +58,7 @@ class VCMPacket { bool markerBit; int timesNacked; - FrameType frameType; + VideoFrameType frameType; VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete. bool insertStartCode; // True if a start code should be inserted before this diff --git a/modules/video_coding/receiver_unittest.cc b/modules/video_coding/receiver_unittest.cc index ca50dfaba7..29bb209a57 100644 --- a/modules/video_coding/receiver_unittest.cc +++ b/modules/video_coding/receiver_unittest.cc @@ -56,7 +56,7 @@ class TestVCMReceiver : public ::testing::Test { return receiver_.InsertPacket(packet); } - int32_t InsertFrame(FrameType frame_type, bool complete) { + int32_t InsertFrame(VideoFrameType frame_type, bool complete) { int num_of_packets = complete ? 1 : 2; stream_generator_->GenerateFrame( frame_type, (frame_type != kEmptyFrame) ? 
num_of_packets : 0, @@ -322,7 +322,7 @@ class SimulatedClockWithFrames : public SimulatedClock { void GenerateAndInsertFrame(int64_t render_timestamp_ms) { VCMPacket packet; - stream_generator_->GenerateFrame(FrameType::kVideoFrameKey, + stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 1, // media packets 0, // empty packets render_timestamp_ms); diff --git a/modules/video_coding/session_info.h b/modules/video_coding/session_info.h index d66101c95f..47eccce9da 100644 --- a/modules/video_coding/session_info.h +++ b/modules/video_coding/session_info.h @@ -54,7 +54,7 @@ class VCMSessionInfo { int NumPackets() const; bool HaveFirstPacket() const; bool HaveLastPacket() const; - webrtc::FrameType FrameType() const { return frame_type_; } + webrtc::VideoFrameType FrameType() const { return frame_type_; } int LowSequenceNumber() const; // Returns highest sequence number, media or empty. @@ -103,7 +103,7 @@ class VCMSessionInfo { void UpdateCompleteSession(); bool complete_; - webrtc::FrameType frame_type_; + webrtc::VideoFrameType frame_type_; // Packets in this frame. PacketList packets_; int empty_seq_num_low_; diff --git a/modules/video_coding/test/stream_generator.cc b/modules/video_coding/test/stream_generator.cc index e23aa8727b..022edb6dc5 100644 --- a/modules/video_coding/test/stream_generator.cc +++ b/modules/video_coding/test/stream_generator.cc @@ -29,7 +29,7 @@ void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) { memset(packet_buffer_, 0, sizeof(packet_buffer_)); } -void StreamGenerator::GenerateFrame(FrameType type, +void StreamGenerator::GenerateFrame(VideoFrameType type, int num_media_packets, int num_empty_packets, int64_t time_ms) { @@ -54,7 +54,7 @@ VCMPacket StreamGenerator::GeneratePacket(uint16_t sequence_number, unsigned int size, bool first_packet, bool marker_bit, - FrameType type) { + VideoFrameType type) { EXPECT_LT(size, kMaxPacketSize); VCMPacket packet; packet.seqNum = sequence_number; diff --git a/modules/video_coding/test/stream_generator.h b/modules/video_coding/test/stream_generator.h index 150fa79606..548654ec53 100644 --- a/modules/video_coding/test/stream_generator.h +++ b/modules/video_coding/test/stream_generator.h @@ -34,7 +34,7 @@ class StreamGenerator { // |time_ms| denotes the timestamp you want to put on the frame, and the unit // is millisecond. GenerateFrame will translate |time_ms| into a 90kHz // timestamp and put it on the frame. 
- void GenerateFrame(FrameType type, + void GenerateFrame(VideoFrameType type, int num_media_packets, int num_empty_packets, int64_t time_ms); @@ -56,7 +56,7 @@ class StreamGenerator { unsigned int size, bool first_packet, bool marker_bit, - FrameType type); + VideoFrameType type); std::list::iterator GetPacketIterator(int index); diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc index 2d696544ac..edef45d17a 100644 --- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc +++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc @@ -294,8 +294,8 @@ void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) { void SimulcastTestFixtureImpl::RunActiveStreamsTest( const std::vector active_streams) { - std::vector frame_types(kNumberOfSimulcastStreams, - kVideoFrameDelta); + std::vector frame_types(kNumberOfSimulcastStreams, + kVideoFrameDelta); UpdateActiveStreams(active_streams); // Set sufficient bitrate for all streams so we can test active without // bitrate being an issue. @@ -326,7 +326,7 @@ void SimulcastTestFixtureImpl::UpdateActiveStreams( } void SimulcastTestFixtureImpl::ExpectStreams( - FrameType frame_type, + VideoFrameType frame_type, const std::vector expected_streams_active) { ASSERT_EQ(static_cast(expected_streams_active.size()), kNumberOfSimulcastStreams); @@ -367,7 +367,7 @@ void SimulcastTestFixtureImpl::ExpectStreams( } } -void SimulcastTestFixtureImpl::ExpectStreams(FrameType frame_type, +void SimulcastTestFixtureImpl::ExpectStreams(VideoFrameType frame_type, int expected_video_streams) { ASSERT_GE(expected_video_streams, 0); ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams); @@ -396,8 +396,8 @@ void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers( // a key frame was only requested for some of them. void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() { SetRates(kMaxBitrates[2], 30); // To get all three streams. - std::vector frame_types(kNumberOfSimulcastStreams, - kVideoFrameDelta); + std::vector frame_types(kNumberOfSimulcastStreams, + kVideoFrameDelta); ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types)); @@ -431,8 +431,8 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() { void SimulcastTestFixtureImpl::TestPaddingAllStreams() { // We should always encode the base layer. SetRates(kMinBitrates[0] - 1, 30); - std::vector frame_types(kNumberOfSimulcastStreams, - kVideoFrameDelta); + std::vector frame_types(kNumberOfSimulcastStreams, + kVideoFrameDelta); ExpectStreams(kVideoFrameKey, 1); EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types)); @@ -444,8 +444,8 @@ void SimulcastTestFixtureImpl::TestPaddingAllStreams() { void SimulcastTestFixtureImpl::TestPaddingTwoStreams() { // We have just enough to get only the first stream and padding for two. SetRates(kMinBitrates[0], 30); - std::vector frame_types(kNumberOfSimulcastStreams, - kVideoFrameDelta); + std::vector frame_types(kNumberOfSimulcastStreams, + kVideoFrameDelta); ExpectStreams(kVideoFrameKey, 1); EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types)); @@ -458,8 +458,8 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() { // We are just below limit of sending second stream, so we should get // the first stream maxed out (at |maxBitrate|), and padding for two. 
   SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -471,8 +471,8 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
 void SimulcastTestFixtureImpl::TestPaddingOneStream() {
   // We have just enough to send two streams, so padding for one stream.
   SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -485,8 +485,8 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
   // We are just below limit of sending third stream, so we should get
   // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -498,8 +498,8 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
 void SimulcastTestFixtureImpl::TestSendAllStreams() {
   // We have just enough to send all streams.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -511,8 +511,8 @@ void SimulcastTestFixtureImpl::TestSendAllStreams() {
 void SimulcastTestFixtureImpl::TestDisablingStreams() {
   // We should get three media streams.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -617,8 +617,8 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
   // Encode one frame and verify.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   EXPECT_CALL(
       encoder_callback_,
       OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.h b/modules/video_coding/utility/simulcast_test_fixture_impl.h
index 8881e0671d..06437fc8fe 100644
--- a/modules/video_coding/utility/simulcast_test_fixture_impl.h
+++ b/modules/video_coding/utility/simulcast_test_fixture_impl.h
@@ -67,9 +67,9 @@ class SimulcastTestFixtureImpl final : public SimulcastTestFixture {
   void SetRates(uint32_t bitrate_kbps, uint32_t fps);
   void RunActiveStreamsTest(const std::vector<bool> active_streams);
   void UpdateActiveStreams(const std::vector<bool> active_streams);
-  void ExpectStreams(FrameType frame_type,
+  void ExpectStreams(VideoFrameType frame_type,
                      const std::vector<bool> expected_streams_active);
-  void ExpectStreams(FrameType frame_type, int expected_video_streams);
+  void ExpectStreams(VideoFrameType frame_type, int expected_video_streams);
   void VerifyTemporalIdxAndSyncForAllSpatialLayers(
       TestEncodedImageCallback* encoder_callback,
       const int* expected_temporal_idx,
diff --git a/sdk/android/src/jni/android_media_encoder.cc b/sdk/android/src/jni/android_media_encoder.cc
index 36681a5290..16402649d2 100644
--- a/sdk/android/src/jni/android_media_encoder.cc
+++ b/sdk/android/src/jni/android_media_encoder.cc
@@ -100,7 +100,7 @@ class MediaCodecVideoEncoder : public VideoEncoder {
                      size_t /* max_payload_size */) override;
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* /* codec_specific_info */,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
   int32_t Release() override;
@@ -595,7 +595,7 @@ int32_t MediaCodecVideoEncoder::InitEncodeInternal(int width,
 int32_t MediaCodecVideoEncoder::Encode(
     const VideoFrame& frame,
     const CodecSpecificInfo* /* codec_specific_info */,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_checker_);
   if (sw_fallback_required_)
     return WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
diff --git a/sdk/android/src/jni/encoded_image.cc b/sdk/android/src/jni/encoded_image.cc
index c801ce5f10..2e8b26689e 100644
--- a/sdk/android/src/jni/encoded_image.cc
+++ b/sdk/android/src/jni/encoded_image.cc
@@ -20,7 +20,7 @@ namespace webrtc {
 namespace jni {
 ScopedJavaLocalRef<jobject> NativeToJavaFrameType(JNIEnv* env,
-                                                  FrameType frame_type) {
+                                                  VideoFrameType frame_type) {
   return Java_FrameType_fromNativeIndex(env, frame_type);
 }
@@ -43,7 +43,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaEncodedImage(
 ScopedJavaLocalRef<jobjectArray> NativeToJavaFrameTypeArray(
     JNIEnv* env,
-    const std::vector<FrameType>& frame_types) {
+    const std::vector<VideoFrameType>& frame_types) {
   return NativeToJavaObjectArray(
       env, frame_types, org_webrtc_EncodedImage_00024FrameType_clazz(env),
       &NativeToJavaFrameType);
diff --git a/sdk/android/src/jni/encoded_image.h b/sdk/android/src/jni/encoded_image.h
index 148ba038b9..118994df59 100644
--- a/sdk/android/src/jni/encoded_image.h
+++ b/sdk/android/src/jni/encoded_image.h
@@ -25,12 +25,12 @@ class EncodedImage;
 namespace jni {
 ScopedJavaLocalRef<jobject> NativeToJavaFrameType(JNIEnv* env,
-                                                  FrameType frame_type);
+                                                  VideoFrameType frame_type);
 ScopedJavaLocalRef<jobject> NativeToJavaEncodedImage(JNIEnv* jni,
                                                      const EncodedImage& image);
 ScopedJavaLocalRef<jobjectArray> NativeToJavaFrameTypeArray(
     JNIEnv* env,
-    const std::vector<FrameType>& frame_types);
+    const std::vector<VideoFrameType>& frame_types);
 }  // namespace jni
 }  // namespace webrtc
diff --git a/sdk/android/src/jni/video_encoder_wrapper.cc b/sdk/android/src/jni/video_encoder_wrapper.cc
index 8dde3d1b5f..76579e3501 100644
--- a/sdk/android/src/jni/video_encoder_wrapper.cc
+++ b/sdk/android/src/jni/video_encoder_wrapper.cc
@@ -120,7 +120,7 @@ int32_t VideoEncoderWrapper::Release() {
 int32_t VideoEncoderWrapper::Encode(
     const VideoFrame& frame,
     const CodecSpecificInfo* /* codec_specific_info */,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   if (!initialized_) {
     // Most likely initializing the codec failed.
     return WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
@@ -275,7 +275,7 @@ void VideoEncoderWrapper::OnEncodedFrame(JNIEnv* jni,
   frame._encodedHeight = encoded_height;
   frame.SetTimestamp(frame_extra_info.timestamp_rtp);
   frame.capture_time_ms_ = capture_time_ns / rtc::kNumNanosecsPerMillisec;
-  frame._frameType = (FrameType)frame_type;
+  frame._frameType = (VideoFrameType)frame_type;
   frame.rotation_ = (VideoRotation)rotation;
   frame._completeFrame = complete_frame;
   if (qp == -1) {
diff --git a/sdk/android/src/jni/video_encoder_wrapper.h b/sdk/android/src/jni/video_encoder_wrapper.h
index ef4f840865..de2d67b5ea 100644
--- a/sdk/android/src/jni/video_encoder_wrapper.h
+++ b/sdk/android/src/jni/video_encoder_wrapper.h
@@ -43,7 +43,7 @@ class VideoEncoderWrapper : public VideoEncoder {
   int32_t Encode(const VideoFrame& frame,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t SetRateAllocation(const VideoBitrateAllocation& allocation,
                             uint32_t framerate) override;
diff --git a/sdk/objc/api/peerconnection/RTCEncodedImage+Private.mm b/sdk/objc/api/peerconnection/RTCEncodedImage+Private.mm
index 669fea2e74..dda8aac80e 100644
--- a/sdk/objc/api/peerconnection/RTCEncodedImage+Private.mm
+++ b/sdk/objc/api/peerconnection/RTCEncodedImage+Private.mm
@@ -52,7 +52,7 @@
   encodedImage.timing_.flags = self.flags;
   encodedImage.timing_.encode_start_ms = self.encodeStartMs;
   encodedImage.timing_.encode_finish_ms = self.encodeFinishMs;
-  encodedImage._frameType = webrtc::FrameType(self.frameType);
+  encodedImage._frameType = webrtc::VideoFrameType(self.frameType);
   encodedImage.rotation_ = webrtc::VideoRotation(self.rotation);
   encodedImage._completeFrame = self.completeFrame;
   encodedImage.qp_ = self.qp ? self.qp.intValue : -1;
diff --git a/sdk/objc/native/src/objc_video_encoder_factory.mm b/sdk/objc/native/src/objc_video_encoder_factory.mm
index bfa76ad430..5c90978bb2 100644
--- a/sdk/objc/native/src/objc_video_encoder_factory.mm
+++ b/sdk/objc/native/src/objc_video_encoder_factory.mm
@@ -75,7 +75,7 @@ class ObjCVideoEncoder : public VideoEncoder {
   int32_t Encode(const VideoFrame &frame,
                  const CodecSpecificInfo *codec_specific_info,
-                 const std::vector<FrameType> *frame_types) override {
+                 const std::vector<VideoFrameType> *frame_types) override {
     NSMutableArray *rtcFrameTypes = [NSMutableArray array];
     for (size_t i = 0; i < frame_types->size(); ++i) {
       [rtcFrameTypes addObject:@(RTCFrameType(frame_types->at(i)))];
diff --git a/sdk/objc/unittests/objc_video_encoder_factory_tests.mm b/sdk/objc/unittests/objc_video_encoder_factory_tests.mm
index 9e862b05ad..cef7495bb6 100644
--- a/sdk/objc/unittests/objc_video_encoder_factory_tests.mm
+++ b/sdk/objc/unittests/objc_video_encoder_factory_tests.mm
@@ -84,7 +84,7 @@ TEST(ObjCVideoEncoderFactoryTest, EncodeReturnsOKOnSuccess) {
                                .set_rotation(webrtc::kVideoRotation_0)
                                .set_timestamp_us(0)
                                .build();
-  std::vector<webrtc::FrameType> frame_types;
+  std::vector<webrtc::VideoFrameType> frame_types;
   EXPECT_EQ(encoder->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_OK);
 }
@@ -102,7 +102,7 @@ TEST(ObjCVideoEncoderFactoryTest, EncodeReturnsErrorOnFail) {
                                .set_rotation(webrtc::kVideoRotation_0)
                                .set_timestamp_us(0)
                                .build();
-  std::vector<webrtc::FrameType> frame_types;
+  std::vector<webrtc::VideoFrameType> frame_types;
   EXPECT_EQ(encoder->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_ERROR);
 }
diff --git a/test/configurable_frame_size_encoder.cc b/test/configurable_frame_size_encoder.cc
index bc94011b48..40b99079b2 100644
--- a/test/configurable_frame_size_encoder.cc
+++ b/test/configurable_frame_size_encoder.cc
@@ -46,7 +46,7 @@ int32_t ConfigurableFrameSizeEncoder::InitEncode(
 int32_t ConfigurableFrameSizeEncoder::Encode(
     const VideoFrame& inputImage,
     const CodecSpecificInfo* codecSpecificInfo,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   EncodedImage encodedImage(buffer_.get(), current_frame_size_,
                             max_frame_size_);
   encodedImage._completeFrame = true;
diff --git a/test/configurable_frame_size_encoder.h b/test/configurable_frame_size_encoder.h
index 390b2b113a..6b8ca96c73 100644
--- a/test/configurable_frame_size_encoder.h
+++ b/test/configurable_frame_size_encoder.h
@@ -39,7 +39,7 @@ class ConfigurableFrameSizeEncoder : public VideoEncoder {
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
diff --git a/test/fake_encoder.cc b/test/fake_encoder.cc
index 67fc839ac0..5e7131cdb7 100644
--- a/test/fake_encoder.cc
+++ b/test/fake_encoder.cc
@@ -82,7 +82,7 @@ int32_t FakeEncoder::InitEncode(const VideoCodec* config,
 int32_t FakeEncoder::Encode(const VideoFrame& input_image,
                             const CodecSpecificInfo* /*codec_specific_info*/,
-                            const std::vector<FrameType>* frame_types) {
+                            const std::vector<VideoFrameType>* frame_types) {
   unsigned char max_framerate;
   unsigned char num_simulcast_streams;
   SimulcastStream simulcast_streams[kMaxSimulcastStreams];
@@ -161,7 +161,7 @@ std::unique_ptr FakeEncoder::EncodeHook(
 }
 FakeEncoder::FrameInfo FakeEncoder::NextFrame(
-    const std::vector<FrameType>* frame_types,
+    const std::vector<VideoFrameType>* frame_types,
     bool keyframe,
     uint8_t num_simulcast_streams,
     const VideoBitrateAllocation& target_bitrate,
@@ -171,7 +171,7 @@ FakeEncoder::FrameInfo FakeEncoder::NextFrame(
   frame_info.keyframe = keyframe;
   if (frame_types) {
-    for (FrameType frame_type : *frame_types) {
+    for (VideoFrameType frame_type : *frame_types) {
       if (frame_type == kVideoFrameKey) {
         frame_info.keyframe = true;
         break;
@@ -356,7 +356,7 @@ void DelayedEncoder::SetDelay(int delay_ms) {
 int32_t DelayedEncoder::Encode(const VideoFrame& input_image,
                                const CodecSpecificInfo* codec_specific_info,
-                               const std::vector<FrameType>* frame_types) {
+                               const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
   SleepMs(delay_ms_);
@@ -390,7 +390,7 @@ class MultithreadedFakeH264Encoder::EncodeTask : public rtc::QueuedTask {
   EncodeTask(MultithreadedFakeH264Encoder* encoder,
              const VideoFrame& input_image,
             const CodecSpecificInfo* codec_specific_info,
-             const std::vector<FrameType>* frame_types)
+             const std::vector<VideoFrameType>* frame_types)
       : encoder_(encoder),
         input_image_(input_image),
         codec_specific_info_(),
@@ -409,13 +409,13 @@ class MultithreadedFakeH264Encoder::EncodeTask : public rtc::QueuedTask {
   MultithreadedFakeH264Encoder* const encoder_;
   VideoFrame input_image_;
   CodecSpecificInfo codec_specific_info_;
-  std::vector<FrameType> frame_types_;
+  std::vector<VideoFrameType> frame_types_;
 };
 int32_t MultithreadedFakeH264Encoder::Encode(
     const VideoFrame& input_image,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
   std::unique_ptr& queue =
@@ -434,7 +434,7 @@ int32_t MultithreadedFakeH264Encoder::Encode(
 int32_t MultithreadedFakeH264Encoder::EncodeCallback(
     const VideoFrame& input_image,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   return FakeH264Encoder::Encode(input_image, codec_specific_info,
                                  frame_types);
 }
diff --git a/test/fake_encoder.h b/test/fake_encoder.h
index ffd672a63e..3a4008363e 100644
--- a/test/fake_encoder.h
+++ b/test/fake_encoder.h
@@ -46,7 +46,7 @@ class FakeEncoder : public VideoEncoder {
                      size_t max_payload_size) override;
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
   int32_t Release() override;
@@ -72,7 +72,7 @@ class FakeEncoder : public VideoEncoder {
     std::vector layers;
   };
-  FrameInfo NextFrame(const std::vector<FrameType>* frame_types,
+  FrameInfo NextFrame(const std::vector<VideoFrameType>* frame_types,
                       bool keyframe,
                       uint8_t num_simulcast_streams,
                       const VideoBitrateAllocation& target_bitrate,
@@ -126,7 +126,7 @@ class DelayedEncoder : public test::FakeEncoder {
   void SetDelay(int delay_ms);
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
  private:
   int delay_ms_ RTC_GUARDED_BY(sequence_checker_);
@@ -148,11 +148,11 @@ class MultithreadedFakeH264Encoder : public test::FakeH264Encoder {
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t EncodeCallback(const VideoFrame& input_image,
                          const CodecSpecificInfo* codec_specific_info,
-                         const std::vector<FrameType>* frame_types);
+                         const std::vector<VideoFrameType>* frame_types);
   int32_t Release() override;
diff --git a/test/fake_vp8_encoder.cc b/test/fake_vp8_encoder.cc
index bf7ec68386..6ad40947a4 100644
--- a/test/fake_vp8_encoder.cc
+++ b/test/fake_vp8_encoder.cc
@@ -92,7 +92,7 @@ void FakeVP8Encoder::SetupTemporalLayers(const VideoCodec& codec) {
 void FakeVP8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                                            size_t size_bytes,
-                                           FrameType frame_type,
+                                           VideoFrameType frame_type,
                                            int stream_idx,
                                            uint32_t timestamp) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
diff --git a/test/fake_vp8_encoder.h b/test/fake_vp8_encoder.h
index 9607baf12d..1906e7418d 100644
--- a/test/fake_vp8_encoder.h
+++ b/test/fake_vp8_encoder.h
@@ -48,7 +48,7 @@ class FakeVP8Encoder : public FakeEncoder {
   void SetupTemporalLayers(const VideoCodec& codec);
   void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                              size_t size_bytes,
-                             FrameType frame_type,
+                             VideoFrameType frame_type,
                              int stream_idx,
                              uint32_t timestamp);
diff --git a/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.cc b/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.cc
index e2557778f1..e2f16cc22a 100644
--- a/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.cc
+++ b/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.cc
@@ -87,7 +87,7 @@ int32_t QualityAnalyzingVideoEncoder::Release() {
 int32_t QualityAnalyzingVideoEncoder::Encode(
     const VideoFrame& frame,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   {
     rtc::CritScope crit(&lock_);
     // Store id to be able to retrieve it in analyzing callback.
@@ -245,7 +245,7 @@ bool QualityAnalyzingVideoEncoder::ShouldDiscard(
   // are equal or less than required one are interesting, so all above
   // have to be discarded. For other frames only required spatial index
   // is interesting, so all others have to be discarded.
-  if (encoded_image._frameType == FrameType::kVideoFrameKey) {
+  if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
     return *encoded_image.SpatialIndex() > *required_spatial_index;
   } else {
     return *encoded_image.SpatialIndex() != *required_spatial_index;
diff --git a/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h b/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h
index a9a5873f81..693817c24c 100644
--- a/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h
+++ b/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h
@@ -68,7 +68,7 @@ class QualityAnalyzingVideoEncoder : public VideoEncoder,
       EncodedImageCallback* callback) override;
   int32_t Release() override;
   int32_t Encode(const VideoFrame& frame,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t SetRates(uint32_t bitrate, uint32_t framerate) override;
   int32_t SetRateAllocation(const VideoBitrateAllocation& allocation,
                             uint32_t framerate) override;
diff --git a/test/video_encoder_proxy_factory.h b/test/video_encoder_proxy_factory.h
index 7f5f6fcfcd..55a01a1e0e 100644
--- a/test/video_encoder_proxy_factory.h
+++ b/test/video_encoder_proxy_factory.h
@@ -62,7 +62,7 @@ class VideoEncoderProxyFactory final : public VideoEncoderFactory {
  private:
   int32_t Encode(const VideoFrame& input_image,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     return encoder_->Encode(input_image, frame_types);
   }
   int32_t InitEncode(const VideoCodec* config,
diff --git a/video/end_to_end_tests/network_state_tests.cc b/video/end_to_end_tests/network_state_tests.cc
index 8350e73fe2..5ba38d9874 100644
--- a/video/end_to_end_tests/network_state_tests.cc
+++ b/video/end_to_end_tests/network_state_tests.cc
@@ -269,7 +269,7 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) {
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     {
       rtc::CritScope lock(&test_crit_);
       if (sender_state_ == kNetworkDown) {
@@ -365,7 +365,7 @@ TEST_F(NetworkStateEndToEndTest, NewVideoSendStreamsRespectVideoNetworkDown) {
   }
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     ADD_FAILURE() << "Unexpected frame encode.";
     return test::FakeEncoder::Encode(input_image, codec_specific_info,
                                      frame_types);
@@ -390,7 +390,7 @@ TEST_F(NetworkStateEndToEndTest, NewVideoSendStreamsIgnoreAudioNetworkDown) {
   }
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     encoded_frame_ = true;
     return test::FakeEncoder::Encode(input_image, codec_specific_info,
                                      frame_types);
diff --git a/video/picture_id_tests.cc b/video/picture_id_tests.cc
index 3188786e9f..9d9b6eab0f 100644
--- a/video/picture_id_tests.cc
+++ b/video/picture_id_tests.cc
@@ -72,7 +72,7 @@ class PictureIdObserver : public test::RtpRtcpObserver {
     int16_t picture_id;
     int16_t tl0_pic_idx;
     uint8_t temporal_idx;
-    FrameType frame_type;
+    VideoFrameType frame_type;
   };
   bool ParsePayload(const uint8_t* packet,
diff --git a/video/video_quality_test.cc b/video/video_quality_test.cc
index bdb1035589..04dc449886 100644
--- a/video/video_quality_test.cc
+++ b/video/video_quality_test.cc
@@ -142,7 +142,7 @@ class QualityTestVideoEncoder : public VideoEncoder,
   }
   int32_t Release() override { return encoder_->Release(); }
   int32_t Encode(const VideoFrame& frame,
-                 const std::vector<FrameType>* frame_types) {
+                 const std::vector<VideoFrameType>* frame_types) {
     if (analyzer_) {
       analyzer_->PreEncodeOnFrame(frame);
     }
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index a5f5c1ac3f..fe562d5143 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -1984,7 +1984,7 @@ TEST_F(VideoSendStreamTest,
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     ADD_FAILURE()
         << "Unexpected Encode call since the send stream is not started";
     return 0;
@@ -2318,7 +2318,7 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
   int32_t Encode(const VideoFrame& inputImage,
                  const CodecSpecificInfo* codecSpecificInfo,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     EXPECT_TRUE(IsReadyForEncode());
     observation_complete_.Set();
@@ -2537,7 +2537,7 @@ class VideoCodecConfigObserver : public test::SendTest,
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     // Silently skip the encode, FakeEncoder::Encode doesn't produce VP8.
     return 0;
   }
@@ -3003,7 +3003,7 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) {
  private:
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codecSpecificInfo,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     CodecSpecificInfo specifics;
     specifics.codecType = kVideoCodecGeneric;
diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc
index 75f8b9b516..9572512002 100644
--- a/video/video_stream_encoder.cc
+++ b/video/video_stream_encoder.cc
@@ -1731,7 +1731,8 @@ void VideoStreamEncoder::RunPostEncode(EncodedImage encoded_image,
   // Run post encode tasks, such as overuse detection and frame rate/drop
   // stats for internal encoders.
   const size_t frame_size = encoded_image.size();
-  const bool keyframe = encoded_image._frameType == FrameType::kVideoFrameKey;
+  const bool keyframe =
+      encoded_image._frameType == VideoFrameType::kVideoFrameKey;
   if (frame_size > 0) {
     frame_dropper_.Fill(frame_size, !keyframe);
diff --git a/video/video_stream_encoder.h b/video/video_stream_encoder.h
index fd08351910..663a020fb9 100644
--- a/video/video_stream_encoder.h
+++ b/video/video_stream_encoder.h
@@ -316,7 +316,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
   // TODO(sprang): Change actually support keyframe per simulcast stream, or
   // turn this into a simple bool |pending_keyframe_request_|.
-  std::vector<FrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);
+  std::vector<VideoFrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);
   FrameEncodeTimer frame_encoder_timer_;
diff --git a/video/video_stream_encoder_unittest.cc b/video/video_stream_encoder_unittest.cc
index ad4200b443..8b4084aa53 100644
--- a/video/video_stream_encoder_unittest.cc
+++ b/video/video_stream_encoder_unittest.cc
@@ -606,14 +606,14 @@ class VideoStreamEncoderTest : public ::testing::Test {
     return last_update_rect_;
   }
-  const std::vector<FrameType>& LastFrameTypes() const {
+  const std::vector<VideoFrameType>& LastFrameTypes() const {
     rtc::CritScope lock(&local_crit_sect_);
     return last_frame_types_;
   }
   void InjectFrame(const VideoFrame& input_image, bool keyframe) {
-    const std::vector<FrameType> frame_type = {keyframe ? kVideoFrameKey
-                                                        : kVideoFrameDelta};
+    const std::vector<VideoFrameType> frame_type = {
+        keyframe ? kVideoFrameKey : kVideoFrameDelta};
     {
       rtc::CritScope lock(&local_crit_sect_);
       last_frame_types_ = frame_type;
@@ -640,7 +640,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
  private:
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     bool block_encode;
     {
       rtc::CritScope lock(&local_crit_sect_);
@@ -747,7 +747,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
   absl::optional last_bitrate_allocation_;
   VideoFrame::UpdateRect last_update_rect_ RTC_GUARDED_BY(local_crit_sect_) =
       {0, 0, 0, 0};
-  std::vector<FrameType> last_frame_types_;
+  std::vector<VideoFrameType> last_frame_types_;
   bool expect_null_frame_ = false;
   EncodedImageCallback* encoded_image_callback_
       RTC_GUARDED_BY(local_crit_sect_) = nullptr;
@@ -3605,20 +3605,20 @@ TEST_F(VideoStreamEncoderTest, SetsFrameTypes) {
   video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
   WaitForEncodedFrame(1);
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameKey}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameKey}));
   // Insert delta frame.
   video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
   WaitForEncodedFrame(2);
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameDelta}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameDelta}));
   // Request next frame be a key-frame.
   video_stream_encoder_->SendKeyFrame();
   video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
   WaitForEncodedFrame(3);
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameKey}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameKey}));
   video_stream_encoder_->Stop();
 }
@@ -3669,23 +3669,23 @@ TEST_F(VideoStreamEncoderTest, RequestKeyframeInternalSource) {
   fake_encoder_.InjectFrame(CreateFrame(1, nullptr), true);
   EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameKey}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameKey}));
-  const std::vector<FrameType> kDeltaFrame = {kVideoFrameDelta};
+  const std::vector<VideoFrameType> kDeltaFrame = {kVideoFrameDelta};
   // Need to set timestamp manually since manually for injected frame.
   VideoFrame frame = CreateFrame(101, nullptr);
   frame.set_timestamp(101);
   fake_encoder_.InjectFrame(frame, false);
   EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameDelta}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameDelta}));
   // Request key-frame. The forces a dummy frame down into the encoder.
   fake_encoder_.ExpectNullFrame();
   video_stream_encoder_->SendKeyFrame();
   EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameKey}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameKey}));
   video_stream_encoder_->Stop();
 }
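
Note (not part of the patch): every hunk above applies the same mechanical substitution. FrameType becomes VideoFrameType in VideoEncoder::Encode() overrides, in std::vector<FrameType> containers, and in EncodedImage::_frameType comparisons, while the enumerator names (kVideoFrameKey, kVideoFrameDelta) are unchanged. The sketch below is a hedged illustration of what the equivalent update would look like in a downstream encoder outside this patch; the class name MyEncoder and its body are hypothetical, and it assumes the post-rename WebRTC headers referenced by the patch.

// Illustrative sketch only -- not part of this patch. Assumes the post-rename
// WebRTC tree; MyEncoder and its behavior are hypothetical. Other VideoEncoder
// methods (InitEncode, Release, callbacks, rate control) are omitted, so this
// class stays abstract; it only shows the Encode() signature migration.
#include <vector>

#include "api/video/video_frame.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_error_codes.h"

namespace example {

class MyEncoder : public webrtc::VideoEncoder {
 public:
  // Before the rename this override took const std::vector<webrtc::FrameType>*;
  // after it, the interface expects webrtc::VideoFrameType instead.
  int32_t Encode(
      const webrtc::VideoFrame& frame,
      const webrtc::CodecSpecificInfo* codec_specific_info,
      const std::vector<webrtc::VideoFrameType>* frame_types) override {
    bool request_key_frame = false;
    if (frame_types) {
      for (webrtc::VideoFrameType type : *frame_types) {
        // Enumerator names are unchanged by the rename.
        if (type == webrtc::VideoFrameType::kVideoFrameKey) {
          request_key_frame = true;
          break;
        }
      }
    }
    // ... encode |frame| here, forcing a key frame if requested ...
    (void)request_key_frame;
    return WEBRTC_VIDEO_CODEC_OK;
  }
};

}  // namespace example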