Prepare for splitting FrameType into AudioFrameType and VideoFrameType
This CL deprecates the FrameType enum and adds the aliases AudioFrameType and VideoFrameType. After downstream usage is updated, the enums will be separated and moved out of common_types.h.

Bug: webrtc:6883
Change-Id: I2aaf660169da45f22574b4cbb16aea8522cc07a6
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/123184
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27011}
This commit is contained in:
parent 0b69826ffb
commit 87e2d785a0

BUILD.gn
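
The migration mechanics are worth spelling out: because both new names start out as aliases of the old enum, every existing spelling keeps compiling while call sites are renamed one at a time, and the real split only lands once downstream code no longer says FrameType. A minimal sketch of this intermediate state (the SendData signature at the end is a hypothetical downstream call site, not code from this CL):

// All three names refer to the same underlying enum for now, so old and
// new spellings are freely interchangeable at every call site.
enum FrameTypeDeprecated {
  kEmptyFrame = 0,
  kAudioFrameSpeech = 1,
  kAudioFrameCN = 2,
  // ... video members elided here; see the common_types.h hunk below ...
  kVideoFrameDelta = 4,
};
typedef FrameTypeDeprecated FrameType;       // old name, kept for Chromium
using AudioFrameType = FrameTypeDeprecated;  // new audio-side name
using VideoFrameType = FrameTypeDeprecated;  // new video-side name

// Hypothetical downstream override: moving the signature to the new alias
// compiles both before and after this CL, because the types are identical.
int32_t SendData(AudioFrameType frame_type, uint8_t payload_type);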
@@ -452,6 +452,7 @@ rtc_source_set("webrtc_common") {
     "api/video:video_bitrate_allocation",
     "api/video:video_frame",
     "rtc_base:checks",
+    "rtc_base:deprecation",
     "//third_party/abseil-cpp/absl/strings",
   ]
 }

@@ -74,8 +74,9 @@ class FecController {
                              int64_t round_trip_time_ms) = 0;
 
   // Informs of encoded output.
-  virtual void UpdateWithEncodedData(size_t encoded_image_length,
-                                     FrameType encoded_image_frametype) = 0;
+  virtual void UpdateWithEncodedData(
+      size_t encoded_image_length,
+      VideoFrameType encoded_image_frametype) = 0;
 
   // Returns whether this FEC Controller needs Loss Vector Mask as input.
   virtual bool UseLossVectorMask() = 0;

@@ -40,7 +40,7 @@ class MockVideoEncoder : public VideoEncoder {
   MOCK_METHOD3(Encode,
                int32_t(const VideoFrame& inputImage,
                        const CodecSpecificInfo* codecSpecificInfo,
-                       const std::vector<FrameType>* frame_types));
+                       const std::vector<VideoFrameType>* frame_types));
   MOCK_METHOD1(RegisterEncodeCompleteCallback,
                int32_t(EncodedImageCallback* callback));
   MOCK_METHOD0(Release, int32_t());

@@ -43,7 +43,7 @@ class VideoCodecTestStats {
     size_t encode_time_us = 0;
     size_t target_bitrate_kbps = 0;
     size_t length_bytes = 0;
-    webrtc::FrameType frame_type = kVideoFrameDelta;
+    webrtc::VideoFrameType frame_type = kVideoFrameDelta;
 
     // Layering.
     size_t spatial_idx = 0;

@@ -115,7 +115,7 @@ class RTC_EXPORT EncodedImage {
   // NTP time of the capture time in local timebase in milliseconds.
   int64_t ntp_time_ms_ = 0;
   int64_t capture_time_ms_ = 0;
-  FrameType _frameType = kVideoFrameDelta;
+  VideoFrameType _frameType = kVideoFrameDelta;
   VideoRotation rotation_ = kVideoRotation_0;
   VideoContentType content_type_ = VideoContentType::UNSPECIFIED;
   bool _completeFrame = false;

@@ -95,7 +95,7 @@ class VideoEncoderSoftwareFallbackWrapperTest : public ::testing::Test {
     }
     int32_t Encode(const VideoFrame& frame,
                    const CodecSpecificInfo* codec_specific_info,
-                   const std::vector<FrameType>* frame_types) override {
+                   const std::vector<VideoFrameType>* frame_types) override {
       ++encode_count_;
       if (encode_complete_callback_ &&
           encode_return_code_ == WEBRTC_VIDEO_CODEC_OK) {

@@ -181,7 +181,7 @@ void VideoEncoderSoftwareFallbackWrapperTest::EncodeFrame(int expected_ret) {
   rtc::scoped_refptr<I420Buffer> buffer =
       I420Buffer::Create(codec_.width, codec_.height);
   I420Buffer::SetBlack(buffer);
-  std::vector<FrameType> types(1, kVideoFrameKey);
+  std::vector<VideoFrameType> types(1, kVideoFrameKey);
 
   frame_ =
       absl::make_unique<VideoFrame>(VideoFrame::Builder()

@@ -293,7 +293,7 @@ TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
   EXPECT_EQ(&callback2, fake_encoder_->encode_complete_callback_);
 
   // Encoding a frame using the fallback should arrive at the new callback.
-  std::vector<FrameType> types(1, kVideoFrameKey);
+  std::vector<VideoFrameType> types(1, kVideoFrameKey);
   frame_->set_timestamp(frame_->timestamp() + 1000);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, fallback_wrapper_->Encode(*frame_, &types));
 

@@ -104,13 +104,13 @@ VideoEncoder::EncoderInfo::~EncoderInfo() = default;
 // Implementations of the interface must implement one or the other of these two
 // methods.
 int32_t VideoEncoder::Encode(const VideoFrame& frame,
-                             const std::vector<FrameType>* frame_types) {
+                             const std::vector<VideoFrameType>* frame_types) {
   return Encode(frame, nullptr, frame_types);
 }
 
 int32_t VideoEncoder::Encode(const VideoFrame& frame,
                              const CodecSpecificInfo* codec_specific_info,
-                             const std::vector<FrameType>* frame_types) {
+                             const std::vector<VideoFrameType>* frame_types) {
   return Encode(frame, frame_types);
 }
 

@@ -242,12 +242,12 @@ class RTC_EXPORT VideoEncoder {
   // WEBRTC_VIDEO_CODEC_MEMORY
   // WEBRTC_VIDEO_CODEC_ERROR
   virtual int32_t Encode(const VideoFrame& frame,
-                         const std::vector<FrameType>* frame_types);
+                         const std::vector<VideoFrameType>* frame_types);
   // TODO(bugs.webrtc.org/10379): Deprecated. Delete, and make above method pure
   // virtual, as soon as downstream applications are updated.
   virtual int32_t Encode(const VideoFrame& frame,
                          const CodecSpecificInfo* codec_specific_info,
-                         const std::vector<FrameType>* frame_types);
+                         const std::vector<VideoFrameType>* frame_types);
 
   // Inform the encoder about the new target bit rate.
   //

@@ -88,7 +88,7 @@ class VideoEncoderSoftwareFallbackWrapper final : public VideoEncoder {
 
   int32_t Release() override;
   int32_t Encode(const VideoFrame& frame,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t SetRateAllocation(const VideoBitrateAllocation& bitrate_allocation,
                             uint32_t framerate) override;
   EncoderInfo GetEncoderInfo() const override;

@@ -252,7 +252,7 @@ int32_t VideoEncoderSoftwareFallbackWrapper::Release() {
 
 int32_t VideoEncoderSoftwareFallbackWrapper::Encode(
     const VideoFrame& frame,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   if (use_fallback_encoder_)
     return fallback_encoder_->Encode(frame, frame_types);
   int32_t ret = encoder_->Encode(frame, frame_types);

@@ -55,7 +55,7 @@ constexpr int64_t kMaxRetransmissionWindowMs = 1000;
 constexpr int64_t kMinRetransmissionWindowMs = 30;
 
 MediaTransportEncodedAudioFrame::FrameType
-MediaTransportFrameTypeForWebrtcFrameType(webrtc::FrameType frame_type) {
+MediaTransportFrameTypeForWebrtcFrameType(webrtc::AudioFrameType frame_type) {
   switch (frame_type) {
     case kAudioFrameSpeech:
       return MediaTransportEncodedAudioFrame::FrameType::kSpeech;

@@ -184,7 +184,7 @@ class ChannelSend
   class ProcessAndEncodeAudioTask;
 
   // From AudioPacketizationCallback in the ACM
-  int32_t SendData(FrameType frameType,
+  int32_t SendData(AudioFrameType frameType,
                    uint8_t payloadType,
                    uint32_t timeStamp,
                    const uint8_t* payloadData,

@@ -196,13 +196,13 @@ class ChannelSend
 
   int SetSendRtpHeaderExtension(bool enable, RTPExtensionType type, int id);
 
-  int32_t SendRtpAudio(FrameType frameType,
+  int32_t SendRtpAudio(AudioFrameType frameType,
                        uint8_t payloadType,
                        uint32_t timeStamp,
                        rtc::ArrayView<const uint8_t> payload,
                        const RTPFragmentationHeader* fragmentation);
 
-  int32_t SendMediaTransportAudio(FrameType frameType,
+  int32_t SendMediaTransportAudio(AudioFrameType frameType,
                                   uint8_t payloadType,
                                   uint32_t timeStamp,
                                   rtc::ArrayView<const uint8_t> payload,

@@ -492,7 +492,7 @@ class ChannelSend::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
   ChannelSend* const channel_;
 };
 
-int32_t ChannelSend::SendData(FrameType frameType,
+int32_t ChannelSend::SendData(AudioFrameType frameType,
                               uint8_t payloadType,
                               uint32_t timeStamp,
                               const uint8_t* payloadData,

@@ -516,7 +516,7 @@ int32_t ChannelSend::SendData(FrameType frameType,
   }
 }
 
-int32_t ChannelSend::SendRtpAudio(FrameType frameType,
+int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType,
                                   uint8_t payloadType,
                                   uint32_t timeStamp,
                                   rtc::ArrayView<const uint8_t> payload,

@@ -589,7 +589,7 @@ int32_t ChannelSend::SendRtpAudio(FrameType frameType,
 }
 
 int32_t ChannelSend::SendMediaTransportAudio(
-    FrameType frameType,
+    AudioFrameType frameType,
     uint8_t payloadType,
     uint32_t timeStamp,
     rtc::ArrayView<const uint8_t> payload,

@@ -347,7 +347,7 @@ class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test {
 
   void ConvertAndCheck(int temporal_index,
                        int64_t shared_frame_id,
-                       FrameType frame_type,
+                       VideoFrameType frame_type,
                        LayerSync layer_sync,
                        const std::set<int64_t>& expected_deps,
                        uint16_t width = 0,

@@ -25,7 +25,9 @@
 
 namespace webrtc {
 
-enum FrameType {
+// TODO(bugs.webrtc.org/6883): This type should be split into separate types for
+// audio and video, and then moved out of this file.
+enum FrameTypeDeprecated {
   kEmptyFrame = 0,
   kAudioFrameSpeech = 1,
   kAudioFrameCN = 2,

@@ -33,6 +35,12 @@ enum FrameType {
   kVideoFrameDelta = 4,
 };
 
+// Can't use RTC_DEPRECATED until Chromium is updated.
+typedef FrameTypeDeprecated FrameType;
+
+using AudioFrameType = FrameTypeDeprecated;
+using VideoFrameType = FrameTypeDeprecated;
+
 // Statistics for RTCP packet types.
 struct RtcpPacketTypeCounter {
   RtcpPacketTypeCounter()
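
Note what the aliases do and do not buy: since AudioFrameType and VideoFrameType currently name the same type, the compiler cannot yet catch audio/video mix-ups; the aliases only let each call site be moved to the semantically right name ahead of the actual split. A short illustration (hypothetical snippet, not part of the CL):

void OnAudio(AudioFrameType type);
void OnVideo(VideoFrameType type);

void Demo() {
  FrameType legacy = kVideoFrameDelta;
  OnVideo(legacy);  // Fine now and after the split.
  OnAudio(legacy);  // Compiles today (same type), but is exactly the kind
                    // of call the eventual split is meant to reject.
}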
@@ -43,8 +43,9 @@ int EncoderSimulcastProxy::InitEncode(const VideoCodec* inst,
   return ret;
 }
 
-int EncoderSimulcastProxy::Encode(const VideoFrame& input_image,
-                                  const std::vector<FrameType>* frame_types) {
+int EncoderSimulcastProxy::Encode(
+    const VideoFrame& input_image,
+    const std::vector<VideoFrameType>* frame_types) {
   return encoder_->Encode(input_image, frame_types);
 }
 

@@ -46,7 +46,7 @@ class EncoderSimulcastProxy : public VideoEncoder {
                  int number_of_cores,
                  size_t max_payload_size) override;
   int Encode(const VideoFrame& input_image,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
   int SetRateAllocation(const VideoBitrateAllocation& bitrate,
                         uint32_t new_framerate) override;

@@ -48,7 +48,7 @@ class MockEncoder : public VideoEncoder {
       Encode,
       int32_t(const VideoFrame& inputImage,
               const CodecSpecificInfo* codecSpecificInfo,
-              const std::vector<FrameType>* frame_types) /* override */);
+              const std::vector<VideoFrameType>* frame_types) /* override */);
 
   MOCK_CONST_METHOD0(GetEncoderInfo, VideoEncoder::EncoderInfo(void));
 };

@@ -151,7 +151,7 @@ int32_t FakeWebRtcVideoEncoder::InitEncode(
 int32_t FakeWebRtcVideoEncoder::Encode(
     const webrtc::VideoFrame& inputImage,
     const webrtc::CodecSpecificInfo* codecSpecificInfo,
-    const std::vector<webrtc::FrameType>* frame_types) {
+    const std::vector<webrtc::VideoFrameType>* frame_types) {
   rtc::CritScope lock(&crit_);
   ++num_frames_encoded_;
   init_encode_event_.Set();

@@ -88,9 +88,10 @@ class FakeWebRtcVideoEncoder : public webrtc::VideoEncoder {
   int32_t InitEncode(const webrtc::VideoCodec* codecSettings,
                      int32_t numberOfCores,
                      size_t maxPayloadSize) override;
-  int32_t Encode(const webrtc::VideoFrame& inputImage,
-                 const webrtc::CodecSpecificInfo* codecSpecificInfo,
-                 const std::vector<webrtc::FrameType>* frame_types) override;
+  int32_t Encode(
+      const webrtc::VideoFrame& inputImage,
+      const webrtc::CodecSpecificInfo* codecSpecificInfo,
+      const std::vector<webrtc::VideoFrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       webrtc::EncodedImageCallback* callback) override;
   int32_t Release() override;

@@ -338,7 +338,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
 
 int SimulcastEncoderAdapter::Encode(
     const VideoFrame& input_image,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
 
   if (!Initialized()) {

@@ -375,7 +375,7 @@ int SimulcastEncoderAdapter::Encode(
       continue;
     }
 
-    std::vector<FrameType> stream_frame_types;
+    std::vector<VideoFrameType> stream_frame_types;
     if (send_key_frame) {
       stream_frame_types.push_back(kVideoFrameKey);
       streaminfos_[stream_idx].key_frame_request = false;

@@ -45,7 +45,7 @@ class SimulcastEncoderAdapter : public VideoEncoder {
              int number_of_cores,
             size_t max_payload_size) override;
   int Encode(const VideoFrame& input_image,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
   int SetRateAllocation(const VideoBitrateAllocation& bitrate,
                         uint32_t new_framerate) override;

@@ -198,7 +198,7 @@ class MockVideoEncoder : public VideoEncoder {
       Encode,
       int32_t(const VideoFrame& inputImage,
               const CodecSpecificInfo* codecSpecificInfo,
-              const std::vector<FrameType>* frame_types) /* override */);
+              const std::vector<VideoFrameType>* frame_types) /* override */);
 
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) /* override */ {

@@ -556,7 +556,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
           .set_timestamp_ms(1000)
          .set_rotation(kVideoRotation_180)
           .build();
-  std::vector<FrameType> frame_types;
+  std::vector<VideoFrameType> frame_types;
 
   // Encode with three streams.
   EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));

@@ -890,7 +890,7 @@ TEST_F(TestSimulcastEncoderAdapterFake,
   // frame and can't otherwise be modified/resized.
   for (MockVideoEncoder* encoder : helper_->factory()->encoders())
     EXPECT_CALL(*encoder, Encode(::testing::Ref(input_frame), _, _)).Times(1);
-  std::vector<FrameType> frame_types(3, kVideoFrameKey);
+  std::vector<VideoFrameType> frame_types(3, kVideoFrameKey);
   EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
 }
 

@@ -916,7 +916,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
           .set_timestamp_us(0)
          .set_rotation(kVideoRotation_0)
           .build();
-  std::vector<FrameType> frame_types(3, kVideoFrameKey);
+  std::vector<VideoFrameType> frame_types(3, kVideoFrameKey);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE,
             adapter_->Encode(input_frame, &frame_types));
 }

@@ -1031,7 +1031,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ActivatesCorrectStreamsInInitEncode) {
   EXPECT_CALL(*original_encoders[1], Encode(_, _, _)).Times(0);
   EXPECT_CALL(*original_encoders[2], Encode(_, _, _)).Times(0);
 
-  std::vector<FrameType> frame_types;
+  std::vector<VideoFrameType> frame_types;
   frame_types.resize(3, kVideoFrameKey);
   EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
 }

@@ -103,7 +103,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
     return num_10ms_frames;
   }
 
-  int SendData(FrameType frame_type,
+  int SendData(AudioFrameType frame_type,
               uint8_t payload_type,
               uint32_t timestamp,
               const uint8_t* payload_data,

@@ -139,7 +139,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
   uint32_t timestamp_;
   bool packet_sent_;  // Set when SendData is called reset when inserting audio.
   uint32_t last_packet_send_timestamp_;
-  FrameType last_frame_type_;
+  AudioFrameType last_frame_type_;
 };
 
 #if defined(WEBRTC_ANDROID)

@@ -123,7 +123,7 @@ std::unique_ptr<Packet> AcmSendTestOldApi::NextPacket() {
 
 // This method receives the callback from ACM when a new packet is produced.
 int32_t AcmSendTestOldApi::SendData(
-    FrameType frame_type,
+    AudioFrameType frame_type,
     uint8_t payload_type,
     uint32_t timestamp,
     const uint8_t* payload_data,

@@ -50,7 +50,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
   std::unique_ptr<Packet> NextPacket() override;
 
   // Inherited from AudioPacketizationCallback.
-  int32_t SendData(FrameType frame_type,
+  int32_t SendData(AudioFrameType frame_type,
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,

@@ -75,7 +75,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
   bool codec_registered_;
   int test_duration_ms_;
   // The following member variables are set whenever SendData() is called.
-  FrameType frame_type_;
+  AudioFrameType frame_type_;
   int payload_type_;
   uint32_t timestamp_;
   uint16_t sequence_number_;

@@ -393,7 +393,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
 
   RTPFragmentationHeader my_fragmentation;
   ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
-  FrameType frame_type;
+  AudioFrameType frame_type;
   if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
     frame_type = kEmptyFrame;
     encoded_info.payload_type = previous_pltype;

@@ -104,7 +104,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
         last_payload_type_(-1),
         last_timestamp_(0) {}
 
-  int32_t SendData(FrameType frame_type,
+  int32_t SendData(AudioFrameType frame_type,
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,

@@ -129,7 +129,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
     return rtc::checked_cast<int>(last_payload_vec_.size());
   }
 
-  FrameType last_frame_type() const {
+  AudioFrameType last_frame_type() const {
     rtc::CritScope lock(&crit_sect_);
     return last_frame_type_;
   }

@@ -151,7 +151,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
 
  private:
   int num_calls_ RTC_GUARDED_BY(crit_sect_);
-  FrameType last_frame_type_ RTC_GUARDED_BY(crit_sect_);
+  AudioFrameType last_frame_type_ RTC_GUARDED_BY(crit_sect_);
   int last_payload_type_ RTC_GUARDED_BY(crit_sect_);
   uint32_t last_timestamp_ RTC_GUARDED_BY(crit_sect_);
   std::vector<uint8_t> last_payload_vec_ RTC_GUARDED_BY(crit_sect_);

@@ -430,7 +430,7 @@ class AudioCodingModuleTestWithComfortNoiseOldApi
   // that is contain comfort noise.
   const struct {
     int ix;
-    FrameType type;
+    AudioFrameType type;
   } expectation[] = {
       {2, kAudioFrameCN}, {5, kEmptyFrame}, {8, kEmptyFrame},
       {11, kAudioFrameCN}, {14, kEmptyFrame}, {17, kEmptyFrame},

@@ -40,7 +40,7 @@ class AudioPacketizationCallback {
  public:
   virtual ~AudioPacketizationCallback() {}
 
-  virtual int32_t SendData(FrameType frame_type,
+  virtual int32_t SendData(AudioFrameType frame_type,
                            uint8_t payload_type,
                            uint32_t timestamp,
                            const uint8_t* payload_data,

@@ -53,7 +53,7 @@ class ACMVADCallback {
  public:
   virtual ~ACMVADCallback() {}
 
-  virtual int32_t InFrameType(FrameType frame_type) = 0;
+  virtual int32_t InFrameType(AudioFrameType frame_type) = 0;
 };
 
 class AudioCodingModule {

@@ -107,7 +107,7 @@ class Packetizer : public AudioPacketizationCallback {
         ssrc_(ssrc),
         timestamp_rate_hz_(timestamp_rate_hz) {}
 
-  int32_t SendData(FrameType frame_type,
+  int32_t SendData(AudioFrameType frame_type,
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,

@@ -18,7 +18,7 @@
 
 namespace webrtc {
 
-int32_t Channel::SendData(FrameType frameType,
+int32_t Channel::SendData(AudioFrameType frameType,
                           uint8_t payloadType,
                           uint32_t timeStamp,
                           const uint8_t* payloadData,

@@ -47,7 +47,7 @@ class Channel : public AudioPacketizationCallback {
   Channel(int16_t chID = -1);
   ~Channel() override;
 
-  int32_t SendData(FrameType frameType,
+  int32_t SendData(AudioFrameType frameType,
                    uint8_t payloadType,
                    uint32_t timeStamp,
                    const uint8_t* payloadData,

@@ -33,8 +33,10 @@ TestPacketization::~TestPacketization() {
 }
 
 int32_t TestPacketization::SendData(
-    const FrameType /* frameType */, const uint8_t payloadType,
-    const uint32_t timeStamp, const uint8_t* payloadData,
+    const AudioFrameType /* frameType */,
+    const uint8_t payloadType,
+    const uint32_t timeStamp,
+    const uint8_t* payloadData,
     const size_t payloadSize,
     const RTPFragmentationHeader* /* fragmentation */) {
   _rtpStream->Write(payloadType, timeStamp, _seqNo++, payloadData, payloadSize,

@@ -28,7 +28,7 @@ class TestPacketization : public AudioPacketizationCallback {
  public:
   TestPacketization(RTPStream *rtpStream, uint16_t frequency);
   ~TestPacketization();
-  int32_t SendData(const FrameType frameType,
+  int32_t SendData(const AudioFrameType frameType,
                    const uint8_t payloadType,
                    const uint32_t timeStamp,
                    const uint8_t* payloadData,

@@ -60,7 +60,7 @@ void TestPack::RegisterReceiverACM(AudioCodingModule* acm) {
   return;
 }
 
-int32_t TestPack::SendData(FrameType frame_type,
+int32_t TestPack::SendData(AudioFrameType frame_type,
                            uint8_t payload_type,
                            uint32_t timestamp,
                            const uint8_t* payload_data,

@@ -25,7 +25,7 @@ class TestPack : public AudioPacketizationCallback {
 
   void RegisterReceiverACM(AudioCodingModule* acm);
 
-  int32_t SendData(FrameType frame_type,
+  int32_t SendData(AudioFrameType frame_type,
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,

@@ -40,7 +40,7 @@ void TestPackStereo::RegisterReceiverACM(AudioCodingModule* acm) {
   return;
 }
 
-int32_t TestPackStereo::SendData(const FrameType frame_type,
+int32_t TestPackStereo::SendData(const AudioFrameType frame_type,
                                  const uint8_t payload_type,
                                  const uint32_t timestamp,
                                  const uint8_t* payload_data,

@@ -31,7 +31,7 @@ class TestPackStereo : public AudioPacketizationCallback {
 
   void RegisterReceiverACM(AudioCodingModule* acm);
 
-  int32_t SendData(const FrameType frame_type,
+  int32_t SendData(const AudioFrameType frame_type,
                    const uint8_t payload_type,
                    const uint32_t timestamp,
                    const uint8_t* payload_data,

@@ -33,7 +33,7 @@ ActivityMonitor::ActivityMonitor() {
   ResetStatistics();
 }
 
-int32_t ActivityMonitor::InFrameType(FrameType frame_type) {
+int32_t ActivityMonitor::InFrameType(AudioFrameType frame_type) {
   counter_[frame_type]++;
   return 0;
 }

@@ -25,7 +25,7 @@ namespace webrtc {
 class ActivityMonitor : public ACMVADCallback {
  public:
   ActivityMonitor();
-  int32_t InFrameType(FrameType frame_type);
+  int32_t InFrameType(AudioFrameType frame_type);
   void PrintStatistics();
   void ResetStatistics();
   void GetStatistics(uint32_t* stats);

@@ -30,7 +30,8 @@ struct WebRtcRTPHeader {
   RTPVideoHeader video;
 
   RTPHeader header;
-  FrameType frameType;
+  // Used for video only.
+  VideoFrameType frameType;
   // NTP time of the capture time in local timebase in milliseconds.
   int64_t ntp_time_ms;
 };

@@ -29,7 +29,7 @@ std::unique_ptr<RtpPacketizer> RtpPacketizer::Create(
     PayloadSizeLimits limits,
     // Codec-specific details.
     const RTPVideoHeader& rtp_video_header,
-    FrameType frame_type,
+    VideoFrameType frame_type,
     const RTPFragmentationHeader* fragmentation) {
   switch (type) {
     case kVideoCodecH264: {

@@ -39,7 +39,7 @@ class RtpPacketizer {
       PayloadSizeLimits limits,
       // Codec-specific details.
       const RTPVideoHeader& rtp_video_header,
-      FrameType frame_type,
+      VideoFrameType frame_type,
       const RTPFragmentationHeader* fragmentation);
 
   virtual ~RtpPacketizer() = default;

@@ -71,7 +71,7 @@ class RtpDepacketizer {
 
     const uint8_t* payload;
     size_t payload_length;
-    FrameType frame_type;
+    VideoFrameType frame_type;
   };
 
   static RtpDepacketizer* Create(VideoCodecType type);

@@ -26,7 +26,7 @@ RtpPacketizerGeneric::RtpPacketizerGeneric(
     rtc::ArrayView<const uint8_t> payload,
     PayloadSizeLimits limits,
     const RTPVideoHeader& rtp_video_header,
-    FrameType frame_type)
+    VideoFrameType frame_type)
     : remaining_payload_(payload) {
   BuildHeader(rtp_video_header, frame_type);
 

@@ -72,7 +72,7 @@ bool RtpPacketizerGeneric::NextPacket(RtpPacketToSend* packet) {
 }
 
 void RtpPacketizerGeneric::BuildHeader(const RTPVideoHeader& rtp_video_header,
-                                       FrameType frame_type) {
+                                       VideoFrameType frame_type) {
   header_size_ = kGenericHeaderLength;
   header_[0] = RtpFormatVideoGeneric::kFirstPacketBit;
   if (frame_type == kVideoFrameKey) {

@@ -38,7 +38,7 @@ class RtpPacketizerGeneric : public RtpPacketizer {
   RtpPacketizerGeneric(rtc::ArrayView<const uint8_t> payload,
                        PayloadSizeLimits limits,
                        const RTPVideoHeader& rtp_video_header,
-                       FrameType frametype);
+                       VideoFrameType frametype);
 
   ~RtpPacketizerGeneric() override;
 

@@ -52,7 +52,7 @@ class RtpPacketizerGeneric : public RtpPacketizer {
  private:
   // Fills header_ and header_size_ members.
   void BuildHeader(const RTPVideoHeader& rtp_video_header,
-                   FrameType frame_type);
+                   VideoFrameType frame_type);
 
   uint8_t header_[3];
   size_t header_size_;

@@ -30,7 +30,7 @@ namespace webrtc {
 
 namespace {
 
-const char* FrameTypeToString(FrameType frame_type) {
+const char* FrameTypeToString(AudioFrameType frame_type) {
   switch (frame_type) {
     case kEmptyFrame:
       return "empty";

@@ -88,7 +88,7 @@ int32_t RTPSenderAudio::RegisterAudioPayload(absl::string_view payload_name,
   return 0;
 }
 
-bool RTPSenderAudio::MarkerBit(FrameType frame_type, int8_t payload_type) {
+bool RTPSenderAudio::MarkerBit(AudioFrameType frame_type, int8_t payload_type) {
   rtc::CritScope cs(&send_audio_critsect_);
   // for audio true for first packet in a speech burst
   bool marker_bit = false;

@@ -131,7 +131,7 @@ bool RTPSenderAudio::MarkerBit(FrameType frame_type, int8_t payload_type) {
   return marker_bit;
 }
 
-bool RTPSenderAudio::SendAudio(FrameType frame_type,
+bool RTPSenderAudio::SendAudio(AudioFrameType frame_type,
                                int8_t payload_type,
                                uint32_t rtp_timestamp,
                                const uint8_t* payload_data,

@@ -39,7 +39,7 @@ class RTPSenderAudio {
                              size_t channels,
                              uint32_t rate);
 
-  bool SendAudio(FrameType frame_type,
+  bool SendAudio(AudioFrameType frame_type,
                  int8_t payload_type,
                  uint32_t capture_timestamp,
                  const uint8_t* payload_data,

@@ -60,7 +60,7 @@ class RTPSenderAudio {
                uint16_t duration,
                bool marker_bit);  // set on first packet in talk burst
 
-  bool MarkerBit(FrameType frame_type, int8_t payload_type);
+  bool MarkerBit(AudioFrameType frame_type, int8_t payload_type);
 
  private:
   bool LogAndSendToNetwork(std::unique_ptr<RtpPacketToSend> packet,

@@ -54,7 +54,7 @@ void BuildRedPayload(const RtpPacketToSend& media_packet,
 
 void AddRtpHeaderExtensions(const RTPVideoHeader& video_header,
                             const absl::optional<PlayoutDelay>& playout_delay,
-                            FrameType frame_type,
+                            VideoFrameType frame_type,
                             bool set_video_rotation,
                             bool set_color_space,
                             bool set_frame_marking,

@@ -167,7 +167,7 @@ bool IsBaseLayer(const RTPVideoHeader& video_header) {
   return true;
 }
 
-const char* FrameTypeToString(FrameType frame_type) {
+const char* FrameTypeToString(VideoFrameType frame_type) {
   switch (frame_type) {
     case kEmptyFrame:
       return "empty";

@@ -421,7 +421,7 @@ absl::optional<uint32_t> RTPSenderVideo::FlexfecSsrc() const {
   return absl::nullopt;
 }
 
-bool RTPSenderVideo::SendVideo(FrameType frame_type,
+bool RTPSenderVideo::SendVideo(VideoFrameType frame_type,
                                int8_t payload_type,
                                uint32_t rtp_timestamp,
                                int64_t capture_time_ms,

@@ -59,7 +59,7 @@ class RTPSenderVideo {
                  const WebRtcKeyValueConfig& field_trials);
   virtual ~RTPSenderVideo();
 
-  bool SendVideo(FrameType frame_type,
+  bool SendVideo(VideoFrameType frame_type,
                  int8_t payload_type,
                  uint32_t capture_timestamp,
                  int64_t capture_time_ms,

@@ -64,7 +64,7 @@ int NumberOfThreads(int width, int height, int number_of_cores) {
   return 1;
 }
 
-FrameType ConvertToVideoFrameType(EVideoFrameType type) {
+VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
   switch (type) {
     case videoFrameTypeIDR:
       return kVideoFrameKey;

@@ -381,9 +381,10 @@ int32_t H264EncoderImpl::SetRateAllocation(
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
-                                const CodecSpecificInfo* codec_specific_info,
-                                const std::vector<FrameType>* frame_types) {
+int32_t H264EncoderImpl::Encode(
+    const VideoFrame& input_frame,
+    const CodecSpecificInfo* codec_specific_info,
+    const std::vector<VideoFrameType>* frame_types) {
   if (encoders_.empty()) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;

@@ -68,7 +68,7 @@ class H264EncoderImpl : public H264Encoder {
   // passed to the encode complete callback.
   int32_t Encode(const VideoFrame& frame,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
 
   EncoderInfo GetEncoderInfo() const override;
 

@@ -43,7 +43,7 @@ class MultiplexEncoderAdapter : public VideoEncoder {
              int number_of_cores,
             size_t max_payload_size) override;
   int Encode(const VideoFrame& input_image,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
   int SetRateAllocation(const VideoBitrateAllocation& bitrate,
                         uint32_t new_framerate) override;

@@ -115,11 +115,13 @@ MultiplexImageComponentHeader UnpackFrameHeader(const uint8_t* buffer) {
       ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
   offset += sizeof(uint32_t);
 
+  // TODO(nisse): This makes the wire format depend on the numeric values of the
+  // VideoCodecType and VideoFrameType enum constants.
   frame_header.codec_type = static_cast<VideoCodecType>(
       ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
   offset += sizeof(uint8_t);
 
-  frame_header.frame_type = static_cast<FrameType>(
+  frame_header.frame_type = static_cast<VideoFrameType>(
      ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
   offset += sizeof(uint8_t);
 
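
The TODO above flags a pitfall for the planned split: this wire format stores the enum as its raw numeric value, so the constants must keep their current values once VideoFrameType becomes a standalone enum, or the mapping must be made explicit. A sketch of the explicit-mapping option (the on-the-wire byte values here are assumptions for illustration, not part of this CL):

// Pins the serialized byte independently of any later renumbering of the
// enum; a reader would switch on the byte in the same way.
uint8_t SerializeVideoFrameType(VideoFrameType type) {
  switch (type) {
    case kVideoFrameKey:
      return 3;  // assumed wire value
    case kVideoFrameDelta:
      return 4;  // assumed wire value
    default:
      return 0;  // kEmptyFrame and anything else
  }
}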
@ -181,8 +183,8 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
|
|||||||
// key frame so as to decode the whole image without previous frame data.
|
// key frame so as to decode the whole image without previous frame data.
|
||||||
// Thus only when all components are key frames, we can mark the combined
|
// Thus only when all components are key frames, we can mark the combined
|
||||||
// frame as key frame.
|
// frame as key frame.
|
||||||
if (frame_header.frame_type == FrameType::kVideoFrameDelta) {
|
if (frame_header.frame_type == VideoFrameType::kVideoFrameDelta) {
|
||||||
combined_image._frameType = FrameType::kVideoFrameDelta;
|
combined_image._frameType = VideoFrameType::kVideoFrameDelta;
|
||||||
}
|
}
|
||||||
|
|
||||||
frame_headers.push_back(frame_header);
|
frame_headers.push_back(frame_header);
|
||||||
|
|||||||
@ -67,7 +67,7 @@ struct MultiplexImageComponentHeader {
|
|||||||
VideoCodecType codec_type;
|
VideoCodecType codec_type;
|
||||||
|
|
||||||
// Indicated the underlying frame is a key frame or delta frame.
|
// Indicated the underlying frame is a key frame or delta frame.
|
||||||
FrameType frame_type;
|
VideoFrameType frame_type;
|
||||||
};
|
};
|
||||||
const int kMultiplexImageComponentHeaderSize =
|
const int kMultiplexImageComponentHeaderSize =
|
||||||
sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +
|
sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +
|
||||||
|
|||||||
@ -138,12 +138,12 @@ int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst,
|
|||||||
|
|
||||||
int MultiplexEncoderAdapter::Encode(
|
int MultiplexEncoderAdapter::Encode(
|
||||||
const VideoFrame& input_image,
|
const VideoFrame& input_image,
|
||||||
const std::vector<FrameType>* frame_types) {
|
const std::vector<VideoFrameType>* frame_types) {
|
||||||
if (!encoded_complete_callback_) {
|
if (!encoded_complete_callback_) {
|
||||||
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
|
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<FrameType> adjusted_frame_types;
|
std::vector<VideoFrameType> adjusted_frame_types;
|
||||||
if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
|
if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
|
||||||
adjusted_frame_types.push_back(kVideoFrameKey);
|
adjusted_frame_types.push_back(kVideoFrameKey);
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@ -285,9 +285,9 @@ void VideoProcessor::ProcessFrame() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Encode.
|
// Encode.
|
||||||
const std::vector<FrameType> frame_types =
|
const std::vector<VideoFrameType> frame_types =
|
||||||
(frame_number == 0) ? std::vector<FrameType>{kVideoFrameKey}
|
(frame_number == 0) ? std::vector<VideoFrameType>{kVideoFrameKey}
|
||||||
: std::vector<FrameType>{kVideoFrameDelta};
|
: std::vector<VideoFrameType>{kVideoFrameDelta};
|
||||||
const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
|
const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
|
||||||
for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
|
for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
|
||||||
FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
|
FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
|
||||||
|
|||||||
@ -737,7 +737,7 @@ size_t LibvpxVp8Encoder::SteadyStateSize(int sid, int tid) {
|
|||||||
|
|
||||||
int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
|
int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
|
||||||
const CodecSpecificInfo* codec_specific_info,
|
const CodecSpecificInfo* codec_specific_info,
|
||||||
const std::vector<FrameType>* frame_types) {
|
const std::vector<VideoFrameType>* frame_types) {
|
||||||
RTC_DCHECK_EQ(frame.width(), codec_.width);
|
RTC_DCHECK_EQ(frame.width(), codec_.width);
|
||||||
RTC_DCHECK_EQ(frame.height(), codec_.height);
|
RTC_DCHECK_EQ(frame.height(), codec_.height);
|
||||||
|
|
||||||
|
|||||||
@ -47,7 +47,7 @@ class LibvpxVp8Encoder : public VideoEncoder {
|
|||||||
|
|
||||||
int Encode(const VideoFrame& input_image,
|
int Encode(const VideoFrame& input_image,
|
||||||
const CodecSpecificInfo* codec_specific_info,
|
const CodecSpecificInfo* codec_specific_info,
|
||||||
const std::vector<FrameType>* frame_types) override;
|
const std::vector<VideoFrameType>* frame_types) override;
|
||||||
|
|
||||||
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
|
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
|
||||||
|
|
||||||
|
|||||||
@ -74,11 +74,11 @@ class TestVp8Impl : public VideoCodecUnitTest {
|
|||||||
EncodedImage* encoded_frame,
|
EncodedImage* encoded_frame,
|
||||||
CodecSpecificInfo* codec_specific_info,
|
CodecSpecificInfo* codec_specific_info,
|
||||||
bool keyframe = false) {
|
bool keyframe = false) {
|
||||||
std::vector<FrameType> frame_types;
|
std::vector<VideoFrameType> frame_types;
|
||||||
if (keyframe) {
|
if (keyframe) {
|
||||||
frame_types.emplace_back(FrameType::kVideoFrameKey);
|
frame_types.emplace_back(VideoFrameType::kVideoFrameKey);
|
||||||
} else {
|
} else {
|
||||||
frame_types.emplace_back(FrameType::kVideoFrameDelta);
|
frame_types.emplace_back(VideoFrameType::kVideoFrameDelta);
|
||||||
}
|
}
|
||||||
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
|
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
|
||||||
encoder_->Encode(input_frame, &frame_types));
|
encoder_->Encode(input_frame, &frame_types));
|
||||||
@ -484,7 +484,7 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) {
|
|||||||
.Times(2)
|
.Times(2)
|
||||||
.WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));
|
.WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));
|
||||||
|
|
||||||
auto delta_frame = std::vector<FrameType>{kVideoFrameDelta};
|
auto delta_frame = std::vector<VideoFrameType>{kVideoFrameDelta};
|
||||||
encoder.Encode(*NextInputFrame(), nullptr, &delta_frame);
|
encoder.Encode(*NextInputFrame(), nullptr, &delta_frame);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -714,7 +714,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {

 int VP9EncoderImpl::Encode(const VideoFrame& input_image,
                            const CodecSpecificInfo* codec_specific_info,
-                           const std::vector<FrameType>* frame_types) {
+                           const std::vector<VideoFrameType>* frame_types) {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
@@ -45,7 +45,7 @@ class VP9EncoderImpl : public VP9Encoder {

   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;

   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;

@@ -67,7 +67,7 @@ class VCMEncodedFrame : protected EncodedImage {
   /**
   *   Get frame type
   */
-  webrtc::FrameType FrameType() const { return _frameType; }
+  webrtc::VideoFrameType FrameType() const { return _frameType; }
   /**
   *   Get frame rotation
   */
@@ -177,7 +177,7 @@ void FecControllerDefault::SetProtectionMethod(bool enable_fec,
 }
 void FecControllerDefault::UpdateWithEncodedData(
     const size_t encoded_image_length,
-    const FrameType encoded_image_frametype) {
+    const VideoFrameType encoded_image_frametype) {
   const size_t encoded_length = encoded_image_length;
   CritScope lock(&crit_sect_);
   if (encoded_length > 0) {
@@ -44,8 +44,9 @@ class FecControllerDefault : public FecController {
                             uint8_t fraction_lost,
                             std::vector<bool> loss_mask_vector,
                             int64_t round_trip_time_ms) override;
-  void UpdateWithEncodedData(const size_t encoded_image_length,
-                             const FrameType encoded_image_frametype) override;
+  void UpdateWithEncodedData(
+      const size_t encoded_image_length,
+      const VideoFrameType encoded_image_frametype) override;
   bool UseLossVectorMask() override;
   float GetProtectionOverheadRateThreshold();

@@ -29,7 +29,7 @@ VCMFrameBuffer::VCMFrameBuffer()

 VCMFrameBuffer::~VCMFrameBuffer() {}

-webrtc::FrameType VCMFrameBuffer::FrameType() const {
+webrtc::VideoFrameType VCMFrameBuffer::FrameType() const {
   return _sessionInfo.FrameType();
 }

@@ -70,7 +70,7 @@ class VCMFrameBuffer : public VCMEncodedFrame {

   int64_t LatestPacketTimeMs() const;

-  webrtc::FrameType FrameType() const;
+  webrtc::VideoFrameType FrameType() const;

  private:
   void SetState(VCMFrameBufferStateEnum state);  // Set state of frame
@@ -121,7 +121,7 @@ int RtpFrameObject::times_nacked() const {
   return times_nacked_;
 }

-FrameType RtpFrameObject::frame_type() const {
+VideoFrameType RtpFrameObject::frame_type() const {
   return frame_type_;
 }

@@ -36,7 +36,7 @@ class RtpFrameObject : public EncodedFrame {
   uint16_t first_seq_num() const;
   uint16_t last_seq_num() const;
   int times_nacked() const;
-  enum FrameType frame_type() const;
+  VideoFrameType frame_type() const;
   VideoCodecType codec_type() const;
   int64_t ReceivedTime() const override;
   int64_t RenderTime() const override;
@@ -49,7 +49,7 @@ class RtpFrameObject : public EncodedFrame {
   void AllocateBitstreamBuffer(size_t frame_size);

   rtc::scoped_refptr<PacketBuffer> packet_buffer_;
-  enum FrameType frame_type_;
+  VideoFrameType frame_type_;
   VideoCodecType codec_type_;
   uint16_t first_seq_num_;
   uint16_t last_seq_num_;
@@ -362,7 +362,7 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
     return jitter_buffer_->InsertPacket(packet, &retransmitted);
   }

-  VCMFrameBufferEnum InsertFrame(FrameType frame_type) {
+  VCMFrameBufferEnum InsertFrame(VideoFrameType frame_type) {
     stream_generator_->GenerateFrame(
         frame_type, (frame_type != kEmptyFrame) ? 1 : 0,
         (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
@@ -371,7 +371,7 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
     return ret;
   }

-  VCMFrameBufferEnum InsertFrames(int num_frames, FrameType frame_type) {
+  VCMFrameBufferEnum InsertFrames(int num_frames, VideoFrameType frame_type) {
     VCMFrameBufferEnum ret_for_all = kNoError;
     for (int i = 0; i < num_frames; ++i) {
       VCMFrameBufferEnum ret = InsertFrame(frame_type);
@@ -46,7 +46,7 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
                      size_t size,
                      const RTPHeader& rtp_header,
                      const RTPVideoHeader& videoHeader,
-                     FrameType frame_type,
+                     VideoFrameType frame_type,
                      int64_t ntp_time_ms)
     : payloadType(rtp_header.payloadType),
       timestamp(rtp_header.timestamp),
@@ -32,7 +32,7 @@ class VCMPacket {
             size_t size,
             const RTPHeader& rtp_header,
             const RTPVideoHeader& video_header,
-            FrameType frame_type,
+            VideoFrameType frame_type,
             int64_t ntp_time_ms);

   ~VCMPacket();
@@ -58,7 +58,7 @@ class VCMPacket {
   bool markerBit;
   int timesNacked;

-  FrameType frameType;
+  VideoFrameType frameType;

   VCMNaluCompleteness completeNALU;  // Default is kNaluIncomplete.
   bool insertStartCode;  // True if a start code should be inserted before this
@@ -56,7 +56,7 @@ class TestVCMReceiver : public ::testing::Test {
     return receiver_.InsertPacket(packet);
   }

-  int32_t InsertFrame(FrameType frame_type, bool complete) {
+  int32_t InsertFrame(VideoFrameType frame_type, bool complete) {
     int num_of_packets = complete ? 1 : 2;
     stream_generator_->GenerateFrame(
         frame_type, (frame_type != kEmptyFrame) ? num_of_packets : 0,
@@ -322,7 +322,7 @@ class SimulatedClockWithFrames : public SimulatedClock {

   void GenerateAndInsertFrame(int64_t render_timestamp_ms) {
     VCMPacket packet;
-    stream_generator_->GenerateFrame(FrameType::kVideoFrameKey,
+    stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey,
                                      1,  // media packets
                                      0,  // empty packets
                                      render_timestamp_ms);
@@ -54,7 +54,7 @@ class VCMSessionInfo {
   int NumPackets() const;
   bool HaveFirstPacket() const;
   bool HaveLastPacket() const;
-  webrtc::FrameType FrameType() const { return frame_type_; }
+  webrtc::VideoFrameType FrameType() const { return frame_type_; }
   int LowSequenceNumber() const;

   // Returns highest sequence number, media or empty.
@@ -103,7 +103,7 @@ class VCMSessionInfo {
   void UpdateCompleteSession();

   bool complete_;
-  webrtc::FrameType frame_type_;
+  webrtc::VideoFrameType frame_type_;
   // Packets in this frame.
   PacketList packets_;
   int empty_seq_num_low_;
@@ -29,7 +29,7 @@ void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) {
   memset(packet_buffer_, 0, sizeof(packet_buffer_));
 }

-void StreamGenerator::GenerateFrame(FrameType type,
+void StreamGenerator::GenerateFrame(VideoFrameType type,
                                     int num_media_packets,
                                     int num_empty_packets,
                                     int64_t time_ms) {
@@ -54,7 +54,7 @@ VCMPacket StreamGenerator::GeneratePacket(uint16_t sequence_number,
                                           unsigned int size,
                                           bool first_packet,
                                           bool marker_bit,
-                                          FrameType type) {
+                                          VideoFrameType type) {
   EXPECT_LT(size, kMaxPacketSize);
   VCMPacket packet;
   packet.seqNum = sequence_number;
@@ -34,7 +34,7 @@ class StreamGenerator {
   // |time_ms| denotes the timestamp you want to put on the frame, and the unit
   // is millisecond. GenerateFrame will translate |time_ms| into a 90kHz
   // timestamp and put it on the frame.
-  void GenerateFrame(FrameType type,
+  void GenerateFrame(VideoFrameType type,
                      int num_media_packets,
                      int num_empty_packets,
                      int64_t time_ms);
@@ -56,7 +56,7 @@ class StreamGenerator {
                            unsigned int size,
                            bool first_packet,
                            bool marker_bit,
-                           FrameType type);
+                           VideoFrameType type);

   std::list<VCMPacket>::iterator GetPacketIterator(int index);

@@ -294,7 +294,7 @@ void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {

 void SimulcastTestFixtureImpl::RunActiveStreamsTest(
     const std::vector<bool> active_streams) {
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   UpdateActiveStreams(active_streams);
   // Set sufficient bitrate for all streams so we can test active without
@@ -326,7 +326,7 @@ void SimulcastTestFixtureImpl::UpdateActiveStreams(
 }

 void SimulcastTestFixtureImpl::ExpectStreams(
-    FrameType frame_type,
+    VideoFrameType frame_type,
     const std::vector<bool> expected_streams_active) {
   ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
             kNumberOfSimulcastStreams);
@@ -367,7 +367,7 @@ void SimulcastTestFixtureImpl::ExpectStreams(
   }
 }

-void SimulcastTestFixtureImpl::ExpectStreams(FrameType frame_type,
+void SimulcastTestFixtureImpl::ExpectStreams(VideoFrameType frame_type,
                                              int expected_video_streams) {
   ASSERT_GE(expected_video_streams, 0);
   ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
@@ -396,7 +396,7 @@ void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
 // a key frame was only requested for some of them.
 void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -431,7 +431,7 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
 void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
   // We should always encode the base layer.
   SetRates(kMinBitrates[0] - 1, 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -444,7 +444,7 @@ void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
 void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
   // We have just enough to get only the first stream and padding for two.
   SetRates(kMinBitrates[0], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -458,7 +458,7 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
   // We are just below limit of sending second stream, so we should get
   // the first stream maxed out (at |maxBitrate|), and padding for two.
   SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -471,7 +471,7 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
 void SimulcastTestFixtureImpl::TestPaddingOneStream() {
   // We have just enough to send two streams, so padding for one stream.
   SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -485,7 +485,7 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
   // We are just below limit of sending third stream, so we should get
   // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -498,7 +498,7 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
 void SimulcastTestFixtureImpl::TestSendAllStreams() {
   // We have just enough to send all streams.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -511,7 +511,7 @@ void SimulcastTestFixtureImpl::TestSendAllStreams() {
 void SimulcastTestFixtureImpl::TestDisablingStreams() {
   // We should get three media streams.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
@@ -617,7 +617,7 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {

   // Encode one frame and verify.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                      kVideoFrameDelta);
   EXPECT_CALL(
       encoder_callback_,
@@ -67,9 +67,9 @@ class SimulcastTestFixtureImpl final : public SimulcastTestFixture {
   void SetRates(uint32_t bitrate_kbps, uint32_t fps);
   void RunActiveStreamsTest(const std::vector<bool> active_streams);
   void UpdateActiveStreams(const std::vector<bool> active_streams);
-  void ExpectStreams(FrameType frame_type,
+  void ExpectStreams(VideoFrameType frame_type,
                      const std::vector<bool> expected_streams_active);
-  void ExpectStreams(FrameType frame_type, int expected_video_streams);
+  void ExpectStreams(VideoFrameType frame_type, int expected_video_streams);
   void VerifyTemporalIdxAndSyncForAllSpatialLayers(
       TestEncodedImageCallback* encoder_callback,
       const int* expected_temporal_idx,
@@ -100,7 +100,7 @@ class MediaCodecVideoEncoder : public VideoEncoder {
                      size_t /* max_payload_size */) override;
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* /* codec_specific_info */,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
   int32_t Release() override;
@@ -595,7 +595,7 @@ int32_t MediaCodecVideoEncoder::InitEncodeInternal(int width,
 int32_t MediaCodecVideoEncoder::Encode(
     const VideoFrame& frame,
     const CodecSpecificInfo* /* codec_specific_info */,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_checker_);
   if (sw_fallback_required_)
     return WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
@@ -20,7 +20,7 @@ namespace webrtc {
 namespace jni {

 ScopedJavaLocalRef<jobject> NativeToJavaFrameType(JNIEnv* env,
-                                                  FrameType frame_type) {
+                                                  VideoFrameType frame_type) {
   return Java_FrameType_fromNativeIndex(env, frame_type);
 }

@@ -43,7 +43,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaEncodedImage(

 ScopedJavaLocalRef<jobjectArray> NativeToJavaFrameTypeArray(
     JNIEnv* env,
-    const std::vector<FrameType>& frame_types) {
+    const std::vector<VideoFrameType>& frame_types) {
   return NativeToJavaObjectArray(
       env, frame_types, org_webrtc_EncodedImage_00024FrameType_clazz(env),
       &NativeToJavaFrameType);
@@ -25,12 +25,12 @@ class EncodedImage;
 namespace jni {

 ScopedJavaLocalRef<jobject> NativeToJavaFrameType(JNIEnv* env,
-                                                  FrameType frame_type);
+                                                  VideoFrameType frame_type);
 ScopedJavaLocalRef<jobject> NativeToJavaEncodedImage(JNIEnv* jni,
                                                      const EncodedImage& image);
 ScopedJavaLocalRef<jobjectArray> NativeToJavaFrameTypeArray(
     JNIEnv* env,
-    const std::vector<FrameType>& frame_types);
+    const std::vector<VideoFrameType>& frame_types);

 }  // namespace jni
 }  // namespace webrtc
@@ -120,7 +120,7 @@ int32_t VideoEncoderWrapper::Release() {
 int32_t VideoEncoderWrapper::Encode(
     const VideoFrame& frame,
     const CodecSpecificInfo* /* codec_specific_info */,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   if (!initialized_) {
     // Most likely initializing the codec failed.
     return WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
@@ -275,7 +275,7 @@ void VideoEncoderWrapper::OnEncodedFrame(JNIEnv* jni,
   frame._encodedHeight = encoded_height;
   frame.SetTimestamp(frame_extra_info.timestamp_rtp);
   frame.capture_time_ms_ = capture_time_ns / rtc::kNumNanosecsPerMillisec;
-  frame._frameType = (FrameType)frame_type;
+  frame._frameType = (VideoFrameType)frame_type;
   frame.rotation_ = (VideoRotation)rotation;
   frame._completeFrame = complete_frame;
   if (qp == -1) {
@@ -43,7 +43,7 @@ class VideoEncoderWrapper : public VideoEncoder {

   int32_t Encode(const VideoFrame& frame,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;

   int32_t SetRateAllocation(const VideoBitrateAllocation& allocation,
                             uint32_t framerate) override;
@@ -52,7 +52,7 @@
   encodedImage.timing_.flags = self.flags;
   encodedImage.timing_.encode_start_ms = self.encodeStartMs;
   encodedImage.timing_.encode_finish_ms = self.encodeFinishMs;
-  encodedImage._frameType = webrtc::FrameType(self.frameType);
+  encodedImage._frameType = webrtc::VideoFrameType(self.frameType);
   encodedImage.rotation_ = webrtc::VideoRotation(self.rotation);
   encodedImage._completeFrame = self.completeFrame;
   encodedImage.qp_ = self.qp ? self.qp.intValue : -1;
@@ -75,7 +75,7 @@ class ObjCVideoEncoder : public VideoEncoder {

   int32_t Encode(const VideoFrame &frame,
                  const CodecSpecificInfo *codec_specific_info,
-                 const std::vector<FrameType> *frame_types) override {
+                 const std::vector<VideoFrameType> *frame_types) override {
     NSMutableArray<NSNumber *> *rtcFrameTypes = [NSMutableArray array];
     for (size_t i = 0; i < frame_types->size(); ++i) {
       [rtcFrameTypes addObject:@(RTCFrameType(frame_types->at(i)))];
@@ -84,7 +84,7 @@ TEST(ObjCVideoEncoderFactoryTest, EncodeReturnsOKOnSuccess) {
           .set_rotation(webrtc::kVideoRotation_0)
           .set_timestamp_us(0)
           .build();
-  std::vector<webrtc::FrameType> frame_types;
+  std::vector<webrtc::VideoFrameType> frame_types;

   EXPECT_EQ(encoder->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_OK);
 }
@@ -102,7 +102,7 @@ TEST(ObjCVideoEncoderFactoryTest, EncodeReturnsErrorOnFail) {
           .set_rotation(webrtc::kVideoRotation_0)
           .set_timestamp_us(0)
           .build();
-  std::vector<webrtc::FrameType> frame_types;
+  std::vector<webrtc::VideoFrameType> frame_types;

   EXPECT_EQ(encoder->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_ERROR);
 }
@@ -46,7 +46,7 @@ int32_t ConfigurableFrameSizeEncoder::InitEncode(
 int32_t ConfigurableFrameSizeEncoder::Encode(
     const VideoFrame& inputImage,
     const CodecSpecificInfo* codecSpecificInfo,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   EncodedImage encodedImage(buffer_.get(), current_frame_size_,
                             max_frame_size_);
   encodedImage._completeFrame = true;
@@ -39,7 +39,7 @@ class ConfigurableFrameSizeEncoder : public VideoEncoder {

   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;

   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
@@ -82,7 +82,7 @@ int32_t FakeEncoder::InitEncode(const VideoCodec* config,

 int32_t FakeEncoder::Encode(const VideoFrame& input_image,
                             const CodecSpecificInfo* /*codec_specific_info*/,
-                            const std::vector<FrameType>* frame_types) {
+                            const std::vector<VideoFrameType>* frame_types) {
   unsigned char max_framerate;
   unsigned char num_simulcast_streams;
   SimulcastStream simulcast_streams[kMaxSimulcastStreams];
@@ -161,7 +161,7 @@ std::unique_ptr<RTPFragmentationHeader> FakeEncoder::EncodeHook(
 }

 FakeEncoder::FrameInfo FakeEncoder::NextFrame(
-    const std::vector<FrameType>* frame_types,
+    const std::vector<VideoFrameType>* frame_types,
     bool keyframe,
     uint8_t num_simulcast_streams,
     const VideoBitrateAllocation& target_bitrate,
@@ -171,7 +171,7 @@ FakeEncoder::FrameInfo FakeEncoder::NextFrame(
   frame_info.keyframe = keyframe;

   if (frame_types) {
-    for (FrameType frame_type : *frame_types) {
+    for (VideoFrameType frame_type : *frame_types) {
       if (frame_type == kVideoFrameKey) {
         frame_info.keyframe = true;
         break;
@@ -356,7 +356,7 @@ void DelayedEncoder::SetDelay(int delay_ms) {

 int32_t DelayedEncoder::Encode(const VideoFrame& input_image,
                                const CodecSpecificInfo* codec_specific_info,
-                               const std::vector<FrameType>* frame_types) {
+                               const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);

   SleepMs(delay_ms_);
@@ -390,7 +390,7 @@ class MultithreadedFakeH264Encoder::EncodeTask : public rtc::QueuedTask {
   EncodeTask(MultithreadedFakeH264Encoder* encoder,
              const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<FrameType>* frame_types)
+             const std::vector<VideoFrameType>* frame_types)
       : encoder_(encoder),
         input_image_(input_image),
         codec_specific_info_(),
@@ -409,13 +409,13 @@ class MultithreadedFakeH264Encoder::EncodeTask : public rtc::QueuedTask {
   MultithreadedFakeH264Encoder* const encoder_;
   VideoFrame input_image_;
   CodecSpecificInfo codec_specific_info_;
-  std::vector<FrameType> frame_types_;
+  std::vector<VideoFrameType> frame_types_;
 };

 int32_t MultithreadedFakeH264Encoder::Encode(
     const VideoFrame& input_image,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);

   std::unique_ptr<rtc::TaskQueue>& queue =
@@ -434,7 +434,7 @@ int32_t MultithreadedFakeH264Encoder::Encode(
 int32_t MultithreadedFakeH264Encoder::EncodeCallback(
     const VideoFrame& input_image,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   return FakeH264Encoder::Encode(input_image, codec_specific_info, frame_types);
 }

@@ -46,7 +46,7 @@ class FakeEncoder : public VideoEncoder {
                      size_t max_payload_size) override;
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
   int32_t Release() override;
@@ -72,7 +72,7 @@ class FakeEncoder : public VideoEncoder {
     std::vector<SpatialLayer> layers;
   };

-  FrameInfo NextFrame(const std::vector<FrameType>* frame_types,
+  FrameInfo NextFrame(const std::vector<VideoFrameType>* frame_types,
                       bool keyframe,
                       uint8_t num_simulcast_streams,
                       const VideoBitrateAllocation& target_bitrate,
@@ -126,7 +126,7 @@ class DelayedEncoder : public test::FakeEncoder {
   void SetDelay(int delay_ms);
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;

  private:
   int delay_ms_ RTC_GUARDED_BY(sequence_checker_);
@@ -148,11 +148,11 @@ class MultithreadedFakeH264Encoder : public test::FakeH264Encoder {

   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;

   int32_t EncodeCallback(const VideoFrame& input_image,
                          const CodecSpecificInfo* codec_specific_info,
-                         const std::vector<FrameType>* frame_types);
+                         const std::vector<VideoFrameType>* frame_types);

   int32_t Release() override;

@@ -92,7 +92,7 @@ void FakeVP8Encoder::SetupTemporalLayers(const VideoCodec& codec) {

 void FakeVP8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                                            size_t size_bytes,
-                                           FrameType frame_type,
+                                           VideoFrameType frame_type,
                                            int stream_idx,
                                            uint32_t timestamp) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
@@ -48,7 +48,7 @@ class FakeVP8Encoder : public FakeEncoder {
   void SetupTemporalLayers(const VideoCodec& codec);
   void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                              size_t size_bytes,
-                             FrameType frame_type,
+                             VideoFrameType frame_type,
                              int stream_idx,
                              uint32_t timestamp);

@@ -87,7 +87,7 @@ int32_t QualityAnalyzingVideoEncoder::Release() {

 int32_t QualityAnalyzingVideoEncoder::Encode(
     const VideoFrame& frame,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   {
     rtc::CritScope crit(&lock_);
     // Store id to be able to retrieve it in analyzing callback.
@@ -245,7 +245,7 @@ bool QualityAnalyzingVideoEncoder::ShouldDiscard(
   // are equal or less than required one are interesting, so all above
   // have to be discarded. For other frames only required spatial index
   // is interesting, so all others have to be discarded.
-  if (encoded_image._frameType == FrameType::kVideoFrameKey) {
+  if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
     return *encoded_image.SpatialIndex() > *required_spatial_index;
   } else {
     return *encoded_image.SpatialIndex() != *required_spatial_index;
@@ -68,7 +68,7 @@ class QualityAnalyzingVideoEncoder : public VideoEncoder,
       EncodedImageCallback* callback) override;
   int32_t Release() override;
   int32_t Encode(const VideoFrame& frame,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
   int32_t SetRates(uint32_t bitrate, uint32_t framerate) override;
   int32_t SetRateAllocation(const VideoBitrateAllocation& allocation,
                             uint32_t framerate) override;
@@ -62,7 +62,7 @@ class VideoEncoderProxyFactory final : public VideoEncoderFactory {

  private:
   int32_t Encode(const VideoFrame& input_image,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     return encoder_->Encode(input_image, frame_types);
   }
   int32_t InitEncode(const VideoCodec* config,
@@ -269,7 +269,7 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) {

     int32_t Encode(const VideoFrame& input_image,
                    const CodecSpecificInfo* codec_specific_info,
-                   const std::vector<FrameType>* frame_types) override {
+                   const std::vector<VideoFrameType>* frame_types) override {
       {
         rtc::CritScope lock(&test_crit_);
         if (sender_state_ == kNetworkDown) {
@@ -365,7 +365,7 @@ TEST_F(NetworkStateEndToEndTest, NewVideoSendStreamsRespectVideoNetworkDown) {
     }
     int32_t Encode(const VideoFrame& input_image,
                    const CodecSpecificInfo* codec_specific_info,
-                   const std::vector<FrameType>* frame_types) override {
+                   const std::vector<VideoFrameType>* frame_types) override {
       ADD_FAILURE() << "Unexpected frame encode.";
       return test::FakeEncoder::Encode(input_image, codec_specific_info,
                                        frame_types);
@@ -390,7 +390,7 @@ TEST_F(NetworkStateEndToEndTest, NewVideoSendStreamsIgnoreAudioNetworkDown) {
     }
     int32_t Encode(const VideoFrame& input_image,
                    const CodecSpecificInfo* codec_specific_info,
-                   const std::vector<FrameType>* frame_types) override {
+                   const std::vector<VideoFrameType>* frame_types) override {
       encoded_frame_ = true;
       return test::FakeEncoder::Encode(input_image, codec_specific_info,
                                        frame_types);
@@ -72,7 +72,7 @@ class PictureIdObserver : public test::RtpRtcpObserver {
     int16_t picture_id;
     int16_t tl0_pic_idx;
     uint8_t temporal_idx;
-    FrameType frame_type;
+    VideoFrameType frame_type;
   };

   bool ParsePayload(const uint8_t* packet,
@@ -142,7 +142,7 @@ class QualityTestVideoEncoder : public VideoEncoder,
   }
   int32_t Release() override { return encoder_->Release(); }
   int32_t Encode(const VideoFrame& frame,
-                 const std::vector<FrameType>* frame_types) {
+                 const std::vector<VideoFrameType>* frame_types) {
     if (analyzer_) {
       analyzer_->PreEncodeOnFrame(frame);
     }
@@ -1984,7 +1984,7 @@ TEST_F(VideoSendStreamTest,

     int32_t Encode(const VideoFrame& input_image,
                    const CodecSpecificInfo* codec_specific_info,
-                   const std::vector<FrameType>* frame_types) override {
+                   const std::vector<VideoFrameType>* frame_types) override {
       ADD_FAILURE()
           << "Unexpected Encode call since the send stream is not started";
       return 0;
@@ -2318,7 +2318,7 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {

     int32_t Encode(const VideoFrame& inputImage,
                    const CodecSpecificInfo* codecSpecificInfo,
-                   const std::vector<FrameType>* frame_types) override {
+                   const std::vector<VideoFrameType>* frame_types) override {
       EXPECT_TRUE(IsReadyForEncode());

       observation_complete_.Set();
@@ -2537,7 +2537,7 @@ class VideoCodecConfigObserver : public test::SendTest,

   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     // Silently skip the encode, FakeEncoder::Encode doesn't produce VP8.
     return 0;
   }
@@ -3003,7 +3003,7 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) {
  private:
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codecSpecificInfo,
-                 const std::vector<FrameType>* frame_types) override {
+                 const std::vector<VideoFrameType>* frame_types) override {
     CodecSpecificInfo specifics;
     specifics.codecType = kVideoCodecGeneric;

@@ -1731,7 +1731,8 @@ void VideoStreamEncoder::RunPostEncode(EncodedImage encoded_image,
   // Run post encode tasks, such as overuse detection and frame rate/drop
   // stats for internal encoders.
   const size_t frame_size = encoded_image.size();
-  const bool keyframe = encoded_image._frameType == FrameType::kVideoFrameKey;
+  const bool keyframe =
+      encoded_image._frameType == VideoFrameType::kVideoFrameKey;

   if (frame_size > 0) {
     frame_dropper_.Fill(frame_size, !keyframe);
@@ -316,7 +316,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,

   // TODO(sprang): Change actually support keyframe per simulcast stream, or
   // turn this into a simple bool |pending_keyframe_request_|.
-  std::vector<FrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);
+  std::vector<VideoFrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);

   FrameEncodeTimer frame_encoder_timer_;

@@ -606,14 +606,14 @@ class VideoStreamEncoderTest : public ::testing::Test {
       return last_update_rect_;
     }

-    const std::vector<FrameType>& LastFrameTypes() const {
+    const std::vector<VideoFrameType>& LastFrameTypes() const {
      rtc::CritScope lock(&local_crit_sect_);
      return last_frame_types_;
    }

    void InjectFrame(const VideoFrame& input_image, bool keyframe) {
-      const std::vector<FrameType> frame_type = {keyframe ? kVideoFrameKey
-                                                          : kVideoFrameDelta};
+      const std::vector<VideoFrameType> frame_type = {
+          keyframe ? kVideoFrameKey : kVideoFrameDelta};
      {
        rtc::CritScope lock(&local_crit_sect_);
        last_frame_types_ = frame_type;
@@ -640,7 +640,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
   private:
    int32_t Encode(const VideoFrame& input_image,
                   const CodecSpecificInfo* codec_specific_info,
-                  const std::vector<FrameType>* frame_types) override {
+                  const std::vector<VideoFrameType>* frame_types) override {
      bool block_encode;
      {
        rtc::CritScope lock(&local_crit_sect_);
@@ -747,7 +747,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
    absl::optional<VideoBitrateAllocation> last_bitrate_allocation_;
    VideoFrame::UpdateRect last_update_rect_
        RTC_GUARDED_BY(local_crit_sect_) = {0, 0, 0, 0};
-    std::vector<FrameType> last_frame_types_;
+    std::vector<VideoFrameType> last_frame_types_;
    bool expect_null_frame_ = false;
    EncodedImageCallback* encoded_image_callback_
        RTC_GUARDED_BY(local_crit_sect_) = nullptr;
@@ -3605,20 +3605,20 @@ TEST_F(VideoStreamEncoderTest, SetsFrameTypes) {
   video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
   WaitForEncodedFrame(1);
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameKey}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameKey}));

   // Insert delta frame.
   video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
   WaitForEncodedFrame(2);
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameDelta}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameDelta}));

   // Request next frame be a key-frame.
   video_stream_encoder_->SendKeyFrame();
   video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
   WaitForEncodedFrame(3);
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameKey}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameKey}));

   video_stream_encoder_->Stop();
 }
@@ -3669,23 +3669,23 @@ TEST_F(VideoStreamEncoderTest, RequestKeyframeInternalSource) {
   fake_encoder_.InjectFrame(CreateFrame(1, nullptr), true);
   EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameKey}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameKey}));

-  const std::vector<FrameType> kDeltaFrame = {kVideoFrameDelta};
+  const std::vector<VideoFrameType> kDeltaFrame = {kVideoFrameDelta};
   // Need to set timestamp manually since manually for injected frame.
   VideoFrame frame = CreateFrame(101, nullptr);
   frame.set_timestamp(101);
   fake_encoder_.InjectFrame(frame, false);
   EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameDelta}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameDelta}));

   // Request key-frame. The forces a dummy frame down into the encoder.
   fake_encoder_.ExpectNullFrame();
   video_stream_encoder_->SendKeyFrame();
   EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
   EXPECT_THAT(fake_encoder_.LastFrameTypes(),
-              testing::ElementsAre(FrameType{kVideoFrameKey}));
+              testing::ElementsAre(VideoFrameType{kVideoFrameKey}));

   video_stream_encoder_->Stop();
 }
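The rename is mechanical at every call site: FrameType containers and parameters become VideoFrameType, while the enumerators themselves (kVideoFrameKey, kVideoFrameDelta, kEmptyFrame) keep their names. A minimal sketch of the migrated call-site pattern, modeled on the three-argument VideoEncoder::Encode() calls in this diff; the RequestKeyFrame helper is hypothetical and not part of the change:

#include <vector>

#include "api/video_codecs/video_encoder.h"
#include "common_types.h"  // VideoFrameType, kVideoFrameKey.

// Hypothetical helper: asks |encoder| to produce a key frame on every
// simulcast stream, mirroring the pattern in SimulcastTestFixtureImpl above.
int32_t RequestKeyFrame(webrtc::VideoEncoder* encoder,
                        const webrtc::VideoFrame& frame,
                        size_t num_streams) {
  // One entry per simulcast stream; VideoFrameType is the renamed type.
  std::vector<webrtc::VideoFrameType> frame_types(num_streams,
                                                  webrtc::kVideoFrameKey);
  return encoder->Encode(frame, /*codec_specific_info=*/nullptr, &frame_types);
}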