webrtc_m130/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
magjed 56124bd158 Send audio and video codecs to RTPPayloadRegistry
The purpose of this CL is to be able to send video codec specific
information down to RTPPayloadRegistry. We already do this for audio
with explicit arguments for e.g. number of channels. Instead of
extracting the arguments from webrtc::CodecInst (audio) and
webrtc::VideoCodec, this CL sends the types unmodified all the way down
to RTPPayloadRegistry.

This CL does not contain any functional changes, and is just a
preparation for future CLs.

In the dependent CL https://codereview.webrtc.org/2524923002/,
RTPPayloadStrategy is removed. RTPPayloadStrategy previously handled
audio/video specific aspects of payload handling. After this CL, we will
know if we get audio or video codecs without any dependency injection,
since we have different functions with different signatures for audio
vs video.

BUG=webrtc:6743
TBR=mflodman

Review-Url: https://codereview.webrtc.org/2523843002
Cr-Commit-Position: refs/heads/master@{#15231}
2016-11-24 17:34:53 +00:00

126 lines
4.3 KiB
C++

/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h"
#include <assert.h>
#include <string.h>
#include <memory>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_cvo.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
namespace webrtc {
// Factory for the video flavor of RTPReceiverStrategy. The caller takes
// ownership of the returned object; |data_callback| is borrowed and must
// outlive it.
RTPReceiverStrategy* RTPReceiverStrategy::CreateVideoStrategy(
    RtpData* data_callback) {
  RTPReceiverVideo* video_strategy = new RTPReceiverVideo(data_callback);
  return video_strategy;
}
// Hands the payload-data callback straight to the shared strategy base.
RTPReceiverVideo::RTPReceiverVideo(RtpData* data_callback)
    : RTPReceiverStrategy(data_callback) {}
RTPReceiverVideo::~RTPReceiverVideo() = default;
// CSRC changes are reported unconditionally for video streams; the payload
// type plays no role in the decision.
bool RTPReceiverVideo::ShouldReportCsrcChanges(uint8_t payload_type) const {
  return true;
}
// Audio-only notification: a video receiver strategy must never be handed a
// CodecInst, so reaching this is a programming error.
int32_t RTPReceiverVideo::OnNewPayloadTypeCreated(
    const CodecInst& audio_codec) {
  RTC_NOTREACHED();
  return 0;
}
// Parses one incoming video RTP packet: strips padding, depacketizes the
// codec-specific payload, and forwards the resulting media data to
// |data_callback_|.
// Returns 0 on success, -1 if depacketizer creation/parsing fails or the
// callback reports an error. |is_red| and |timestamp_ms| are not used on the
// video path.
int32_t RTPReceiverVideo::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
                                         const PayloadUnion& specific_payload,
                                         bool is_red,
                                         const uint8_t* payload,
                                         size_t payload_length,
                                         int64_t timestamp_ms,
                                         bool is_first_packet) {
  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "Video::ParseRtp",
               "seqnum", rtp_header->header.sequenceNumber, "timestamp",
               rtp_header->header.timestamp);
  // Store the codec type now: it is read below when creating the
  // depacketizer, and rtp_header->type is later replaced wholesale by the
  // parsed payload's type.
  rtp_header->type.Video.codec = specific_payload.Video.videoCodecType;
  RTC_DCHECK_GE(payload_length, rtp_header->header.paddingLength);
  const size_t payload_data_length =
      payload_length - rtp_header->header.paddingLength;
  // Padding-only (or empty) packets are still delivered to the callback with
  // a NULL/zero-length payload rather than being dropped.
  if (payload == NULL || payload_data_length == 0) {
    return data_callback_->OnReceivedPayloadData(NULL, 0, rtp_header) == 0 ? 0
                                                                           : -1;
  }
  // first_packet_received_() flips once, so this logs exactly one time.
  if (first_packet_received_()) {
    LOG(LS_INFO) << "Received first video RTP packet";
  }
  // We are not allowed to hold a critical section when calling below functions.
  std::unique_ptr<RtpDepacketizer> depacketizer(
      RtpDepacketizer::Create(rtp_header->type.Video.codec));
  if (depacketizer.get() == NULL) {
    LOG(LS_ERROR) << "Failed to create depacketizer.";
    return -1;
  }
  rtp_header->type.Video.isFirstPacket = is_first_packet;
  RtpDepacketizer::ParsedPayload parsed_payload;
  if (!depacketizer->Parse(&parsed_payload, payload, payload_data_length))
    return -1;
  // NOTE(review): this assignment overwrites type.Video entirely — including
  // the codec and isFirstPacket fields set above. Presumably Parse() fills
  // parsed_payload.type with equivalent values; verify against the
  // depacketizer implementations.
  rtp_header->frameType = parsed_payload.frame_type;
  rtp_header->type = parsed_payload.type;
  rtp_header->type.Video.rotation = kVideoRotation_0;
  // Retrieve the video rotation information.
  if (rtp_header->header.extension.hasVideoRotation) {
    rtp_header->type.Video.rotation =
        rtp_header->header.extension.videoRotation;
  }
  rtp_header->type.Video.playout_delay =
      rtp_header->header.extension.playout_delay;
  // Hand the depacketized frame data to the registered sink; normalize the
  // callback's status to this function's 0/-1 convention.
  return data_callback_->OnReceivedPayloadData(parsed_payload.payload,
                                               parsed_payload.payload_length,
                                               rtp_header) == 0
             ? 0
             : -1;
}
// Dead/alive detection is not implemented on the video path; the stream is
// always reported as dead regardless of |last_payload_length|.
RTPAliveType RTPReceiverVideo::ProcessDeadOrAlive(
    uint16_t last_payload_length) const {
  return kRtpDead;
}
// Intentional no-op on the video path: decoder (re)initialization on a
// payload-type change is only required for audio, so this always succeeds
// without touching |callback|.
// TODO(pbos): Remove as soon as audio can handle a changing payload type
// without this callback.
int32_t RTPReceiverVideo::InvokeOnInitializeDecoder(
    RtpFeedback* callback,
    int8_t payload_type,
    const char payload_name[RTP_PAYLOAD_NAME_SIZE],
    const PayloadUnion& specific_payload) const {
  return 0;
}
} // namespace webrtc