Use the SDP ContentInfo helpers to avoid downcasting

This changes all internal code to use the media_description() helper
for ContentInfo along with the as_audio, as_video, and as_data casting
methods introduced in a previous CL. Reduces the total number of
pointer static_casts in pc/ from 351 to 122.

Bug: webrtc:8620
Change-Id: I996f49b55f1501c758a9e5223e30539a9f8d4eac
Reviewed-on: https://webrtc-review.googlesource.com/35921
Reviewed-by: Peter Thatcher <pthatcher@webrtc.org>
Commit-Queue: Steve Anton <steveanton@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#21419}
Author: Steve Anton <steveanton@webrtc.org>
Date: 2017-12-21 15:14:30 -08:00 (committed by Commit Bot)
Commit: b1c1de17d4
Parent: c0ed4db0ba
16 changed files with 378 additions and 602 deletions
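
For reference, a minimal sketch (not part of the diff below) of the pattern this CL applies across pc/: instead of static_cast-ing ContentInfo::description to a concrete MediaContentDescription subclass, callers go through ContentInfo::media_description() and the as_audio()/as_video()/as_data() helpers. The GetAudioDescription function names here are made up for illustration; the cricket types are the ones that appear in the diff.

// Before: blind downcast of the raw ContentDescription pointer.
const cricket::AudioContentDescription* GetAudioDescriptionOld(
    const cricket::ContentInfo* content) {
  return static_cast<const cricket::AudioContentDescription*>(
      content->description);
}

// After: fetch the MediaContentDescription and use the checked cast,
// which is expected to return null when the content is not audio.
const cricket::AudioContentDescription* GetAudioDescriptionNew(
    const cricket::ContentInfo* content) {
  const cricket::MediaContentDescription* media_desc =
      content ? content->media_description() : nullptr;
  return media_desc ? media_desc->as_audio() : nullptr;
}

The as_video() and as_data() helpers follow the same shape for VideoContentDescription and DataContentDescription.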


@ -1418,17 +1418,17 @@ bool VoiceChannel::SetLocalContent_w(const MediaContentDescription* content,
SdpType type,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "VoiceChannel::SetLocalContent_w");
RTC_DCHECK(worker_thread() == rtc::Thread::Current());
RTC_DCHECK_RUN_ON(worker_thread());
RTC_LOG(LS_INFO) << "Setting local voice description";
const AudioContentDescription* audio =
static_cast<const AudioContentDescription*>(content);
RTC_DCHECK(audio != NULL);
if (!audio) {
RTC_DCHECK(content);
if (!content) {
SafeSetError("Can't find audio content in local description.", error_desc);
return false;
}
const AudioContentDescription* audio = content->as_audio();
RtpHeaderExtensions rtp_header_extensions =
GetFilteredRtpHeaderExtensions(audio->rtp_header_extensions());
@ -1467,17 +1467,17 @@ bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content,
SdpType type,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "VoiceChannel::SetRemoteContent_w");
RTC_DCHECK(worker_thread() == rtc::Thread::Current());
RTC_DCHECK_RUN_ON(worker_thread());
RTC_LOG(LS_INFO) << "Setting remote voice description";
const AudioContentDescription* audio =
static_cast<const AudioContentDescription*>(content);
RTC_DCHECK(audio != NULL);
if (!audio) {
RTC_DCHECK(content);
if (!content) {
SafeSetError("Can't find audio content in remote description.", error_desc);
return false;
}
const AudioContentDescription* audio = content->as_audio();
RtpHeaderExtensions rtp_header_extensions =
GetFilteredRtpHeaderExtensions(audio->rtp_header_extensions());
@ -1697,17 +1697,17 @@ bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content,
SdpType type,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "VideoChannel::SetLocalContent_w");
RTC_DCHECK(worker_thread() == rtc::Thread::Current());
RTC_DCHECK_RUN_ON(worker_thread());
RTC_LOG(LS_INFO) << "Setting local video description";
const VideoContentDescription* video =
static_cast<const VideoContentDescription*>(content);
RTC_DCHECK(video != NULL);
if (!video) {
RTC_DCHECK(content);
if (!content) {
SafeSetError("Can't find video content in local description.", error_desc);
return false;
}
const VideoContentDescription* video = content->as_video();
RtpHeaderExtensions rtp_header_extensions =
GetFilteredRtpHeaderExtensions(video->rtp_header_extensions());
@ -1746,17 +1746,17 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
SdpType type,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "VideoChannel::SetRemoteContent_w");
RTC_DCHECK(worker_thread() == rtc::Thread::Current());
RTC_DCHECK_RUN_ON(worker_thread());
RTC_LOG(LS_INFO) << "Setting remote video description";
const VideoContentDescription* video =
static_cast<const VideoContentDescription*>(content);
RTC_DCHECK(video != NULL);
if (!video) {
RTC_DCHECK(content);
if (!content) {
SafeSetError("Can't find video content in remote description.", error_desc);
return false;
}
const VideoContentDescription* video = content->as_video();
RtpHeaderExtensions rtp_header_extensions =
GetFilteredRtpHeaderExtensions(video->rtp_header_extensions());
@ -1898,17 +1898,17 @@ bool RtpDataChannel::SetLocalContent_w(const MediaContentDescription* content,
SdpType type,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "RtpDataChannel::SetLocalContent_w");
RTC_DCHECK(worker_thread() == rtc::Thread::Current());
RTC_DCHECK_RUN_ON(worker_thread());
RTC_LOG(LS_INFO) << "Setting local data description";
const DataContentDescription* data =
static_cast<const DataContentDescription*>(content);
RTC_DCHECK(data != NULL);
if (!data) {
RTC_DCHECK(content);
if (!content) {
SafeSetError("Can't find data content in local description.", error_desc);
return false;
}
const DataContentDescription* data = content->as_data();
if (!CheckDataChannelTypeFromContent(data, error_desc)) {
return false;
}
@ -1951,16 +1951,17 @@ bool RtpDataChannel::SetRemoteContent_w(const MediaContentDescription* content,
SdpType type,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "RtpDataChannel::SetRemoteContent_w");
RTC_DCHECK(worker_thread() == rtc::Thread::Current());
RTC_DCHECK_RUN_ON(worker_thread());
RTC_LOG(LS_INFO) << "Setting remote data description";
const DataContentDescription* data =
static_cast<const DataContentDescription*>(content);
RTC_DCHECK(data != NULL);
if (!data) {
RTC_DCHECK(content);
if (!content) {
SafeSetError("Can't find data content in remote description.", error_desc);
return false;
}
const DataContentDescription* data = content->as_data();
// If the remote data doesn't have codecs, it must be empty, so ignore it.
if (!data->has_codecs()) {
return true;


@ -54,7 +54,7 @@ int GetCandidatePreferenceFromType(const std::string& type) {
// candidates.
void UpdateConnectionAddress(
const JsepCandidateCollection& candidate_collection,
cricket::ContentDescription* content_description) {
cricket::MediaContentDescription* media_desc) {
int port = kDummyPort;
std::string ip = kDummyAddress;
int current_preference = kPreferenceUnknown;
@ -88,8 +88,7 @@ void UpdateConnectionAddress(
rtc::SocketAddress connection_addr;
connection_addr.SetIP(ip);
connection_addr.SetPort(port);
static_cast<cricket::MediaContentDescription*>(content_description)
->set_connection_address(connection_addr);
media_desc->set_connection_address(connection_addr);
}
} // namespace
@ -234,7 +233,7 @@ bool JsepSessionDescription::AddCandidate(
updated_candidate_wrapper.release());
UpdateConnectionAddress(
candidate_collection_[mediasection_index],
description_->contents()[mediasection_index].description);
description_->contents()[mediasection_index].media_description());
}
return true;
@ -252,7 +251,7 @@ size_t JsepSessionDescription::RemoveCandidates(
num_removed += candidate_collection_[mediasection_index].remove(candidate);
UpdateConnectionAddress(
candidate_collection_[mediasection_index],
description_->contents()[mediasection_index].description);
description_->contents()[mediasection_index].media_description());
}
return num_removed;
}


@ -382,8 +382,8 @@ TEST_F(JsepSessionDescriptionTest, RemoveCandidateAndSetConnectionAddress) {
JsepIceCandidate jice3("audio", 0, candidate3);
size_t audio_index = 0;
auto media_desc = static_cast<cricket::MediaContentDescription*>(
jsep_desc_->description()->contents()[audio_index].description);
auto media_desc =
jsep_desc_->description()->contents()[audio_index].media_description();
ASSERT_TRUE(jsep_desc_->AddCandidate(&jice1));
ASSERT_TRUE(jsep_desc_->AddCandidate(&jice2));


@ -114,13 +114,10 @@ static RtpTransceiverDirection NegotiateRtpTransceiverDirection(
static bool IsMediaContentOfType(const ContentInfo* content,
MediaType media_type) {
if (!IsMediaContent(content)) {
if (!content || !content->media_description()) {
return false;
}
const MediaContentDescription* mdesc =
static_cast<const MediaContentDescription*>(content->description);
return mdesc && mdesc->type() == media_type;
return content->media_description()->type() == media_type;
}
static bool CreateCryptoParams(int tag, const std::string& cipher,
@ -178,13 +175,10 @@ bool CreateMediaCryptos(const std::vector<std::string>& crypto_suites,
}
const CryptoParamsVec* GetCryptos(const ContentInfo* content) {
if (!content) {
if (!content || !content->media_description()) {
return nullptr;
}
RTC_DCHECK(IsMediaContent(content));
return &(static_cast<const MediaContentDescription*>(content->description)
->cryptos());
return &content->media_description()->cryptos();
}
bool FindMatchingCrypto(const CryptoParamsVec& cryptos,
@ -293,22 +287,16 @@ static void GenerateSsrcs(const StreamParamsVec& params_vec,
// Finds all StreamParams of all media types and attach them to stream_params.
static void GetCurrentStreamParams(const SessionDescription* sdesc,
StreamParamsVec* stream_params) {
if (!sdesc)
RTC_DCHECK(stream_params);
if (!sdesc) {
return;
const ContentInfos& contents = sdesc->contents();
for (ContentInfos::const_iterator content = contents.begin();
content != contents.end(); ++content) {
if (!IsMediaContent(&*content)) {
}
for (const ContentInfo& content : sdesc->contents()) {
if (!content.media_description()) {
continue;
}
const MediaContentDescription* media =
static_cast<const MediaContentDescription*>(
content->description);
const StreamParamsVec& streams = media->streams();
for (StreamParamsVec::const_iterator it = streams.begin();
it != streams.end(); ++it) {
stream_params->push_back(*it);
for (const StreamParams& params : content.media_description()->streams()) {
stream_params->push_back(params);
}
}
}
@ -553,15 +541,11 @@ static bool GetCryptosByName(const SessionDescription* sdesc,
if (!sdesc || !cryptos) {
return false;
}
const ContentInfo* content = sdesc->GetContentByName(content_name);
if (!IsMediaContent(content) || !content->description) {
if (!content || !content->media_description()) {
return false;
}
const MediaContentDescription* media_desc =
static_cast<const MediaContentDescription*>(content->description);
*cryptos = media_desc->cryptos();
*cryptos = content->media_description()->cryptos();
return true;
}
@ -604,13 +588,8 @@ static bool IsRtpContent(SessionDescription* sdesc,
const std::string& content_name) {
bool is_rtp = false;
ContentInfo* content = sdesc->GetContentByName(content_name);
if (IsMediaContent(content)) {
MediaContentDescription* media_desc =
static_cast<MediaContentDescription*>(content->description);
if (!media_desc) {
return false;
}
is_rtp = IsRtpProtocol(media_desc->protocol());
if (content && content->media_description()) {
is_rtp = IsRtpProtocol(content->media_description()->protocol());
}
return is_rtp;
}
@ -670,8 +649,7 @@ static bool UpdateCryptoParamsForBundle(const ContentGroup& bundle_group,
}
ContentInfo* content = sdesc->GetContentByName(*it);
if (IsMediaContent(content)) {
MediaContentDescription* media_desc =
static_cast<MediaContentDescription*>(content->description);
MediaContentDescription* media_desc = content->media_description();
if (!media_desc) {
return false;
}
@ -1570,15 +1548,15 @@ void MergeCodecsFromDescription(const SessionDescription* description,
for (const ContentInfo& content : description->contents()) {
if (IsMediaContentOfType(&content, MEDIA_TYPE_AUDIO)) {
const AudioContentDescription* audio =
static_cast<AudioContentDescription*>(content.description);
content.media_description()->as_audio();
MergeCodecs<AudioCodec>(audio->codecs(), audio_codecs, used_pltypes);
} else if (IsMediaContentOfType(&content, MEDIA_TYPE_VIDEO)) {
const VideoContentDescription* video =
static_cast<VideoContentDescription*>(content.description);
content.media_description()->as_video();
MergeCodecs<VideoCodec>(video->codecs(), video_codecs, used_pltypes);
} else if (IsMediaContentOfType(&content, MEDIA_TYPE_DATA)) {
const DataContentDescription* data =
static_cast<DataContentDescription*>(content.description);
content.media_description()->as_data();
MergeCodecs<DataCodec>(data->codecs(), data_codecs, used_pltypes);
}
}
@ -1647,7 +1625,7 @@ void MediaSessionDescriptionFactory::GetCodecsForAnswer(
for (const ContentInfo& content : remote_offer->contents()) {
if (IsMediaContentOfType(&content, MEDIA_TYPE_AUDIO)) {
const AudioContentDescription* audio =
static_cast<AudioContentDescription*>(content.description);
content.media_description()->as_audio();
for (const AudioCodec& offered_audio_codec : audio->codecs()) {
if (!FindMatchingCodec<AudioCodec>(audio->codecs(),
filtered_offered_audio_codecs,
@ -1659,7 +1637,7 @@ void MediaSessionDescriptionFactory::GetCodecsForAnswer(
}
} else if (IsMediaContentOfType(&content, MEDIA_TYPE_VIDEO)) {
const VideoContentDescription* video =
static_cast<VideoContentDescription*>(content.description);
content.media_description()->as_video();
for (const VideoCodec& offered_video_codec : video->codecs()) {
if (!FindMatchingCodec<VideoCodec>(video->codecs(),
filtered_offered_video_codecs,
@ -1671,7 +1649,7 @@ void MediaSessionDescriptionFactory::GetCodecsForAnswer(
}
} else if (IsMediaContentOfType(&content, MEDIA_TYPE_DATA)) {
const DataContentDescription* data =
static_cast<DataContentDescription*>(content.description);
content.media_description()->as_data();
for (const DataCodec& offered_data_codec : data->codecs()) {
if (!FindMatchingCodec<DataCodec>(data->codecs(),
filtered_offered_data_codecs,
@ -1714,13 +1692,13 @@ void MediaSessionDescriptionFactory::GetRtpHdrExtsToOffer(
for (const ContentInfo& content : current_description->contents()) {
if (IsMediaContentOfType(&content, MEDIA_TYPE_AUDIO)) {
const AudioContentDescription* audio =
static_cast<const AudioContentDescription*>(content.description);
content.media_description()->as_audio();
MergeRtpHdrExts(audio->rtp_header_extensions(), offer_audio_extensions,
&all_regular_extensions, &all_encrypted_extensions,
&used_ids);
} else if (IsMediaContentOfType(&content, MEDIA_TYPE_VIDEO)) {
const VideoContentDescription* video =
static_cast<const VideoContentDescription*>(content.description);
content.media_description()->as_video();
MergeRtpHdrExts(video->rtp_header_extensions(), offer_video_extensions,
&all_regular_extensions, &all_encrypted_extensions,
&used_ids);
@ -1828,8 +1806,7 @@ bool MediaSessionDescriptionFactory::AddAudioContentForOffer(
if (current_content) {
RTC_CHECK(IsMediaContentOfType(current_content, MEDIA_TYPE_AUDIO));
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(
current_content->description);
current_content->media_description()->as_audio();
for (const AudioCodec& codec : acd->codecs()) {
if (FindMatchingCodec<AudioCodec>(acd->codecs(), audio_codecs, codec,
nullptr)) {
@ -1904,8 +1881,7 @@ bool MediaSessionDescriptionFactory::AddVideoContentForOffer(
if (current_content) {
RTC_CHECK(IsMediaContentOfType(current_content, MEDIA_TYPE_VIDEO));
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(
current_content->description);
current_content->media_description()->as_video();
for (const VideoCodec& codec : vcd->codecs()) {
if (FindMatchingCodec<VideoCodec>(vcd->codecs(), video_codecs, codec,
nullptr)) {
@ -1966,9 +1942,8 @@ bool MediaSessionDescriptionFactory::AddDataContentForOffer(
// the current description.
if (session_options.data_channel_type == DCT_NONE && current_content) {
RTC_CHECK(IsMediaContentOfType(current_content, MEDIA_TYPE_DATA));
is_sctp = (static_cast<const DataContentDescription*>(
current_content->description)
->protocol() == kMediaProtocolSctp);
is_sctp = (current_content->media_description()->protocol() ==
kMediaProtocolSctp);
}
cricket::SecurePolicy sdes_policy =
@ -2043,7 +2018,7 @@ bool MediaSessionDescriptionFactory::AddAudioContentForAnswer(
SessionDescription* answer) const {
RTC_CHECK(IsMediaContentOfType(offer_content, MEDIA_TYPE_AUDIO));
const AudioContentDescription* offer_audio_description =
static_cast<const AudioContentDescription*>(offer_content->description);
offer_content->media_description()->as_audio();
std::unique_ptr<TransportDescription> audio_transport(
CreateTransportAnswer(media_description_options.mid, offer_description,
@ -2067,8 +2042,7 @@ bool MediaSessionDescriptionFactory::AddAudioContentForAnswer(
if (current_content) {
RTC_CHECK(IsMediaContentOfType(current_content, MEDIA_TYPE_AUDIO));
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(
current_content->description);
current_content->media_description()->as_audio();
for (const AudioCodec& codec : acd->codecs()) {
if (FindMatchingCodec<AudioCodec>(acd->codecs(), audio_codecs, codec,
nullptr)) {
@ -2135,7 +2109,7 @@ bool MediaSessionDescriptionFactory::AddVideoContentForAnswer(
SessionDescription* answer) const {
RTC_CHECK(IsMediaContentOfType(offer_content, MEDIA_TYPE_VIDEO));
const VideoContentDescription* offer_video_description =
static_cast<const VideoContentDescription*>(offer_content->description);
offer_content->media_description()->as_video();
std::unique_ptr<TransportDescription> video_transport(
CreateTransportAnswer(media_description_options.mid, offer_description,
@ -2150,8 +2124,7 @@ bool MediaSessionDescriptionFactory::AddVideoContentForAnswer(
if (current_content) {
RTC_CHECK(IsMediaContentOfType(current_content, MEDIA_TYPE_VIDEO));
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(
current_content->description);
current_content->media_description()->as_video();
for (const VideoCodec& codec : vcd->codecs()) {
if (FindMatchingCodec<VideoCodec>(vcd->codecs(), video_codecs, codec,
nullptr)) {
@ -2235,7 +2208,7 @@ bool MediaSessionDescriptionFactory::AddDataContentForAnswer(
session_options.bundle_enabled;
RTC_CHECK(IsMediaContentOfType(offer_content, MEDIA_TYPE_DATA));
const DataContentDescription* offer_data_description =
static_cast<const DataContentDescription*>(offer_content->description);
offer_content->media_description()->as_data();
if (!CreateMediaContentAnswer(
offer_data_description, media_description_options, session_options,
data_codecs, sdes_policy, GetCryptos(current_content),
@ -2363,8 +2336,7 @@ const ContentInfo* GetFirstDataContent(const SessionDescription* sdesc) {
const MediaContentDescription* GetFirstMediaContentDescription(
const SessionDescription* sdesc, MediaType media_type) {
const ContentInfo* content = GetFirstMediaContent(sdesc, media_type);
const ContentDescription* description = content ? content->description : NULL;
return static_cast<const MediaContentDescription*>(description);
return (content ? content->media_description() : nullptr);
}
const AudioContentDescription* GetFirstAudioContentDescription(
@ -2436,8 +2408,7 @@ MediaContentDescription* GetFirstMediaContentDescription(
SessionDescription* sdesc,
MediaType media_type) {
ContentInfo* content = GetFirstMediaContent(sdesc, media_type);
ContentDescription* description = content ? content->description : NULL;
return static_cast<MediaContentDescription*>(description);
return (content ? content->media_description() : nullptr);
}
AudioContentDescription* GetFirstAudioContentDescription(


@ -220,15 +220,13 @@ static constexpr bool kActive = false;
static bool IsMediaContentOfType(const ContentInfo* content,
MediaType media_type) {
const MediaContentDescription* mdesc =
static_cast<const MediaContentDescription*>(content->description);
return mdesc && mdesc->type() == media_type;
RTC_DCHECK(content);
return content->media_description()->type() == media_type;
}
static RtpTransceiverDirection GetMediaDirection(const ContentInfo* content) {
cricket::MediaContentDescription* desc =
reinterpret_cast<cricket::MediaContentDescription*>(content->description);
return desc->direction();
RTC_DCHECK(content);
return content->media_description()->direction();
}
static void AddRtxCodec(const VideoCodec& rtx_codec,
@ -537,15 +535,13 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
ref_desc.reset(f1_.CreateOffer(options, NULL));
desc.reset(f1_.CreateAnswer(ref_desc.get(), options, NULL));
}
ASSERT_TRUE(desc.get() != NULL);
ASSERT_TRUE(desc);
const cricket::MediaContentDescription* audio_media_desc =
static_cast<const cricket::MediaContentDescription*>(
desc.get()->GetContentDescriptionByName("audio"));
ASSERT_TRUE(audio_media_desc != NULL);
desc->GetContentDescriptionByName("audio");
ASSERT_TRUE(audio_media_desc);
const cricket::MediaContentDescription* video_media_desc =
static_cast<const cricket::MediaContentDescription*>(
desc.get()->GetContentDescriptionByName("video"));
ASSERT_TRUE(video_media_desc != NULL);
desc->GetContentDescriptionByName("video");
ASSERT_TRUE(video_media_desc);
EXPECT_TRUE(CompareCryptoParams(audio_media_desc->cryptos(),
video_media_desc->cryptos()));
EXPECT_EQ(1u, audio_media_desc->cryptos().size());
@ -555,8 +551,7 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
// Verify the selected crypto is one from the reference audio
// media content.
const cricket::MediaContentDescription* ref_audio_media_desc =
static_cast<const cricket::MediaContentDescription*>(
ref_desc.get()->GetContentDescriptionByName("audio"));
ref_desc->GetContentDescriptionByName("audio");
bool found = false;
for (size_t i = 0; i < ref_audio_media_desc->cryptos().size(); ++i) {
if (ref_audio_media_desc->cryptos()[i].Matches(
@ -598,14 +593,15 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
}
bool VerifyNoCNCodecs(const cricket::ContentInfo* content) {
const cricket::ContentDescription* description = content->description;
RTC_CHECK(description != NULL);
const cricket::AudioContentDescription* audio_content_desc =
static_cast<const cricket::AudioContentDescription*>(description);
RTC_CHECK(audio_content_desc != NULL);
for (size_t i = 0; i < audio_content_desc->codecs().size(); ++i) {
if (audio_content_desc->codecs()[i].name == "CN")
RTC_DCHECK(content);
RTC_CHECK(content->media_description());
const cricket::AudioContentDescription* audio_desc =
content->media_description()->as_audio();
RTC_CHECK(audio_desc);
for (const cricket::AudioCodec& codec : audio_desc->codecs()) {
if (codec.name == "CN") {
return false;
}
}
return true;
}
@ -632,10 +628,8 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
ASSERT_TRUE(vc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, vc->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
@ -676,8 +670,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioOffer) {
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc == NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs());
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached.
@ -700,10 +693,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoOffer) {
ASSERT_TRUE(vc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, vc->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs());
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached
@ -809,10 +800,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateRtpDataOffer) {
ASSERT_TRUE(dc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, dc->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const DataContentDescription* dcd =
static_cast<const DataContentDescription*>(dc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const DataContentDescription* dcd = dc->media_description()->as_data();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs());
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached.
@ -851,9 +840,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateImplicitSctpDataOffer) {
ASSERT_TRUE(offer1.get() != NULL);
const ContentInfo* data = offer1->GetContentByName("data");
ASSERT_TRUE(data != NULL);
const MediaContentDescription* mdesc =
static_cast<const MediaContentDescription*>(data->description);
ASSERT_EQ(cricket::kMediaProtocolSctp, mdesc->protocol());
ASSERT_EQ(cricket::kMediaProtocolSctp, data->media_description()->protocol());
// Now set data_channel_type to 'none' (default) and make sure that the
// datachannel type that gets generated from the previous offer, is of the
@ -863,8 +850,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateImplicitSctpDataOffer) {
f1_.CreateOffer(opts, offer1.get()));
data = offer2->GetContentByName("data");
ASSERT_TRUE(data != NULL);
mdesc = static_cast<const MediaContentDescription*>(data->description);
EXPECT_EQ(cricket::kMediaProtocolSctp, mdesc->protocol());
EXPECT_EQ(cricket::kMediaProtocolSctp, data->media_description()->protocol());
}
// Create an audio, video offer without legacy StreamParams.
@ -878,10 +864,8 @@ TEST_F(MediaSessionDescriptionFactoryTest,
const ContentInfo* vc = offer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_FALSE(vcd->has_ssrcs()); // No StreamParams.
EXPECT_FALSE(acd->has_ssrcs()); // No StreamParams.
@ -953,8 +937,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswer) {
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc == NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached
@ -980,8 +963,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswerGcm) {
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc == NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached
@ -1007,10 +989,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswer) {
ASSERT_TRUE(vc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, vc->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
@ -1058,10 +1038,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswer) {
ASSERT_TRUE(dc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, dc->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const DataContentDescription* dcd =
static_cast<const DataContentDescription*>(dc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const DataContentDescription* dcd = dc->media_description()->as_data();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
@ -1092,10 +1070,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerGcm) {
ASSERT_TRUE(dc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, dc->type);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const DataContentDescription* dcd =
static_cast<const DataContentDescription*>(dc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const DataContentDescription* dcd = dc->media_description()->as_data();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
@ -1119,8 +1095,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerUsesSctpmap) {
ASSERT_TRUE(offer.get() != NULL);
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != NULL);
DataContentDescription* dcd_offer =
static_cast<DataContentDescription*>(dc_offer->description);
DataContentDescription* dcd_offer = dc_offer->media_description()->as_data();
EXPECT_TRUE(dcd_offer->use_sctpmap());
std::unique_ptr<SessionDescription> answer(
@ -1128,7 +1103,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerUsesSctpmap) {
const ContentInfo* dc_answer = answer->GetContentByName("data");
ASSERT_TRUE(dc_answer != NULL);
const DataContentDescription* dcd_answer =
static_cast<const DataContentDescription*>(dc_answer->description);
dc_answer->media_description()->as_data();
EXPECT_TRUE(dcd_answer->use_sctpmap());
}
@ -1140,8 +1115,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerWithoutSctpmap) {
ASSERT_TRUE(offer.get() != NULL);
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != NULL);
DataContentDescription* dcd_offer =
static_cast<DataContentDescription*>(dc_offer->description);
DataContentDescription* dcd_offer = dc_offer->media_description()->as_data();
dcd_offer->set_use_sctpmap(false);
std::unique_ptr<SessionDescription> answer(
@ -1149,7 +1123,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerWithoutSctpmap) {
const ContentInfo* dc_answer = answer->GetContentByName("data");
ASSERT_TRUE(dc_answer != NULL);
const DataContentDescription* dcd_answer =
static_cast<const DataContentDescription*>(dc_answer->description);
dc_answer->media_description()->as_data();
EXPECT_FALSE(dcd_answer->use_sctpmap());
}
@ -1170,8 +1144,7 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ASSERT_TRUE(offer.get() != nullptr);
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != nullptr);
DataContentDescription* dcd_offer =
static_cast<DataContentDescription*>(dc_offer->description);
DataContentDescription* dcd_offer = dc_offer->media_description()->as_data();
std::vector<std::string> protos = {"DTLS/SCTP", "UDP/DTLS/SCTP",
"TCP/DTLS/SCTP"};
@ -1182,7 +1155,7 @@ TEST_F(MediaSessionDescriptionFactoryTest,
const ContentInfo* dc_answer = answer->GetContentByName("data");
ASSERT_TRUE(dc_answer != nullptr);
const DataContentDescription* dcd_answer =
static_cast<const DataContentDescription*>(dc_answer->description);
dc_answer->media_description()->as_data();
EXPECT_FALSE(dc_answer->rejected);
EXPECT_EQ(proto, dcd_answer->protocol());
}
@ -1262,8 +1235,7 @@ TEST_F(MediaSessionDescriptionFactoryTest,
std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != NULL);
DataContentDescription* dcd_offer =
static_cast<DataContentDescription*>(dc_offer->description);
DataContentDescription* dcd_offer = dc_offer->media_description()->as_data();
ASSERT_TRUE(dcd_offer != NULL);
std::string protocol = "a weird unknown protocol";
dcd_offer->set_protocol(protocol);
@ -1275,7 +1247,7 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ASSERT_TRUE(dc_answer != NULL);
EXPECT_TRUE(dc_answer->rejected);
const DataContentDescription* dcd_answer =
static_cast<const DataContentDescription*>(dc_answer->description);
dc_answer->media_description()->as_data();
ASSERT_TRUE(dcd_answer != NULL);
EXPECT_EQ(protocol, dcd_answer->protocol());
}
@ -1457,12 +1429,9 @@ TEST_F(MediaSessionDescriptionFactoryTest,
const ContentInfo* dc = answer->GetContentByName("data");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const DataContentDescription* dcd =
static_cast<const DataContentDescription*>(dc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
const DataContentDescription* dcd = dc->media_description()->as_data();
EXPECT_FALSE(acd->has_ssrcs()); // No StreamParams.
EXPECT_FALSE(vcd->has_ssrcs()); // No StreamParams.
@ -1570,7 +1539,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswerToVideo) {
const ContentInfo* vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
ASSERT_TRUE(vc->description != NULL);
ASSERT_TRUE(vc->media_description() != NULL);
EXPECT_TRUE(vc->rejected);
}
@ -1590,7 +1559,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateNoDataAnswerToDataOffer) {
const ContentInfo* dc = answer->GetContentByName("data");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(dc != NULL);
ASSERT_TRUE(dc->description != NULL);
ASSERT_TRUE(dc->media_description() != NULL);
EXPECT_TRUE(dc->rejected);
}
@ -1656,12 +1625,9 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) {
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
ASSERT_TRUE(dc != NULL);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const DataContentDescription* dcd =
static_cast<const DataContentDescription*>(dc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
const DataContentDescription* dcd = dc->media_description()->as_data();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs());
@ -1730,11 +1696,11 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) {
ASSERT_TRUE(vc != NULL);
ASSERT_TRUE(dc != NULL);
const AudioContentDescription* updated_acd =
static_cast<const AudioContentDescription*>(ac->description);
ac->media_description()->as_audio();
const VideoContentDescription* updated_vcd =
static_cast<const VideoContentDescription*>(vc->description);
vc->media_description()->as_video();
const DataContentDescription* updated_dcd =
static_cast<const DataContentDescription*>(dc->description);
dc->media_description()->as_data();
EXPECT_EQ(acd->type(), updated_acd->type());
EXPECT_EQ(acd->codecs(), updated_acd->codecs());
@ -1792,8 +1758,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSimulcastVideoOffer) {
ASSERT_TRUE(offer.get() != NULL);
const ContentInfo* vc = offer->GetContentByName("video");
ASSERT_TRUE(vc != NULL);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const VideoContentDescription* vcd = vc->media_description()->as_video();
const StreamParamsVec& video_streams = vcd->streams();
ASSERT_EQ(1U, video_streams.size());
@ -1853,12 +1818,9 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) {
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
ASSERT_TRUE(dc != NULL);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const DataContentDescription* dcd =
static_cast<const DataContentDescription*>(dc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
const DataContentDescription* dcd = dc->media_description()->as_data();
ASSERT_CRYPTO(acd, 1U, CS_AES_CM_128_HMAC_SHA1_32);
ASSERT_CRYPTO(vcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
ASSERT_CRYPTO(dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
@ -1923,11 +1885,11 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) {
ASSERT_TRUE(vc != NULL);
ASSERT_TRUE(dc != NULL);
const AudioContentDescription* updated_acd =
static_cast<const AudioContentDescription*>(ac->description);
ac->media_description()->as_audio();
const VideoContentDescription* updated_vcd =
static_cast<const VideoContentDescription*>(vc->description);
vc->media_description()->as_video();
const DataContentDescription* updated_dcd =
static_cast<const DataContentDescription*>(dc->description);
dc->media_description()->as_data();
ASSERT_CRYPTO(updated_acd, 1U, CS_AES_CM_128_HMAC_SHA1_32);
EXPECT_TRUE(CompareCryptoParams(acd->cryptos(), updated_acd->cryptos()));
@ -2231,10 +2193,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtxWithoutApt) {
// is selected. Manually remove kCodecParamAssociatedPayloadType so that it
// is possible to test that that RTX is dropped when
// kCodecParamAssociatedPayloadType is missing in the offer.
VideoContentDescription* desc =
static_cast<cricket::VideoContentDescription*>(
offer->GetContentDescriptionByName(cricket::CN_VIDEO));
ASSERT_TRUE(desc != NULL);
MediaContentDescription* media_desc =
offer->GetContentDescriptionByName(cricket::CN_VIDEO);
ASSERT_TRUE(media_desc);
VideoContentDescription* desc = media_desc->as_video();
std::vector<VideoCodec> codecs = desc->codecs();
for (std::vector<VideoCodec>::iterator iter = codecs.begin();
iter != codecs.end(); ++iter) {
@ -2373,9 +2335,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, SimSsrcsGenerateMultipleRtxSsrcs) {
// is a FID ssrc + grouping for each.
std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
ASSERT_TRUE(offer.get() != NULL);
VideoContentDescription* desc = static_cast<VideoContentDescription*>(
offer->GetContentDescriptionByName(cricket::CN_VIDEO));
ASSERT_TRUE(desc != NULL);
MediaContentDescription* media_desc =
offer->GetContentDescriptionByName(cricket::CN_VIDEO);
ASSERT_TRUE(media_desc);
VideoContentDescription* desc = media_desc->as_video();
const StreamParamsVec& streams = desc->streams();
// Single stream.
ASSERT_EQ(1u, streams.size());
@ -2413,9 +2376,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, GenerateFlexfecSsrc) {
// there is no FEC-FR ssrc + grouping for each.
std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, nullptr));
ASSERT_TRUE(offer.get() != nullptr);
VideoContentDescription* desc = static_cast<VideoContentDescription*>(
offer->GetContentDescriptionByName(cricket::CN_VIDEO));
ASSERT_TRUE(desc != nullptr);
MediaContentDescription* media_desc =
offer->GetContentDescriptionByName(cricket::CN_VIDEO);
ASSERT_TRUE(media_desc);
VideoContentDescription* desc = media_desc->as_video();
const StreamParamsVec& streams = desc->streams();
// Single stream.
ASSERT_EQ(1u, streams.size());
@ -2452,9 +2416,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, SimSsrcsGenerateNoFlexfecSsrcs) {
// there is no FEC-FR ssrc + grouping for each.
std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, nullptr));
ASSERT_TRUE(offer.get() != nullptr);
VideoContentDescription* desc = static_cast<VideoContentDescription*>(
offer->GetContentDescriptionByName(cricket::CN_VIDEO));
ASSERT_TRUE(desc != nullptr);
MediaContentDescription* media_desc =
offer->GetContentDescriptionByName(cricket::CN_VIDEO);
ASSERT_TRUE(media_desc);
VideoContentDescription* desc = media_desc->as_video();
const StreamParamsVec& streams = desc->streams();
// Single stream.
ASSERT_EQ(1u, streams.size());
@ -2628,14 +2593,12 @@ TEST(MediaSessionDescription, CopySessionDescription) {
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
const AudioContentDescription* acd_copy =
static_cast<const AudioContentDescription*>(ac->description);
const AudioContentDescription* acd_copy = ac->media_description()->as_audio();
EXPECT_EQ(acd->codecs(), acd_copy->codecs());
EXPECT_EQ(1u, acd->first_ssrc());
EXPECT_EQ(MediaProtocolType::kRtp, vc->type);
const VideoContentDescription* vcd_copy =
static_cast<const VideoContentDescription*>(vc->description);
const VideoContentDescription* vcd_copy = vc->media_description()->as_video();
EXPECT_EQ(vcd->codecs(), vcd_copy->codecs());
EXPECT_EQ(2u, vcd->first_ssrc());
}
@ -2790,7 +2753,7 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ContentInfo* offer_content = offer->GetContentByName("audio");
ASSERT_TRUE(offer_content != NULL);
AudioContentDescription* offer_audio_desc =
static_cast<AudioContentDescription*>(offer_content->description);
offer_content->media_description()->as_audio();
offer_audio_desc->set_protocol(cricket::kMediaProtocolDtlsSavpf);
std::unique_ptr<SessionDescription> answer(
@ -2816,7 +2779,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestOfferDtlsSavpfCreateAnswer) {
ContentInfo* offer_content = offer->GetContentByName("audio");
ASSERT_TRUE(offer_content != NULL);
AudioContentDescription* offer_audio_desc =
static_cast<AudioContentDescription*>(offer_content->description);
offer_content->media_description()->as_audio();
offer_audio_desc->set_protocol(cricket::kMediaProtocolDtlsSavpf);
std::unique_ptr<SessionDescription> answer(
@ -2828,7 +2791,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestOfferDtlsSavpfCreateAnswer) {
ASSERT_FALSE(answer_content->rejected);
const AudioContentDescription* answer_audio_desc =
static_cast<const AudioContentDescription*>(answer_content->description);
answer_content->media_description()->as_audio();
EXPECT_EQ(std::string(cricket::kMediaProtocolDtlsSavpf),
answer_audio_desc->protocol());
}
@ -2852,11 +2815,9 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoDtls) {
offer.reset(f1_.CreateOffer(options, NULL));
ASSERT_TRUE(offer.get() != NULL);
audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
offer->GetContentDescriptionByName("audio"));
audio_media_desc = offer->GetContentDescriptionByName("audio");
ASSERT_TRUE(audio_media_desc != NULL);
video_media_desc = static_cast<const cricket::MediaContentDescription*>(
offer->GetContentDescriptionByName("video"));
video_media_desc = offer->GetContentDescriptionByName("video");
ASSERT_TRUE(video_media_desc != NULL);
EXPECT_EQ(2u, audio_media_desc->cryptos().size());
EXPECT_EQ(1u, video_media_desc->cryptos().size());
@ -2872,11 +2833,9 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoDtls) {
answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
ASSERT_TRUE(answer.get() != NULL);
audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
answer->GetContentDescriptionByName("audio"));
audio_media_desc = answer->GetContentDescriptionByName("audio");
ASSERT_TRUE(audio_media_desc != NULL);
video_media_desc = static_cast<const cricket::MediaContentDescription*>(
answer->GetContentDescriptionByName("video"));
video_media_desc = answer->GetContentDescriptionByName("video");
ASSERT_TRUE(video_media_desc != NULL);
EXPECT_EQ(1u, audio_media_desc->cryptos().size());
EXPECT_EQ(1u, video_media_desc->cryptos().size());
@ -2893,11 +2852,9 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoDtls) {
answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
ASSERT_TRUE(answer.get() != NULL);
audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
answer->GetContentDescriptionByName("audio"));
audio_media_desc = answer->GetContentDescriptionByName("audio");
ASSERT_TRUE(audio_media_desc != NULL);
video_media_desc = static_cast<const cricket::MediaContentDescription*>(
answer->GetContentDescriptionByName("video"));
video_media_desc = answer->GetContentDescriptionByName("video");
ASSERT_TRUE(video_media_desc != NULL);
EXPECT_TRUE(audio_media_desc->cryptos().empty());
EXPECT_TRUE(video_media_desc->cryptos().empty());
@ -2917,11 +2874,9 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoDtls) {
// in new offer.
offer.reset(f1_.CreateOffer(options, offer.get()));
ASSERT_TRUE(offer.get() != NULL);
audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
offer->GetContentDescriptionByName("audio"));
audio_media_desc = offer->GetContentDescriptionByName("audio");
ASSERT_TRUE(audio_media_desc != NULL);
video_media_desc = static_cast<const cricket::MediaContentDescription*>(
offer->GetContentDescriptionByName("video"));
video_media_desc = offer->GetContentDescriptionByName("video");
ASSERT_TRUE(video_media_desc != NULL);
EXPECT_TRUE(audio_media_desc->cryptos().empty());
EXPECT_TRUE(video_media_desc->cryptos().empty());
@ -3082,30 +3037,26 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ASSERT_EQ(4u, offer->contents().size());
EXPECT_FALSE(offer->contents()[0].rejected);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(
offer->contents()[0].description);
offer->contents()[0].media_description()->as_audio();
ASSERT_EQ(1u, acd->streams().size());
EXPECT_EQ(kAudioTrack1, acd->streams()[0].id);
EXPECT_EQ(RtpTransceiverDirection::kSendRecv, acd->direction());
EXPECT_FALSE(offer->contents()[1].rejected);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(
offer->contents()[1].description);
offer->contents()[1].media_description()->as_video();
ASSERT_EQ(1u, vcd->streams().size());
EXPECT_EQ(kVideoTrack1, vcd->streams()[0].id);
EXPECT_EQ(RtpTransceiverDirection::kSendRecv, vcd->direction());
EXPECT_FALSE(offer->contents()[2].rejected);
acd = static_cast<const AudioContentDescription*>(
offer->contents()[2].description);
acd = offer->contents()[2].media_description()->as_audio();
ASSERT_EQ(1u, acd->streams().size());
EXPECT_EQ(kAudioTrack2, acd->streams()[0].id);
EXPECT_EQ(RtpTransceiverDirection::kSendRecv, acd->direction());
EXPECT_FALSE(offer->contents()[3].rejected);
vcd = static_cast<const VideoContentDescription*>(
offer->contents()[3].description);
vcd = offer->contents()[3].media_description()->as_video();
ASSERT_EQ(1u, vcd->streams().size());
EXPECT_EQ(kVideoTrack2, vcd->streams()[0].id);
EXPECT_EQ(RtpTransceiverDirection::kSendRecv, vcd->direction());
@ -3144,30 +3095,26 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ASSERT_EQ(4u, answer->contents().size());
EXPECT_FALSE(answer->contents()[0].rejected);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(
answer->contents()[0].description);
answer->contents()[0].media_description()->as_audio();
ASSERT_EQ(1u, acd->streams().size());
EXPECT_EQ(kAudioTrack1, acd->streams()[0].id);
EXPECT_EQ(RtpTransceiverDirection::kSendRecv, acd->direction());
EXPECT_FALSE(answer->contents()[1].rejected);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(
answer->contents()[1].description);
answer->contents()[1].media_description()->as_video();
ASSERT_EQ(1u, vcd->streams().size());
EXPECT_EQ(kVideoTrack1, vcd->streams()[0].id);
EXPECT_EQ(RtpTransceiverDirection::kSendRecv, vcd->direction());
EXPECT_FALSE(answer->contents()[2].rejected);
acd = static_cast<const AudioContentDescription*>(
answer->contents()[2].description);
acd = answer->contents()[2].media_description()->as_audio();
ASSERT_EQ(1u, acd->streams().size());
EXPECT_EQ(kAudioTrack2, acd->streams()[0].id);
EXPECT_EQ(RtpTransceiverDirection::kSendRecv, acd->direction());
EXPECT_FALSE(answer->contents()[3].rejected);
vcd = static_cast<const VideoContentDescription*>(
answer->contents()[3].description);
vcd = answer->contents()[3].media_description()->as_video();
ASSERT_EQ(1u, vcd->streams().size());
EXPECT_EQ(kVideoTrack2, vcd->streams()[0].id);
EXPECT_EQ(RtpTransceiverDirection::kSendRecv, vcd->direction());
@ -3284,11 +3231,9 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ASSERT_TRUE(offer);
ASSERT_EQ(2u, offer->contents().size());
const VideoContentDescription* vcd1 =
static_cast<const VideoContentDescription*>(
offer->contents()[0].description);
offer->contents()[0].media_description()->as_video();
const VideoContentDescription* vcd2 =
static_cast<const VideoContentDescription*>(
offer->contents()[1].description);
offer->contents()[1].media_description()->as_video();
EXPECT_EQ(vcd1->codecs().size(), vcd2->codecs().size());
ASSERT_EQ(2u, vcd1->codecs().size());
EXPECT_EQ(vcd1->codecs()[0].name, vcd2->codecs()[0].name);
@ -3301,10 +3246,8 @@ TEST_F(MediaSessionDescriptionFactoryTest,
f2_.CreateAnswer(offer.get(), opts, nullptr));
ASSERT_TRUE(answer);
ASSERT_EQ(2u, answer->contents().size());
vcd1 = static_cast<const VideoContentDescription*>(
answer->contents()[0].description);
vcd2 = static_cast<const VideoContentDescription*>(
answer->contents()[1].description);
vcd1 = answer->contents()[0].media_description()->as_video();
vcd2 = answer->contents()[1].media_description()->as_video();
EXPECT_EQ(vcd1->codecs().size(), vcd2->codecs().size());
ASSERT_EQ(1u, vcd1->codecs().size());
EXPECT_EQ(vcd1->codecs()[0].name, vcd2->codecs()[0].name);
@ -3325,10 +3268,9 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ASSERT_TRUE(offer);
ASSERT_EQ(2u, offer->contents().size());
VideoContentDescription* vcd1 =
static_cast<VideoContentDescription*>(offer->contents()[0].description);
offer->contents()[0].media_description()->as_video();
const VideoContentDescription* vcd2 =
static_cast<const VideoContentDescription*>(
offer->contents()[1].description);
offer->contents()[1].media_description()->as_video();
auto video_codecs = MAKE_VECTOR(kVideoCodecs1);
EXPECT_EQ(video_codecs, vcd1->codecs());
EXPECT_EQ(video_codecs, vcd2->codecs());
@ -3339,10 +3281,8 @@ TEST_F(MediaSessionDescriptionFactoryTest,
vcd1->set_codecs(video_codecs_reverse);
std::unique_ptr<SessionDescription> updated_offer(
f1_.CreateOffer(opts, offer.get()));
vcd1 = static_cast<VideoContentDescription*>(
updated_offer->contents()[0].description);
vcd2 = static_cast<const VideoContentDescription*>(
updated_offer->contents()[1].description);
vcd1 = updated_offer->contents()[0].media_description()->as_video();
vcd2 = updated_offer->contents()[1].media_description()->as_video();
// The video codec preference order should be respected.
EXPECT_EQ(video_codecs_reverse, vcd1->codecs());
EXPECT_EQ(video_codecs, vcd2->codecs());
@ -3362,10 +3302,9 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ASSERT_TRUE(offer);
ASSERT_EQ(2u, offer->contents().size());
VideoContentDescription* vcd1 =
static_cast<VideoContentDescription*>(offer->contents()[0].description);
offer->contents()[0].media_description()->as_video();
const VideoContentDescription* vcd2 =
static_cast<const VideoContentDescription*>(
offer->contents()[1].description);
offer->contents()[1].media_description()->as_video();
auto video_codecs = MAKE_VECTOR(kVideoCodecs1);
EXPECT_EQ(video_codecs, vcd1->codecs());
EXPECT_EQ(video_codecs, vcd2->codecs());
@ -3376,10 +3315,8 @@ TEST_F(MediaSessionDescriptionFactoryTest,
vcd1->set_codecs(video_codecs_reverse);
std::unique_ptr<SessionDescription> answer(
f1_.CreateAnswer(offer.get(), opts, nullptr));
vcd1 =
static_cast<VideoContentDescription*>(answer->contents()[0].description);
vcd2 = static_cast<const VideoContentDescription*>(
answer->contents()[1].description);
vcd1 = answer->contents()[0].media_description()->as_video();
vcd2 = answer->contents()[1].media_description()->as_video();
// The video codec preference order should be respected.
EXPECT_EQ(video_codecs_reverse, vcd1->codecs());
EXPECT_EQ(video_codecs, vcd2->codecs());
@ -3419,10 +3356,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerWithLocalCodecParams) {
std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, nullptr));
ASSERT_TRUE(offer);
auto offer_acd =
static_cast<AudioContentDescription*>(offer->contents()[0].description);
auto offer_vcd =
static_cast<VideoContentDescription*>(offer->contents()[1].description);
auto offer_acd = offer->contents()[0].media_description()->as_audio();
auto offer_vcd = offer->contents()[1].media_description()->as_video();
std::string value;
EXPECT_TRUE(offer_acd->codecs()[0].GetParam(audio_param_name, &value));
EXPECT_EQ(audio_value1, value);
@ -3432,10 +3367,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerWithLocalCodecParams) {
std::unique_ptr<SessionDescription> answer(
f2_.CreateAnswer(offer.get(), opts, nullptr));
ASSERT_TRUE(answer);
auto answer_acd =
static_cast<AudioContentDescription*>(answer->contents()[0].description);
auto answer_vcd =
static_cast<VideoContentDescription*>(answer->contents()[1].description);
auto answer_acd = answer->contents()[0].media_description()->as_audio();
auto answer_vcd = answer->contents()[1].media_description()->as_video();
// Use the parameters from the local codecs.
EXPECT_TRUE(answer_acd->codecs()[0].GetParam(audio_param_name, &value));
EXPECT_EQ(audio_value2, value);
@ -3478,8 +3411,7 @@ TEST_P(MediaProtocolTest, TestAudioVideoAcceptance) {
ASSERT_TRUE(offer.get() != nullptr);
// Set the protocol for all the contents.
for (auto content : offer.get()->contents()) {
static_cast<MediaContentDescription*>(content.description)
->set_protocol(GetParam());
content.media_description()->set_protocol(GetParam());
}
std::unique_ptr<SessionDescription> answer(
f2_.CreateAnswer(offer.get(), opts, nullptr));
@ -3489,10 +3421,8 @@ TEST_P(MediaProtocolTest, TestAudioVideoAcceptance) {
ASSERT_TRUE(vc != nullptr);
EXPECT_FALSE(ac->rejected); // the offer is accepted
EXPECT_FALSE(vc->rejected);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_EQ(GetParam(), acd->protocol());
EXPECT_EQ(GetParam(), vcd->protocol());
}
@ -3593,15 +3523,14 @@ void TestAudioCodecsOffer(RtpTransceiverDirection direction) {
std::unique_ptr<SessionDescription> offer(sf.CreateOffer(opts, NULL));
ASSERT_TRUE(offer.get() != NULL);
const ContentInfo* ac = offer->GetContentByName("audio");
ContentInfo* ac = offer->GetContentByName("audio");
// If the factory didn't add any audio content to the offer, we cannot check
// that the codecs put in are right. This happens when we neither want to
// send nor receive audio. The checks are still in place if at some point
// we'd instead create an inactive stream.
if (ac) {
AudioContentDescription* acd =
static_cast<AudioContentDescription*>(ac->description);
AudioContentDescription* acd = ac->media_description()->as_audio();
// sendrecv and inactive should both present lists as if the channel was
// to be used for sending and receiving. Inactive essentially means it
// might eventually be used anything, but we don't know more at this
@ -3711,9 +3640,8 @@ void TestAudioCodecsAnswer(RtpTransceiverDirection offer_direction,
// to send nor receive audio. The checks are still in place if at some point
// we'd instead create an inactive stream.
if (ac) {
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
ASSERT_EQ(MEDIA_TYPE_AUDIO, ac->media_description()->type());
const AudioContentDescription* acd = ac->media_description()->as_audio();
std::vector<AudioCodec> target_codecs;
// For offers with sendrecv or inactive, we should never reply with more


@ -334,11 +334,9 @@ bool MediaSectionsInSameOrder(const SessionDescription* existing_desc,
return false;
}
const MediaContentDescription* new_desc_mdesc =
static_cast<const MediaContentDescription*>(
new_desc->contents()[i].description);
new_desc->contents()[i].media_description();
const MediaContentDescription* existing_desc_mdesc =
static_cast<const MediaContentDescription*>(
existing_desc->contents()[i].description);
existing_desc->contents()[i].media_description();
if (new_desc_mdesc->type() != existing_desc_mdesc->type()) {
return false;
}
@ -378,8 +376,7 @@ RTCError VerifyCrypto(const SessionDescription* desc, bool dtls_enabled) {
// If the content isn't rejected or bundled into another m= section, crypto
// must be present.
const MediaContentDescription* media =
static_cast<const MediaContentDescription*>(content_info.description);
const MediaContentDescription* media = content_info.media_description();
const TransportInfo* tinfo = desc->GetTransportInfoByName(mid);
if (!media || !tinfo) {
// Something is not right.
@ -445,30 +442,20 @@ bool GetTrackIdBySsrc(const SessionDescription* session_description,
std::string* track_id) {
RTC_DCHECK(track_id != NULL);
const cricket::ContentInfo* audio_info =
cricket::GetFirstAudioContent(session_description);
if (audio_info) {
const cricket::MediaContentDescription* audio_content =
static_cast<const cricket::MediaContentDescription*>(
audio_info->description);
const auto* found =
cricket::GetStreamBySsrc(audio_content->streams(), ssrc);
const cricket::AudioContentDescription* audio_desc =
cricket::GetFirstAudioContentDescription(session_description);
if (audio_desc) {
const auto* found = cricket::GetStreamBySsrc(audio_desc->streams(), ssrc);
if (found) {
*track_id = found->id;
return true;
}
}
const cricket::ContentInfo* video_info =
cricket::GetFirstVideoContent(session_description);
if (video_info) {
const cricket::MediaContentDescription* video_content =
static_cast<const cricket::MediaContentDescription*>(
video_info->description);
const auto* found =
cricket::GetStreamBySsrc(video_content->streams(), ssrc);
const cricket::VideoContentDescription* video_desc =
cricket::GetFirstVideoContentDescription(session_description);
if (video_desc) {
const auto* found = cricket::GetStreamBySsrc(video_desc->streams(), ssrc);
if (found) {
*track_id = found->id;
return true;
@ -480,18 +467,16 @@ bool GetTrackIdBySsrc(const SessionDescription* session_description,
// Get the SCTP port out of a SessionDescription.
// Return -1 if not found.
int GetSctpPort(const SessionDescription* session_description) {
const ContentInfo* content_info = GetFirstDataContent(session_description);
RTC_DCHECK(content_info);
if (!content_info) {
const cricket::DataContentDescription* data_desc =
GetFirstDataContentDescription(session_description);
RTC_DCHECK(data_desc);
if (!data_desc) {
return -1;
}
const cricket::DataContentDescription* data =
static_cast<const cricket::DataContentDescription*>(
(content_info->description));
std::string value;
cricket::DataCodec match_pattern(cricket::kGoogleSctpDataCodecPlType,
cricket::kGoogleSctpDataCodecName);
for (const cricket::DataCodec& codec : data->codecs()) {
for (const cricket::DataCodec& codec : data_desc->codecs()) {
if (!codec.Matches(match_pattern)) {
continue;
}
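
The GetFirstDataContentDescription() helper used in this hunk (and its audio/video counterparts used elsewhere in this CL) replaces the GetFirst*Content() lookup plus static_cast pair. A minimal usage sketch, assuming the helper returns null when no such m= section exists (consistent with the null checks above):

  // Sketch only; session_description is any const SessionDescription*.
  const cricket::DataContentDescription* data_desc =
      cricket::GetFirstDataContentDescription(session_description);
  if (!data_desc) {
    return -1;  // no data m= section
  }
  for (const cricket::DataCodec& codec : data_desc->codecs()) {
    // Inspect codec, e.g. match it against kGoogleSctpDataCodecName as above.
  }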
@ -1706,8 +1691,7 @@ RTCError PeerConnection::ApplyLocalDescription(
RemoveSenders(cricket::MEDIA_TYPE_AUDIO);
} else {
const cricket::AudioContentDescription* audio_desc =
static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
audio_content->media_description()->as_audio();
UpdateLocalSenders(audio_desc->streams(), audio_desc->type());
}
}
@ -1719,8 +1703,7 @@ RTCError PeerConnection::ApplyLocalDescription(
RemoveSenders(cricket::MEDIA_TYPE_VIDEO);
} else {
const cricket::VideoContentDescription* video_desc =
static_cast<const cricket::VideoContentDescription*>(
video_content->description);
video_content->media_description()->as_video();
UpdateLocalSenders(video_desc->streams(), video_desc->type());
}
}
@ -1729,8 +1712,7 @@ RTCError PeerConnection::ApplyLocalDescription(
GetFirstDataContent(local_description()->description());
if (data_content) {
const cricket::DataContentDescription* data_desc =
static_cast<const cricket::DataContentDescription*>(
data_content->description);
data_content->media_description()->as_data();
if (rtc::starts_with(data_desc->protocol().data(),
cricket::kMediaProtocolRtpPrefix)) {
UpdateLocalRtpDataChannels(data_desc->streams());
@ -3712,7 +3694,7 @@ RTCError PeerConnection::PushdownMediaDescription(
continue;
}
const MediaContentDescription* content_desc =
static_cast<const MediaContentDescription*>(content_info->description);
content_info->media_description();
if (!content_desc) {
continue;
}
@ -3732,8 +3714,7 @@ RTCError PeerConnection::PushdownMediaDescription(
cricket::GetFirstDataContent(sdesc->description());
if (data_content && !data_content->rejected) {
const MediaContentDescription* data_desc =
static_cast<const MediaContentDescription*>(
data_content->description);
data_content->media_description();
if (data_desc) {
std::string error;
bool success =
@ -4679,9 +4660,7 @@ bool PeerConnection::ValidateBundleSettings(const SessionDescription* desc) {
}
bool PeerConnection::HasRtcpMuxEnabled(const cricket::ContentInfo* content) {
const cricket::MediaContentDescription* description =
static_cast<cricket::MediaContentDescription*>(content->description);
return description->rtcp_mux();
return content->media_description()->rtcp_mux();
}
RTCError PeerConnection::ValidateSessionDescription(

View File

@ -55,9 +55,7 @@ class PeerConnectionWrapperForBundleTest : public PeerConnectionWrapper {
auto* desc = pc()->remote_description()->description();
for (size_t i = 0; i < desc->contents().size(); i++) {
const auto& content = desc->contents()[i];
auto* media_desc =
static_cast<cricket::MediaContentDescription*>(content.description);
if (media_desc->type() == media_type) {
if (content.media_description()->type() == media_type) {
candidate->set_transport_name(content.name);
JsepIceCandidate jsep_candidate(content.name, i, *candidate);
return pc()->AddIceCandidate(&jsep_candidate);
@ -220,9 +218,7 @@ class PeerConnectionBundleTest : public ::testing::Test {
SdpContentMutator RemoveRtcpMux() {
return [](cricket::ContentInfo* content, cricket::TransportInfo* transport) {
auto* media_desc =
static_cast<cricket::MediaContentDescription*>(content->description);
media_desc->set_rtcp_mux(false);
content->media_description()->set_rtcp_mux(false);
};
}

View File

@ -119,33 +119,25 @@ SdpContentPredicate HaveDtlsFingerprint() {
SdpContentPredicate HaveSdesCryptos() {
return [](const cricket::ContentInfo* content,
const cricket::TransportInfo* transport) {
const auto* media_desc =
static_cast<const cricket::MediaContentDescription*>(
content->description);
return !media_desc->cryptos().empty();
return !content->media_description()->cryptos().empty();
};
}
SdpContentPredicate HaveProtocol(const std::string& protocol) {
return [protocol](const cricket::ContentInfo* content,
const cricket::TransportInfo* transport) {
const auto* media_desc =
static_cast<const cricket::MediaContentDescription*>(
content->description);
return media_desc->protocol() == protocol;
return content->media_description()->protocol() == protocol;
};
}
SdpContentPredicate HaveSdesGcmCryptos(size_t num_crypto_suites) {
return [num_crypto_suites](const cricket::ContentInfo* content,
const cricket::TransportInfo* transport) {
const auto* media_desc =
static_cast<const cricket::MediaContentDescription*>(
content->description);
if (media_desc->cryptos().size() != num_crypto_suites) {
const auto& cryptos = content->media_description()->cryptos();
if (cryptos.size() != num_crypto_suites) {
return false;
}
const cricket::CryptoParams first_params = media_desc->cryptos()[0];
const cricket::CryptoParams first_params = cryptos[0];
return first_params.key_params.size() == 67U &&
first_params.cipher_suite == "AEAD_AES_256_GCM";
};
@ -153,9 +145,7 @@ SdpContentPredicate HaveSdesGcmCryptos(size_t num_crypto_suites) {
SdpContentMutator RemoveSdesCryptos() {
return [](cricket::ContentInfo* content, cricket::TransportInfo* transport) {
auto* media_desc =
static_cast<cricket::MediaContentDescription*>(content->description);
media_desc->set_cryptos({});
content->media_description()->set_cryptos({});
};
}

View File

@ -147,8 +147,7 @@ class PeerConnectionDataChannelTest : public ::testing::Test {
auto* data_content = cricket::GetFirstDataContent(desc);
RTC_DCHECK(data_content);
auto* data_desc = static_cast<cricket::DataContentDescription*>(
data_content->description);
auto* data_desc = data_content->media_description()->as_data();
data_desc->set_codecs({sctp_codec});
}

View File

@ -118,9 +118,7 @@ PeerConnectionInterface::RTCOfferAnswerOptions IceRestartOfferAnswerOptions() {
// attribute from received SDP, simulating a legacy endpoint.
void RemoveSsrcsAndMsids(cricket::SessionDescription* desc) {
for (ContentInfo& content : desc->contents()) {
MediaContentDescription* media_desc =
static_cast<MediaContentDescription*>(content.description);
media_desc->mutable_streams().clear();
content.media_description()->mutable_streams().clear();
}
desc->set_msid_supported(false);
}
@ -2110,9 +2108,8 @@ TEST_F(PeerConnectionIntegrationTest,
// Helper for test below.
void ModifySsrcs(cricket::SessionDescription* desc) {
for (ContentInfo& content : desc->contents()) {
MediaContentDescription* media_desc =
static_cast<MediaContentDescription*>(content.description);
for (cricket::StreamParams& stream : media_desc->mutable_streams()) {
for (cricket::StreamParams& stream :
content.media_description()->mutable_streams()) {
for (uint32_t& ssrc : stream.ssrcs) {
ssrc = rtc::CreateRandomId();
}
@ -2735,10 +2732,9 @@ TEST_F(PeerConnectionIntegrationTest, SctpDataChannelToAudioVideoUpgrade) {
}
static void MakeSpecCompliantSctpOffer(cricket::SessionDescription* desc) {
const ContentInfo* dc_offer = GetFirstDataContent(desc);
ASSERT_NE(nullptr, dc_offer);
cricket::DataContentDescription* dcd_offer =
static_cast<cricket::DataContentDescription*>(dc_offer->description);
GetFirstDataContentDescription(desc);
ASSERT_TRUE(dcd_offer);
dcd_offer->set_use_sctpmap(false);
dcd_offer->set_protocol("UDP/DTLS/SCTP");
}

View File

@ -633,7 +633,7 @@ void ChangeMediaTypeAudioToVideo(cricket::SessionDescription* desc) {
desc->RemoveContentByName(cricket::CN_AUDIO);
auto* video_content = desc->GetContentByName(cricket::CN_VIDEO);
desc->AddContent(cricket::CN_AUDIO, video_content->type,
video_content->description->Copy());
video_content->media_description()->Copy());
}
constexpr char kMLinesOutOfOrder[] =

View File

@ -370,8 +370,7 @@ bool GetFirstSsrc(const cricket::ContentInfo* content_info, int* ssrc) {
return false;
}
const cricket::MediaContentDescription* media_desc =
static_cast<const cricket::MediaContentDescription*>(
content_info->description);
content_info->media_description();
if (!media_desc || media_desc->streams().empty()) {
return false;
}
@ -1064,11 +1063,8 @@ class PeerConnectionInterfaceTest : public testing::Test {
const std::string& GetFirstAudioStreamCname(
const SessionDescriptionInterface* desc) {
const cricket::ContentInfo* audio_content =
cricket::GetFirstAudioContent(desc->description());
const cricket::AudioContentDescription* audio_desc =
static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
cricket::GetFirstAudioContentDescription(desc->description());
return audio_desc->streams()[0].cname;
}
@ -1110,14 +1106,13 @@ class PeerConnectionInterfaceTest : public testing::Test {
}
bool HasCNCodecs(const cricket::ContentInfo* content) {
const cricket::ContentDescription* description = content->description;
RTC_DCHECK(description);
const cricket::AudioContentDescription* audio_content_desc =
static_cast<const cricket::AudioContentDescription*>(description);
RTC_DCHECK(audio_content_desc);
for (size_t i = 0; i < audio_content_desc->codecs().size(); ++i) {
if (audio_content_desc->codecs()[i].name == "CN")
RTC_DCHECK(content);
RTC_DCHECK(content->media_description());
for (const cricket::AudioCodec& codec :
content->media_description()->as_audio()->codecs()) {
if (codec.name == "CN") {
return true;
}
}
return false;
}
@ -1363,19 +1358,13 @@ TEST_F(PeerConnectionInterfaceTest, AddedStreamsPresentInOffer) {
std::unique_ptr<SessionDescriptionInterface> offer;
ASSERT_TRUE(DoCreateOffer(&offer, nullptr));
const cricket::ContentInfo* audio_content =
cricket::GetFirstAudioContent(offer->description());
const cricket::AudioContentDescription* audio_desc =
static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
cricket::GetFirstAudioContentDescription(offer->description());
EXPECT_TRUE(
ContainsTrack(audio_desc->streams(), kStreamLabel1, "audio_track"));
const cricket::ContentInfo* video_content =
cricket::GetFirstVideoContent(offer->description());
const cricket::VideoContentDescription* video_desc =
static_cast<const cricket::VideoContentDescription*>(
video_content->description);
cricket::GetFirstVideoContentDescription(offer->description());
EXPECT_TRUE(
ContainsTrack(video_desc->streams(), kStreamLabel1, "video_track"));
@ -1384,17 +1373,13 @@ TEST_F(PeerConnectionInterfaceTest, AddedStreamsPresentInOffer) {
AddAudioVideoStream(kStreamLabel2, "audio_track2", "video_track2");
ASSERT_TRUE(DoCreateOffer(&offer, nullptr));
audio_content = cricket::GetFirstAudioContent(offer->description());
audio_desc = static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
audio_desc = cricket::GetFirstAudioContentDescription(offer->description());
EXPECT_TRUE(
ContainsTrack(audio_desc->streams(), kStreamLabel1, "audio_track"));
EXPECT_TRUE(
ContainsTrack(audio_desc->streams(), kStreamLabel2, "audio_track2"));
video_content = cricket::GetFirstVideoContent(offer->description());
video_desc = static_cast<const cricket::VideoContentDescription*>(
video_content->description);
video_desc = cricket::GetFirstVideoContentDescription(offer->description());
EXPECT_TRUE(
ContainsTrack(video_desc->streams(), kStreamLabel1, "video_track"));
EXPECT_TRUE(
@ -1444,19 +1429,13 @@ TEST_F(PeerConnectionInterfaceTest, AddTrackRemoveTrack) {
const cricket::ContentInfo* audio_content =
cricket::GetFirstAudioContent(offer->description());
const cricket::AudioContentDescription* audio_desc =
static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
EXPECT_TRUE(
ContainsTrack(audio_desc->streams(), kStreamLabel1, "audio_track"));
EXPECT_TRUE(ContainsTrack(audio_content->media_description()->streams(),
kStreamLabel1, "audio_track"));
const cricket::ContentInfo* video_content =
cricket::GetFirstVideoContent(offer->description());
const cricket::VideoContentDescription* video_desc =
static_cast<const cricket::VideoContentDescription*>(
video_content->description);
EXPECT_TRUE(
ContainsTrack(video_desc->streams(), kStreamLabel1, "video_track"));
EXPECT_TRUE(ContainsTrack(video_content->media_description()->streams(),
kStreamLabel1, "video_track"));
EXPECT_TRUE(DoSetLocalDescription(std::move(offer)));
@ -1468,16 +1447,12 @@ TEST_F(PeerConnectionInterfaceTest, AddTrackRemoveTrack) {
ASSERT_TRUE(DoCreateOffer(&offer, nullptr));
audio_content = cricket::GetFirstAudioContent(offer->description());
audio_desc = static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
EXPECT_FALSE(
ContainsTrack(audio_desc->streams(), kStreamLabel1, "audio_track"));
EXPECT_FALSE(ContainsTrack(audio_content->media_description()->streams(),
kStreamLabel1, "audio_track"));
video_content = cricket::GetFirstVideoContent(offer->description());
video_desc = static_cast<const cricket::VideoContentDescription*>(
video_content->description);
EXPECT_FALSE(
ContainsTrack(video_desc->streams(), kStreamLabel1, "video_track"));
EXPECT_FALSE(ContainsTrack(video_content->media_description()->streams(),
kStreamLabel1, "video_track"));
EXPECT_TRUE(DoSetLocalDescription(std::move(offer)));
@ -2190,17 +2165,13 @@ TEST_F(PeerConnectionInterfaceTest, CreateSubsequentRecvOnlyOffer) {
const cricket::ContentInfo* video_content =
cricket::GetFirstVideoContent(offer->description());
const cricket::VideoContentDescription* video_desc =
static_cast<const cricket::VideoContentDescription*>(
video_content->description);
ASSERT_EQ(RtpTransceiverDirection::kRecvOnly, video_desc->direction());
ASSERT_EQ(RtpTransceiverDirection::kRecvOnly,
video_content->media_description()->direction());
const cricket::ContentInfo* audio_content =
cricket::GetFirstAudioContent(offer->description());
const cricket::AudioContentDescription* audio_desc =
static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
ASSERT_EQ(RtpTransceiverDirection::kRecvOnly, audio_desc->direction());
ASSERT_EQ(RtpTransceiverDirection::kRecvOnly,
audio_content->media_description()->direction());
}
// Test that if we're receiving (but not sending) a track, and the
@ -2227,17 +2198,13 @@ TEST_F(PeerConnectionInterfaceTest, CreateSubsequentInactiveOffer) {
const cricket::ContentInfo* video_content =
cricket::GetFirstVideoContent(offer->description());
const cricket::VideoContentDescription* video_desc =
static_cast<const cricket::VideoContentDescription*>(
video_content->description);
ASSERT_EQ(RtpTransceiverDirection::kInactive, video_desc->direction());
ASSERT_EQ(RtpTransceiverDirection::kInactive,
video_content->media_description()->direction());
const cricket::ContentInfo* audio_content =
cricket::GetFirstAudioContent(offer->description());
const cricket::AudioContentDescription* audio_desc =
static_cast<const cricket::AudioContentDescription*>(
audio_content->description);
ASSERT_EQ(RtpTransceiverDirection::kInactive, audio_desc->direction());
ASSERT_EQ(RtpTransceiverDirection::kInactive,
audio_content->media_description()->direction());
}
// Test that we can use SetConfiguration to change the ICE servers of the

View File

@ -127,24 +127,24 @@ ContentInfo* SessionDescription::GetContentByName(const std::string& name) {
return FindContentInfoByName(&contents_, name);
}
const ContentDescription* SessionDescription::GetContentDescriptionByName(
const MediaContentDescription* SessionDescription::GetContentDescriptionByName(
const std::string& name) const {
const ContentInfo* cinfo = FindContentInfoByName(contents_, name);
if (cinfo == NULL) {
return NULL;
}
return cinfo->description;
return cinfo->media_description();
}
ContentDescription* SessionDescription::GetContentDescriptionByName(
MediaContentDescription* SessionDescription::GetContentDescriptionByName(
const std::string& name) {
ContentInfo* cinfo = FindContentInfoByName(&contents_, name);
if (cinfo == NULL) {
return NULL;
}
return cinfo->description;
return cinfo->media_description();
}
const ContentInfo* SessionDescription::FirstContentByType(
@ -158,7 +158,7 @@ const ContentInfo* SessionDescription::FirstContent() const {
void SessionDescription::AddContent(const std::string& name,
MediaProtocolType type,
ContentDescription* description) {
MediaContentDescription* description) {
ContentInfo content(type);
content.name = name;
content.description = description;
@ -168,7 +168,7 @@ void SessionDescription::AddContent(const std::string& name,
void SessionDescription::AddContent(const std::string& name,
MediaProtocolType type,
bool rejected,
ContentDescription* description) {
MediaContentDescription* description) {
ContentInfo content(type);
content.name = name;
content.rejected = rejected;
@ -180,7 +180,7 @@ void SessionDescription::AddContent(const std::string& name,
MediaProtocolType type,
bool rejected,
bool bundle_only,
ContentDescription* description) {
MediaContentDescription* description) {
ContentInfo content(type);
content.name = name;
content.rejected = rejected;

View File

@ -248,7 +248,7 @@ class AudioContentDescription : public MediaContentDescriptionImpl<AudioCodec> {
public:
AudioContentDescription() {}
virtual ContentDescription* Copy() const {
virtual AudioContentDescription* Copy() const {
return new AudioContentDescription(*this);
}
virtual MediaType type() const { return MEDIA_TYPE_AUDIO; }
@ -258,7 +258,7 @@ class AudioContentDescription : public MediaContentDescriptionImpl<AudioCodec> {
class VideoContentDescription : public MediaContentDescriptionImpl<VideoCodec> {
public:
virtual ContentDescription* Copy() const {
virtual VideoContentDescription* Copy() const {
return new VideoContentDescription(*this);
}
virtual MediaType type() const { return MEDIA_TYPE_VIDEO; }
@ -270,7 +270,7 @@ class DataContentDescription : public MediaContentDescriptionImpl<DataCodec> {
public:
DataContentDescription() {}
virtual ContentDescription* Copy() const {
virtual DataContentDescription* Copy() const {
return new DataContentDescription(*this);
}
virtual MediaType type() const { return MEDIA_TYPE_DATA; }
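
The Copy() overrides above now return the derived type instead of ContentDescription*, relying on C++ covariant return types; that is what lets callers later in this diff drop casts such as static_cast<AudioContentDescription*>(audio_desc_->Copy()). A minimal, self-contained illustration of the language feature (hypothetical Base/Derived names, not WebRTC types):

  struct Base {
    virtual ~Base() = default;
    virtual Base* Copy() const { return new Base(*this); }
  };
  struct Derived : Base {
    // Covariant return: overrides Base::Copy() yet returns Derived*.
    Derived* Copy() const override { return new Derived(*this); }
  };
  // Derived d; Derived* copy = d.Copy();  // no downcast needed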
@ -300,6 +300,8 @@ constexpr MediaProtocolType NS_JINGLE_DRAFT_SCTP = MediaProtocolType::kSctp;
// Represents a session description section. Most information about the section
// is stored in the description, which is a subclass of MediaContentDescription.
struct ContentInfo {
friend class SessionDescription;
explicit ContentInfo(MediaProtocolType type) : type(type) {}
// Alias for |name|.
@ -320,7 +322,7 @@ struct ContentInfo {
MediaProtocolType type;
bool rejected = false;
bool bundle_only = false;
ContentDescription* description = nullptr;
MediaContentDescription* description = nullptr;
};
typedef std::vector<std::string> ContentNames;
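
With the description member now typed as MediaContentDescription*, the media_description() helper used throughout this CL (introduced in the earlier CL mentioned in the commit message) can plausibly be a trivial accessor. The following is an assumption-level sketch only; its actual definition is not part of this diff.

  // Hypothetical shape of the helper (not shown in this change).
  MediaContentDescription* media_description() { return description; }
  const MediaContentDescription* media_description() const { return description; }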
@ -379,9 +381,9 @@ class SessionDescription {
ContentInfos& contents() { return contents_; }
const ContentInfo* GetContentByName(const std::string& name) const;
ContentInfo* GetContentByName(const std::string& name);
const ContentDescription* GetContentDescriptionByName(
const MediaContentDescription* GetContentDescriptionByName(
const std::string& name) const;
ContentDescription* GetContentDescriptionByName(const std::string& name);
MediaContentDescription* GetContentDescriptionByName(const std::string& name);
const ContentInfo* FirstContentByType(MediaProtocolType type) const;
const ContentInfo* FirstContent() const;
@ -389,16 +391,16 @@ class SessionDescription {
// Adds a content to this description. Takes ownership of ContentDescription*.
void AddContent(const std::string& name,
MediaProtocolType type,
ContentDescription* description);
MediaContentDescription* description);
void AddContent(const std::string& name,
MediaProtocolType type,
bool rejected,
ContentDescription* description);
MediaContentDescription* description);
void AddContent(const std::string& name,
MediaProtocolType type,
bool rejected,
bool bundle_only,
ContentDescription* description);
MediaContentDescription* description);
bool RemoveContentByName(const std::string& name);
// Transport accessors.
@ -444,8 +446,8 @@ class SessionDescription {
bool msid_supported_ = true;
};
// Indicates whether a ContentDescription was sent by the local client
// or received from the remote client.
// Indicates whether a session description was sent by the local client or
// received from the remote client.
enum ContentSource { CS_LOCAL, CS_REMOTE };
} // namespace cricket

View File

@ -44,7 +44,6 @@
using cricket::AudioContentDescription;
using cricket::Candidate;
using cricket::Candidates;
using cricket::ContentDescription;
using cricket::ContentInfo;
using cricket::CryptoParams;
using cricket::DataContentDescription;
@ -640,13 +639,9 @@ void CreateTracksFromSsrcInfos(const SsrcInfoVec& ssrc_infos,
void GetMediaStreamLabels(const ContentInfo* content,
std::set<std::string>* labels) {
const MediaContentDescription* media_desc =
static_cast<const MediaContentDescription*>(
content->description);
const cricket::StreamParamsVec& streams = media_desc->streams();
for (cricket::StreamParamsVec::const_iterator it = streams.begin();
it != streams.end(); ++it) {
labels->insert(it->sync_label);
for (const StreamParams& stream_params :
content->media_description()->streams()) {
labels->insert(stream_params.sync_label);
}
}
@ -830,8 +825,7 @@ std::string SdpSerialize(const JsepSessionDescription& jdesc,
int mline_index = -1;
for (cricket::ContentInfos::const_iterator it = desc->contents().begin();
it != desc->contents().end(); ++it) {
const MediaContentDescription* mdesc =
static_cast<const MediaContentDescription*>(it->description);
const MediaContentDescription* mdesc = it->media_description();
std::vector<Candidate> candidates;
GetCandidatesByMindex(jdesc, ++mline_index, &candidates);
BuildMediaDescription(&*it, desc->GetTransportInfoByName(it->name),
@ -1216,10 +1210,8 @@ void BuildMediaDescription(const ContentInfo* content_info,
// http://google-styleguide.googlecode.com/svn/
// trunk/cppguide.xml?showone=Streams#Streams
std::ostringstream os;
const MediaContentDescription* media_desc =
static_cast<const MediaContentDescription*>(
content_info->description);
RTC_DCHECK(media_desc != NULL);
const MediaContentDescription* media_desc = content_info->media_description();
RTC_DCHECK(media_desc);
int sctp_port = cricket::kSctpDefaultPort;
@ -1238,8 +1230,7 @@ void BuildMediaDescription(const ContentInfo* content_info,
std::string fmt;
if (media_type == cricket::MEDIA_TYPE_VIDEO) {
const VideoContentDescription* video_desc =
static_cast<const VideoContentDescription*>(media_desc);
const VideoContentDescription* video_desc = media_desc->as_video();
for (std::vector<cricket::VideoCodec>::const_iterator it =
video_desc->codecs().begin();
it != video_desc->codecs().end(); ++it) {
@ -1247,8 +1238,7 @@ void BuildMediaDescription(const ContentInfo* content_info,
fmt.append(rtc::ToString<int>(it->id));
}
} else if (media_type == cricket::MEDIA_TYPE_AUDIO) {
const AudioContentDescription* audio_desc =
static_cast<const AudioContentDescription*>(media_desc);
const AudioContentDescription* audio_desc = media_desc->as_audio();
for (std::vector<cricket::AudioCodec>::const_iterator it =
audio_desc->codecs().begin();
it != audio_desc->codecs().end(); ++it) {
@ -1256,8 +1246,7 @@ void BuildMediaDescription(const ContentInfo* content_info,
fmt.append(rtc::ToString<int>(it->id));
}
} else if (media_type == cricket::MEDIA_TYPE_DATA) {
const DataContentDescription* data_desc =
static_cast<const DataContentDescription*>(media_desc);
const DataContentDescription* data_desc = media_desc->as_data();
if (IsDtlsSctp(media_desc->protocol())) {
fmt.append(" ");
@ -1412,8 +1401,7 @@ void BuildMediaDescription(const ContentInfo* content_info,
AddLine(os.str(), message);
if (IsDtlsSctp(media_desc->protocol())) {
const DataContentDescription* data_desc =
static_cast<const DataContentDescription*>(media_desc);
const DataContentDescription* data_desc = media_desc->as_data();
bool use_sctpmap = data_desc->use_sctpmap();
BuildSctpContentAttributes(message, sctp_port, use_sctpmap);
} else if (IsRtp(media_desc->protocol())) {
@ -1735,8 +1723,7 @@ void BuildRtpMap(const MediaContentDescription* media_desc,
RTC_DCHECK(media_desc != NULL);
std::ostringstream os;
if (media_type == cricket::MEDIA_TYPE_VIDEO) {
const VideoContentDescription* video_desc =
static_cast<const VideoContentDescription*>(media_desc);
const VideoContentDescription* video_desc = media_desc->as_video();
for (std::vector<cricket::VideoCodec>::const_iterator it =
video_desc->codecs().begin();
it != video_desc->codecs().end(); ++it) {
@ -1753,8 +1740,7 @@ void BuildRtpMap(const MediaContentDescription* media_desc,
AddFmtpLine(*it, message);
}
} else if (media_type == cricket::MEDIA_TYPE_AUDIO) {
const AudioContentDescription* audio_desc =
static_cast<const AudioContentDescription*>(media_desc);
const AudioContentDescription* audio_desc = media_desc->as_audio();
std::vector<int> ptimes;
std::vector<int> maxptimes;
int max_minptime = 0;
@ -1803,8 +1789,7 @@ void BuildRtpMap(const MediaContentDescription* media_desc,
AddAttributeLine(kCodecParamPTime, ptime, message);
}
} else if (media_type == cricket::MEDIA_TYPE_DATA) {
const DataContentDescription* data_desc =
static_cast<const DataContentDescription*>(media_desc);
const DataContentDescription* data_desc = media_desc->as_data();
for (std::vector<cricket::DataCodec>::const_iterator it =
data_desc->codecs().begin();
it != data_desc->codecs().end(); ++it) {
@ -2689,8 +2674,7 @@ bool ParseContent(const std::string& message,
RTC_DCHECK(transport != NULL);
if (media_type == cricket::MEDIA_TYPE_AUDIO) {
MaybeCreateStaticPayloadAudioCodecs(
payload_types, static_cast<AudioContentDescription*>(media_desc));
MaybeCreateStaticPayloadAudioCodecs(payload_types, media_desc->as_audio());
}
// The media level "ice-ufrag" and "ice-pwd".
@ -2839,8 +2823,7 @@ bool ParseContent(const std::string& message,
if (!ParseSctpPort(line, &sctp_port, error)) {
return false;
}
if (!AddSctpDataCodec(static_cast<DataContentDescription*>(media_desc),
sctp_port)) {
if (!AddSctpDataCodec(media_desc->as_data(), sctp_port)) {
return false;
}
} else if (IsRtp(protocol)) {
@ -2943,8 +2926,7 @@ bool ParseContent(const std::string& message,
}
if (media_type == cricket::MEDIA_TYPE_AUDIO) {
AudioContentDescription* audio_desc =
static_cast<AudioContentDescription*>(media_desc);
AudioContentDescription* audio_desc = media_desc->as_audio();
UpdateFromWildcardCodecs(audio_desc);
// Verify audio codec ensures that no audio codec has been populated with
@ -2957,8 +2939,7 @@ bool ParseContent(const std::string& message,
}
if (media_type == cricket::MEDIA_TYPE_VIDEO) {
VideoContentDescription* video_desc =
static_cast<VideoContentDescription*>(media_desc);
VideoContentDescription* video_desc = media_desc->as_video();
UpdateFromWildcardCodecs(video_desc);
// Verify video codec ensures that no video codec has been populated with
// only rtcp-fb.
@ -3201,8 +3182,7 @@ bool ParseRtpmapAttribute(const std::string& line,
return false;
}
if (media_type == cricket::MEDIA_TYPE_VIDEO) {
VideoContentDescription* video_desc =
static_cast<VideoContentDescription*>(media_desc);
VideoContentDescription* video_desc = media_desc->as_video();
UpdateCodec(payload_type, encoding_name,
video_desc);
} else if (media_type == cricket::MEDIA_TYPE_AUDIO) {
@ -3217,13 +3197,11 @@ bool ParseRtpmapAttribute(const std::string& line,
return false;
}
}
AudioContentDescription* audio_desc =
static_cast<AudioContentDescription*>(media_desc);
AudioContentDescription* audio_desc = media_desc->as_audio();
UpdateCodec(payload_type, encoding_name, clock_rate, 0, channels,
audio_desc);
} else if (media_type == cricket::MEDIA_TYPE_DATA) {
DataContentDescription* data_desc =
static_cast<DataContentDescription*>(media_desc);
DataContentDescription* data_desc = media_desc->as_data();
data_desc->AddCodec(cricket::DataCodec(payload_type, encoding_name));
}
return true;

View File

@ -1049,8 +1049,8 @@ class WebRtcSdpTest : public testing::Test {
// Turns the existing reference description into a plan B description,
// with 2 audio tracks and 3 video tracks.
void MakePlanBDescription() {
audio_desc_ = static_cast<AudioContentDescription*>(audio_desc_->Copy());
video_desc_ = static_cast<VideoContentDescription*>(video_desc_->Copy());
audio_desc_ = audio_desc_->Copy();
video_desc_ = video_desc_->Copy();
StreamParams audio_track_2;
audio_track_2.id = kAudioTrackId2;
@ -1244,27 +1244,25 @@ class WebRtcSdpTest : public testing::Test {
ASSERT_EQ(IsAudioContent(&c1), IsAudioContent(&c2));
if (IsAudioContent(&c1)) {
const AudioContentDescription* acd1 =
static_cast<const AudioContentDescription*>(c1.description);
c1.media_description()->as_audio();
const AudioContentDescription* acd2 =
static_cast<const AudioContentDescription*>(c2.description);
c2.media_description()->as_audio();
CompareMediaContentDescription<AudioContentDescription>(acd1, acd2);
}
ASSERT_EQ(IsVideoContent(&c1), IsVideoContent(&c2));
if (IsVideoContent(&c1)) {
const VideoContentDescription* vcd1 =
static_cast<const VideoContentDescription*>(c1.description);
c1.media_description()->as_video();
const VideoContentDescription* vcd2 =
static_cast<const VideoContentDescription*>(c2.description);
c2.media_description()->as_video();
CompareMediaContentDescription<VideoContentDescription>(vcd1, vcd2);
}
ASSERT_EQ(IsDataContent(&c1), IsDataContent(&c2));
if (IsDataContent(&c1)) {
const DataContentDescription* dcd1 =
static_cast<const DataContentDescription*>(c1.description);
const DataContentDescription* dcd2 =
static_cast<const DataContentDescription*>(c2.description);
const DataContentDescription* dcd1 = c1.media_description()->as_data();
const DataContentDescription* dcd2 = c2.media_description()->as_data();
CompareDataContentDescription(dcd1, dcd2);
}
}
@ -1438,10 +1436,8 @@ class WebRtcSdpTest : public testing::Test {
}
void AddExtmap(bool encrypted) {
audio_desc_ = static_cast<AudioContentDescription*>(
audio_desc_->Copy());
video_desc_ = static_cast<VideoContentDescription*>(
video_desc_->Copy());
audio_desc_ = audio_desc_->Copy();
video_desc_ = video_desc_->Copy();
audio_desc_->AddRtpHeaderExtension(
RtpExtension(kExtmapUri, kExtmapId, encrypted));
video_desc_->AddRtpHeaderExtension(
@ -1474,10 +1470,8 @@ class WebRtcSdpTest : public testing::Test {
}
bool TestSerializeRejected(bool audio_rejected, bool video_rejected) {
audio_desc_ = static_cast<AudioContentDescription*>(
audio_desc_->Copy());
video_desc_ = static_cast<VideoContentDescription*>(
video_desc_->Copy());
audio_desc_ = audio_desc_->Copy();
video_desc_ = video_desc_->Copy();
desc_.RemoveContentByName(kAudioContentName);
desc_.RemoveContentByName(kVideoContentName);
@ -1559,10 +1553,8 @@ class WebRtcSdpTest : public testing::Test {
JsepSessionDescription new_jdesc(SdpType::kOffer);
EXPECT_TRUE(SdpDeserialize(new_sdp, &new_jdesc));
audio_desc_ = static_cast<AudioContentDescription*>(
audio_desc_->Copy());
video_desc_ = static_cast<VideoContentDescription*>(
video_desc_->Copy());
audio_desc_ = audio_desc_->Copy();
video_desc_ = video_desc_->Copy();
desc_.RemoveContentByName(kAudioContentName);
desc_.RemoveContentByName(kVideoContentName);
desc_.AddContent(kAudioContentName, MediaProtocolType::kRtp, audio_rejected,
@ -1669,10 +1661,9 @@ class WebRtcSdpTest : public testing::Test {
SdpParseError error;
EXPECT_TRUE(webrtc::SdpDeserialize(sdp, jdesc_output, &error));
const ContentInfo* ac = GetFirstAudioContent(jdesc_output->description());
ASSERT_TRUE(ac != NULL);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
GetFirstAudioContentDescription(jdesc_output->description());
ASSERT_TRUE(acd);
ASSERT_FALSE(acd->codecs().empty());
cricket::AudioCodec opus = acd->codecs()[0];
EXPECT_EQ("opus", opus.name);
@ -1689,10 +1680,9 @@ class WebRtcSdpTest : public testing::Test {
VerifyCodecParameter(codec.params, "maxptime", params.max_ptime);
}
const ContentInfo* vc = GetFirstVideoContent(jdesc_output->description());
ASSERT_TRUE(vc != NULL);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
GetFirstVideoContentDescription(jdesc_output->description());
ASSERT_TRUE(vcd);
ASSERT_FALSE(vcd->codecs().empty());
cricket::VideoCodec vp8 = vcd->codecs()[0];
EXPECT_EQ("VP8", vp8.name);
@ -1731,10 +1721,9 @@ class WebRtcSdpTest : public testing::Test {
// Deserialize
SdpParseError error;
EXPECT_TRUE(webrtc::SdpDeserialize(sdp, jdesc_output, &error));
const ContentInfo* ac = GetFirstAudioContent(jdesc_output->description());
ASSERT_TRUE(ac != NULL);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
GetFirstAudioContentDescription(jdesc_output->description());
ASSERT_TRUE(acd);
ASSERT_FALSE(acd->codecs().empty());
cricket::AudioCodec opus = acd->codecs()[0];
EXPECT_EQ(111, opus.id);
@ -1742,10 +1731,9 @@ class WebRtcSdpTest : public testing::Test {
cricket::FeedbackParam(cricket::kRtcpFbParamNack,
cricket::kParamValueEmpty)));
const ContentInfo* vc = GetFirstVideoContent(jdesc_output->description());
ASSERT_TRUE(vc != NULL);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
GetFirstVideoContentDescription(jdesc_output->description());
ASSERT_TRUE(vcd);
ASSERT_FALSE(vcd->codecs().empty());
cricket::VideoCodec vp8 = vcd->codecs()[0];
EXPECT_STREQ(webrtc::JsepSessionDescription::kDefaultVideoCodecName,
@ -1889,11 +1877,9 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBundle) {
}
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBandwidth) {
VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
GetFirstVideoContent(&desc_)->description);
VideoContentDescription* vcd = GetFirstVideoContentDescription(&desc_);
vcd->set_bandwidth(100 * 1000);
AudioContentDescription* acd = static_cast<AudioContentDescription*>(
GetFirstAudioContent(&desc_)->description);
AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_);
acd->set_bandwidth(50 * 1000);
ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
jdesc_.session_id(),
@ -1986,8 +1972,10 @@ TEST_F(WebRtcSdpTest, SerializeWithSctpDataChannelAndNewPort) {
AddSctpDataChannel(use_sctpmap);
JsepSessionDescription jsep_desc(kDummyType);
MakeDescriptionWithoutCandidates(&jsep_desc);
DataContentDescription* dcdesc = static_cast<DataContentDescription*>(
jsep_desc.description()->GetContentDescriptionByName(kDataContentName));
DataContentDescription* dcdesc =
jsep_desc.description()
->GetContentDescriptionByName(kDataContentName)
->as_data();
const int kNewPort = 1234;
cricket::DataCodec codec(cricket::kGoogleSctpDataCodecPlType,
@ -2181,8 +2169,9 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutRtpmap) {
JsepSessionDescription jdesc(kDummyType);
EXPECT_TRUE(SdpDeserialize(kSdpNoRtpmapString, &jdesc));
cricket::AudioContentDescription* audio =
static_cast<AudioContentDescription*>(
jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
jdesc.description()
->GetContentDescriptionByName(cricket::CN_AUDIO)
->as_audio();
AudioCodecs ref_codecs;
// The codecs in the AudioContentDescription should be in the same order as
// the payload types (<fmt>s) on the m= line.
@ -2205,8 +2194,9 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutRtpmapButWithFmtp) {
JsepSessionDescription jdesc(kDummyType);
EXPECT_TRUE(SdpDeserialize(kSdpNoRtpmapString, &jdesc));
cricket::AudioContentDescription* audio =
static_cast<AudioContentDescription*>(
jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
jdesc.description()
->GetContentDescriptionByName(cricket::CN_AUDIO)
->as_audio();
cricket::AudioCodec g729 = audio->codecs()[0];
EXPECT_EQ("G729", g729.name);
@ -2268,11 +2258,9 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithBandwidth) {
&sdp_with_bandwidth);
EXPECT_TRUE(
SdpDeserialize(sdp_with_bandwidth, &jdesc_with_bandwidth));
VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
GetFirstVideoContent(&desc_)->description);
VideoContentDescription* vcd = GetFirstVideoContentDescription(&desc_);
vcd->set_bandwidth(100 * 1000);
AudioContentDescription* acd = static_cast<AudioContentDescription*>(
GetFirstAudioContent(&desc_)->description);
AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_);
acd->set_bandwidth(50 * 1000);
ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
jdesc_.session_id(),
@ -2604,8 +2592,8 @@ void MutateJsepSctpPort(JsepSessionDescription* jdesc,
const SessionDescription& desc) {
// take our pre-built session description and change the SCTP port.
cricket::SessionDescription* mutant = desc.Copy();
DataContentDescription* dcdesc = static_cast<DataContentDescription*>(
mutant->GetContentDescriptionByName(kDataContentName));
DataContentDescription* dcdesc =
mutant->GetContentDescriptionByName(kDataContentName)->as_data();
std::vector<cricket::DataCodec> codecs(dcdesc->codecs());
EXPECT_EQ(1U, codecs.size());
EXPECT_EQ(cricket::kGoogleSctpDataCodecPlType, codecs[0].id);
@ -2680,8 +2668,7 @@ TEST_F(WebRtcSdpTest, DeserializeSdpWithSctpDataChannelsAndBandwidth) {
bool use_sctpmap = true;
AddSctpDataChannel(use_sctpmap);
JsepSessionDescription jdesc(kDummyType);
DataContentDescription* dcd = static_cast<DataContentDescription*>(
GetFirstDataContent(&desc_)->description);
DataContentDescription* dcd = GetFirstDataContentDescription(&desc_);
dcd->set_bandwidth(100 * 1000);
ASSERT_TRUE(jdesc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
@ -2769,13 +2756,15 @@ TEST_F(WebRtcSdpTest, DeserializeSdpWithConferenceFlag) {
// Verify
cricket::AudioContentDescription* audio =
static_cast<AudioContentDescription*>(
jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
jdesc.description()
->GetContentDescriptionByName(cricket::CN_AUDIO)
->as_audio();
EXPECT_TRUE(audio->conference_mode());
cricket::VideoContentDescription* video =
static_cast<VideoContentDescription*>(
jdesc.description()->GetContentDescriptionByName(cricket::CN_VIDEO));
jdesc.description()
->GetContentDescriptionByName(cricket::CN_VIDEO)
->as_video();
EXPECT_TRUE(video->conference_mode());
}
@ -2790,13 +2779,15 @@ TEST_F(WebRtcSdpTest, SerializeSdpWithConferenceFlag) {
// Verify.
cricket::AudioContentDescription* audio =
static_cast<AudioContentDescription*>(
jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
jdesc.description()
->GetContentDescriptionByName(cricket::CN_AUDIO)
->as_audio();
EXPECT_TRUE(audio->conference_mode());
cricket::VideoContentDescription* video =
static_cast<VideoContentDescription*>(
jdesc.description()->GetContentDescriptionByName(cricket::CN_VIDEO));
jdesc.description()
->GetContentDescriptionByName(cricket::CN_VIDEO)
->as_video();
EXPECT_TRUE(video->conference_mode());
}
@ -2901,10 +2892,9 @@ TEST_F(WebRtcSdpTest, DeserializeSdpWithReorderedPltypes) {
// Deserialize
EXPECT_TRUE(SdpDeserialize(kSdpWithReorderedPlTypesString, &jdesc_output));
const ContentInfo* ac = GetFirstAudioContent(jdesc_output.description());
ASSERT_TRUE(ac != NULL);
const AudioContentDescription* acd =
static_cast<const AudioContentDescription*>(ac->description);
GetFirstAudioContentDescription(jdesc_output.description());
ASSERT_TRUE(acd);
ASSERT_FALSE(acd->codecs().empty());
EXPECT_EQ("ISAC", acd->codecs()[0].name);
EXPECT_EQ(32000, acd->codecs()[0].clockrate);
@ -2956,10 +2946,9 @@ TEST_F(WebRtcSdpTest, DeserializeVideoFmtp) {
EXPECT_TRUE(
webrtc::SdpDeserialize(kSdpWithFmtpString, &jdesc_output, &error));
const ContentInfo* vc = GetFirstVideoContent(jdesc_output.description());
ASSERT_TRUE(vc != NULL);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
GetFirstVideoContentDescription(jdesc_output.description());
ASSERT_TRUE(vcd);
ASSERT_FALSE(vcd->codecs().empty());
cricket::VideoCodec vp8 = vcd->codecs()[0];
EXPECT_EQ("VP8", vp8.name);
@ -2991,11 +2980,9 @@ TEST_F(WebRtcSdpTest, DeserializeVideoFmtpWithSprops) {
EXPECT_TRUE(
webrtc::SdpDeserialize(kSdpWithFmtpString, &jdesc_output, &error));
const ContentInfo* vc = GetFirstVideoContent(jdesc_output.description());
ASSERT_TRUE(vc != NULL);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
ASSERT_TRUE(vcd != NULL);
GetFirstVideoContentDescription(jdesc_output.description());
ASSERT_TRUE(vcd);
ASSERT_FALSE(vcd->codecs().empty());
cricket::VideoCodec h264 = vcd->codecs()[0];
EXPECT_EQ("H264", h264.name);
@ -3026,10 +3013,9 @@ TEST_F(WebRtcSdpTest, DeserializeVideoFmtpWithSpace) {
EXPECT_TRUE(webrtc::SdpDeserialize(kSdpWithFmtpString, &jdesc_output,
&error));
const ContentInfo* vc = GetFirstVideoContent(jdesc_output.description());
ASSERT_TRUE(vc != NULL);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
GetFirstVideoContentDescription(jdesc_output.description());
ASSERT_TRUE(vcd);
ASSERT_FALSE(vcd->codecs().empty());
cricket::VideoCodec vp8 = vcd->codecs()[0];
EXPECT_EQ("VP8", vp8.name);
@ -3044,8 +3030,7 @@ TEST_F(WebRtcSdpTest, DeserializeVideoFmtpWithSpace) {
}
TEST_F(WebRtcSdpTest, SerializeAudioFmtpWithUnknownParameter) {
AudioContentDescription* acd = static_cast<AudioContentDescription*>(
GetFirstAudioContent(&desc_)->description);
AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_);
cricket::AudioCodecs codecs = acd->codecs();
codecs[0].params["unknown-future-parameter"] = "SomeFutureValue";
@ -3063,8 +3048,7 @@ TEST_F(WebRtcSdpTest, SerializeAudioFmtpWithUnknownParameter) {
}
TEST_F(WebRtcSdpTest, SerializeAudioFmtpWithKnownFmtpParameter) {
AudioContentDescription* acd = static_cast<AudioContentDescription*>(
GetFirstAudioContent(&desc_)->description);
AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_);
cricket::AudioCodecs codecs = acd->codecs();
codecs[0].params["stereo"] = "1";
@ -3082,8 +3066,7 @@ TEST_F(WebRtcSdpTest, SerializeAudioFmtpWithKnownFmtpParameter) {
}
TEST_F(WebRtcSdpTest, SerializeAudioFmtpWithPTimeAndMaxPTime) {
AudioContentDescription* acd = static_cast<AudioContentDescription*>(
GetFirstAudioContent(&desc_)->description);
AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_);
cricket::AudioCodecs codecs = acd->codecs();
codecs[0].params["ptime"] = "20";
@ -3103,8 +3086,7 @@ TEST_F(WebRtcSdpTest, SerializeAudioFmtpWithPTimeAndMaxPTime) {
}
TEST_F(WebRtcSdpTest, SerializeVideoFmtp) {
VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
GetFirstVideoContent(&desc_)->description);
VideoContentDescription* vcd = GetFirstVideoContentDescription(&desc_);
cricket::VideoCodecs codecs = vcd->codecs();
codecs[0].params["x-google-min-bitrate"] = "10";
@ -3248,8 +3230,7 @@ TEST_F(WebRtcSdpTest, MediaContentOrderMaintainedRoundTrip) {
for (size_t i = 0; i < 3; ++i) {
const cricket::MediaContentDescription* mdesc =
static_cast<const cricket::MediaContentDescription*>(
desc->contents()[i].description);
desc->contents()[i].media_description();
EXPECT_EQ(media_types[media_content_in_sdp[i]], mdesc->type());
}
@ -3395,10 +3376,9 @@ TEST_F(WebRtcSdpTest, BandwidthLimitOfNegativeOneIgnored) {
JsepSessionDescription jdesc_output(kDummyType);
EXPECT_TRUE(SdpDeserialize(kSdpWithBandwidthOfNegativeOne, &jdesc_output));
const ContentInfo* vc = GetFirstVideoContent(jdesc_output.description());
ASSERT_NE(nullptr, vc);
const VideoContentDescription* vcd =
static_cast<const VideoContentDescription*>(vc->description);
GetFirstVideoContentDescription(jdesc_output.description());
ASSERT_TRUE(vcd);
EXPECT_EQ(cricket::kAutoBandwidth, vcd->bandwidth());
}
@ -3499,14 +3479,10 @@ TEST_F(WebRtcSdpTest, ParseConnectionData) {
const auto& content1 = jsep_desc.description()->contents()[0];
EXPECT_EQ("74.125.127.126:2345",
static_cast<cricket::MediaContentDescription*>(content1.description)
->connection_address()
.ToString());
content1.media_description()->connection_address().ToString());
const auto& content2 = jsep_desc.description()->contents()[1];
EXPECT_EQ("74.125.224.39:3457",
static_cast<cricket::MediaContentDescription*>(content2.description)
->connection_address()
.ToString());
content2.media_description()->connection_address().ToString());
}
// Tests that the session-level connection address will be used if the media
@ -3524,14 +3500,10 @@ TEST_F(WebRtcSdpTest, ParseConnectionDataSessionLevelOnly) {
const auto& content1 = jsep_desc.description()->contents()[0];
EXPECT_EQ("192.168.0.3:9",
static_cast<cricket::MediaContentDescription*>(content1.description)
->connection_address()
.ToString());
content1.media_description()->connection_address().ToString());
const auto& content2 = jsep_desc.description()->contents()[1];
EXPECT_EQ("192.168.0.3:9",
static_cast<cricket::MediaContentDescription*>(content2.description)
->connection_address()
.ToString());
content2.media_description()->connection_address().ToString());
}
TEST_F(WebRtcSdpTest, ParseConnectionDataIPv6) {
@ -3550,14 +3522,10 @@ TEST_F(WebRtcSdpTest, ParseConnectionDataIPv6) {
EXPECT_TRUE(SdpDeserialize(sdp, &jsep_desc));
const auto& content1 = jsep_desc.description()->contents()[0];
EXPECT_EQ("[2001:db8:85a3::8a2e:370:7335]:9",
static_cast<cricket::MediaContentDescription*>(content1.description)
->connection_address()
.ToString());
content1.media_description()->connection_address().ToString());
const auto& content2 = jsep_desc.description()->contents()[1];
EXPECT_EQ("[2001:db8:85a3::8a2e:370:7336]:9",
static_cast<cricket::MediaContentDescription*>(content2.description)
->connection_address()
.ToString());
content2.media_description()->connection_address().ToString());
}
// Test that invalid or unsupported connection data cannot be parsed.
@ -3595,10 +3563,12 @@ TEST_F(WebRtcSdpTest, SerializeAndDeserializeWithConnectionAddress) {
// Deserialization.
JsepSessionDescription jdesc(kDummyType);
EXPECT_TRUE(SdpDeserialize(message, &jdesc));
auto audio_desc = static_cast<cricket::MediaContentDescription*>(
jdesc.description()->GetContentByName(kAudioContentName)->description);
auto video_desc = static_cast<cricket::MediaContentDescription*>(
jdesc.description()->GetContentByName(kVideoContentName)->description);
auto audio_desc = jdesc.description()
->GetContentByName(kAudioContentName)
->media_description();
auto video_desc = jdesc.description()
->GetContentByName(kVideoContentName)
->media_description();
EXPECT_EQ(audio_desc_->connection_address().ToString(),
audio_desc->connection_address().ToString());
EXPECT_EQ(video_desc_->connection_address().ToString(),