Change RtpSender to have multiple stream_ids
This is part of the work towards implementing Unified Plan. In Unified Plan,
an RtpSender/MediaStreamTrack can be a part of multiple streams. This changes
RtpSender to internally store stream ids as a vector so that later CLs can
update downstream consumers to support multiple streams. For now, the vector
can only have exactly one element in it, but once the supporting code has been
updated the singular getter/setter will be removed and any number of streams
can be associated with an RtpSender.

Bug: webrtc:8183
Change-Id: I499a77ce79198861d35c91328b40ced2eb913cc0
Reviewed-on: https://chromium-review.googlesource.com/646712
Reviewed-by: Taylor Brandstetter <deadbeef@webrtc.org>
Commit-Queue: Steve Anton <steveanton@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#19733}
This commit is contained in:
parent 19f51434e8
commit 8ffb9c388f
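To make the transitional state described in the commit message concrete, here is a minimal, hypothetical sketch (not code from this CL) of a sender that stores stream ids as a vector while keeping the singular accessors as thin wrappers. The accessor names mirror the ones touched in the diff below; the class itself is illustrative only.

```cpp
// Illustrative sketch only; assumes the accessor names used in the diff
// (stream_id/stream_ids, set_stream_id/set_stream_ids) but is not the real
// RtpSender implementation.
#include <cassert>
#include <string>
#include <vector>

class SketchRtpSender {
 public:
  // Plural API that later CLs are expected to migrate callers onto.
  void set_stream_ids(const std::vector<std::string>& stream_ids) {
    stream_ids_ = stream_ids;
  }
  std::vector<std::string> stream_ids() const { return stream_ids_; }

  // Singular API kept only until downstream consumers are updated.
  void set_stream_id(const std::string& stream_id) {
    stream_ids_ = {stream_id};
  }
  std::string stream_id() const {
    // Stands in for the RTC_CHECKs in the real code: exactly one stream for now.
    assert(stream_ids_.size() == 1u);
    return stream_ids_[0];
  }

 private:
  std::vector<std::string> stream_ids_;  // currently must hold exactly one id
};
```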
@@ -300,6 +300,7 @@ typedef std::vector<rtc::scoped_refptr<VideoTrackInterface> >
 class MediaStreamInterface : public rtc::RefCountInterface,
                              public NotifierInterface {
  public:
+  // TODO(steveanton): This could be renamed to id() to match the spec.
   virtual std::string label() const = 0;

   virtual AudioTrackVector GetAudioTracks() = 0;
@@ -498,7 +498,9 @@ static bool AddStreamParams(
       }
     }
     stream_param.cname = rtcp_cname;
-    stream_param.sync_label = sender.stream_id;
+    // TODO(steveanton): Support any number of stream ids.
+    RTC_CHECK(sender.stream_ids.size() == 1U);
+    stream_param.sync_label = sender.stream_ids[0];
     content_description->AddStream(stream_param);

     // Store the new StreamParams in current_streams.
@@ -508,7 +510,9 @@ static bool AddStreamParams(
       // Use existing generated SSRCs/groups, but update the sync_label if
       // necessary. This may be needed if a MediaStreamTrack was moved from one
       // MediaStream to another.
-      param->sync_label = sender.stream_id;
+      // TODO(steveanton): Support any number of stream ids.
+      RTC_CHECK(sender.stream_ids.size() == 1U);
+      param->sync_label = sender.stream_ids[0];
       content_description->AddStream(*param);
     }
   }
@@ -1250,29 +1254,36 @@ std::string MediaContentDirectionToString(MediaContentDirection direction) {
   return dir_str;
 }

-void MediaDescriptionOptions::AddAudioSender(const std::string& track_id,
-                                             const std::string& stream_id) {
+void MediaDescriptionOptions::AddAudioSender(
+    const std::string& track_id,
+    const std::vector<std::string>& stream_ids) {
   RTC_DCHECK(type == MEDIA_TYPE_AUDIO);
-  AddSenderInternal(track_id, stream_id, 1);
+  AddSenderInternal(track_id, stream_ids, 1);
 }

-void MediaDescriptionOptions::AddVideoSender(const std::string& track_id,
-                                             const std::string& stream_id,
-                                             int num_sim_layers) {
+void MediaDescriptionOptions::AddVideoSender(
+    const std::string& track_id,
+    const std::vector<std::string>& stream_ids,
+    int num_sim_layers) {
   RTC_DCHECK(type == MEDIA_TYPE_VIDEO);
-  AddSenderInternal(track_id, stream_id, num_sim_layers);
+  AddSenderInternal(track_id, stream_ids, num_sim_layers);
 }

 void MediaDescriptionOptions::AddRtpDataChannel(const std::string& track_id,
                                                 const std::string& stream_id) {
   RTC_DCHECK(type == MEDIA_TYPE_DATA);
-  AddSenderInternal(track_id, stream_id, 1);
+  // TODO(steveanton): Is it the case that RtpDataChannel will never have more
+  // than one stream?
+  AddSenderInternal(track_id, {stream_id}, 1);
 }

-void MediaDescriptionOptions::AddSenderInternal(const std::string& track_id,
-                                                const std::string& stream_id,
-                                                int num_sim_layers) {
-  sender_options.push_back(SenderOptions{track_id, stream_id, num_sim_layers});
+void MediaDescriptionOptions::AddSenderInternal(
+    const std::string& track_id,
+    const std::vector<std::string>& stream_ids,
+    int num_sim_layers) {
+  // TODO(steveanton): Support any number of stream ids.
+  RTC_CHECK(stream_ids.size() == 1U);
+  sender_options.push_back(SenderOptions{track_id, stream_ids, num_sim_layers});
 }

 bool MediaSessionOptions::HasMediaDescription(MediaType type) const {
@@ -105,7 +105,9 @@ NegotiateRtpTransceiverDirection(RtpTransceiverDirection offer,
 // Options for an RtpSender contained with an media description/"m=" section.
 struct SenderOptions {
   std::string track_id;
-  std::string stream_id;
+  // TODO(steveanton): As part of work towards Unified Plan, this has been
+  // changed to be a vector. But for now this can only have exactly one.
+  std::vector<std::string> stream_ids;
   int num_sim_layers;
 };

@@ -120,9 +122,9 @@ struct MediaDescriptionOptions {
   // TODO(deadbeef): When we don't support Plan B, there will only be one
   // sender per media description and this can be simplified.
   void AddAudioSender(const std::string& track_id,
-                      const std::string& stream_id);
+                      const std::vector<std::string>& stream_ids);
   void AddVideoSender(const std::string& track_id,
-                      const std::string& stream_id,
+                      const std::vector<std::string>& stream_ids,
                       int num_sim_layers);

   // Internally just uses sender_options.
@@ -141,7 +143,7 @@ struct MediaDescriptionOptions {
  private:
   // Doesn't DCHECK on |type|.
   void AddSenderInternal(const std::string& track_id,
-                         const std::string& stream_id,
+                         const std::vector<std::string>& stream_ids,
                          int num_sim_layers);
 };

@@ -284,22 +284,24 @@ static void AddDataSection(cricket::DataChannelType dct,
   AddMediaSection(MEDIA_TYPE_DATA, "data", direction, kActive, opts);
 }

-static void AttachSenderToMediaSection(const std::string& mid,
-                                       MediaType type,
-                                       const std::string& track_id,
-                                       const std::string& stream_id,
-                                       int num_sim_layer,
-                                       MediaSessionOptions* session_options) {
+static void AttachSenderToMediaSection(
+    const std::string& mid,
+    MediaType type,
+    const std::string& track_id,
+    const std::vector<std::string>& stream_ids,
+    int num_sim_layer,
+    MediaSessionOptions* session_options) {
   auto it = FindFirstMediaDescriptionByMid(mid, session_options);
   switch (type) {
     case MEDIA_TYPE_AUDIO:
-      it->AddAudioSender(track_id, stream_id);
+      it->AddAudioSender(track_id, stream_ids);
       break;
     case MEDIA_TYPE_VIDEO:
-      it->AddVideoSender(track_id, stream_id, num_sim_layer);
+      it->AddVideoSender(track_id, stream_ids, num_sim_layer);
       break;
     case MEDIA_TYPE_DATA:
-      it->AddRtpDataChannel(track_id, stream_id);
+      RTC_CHECK(stream_ids.size() == 1U);
+      it->AddRtpDataChannel(track_id, stream_ids[0]);
       break;
     default:
       RTC_NOTREACHED();
@@ -882,9 +884,9 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSendOnlyOffer) {
   MediaSessionOptions opts;
   AddAudioVideoSections(cricket::MD_SENDONLY, &opts);
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);
   AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);

   std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
   ASSERT_TRUE(offer.get() != NULL);
@@ -1652,17 +1654,17 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) {
   MediaSessionOptions opts;
   AddAudioVideoSections(cricket::MD_SENDRECV, &opts);
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);
   AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);
   AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack2,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);

   AddDataSection(cricket::DCT_RTP, cricket::MD_SENDRECV, &opts);
   AttachSenderToMediaSection("data", MEDIA_TYPE_DATA, kDataTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);
   AttachSenderToMediaSection("data", MEDIA_TYPE_DATA, kDataTrack2,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);

   f1_.set_secure(SEC_ENABLED);
   std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
@@ -1730,13 +1732,13 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) {
   // Update the offer. Add a new video track that is not synched to the
   // other tracks and replace audio track 2 with audio track 3.
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, kVideoTrack2,
-                             kMediaStream2, 1, &opts);
+                             {kMediaStream2}, 1, &opts);
   DetachSenderFromMediaSection("audio", kAudioTrack2, &opts);
   AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack3,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);
   DetachSenderFromMediaSection("data", kDataTrack2, &opts);
   AttachSenderToMediaSection("data", MEDIA_TYPE_DATA, kDataTrack3,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);
   std::unique_ptr<SessionDescription> updated_offer(
       f1_.CreateOffer(opts, offer.get()));

@@ -1804,7 +1806,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSimulcastVideoOffer) {
                   &opts);
   const int num_sim_layers = 3;
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
-                             kMediaStream1, num_sim_layers, &opts);
+                             {kMediaStream1}, num_sim_layers, &opts);
   std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));

   ASSERT_TRUE(offer.get() != NULL);
@@ -1847,18 +1849,18 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) {
   AddMediaSection(MEDIA_TYPE_VIDEO, "video", cricket::MD_SENDRECV, kActive,
                   &answer_opts);
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
-                             kMediaStream1, 1, &answer_opts);
+                             {kMediaStream1}, 1, &answer_opts);
   AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
-                             kMediaStream1, 1, &answer_opts);
+                             {kMediaStream1}, 1, &answer_opts);
   AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack2,
-                             kMediaStream1, 1, &answer_opts);
+                             {kMediaStream1}, 1, &answer_opts);

   AddMediaSection(MEDIA_TYPE_DATA, "data", cricket::MD_SENDRECV, kActive,
                   &answer_opts);
   AttachSenderToMediaSection("data", MEDIA_TYPE_DATA, kDataTrack1,
-                             kMediaStream1, 1, &answer_opts);
+                             {kMediaStream1}, 1, &answer_opts);
   AttachSenderToMediaSection("data", MEDIA_TYPE_DATA, kDataTrack2,
-                             kMediaStream1, 1, &answer_opts);
+                             {kMediaStream1}, 1, &answer_opts);
   answer_opts.data_channel_type = cricket::DCT_RTP;

   std::unique_ptr<SessionDescription> answer(
@@ -1927,7 +1929,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) {
   // Update the answer. Add a new video track that is not synched to the
   // other tracks and remove 1 audio track.
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, kVideoTrack2,
-                             kMediaStream2, 1, &answer_opts);
+                             {kMediaStream2}, 1, &answer_opts);
   DetachSenderFromMediaSection("audio", kAudioTrack2, &answer_opts);
   DetachSenderFromMediaSection("data", kDataTrack2, &answer_opts);
   std::unique_ptr<SessionDescription> updated_answer(
@@ -2325,7 +2327,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, SimSsrcsGenerateMultipleRtxSsrcs) {
                   &opts);
   // Add simulcast streams.
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, "stream1",
-                             "stream1label", 3, &opts);
+                             {"stream1label"}, 3, &opts);

   // Use a single real codec, and then add RTX for it.
   std::vector<VideoCodec> f1_codecs;
@@ -2366,7 +2368,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, GenerateFlexfecSsrc) {
                   &opts);
   // Add single stream.
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, "stream1",
-                             "stream1label", 1, &opts);
+                             {"stream1label"}, 1, &opts);

   // Use a single real codec, and then add FlexFEC for it.
   std::vector<VideoCodec> f1_codecs;
@@ -2406,7 +2408,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, SimSsrcsGenerateNoFlexfecSsrcs) {
                   &opts);
   // Add simulcast streams.
   AttachSenderToMediaSection("video", MEDIA_TYPE_VIDEO, "stream1",
-                             "stream1label", 3, &opts);
+                             {"stream1label"}, 3, &opts);

   // Use a single real codec, and then add FlexFEC for it.
   std::vector<VideoCodec> f1_codecs;
@@ -3018,22 +3020,22 @@ TEST_F(MediaSessionDescriptionFactoryTest,
   AddMediaSection(MEDIA_TYPE_AUDIO, "audio_1", cricket::MD_SENDRECV, kActive,
                   &opts);
   AttachSenderToMediaSection("audio_1", MEDIA_TYPE_AUDIO, kAudioTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);

   AddMediaSection(MEDIA_TYPE_VIDEO, "video_1", cricket::MD_SENDRECV, kActive,
                   &opts);
   AttachSenderToMediaSection("video_1", MEDIA_TYPE_VIDEO, kVideoTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);

   AddMediaSection(MEDIA_TYPE_AUDIO, "audio_2", cricket::MD_SENDRECV, kActive,
                   &opts);
   AttachSenderToMediaSection("audio_2", MEDIA_TYPE_AUDIO, kAudioTrack2,
-                             kMediaStream2, 1, &opts);
+                             {kMediaStream2}, 1, &opts);

   AddMediaSection(MEDIA_TYPE_VIDEO, "video_2", cricket::MD_SENDRECV, kActive,
                   &opts);
   AttachSenderToMediaSection("video_2", MEDIA_TYPE_VIDEO, kVideoTrack2,
-                             kMediaStream2, 1, &opts);
+                             {kMediaStream2}, 1, &opts);
   std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, nullptr));
   ASSERT_TRUE(offer);

@@ -3077,22 +3079,22 @@ TEST_F(MediaSessionDescriptionFactoryTest,
   AddMediaSection(MEDIA_TYPE_AUDIO, "audio_1", cricket::MD_SENDRECV, kActive,
                   &opts);
   AttachSenderToMediaSection("audio_1", MEDIA_TYPE_AUDIO, kAudioTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);

   AddMediaSection(MEDIA_TYPE_VIDEO, "video_1", cricket::MD_SENDRECV, kActive,
                   &opts);
   AttachSenderToMediaSection("video_1", MEDIA_TYPE_VIDEO, kVideoTrack1,
-                             kMediaStream1, 1, &opts);
+                             {kMediaStream1}, 1, &opts);

   AddMediaSection(MEDIA_TYPE_AUDIO, "audio_2", cricket::MD_SENDRECV, kActive,
                   &opts);
   AttachSenderToMediaSection("audio_2", MEDIA_TYPE_AUDIO, kAudioTrack2,
-                             kMediaStream2, 1, &opts);
+                             {kMediaStream2}, 1, &opts);

   AddMediaSection(MEDIA_TYPE_VIDEO, "video_2", cricket::MD_SENDRECV, kActive,
                   &opts);
   AttachSenderToMediaSection("video_2", MEDIA_TYPE_VIDEO, kVideoTrack2,
-                             kMediaStream2, 1, &opts);
+                             {kMediaStream2}, 1, &opts);

   std::unique_ptr<SessionDescription> offer(f1_.CreateOffer(opts, nullptr));
   ASSERT_TRUE(offer);
@@ -3487,7 +3489,7 @@ void TestAudioCodecsOffer(MediaContentDirection direction) {

   if (RtpTransceiverDirection::FromMediaContentDirection(direction).send) {
     AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
-                               kMediaStream1, 1, &opts);
+                               {kMediaStream1}, 1, &opts);
   }

   std::unique_ptr<SessionDescription> offer(sf.CreateOffer(opts, NULL));
@@ -3587,7 +3589,7 @@ void TestAudioCodecsAnswer(MediaContentDirection offer_direction,
   if (RtpTransceiverDirection::FromMediaContentDirection(offer_direction)
           .send) {
     AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
-                               kMediaStream1, 1, &offer_opts);
+                               {kMediaStream1}, 1, &offer_opts);
   }

   std::unique_ptr<SessionDescription> offer(
@@ -3601,7 +3603,7 @@ void TestAudioCodecsAnswer(MediaContentDirection offer_direction,
   if (RtpTransceiverDirection::FromMediaContentDirection(answer_direction)
           .send) {
     AttachSenderToMediaSection("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
-                               kMediaStream1, 1, &answer_opts);
+                               {kMediaStream1}, 1, &answer_opts);
   }
   std::unique_ptr<SessionDescription> answer(
       answer_factory.CreateAnswer(offer.get(), answer_opts, NULL));
@@ -143,13 +143,13 @@ void AddRtpSenderOptions(
     if (sender->media_type() == cricket::MEDIA_TYPE_AUDIO) {
       if (audio_media_description_options) {
        audio_media_description_options->AddAudioSender(
-            sender->id(), sender->internal()->stream_id());
+            sender->id(), sender->internal()->stream_ids());
       }
     } else {
       RTC_DCHECK(sender->media_type() == cricket::MEDIA_TYPE_VIDEO);
       if (video_media_description_options) {
        video_media_description_options->AddVideoSender(
-            sender->id(), sender->internal()->stream_id(), 1);
+            sender->id(), sender->internal()->stream_ids(), 1);
       }
     }
   }
@@ -1475,8 +1475,8 @@ void PeerConnection::AddAudioTrack(AudioTrackInterface* track,
   rtc::scoped_refptr<RtpSenderProxyWithInternal<RtpSenderInternal>> new_sender =
       RtpSenderProxyWithInternal<RtpSenderInternal>::Create(
           signaling_thread(),
-          new AudioRtpSender(track, stream->label(), session_->voice_channel(),
-                             stats_.get()));
+          new AudioRtpSender(track, {stream->label()},
+                             session_->voice_channel(), stats_.get()));
   senders_.push_back(new_sender);
   // If the sender has already been configured in SDP, we call SetSsrc,
   // which will connect the sender to the underlying transport. This can
@@ -1520,7 +1520,7 @@ void PeerConnection::AddVideoTrack(VideoTrackInterface* track,
   // Normal case; we've never seen this track before.
   rtc::scoped_refptr<RtpSenderProxyWithInternal<RtpSenderInternal>> new_sender =
       RtpSenderProxyWithInternal<RtpSenderInternal>::Create(
-          signaling_thread(), new VideoRtpSender(track, stream->label(),
+          signaling_thread(), new VideoRtpSender(track, {stream->label()},
                                                  session_->video_channel()));
   senders_.push_back(new_sender);
   const TrackInfo* track_info =
@@ -45,16 +45,19 @@ void LocalAudioSinkAdapter::SetSink(cricket::AudioSource::Sink* sink) {
 }

 AudioRtpSender::AudioRtpSender(AudioTrackInterface* track,
-                               const std::string& stream_id,
+                               const std::vector<std::string>& stream_ids,
                                cricket::VoiceChannel* channel,
                                StatsCollector* stats)
     : id_(track->id()),
-      stream_id_(stream_id),
+      stream_ids_(stream_ids),
       channel_(channel),
       stats_(stats),
       track_(track),
       cached_track_enabled_(track->enabled()),
       sink_adapter_(new LocalAudioSinkAdapter()) {
+  // TODO(steveanton): Relax this constraint once more Unified Plan work is
+  // done.
+  RTC_CHECK(stream_ids_.size() == 1U);
   track_->RegisterObserver(this);
   track_->AddSink(sink_adapter_.get());
   CreateDtmfSender();
@@ -64,7 +67,8 @@ AudioRtpSender::AudioRtpSender(AudioTrackInterface* track,
                                cricket::VoiceChannel* channel,
                                StatsCollector* stats)
     : id_(track->id()),
-      stream_id_(rtc::CreateRandomUuid()),
+      // TODO(steveanton): With Unified Plan this should be empty.
+      stream_ids_({rtc::CreateRandomUuid()}),
       channel_(channel),
       stats_(stats),
       track_(track),
@@ -78,7 +82,8 @@ AudioRtpSender::AudioRtpSender(AudioTrackInterface* track,
 AudioRtpSender::AudioRtpSender(cricket::VoiceChannel* channel,
                                StatsCollector* stats)
     : id_(rtc::CreateRandomUuid()),
-      stream_id_(rtc::CreateRandomUuid()),
+      // TODO(steveanton): With Unified Plan this should be empty.
+      stream_ids_({rtc::CreateRandomUuid()}),
       channel_(channel),
       stats_(stats),
       sink_adapter_(new LocalAudioSinkAdapter()) {
@@ -297,21 +302,25 @@ void AudioRtpSender::CreateDtmfSender() {
 }

 VideoRtpSender::VideoRtpSender(VideoTrackInterface* track,
-                               const std::string& stream_id,
+                               const std::vector<std::string>& stream_ids,
                                cricket::VideoChannel* channel)
     : id_(track->id()),
-      stream_id_(stream_id),
+      stream_ids_({stream_ids}),
       channel_(channel),
       track_(track),
       cached_track_enabled_(track->enabled()),
       cached_track_content_hint_(track->content_hint()) {
+  // TODO(steveanton): Relax this constraint once more Unified Plan work is
+  // done.
+  RTC_CHECK(stream_ids_.size() == 1U);
   track_->RegisterObserver(this);
 }

 VideoRtpSender::VideoRtpSender(VideoTrackInterface* track,
                                cricket::VideoChannel* channel)
     : id_(track->id()),
-      stream_id_(rtc::CreateRandomUuid()),
+      // TODO(steveanton): With Unified Plan this should be empty.
+      stream_ids_({rtc::CreateRandomUuid()}),
       channel_(channel),
       track_(track),
       cached_track_enabled_(track->enabled()),
@@ -321,7 +330,8 @@ VideoRtpSender::VideoRtpSender(VideoTrackInterface* track,

 VideoRtpSender::VideoRtpSender(cricket::VideoChannel* channel)
     : id_(rtc::CreateRandomUuid()),
-      stream_id_(rtc::CreateRandomUuid()),
+      // TODO(steveanton): With Unified Plan this should be empty.
+      stream_ids_({rtc::CreateRandomUuid()}),
       channel_(channel) {}

 VideoRtpSender::~VideoRtpSender() {
@@ -40,9 +40,13 @@ class RtpSenderInternal : public RtpSenderInterface {
   // description).
   virtual void SetSsrc(uint32_t ssrc) = 0;

-  // TODO(deadbeef): Support one sender having multiple stream ids.
+  // TODO(steveanton): With Unified Plan, a track/RTCRTPSender can be part of
+  // multiple streams (or no stream at all). Replace these singular methods with
+  // their corresponding plural methods.
+  // Until these are removed, RtpSenders must have exactly one stream.
   virtual void set_stream_id(const std::string& stream_id) = 0;
   virtual std::string stream_id() const = 0;
+  virtual void set_stream_ids(const std::vector<std::string>& stream_ids) = 0;

   virtual void Stop() = 0;
 };
@@ -79,7 +83,7 @@ class AudioRtpSender : public DtmfProviderInterface,
   // at the appropriate times.
   // |channel| can be null if one does not exist yet.
   AudioRtpSender(AudioTrackInterface* track,
-                 const std::string& stream_id,
+                 const std::vector<std::string>& stream_id,
                  cricket::VoiceChannel* channel,
                  StatsCollector* stats);

@@ -117,10 +121,7 @@ class AudioRtpSender : public DtmfProviderInterface,

   std::string id() const override { return id_; }

-  std::vector<std::string> stream_ids() const override {
-    std::vector<std::string> ret = {stream_id_};
-    return ret;
-  }
+  std::vector<std::string> stream_ids() const override { return stream_ids_; }

   RtpParameters GetParameters() const override;
   bool SetParameters(const RtpParameters& parameters) override;
@@ -131,9 +132,12 @@ class AudioRtpSender : public DtmfProviderInterface,
   void SetSsrc(uint32_t ssrc) override;

   void set_stream_id(const std::string& stream_id) override {
-    stream_id_ = stream_id;
+    stream_ids_ = {stream_id};
   }
-  std::string stream_id() const override { return stream_id_; }
+  std::string stream_id() const override { return stream_ids_[0]; }
+  void set_stream_ids(const std::vector<std::string>& stream_ids) override {
+    stream_ids_ = stream_ids;
+  }

   void Stop() override;

@@ -156,7 +160,9 @@ class AudioRtpSender : public DtmfProviderInterface,
   sigslot::signal0<> SignalDestroyed;

   std::string id_;
-  std::string stream_id_;
+  // TODO(steveanton): Until more Unified Plan work is done, this can only have
+  // exactly one element.
+  std::vector<std::string> stream_ids_;
   cricket::VoiceChannel* channel_ = nullptr;
   StatsCollector* stats_;
   rtc::scoped_refptr<AudioTrackInterface> track_;
@@ -175,7 +181,7 @@ class VideoRtpSender : public ObserverInterface,
  public:
   // |channel| can be null if one does not exist yet.
   VideoRtpSender(VideoTrackInterface* track,
-                 const std::string& stream_id,
+                 const std::vector<std::string>& stream_id,
                  cricket::VideoChannel* channel);

   // Randomly generates stream_id.
@@ -205,10 +211,7 @@ class VideoRtpSender : public ObserverInterface,

   std::string id() const override { return id_; }

-  std::vector<std::string> stream_ids() const override {
-    std::vector<std::string> ret = {stream_id_};
-    return ret;
-  }
+  std::vector<std::string> stream_ids() const override { return stream_ids_; }

   RtpParameters GetParameters() const override;
   bool SetParameters(const RtpParameters& parameters) override;
@@ -219,9 +222,12 @@ class VideoRtpSender : public ObserverInterface,
   void SetSsrc(uint32_t ssrc) override;

   void set_stream_id(const std::string& stream_id) override {
-    stream_id_ = stream_id;
+    stream_ids_ = {stream_id};
   }
-  std::string stream_id() const override { return stream_id_; }
+  std::string stream_id() const override { return stream_ids_[0]; }
+  void set_stream_ids(const std::vector<std::string>& stream_ids) override {
+    stream_ids_ = stream_ids;
+  }

   void Stop() override;

@@ -238,7 +244,9 @@ class VideoRtpSender : public ObserverInterface,
   void ClearVideoSend();

   std::string id_;
-  std::string stream_id_;
+  // TODO(steveanton): Until more Unified Plan work is done, this can only have
+  // exactly one element.
+  std::vector<std::string> stream_ids_;
   cricket::VideoChannel* channel_ = nullptr;
   rtc::scoped_refptr<VideoTrackInterface> track_;
   uint32_t ssrc_ = 0;
@@ -136,7 +136,7 @@ class RtpSenderReceiverTest : public testing::Test,
     EXPECT_TRUE(local_stream_->AddTrack(audio_track_));
     audio_rtp_sender_ =
         new AudioRtpSender(local_stream_->GetAudioTracks()[0],
-                           local_stream_->label(), voice_channel_, nullptr);
+                           {local_stream_->label()}, voice_channel_, nullptr);
     audio_rtp_sender_->SetSsrc(kAudioSsrc);
     audio_rtp_sender_->GetOnDestroyedSignal()->connect(
         this, &RtpSenderReceiverTest::OnAudioSenderDestroyed);
@@ -151,7 +151,7 @@ class RtpSenderReceiverTest : public testing::Test,
     AddVideoTrack(is_screencast);
     video_rtp_sender_ =
         new VideoRtpSender(local_stream_->GetVideoTracks()[0],
-                           local_stream_->label(), video_channel_);
+                           {local_stream_->label()}, video_channel_);
     video_rtp_sender_->SetSsrc(kVideoSsrc);
     VerifyVideoChannelInput();
   }
@@ -715,7 +715,7 @@ TEST_F(RtpSenderReceiverTest,
   video_track_->set_content_hint(VideoTrackInterface::ContentHint::kDetailed);
   video_rtp_sender_ =
       new VideoRtpSender(local_stream_->GetVideoTracks()[0],
-                         local_stream_->label(), video_channel_);
+                         {local_stream_->label()}, video_channel_);
   video_track_->set_enabled(true);

   // Sender is not ready to send (no SSRC) so no option should have been set.
@@ -596,10 +596,10 @@ class WebRtcSessionTest
         cricket::MEDIA_TYPE_AUDIO, media_description_options.mid,
         cricket::RtpTransceiverDirection(send_audio, recv_audio), stopped);
     if (send_stream_1_ && send_audio) {
-      media_desc_options.AddAudioSender(kAudioTrack1, kStream1);
+      media_desc_options.AddAudioSender(kAudioTrack1, {kStream1});
     }
     if (send_stream_2_ && send_audio) {
-      media_desc_options.AddAudioSender(kAudioTrack2, kStream2);
+      media_desc_options.AddAudioSender(kAudioTrack2, {kStream2});
     }
     session_options->media_description_options.push_back(
         media_desc_options);
@@ -609,11 +609,11 @@ class WebRtcSessionTest
         cricket::MEDIA_TYPE_VIDEO, media_description_options.mid,
         cricket::RtpTransceiverDirection(send_video, recv_video), stopped);
     if (send_stream_1_ && send_video) {
-      media_desc_options.AddVideoSender(kVideoTrack1, kStream1,
+      media_desc_options.AddVideoSender(kVideoTrack1, {kStream1},
                                         num_sim_layer);
     }
     if (send_stream_2_ && send_video) {
-      media_desc_options.AddVideoSender(kVideoTrack2, kStream2,
+      media_desc_options.AddVideoSender(kVideoTrack2, {kStream2},
                                         num_sim_layer);
     }
     session_options->media_description_options.push_back(
@@ -649,10 +649,10 @@ class WebRtcSessionTest
         cricket::RtpTransceiverDirection(send_audio, recv_audio),
         kActive);
     if (send_stream_1_ && send_audio) {
-      media_desc_options.AddAudioSender(kAudioTrack1, kStream1);
+      media_desc_options.AddAudioSender(kAudioTrack1, {kStream1});
     }
     if (send_stream_2_ && send_audio) {
-      media_desc_options.AddAudioSender(kAudioTrack2, kStream2);
+      media_desc_options.AddAudioSender(kAudioTrack2, {kStream2});
     }
     session_options->media_description_options.push_back(media_desc_options);
     offered_media_sections_.push_back(media_desc_options);
@@ -666,11 +666,11 @@ class WebRtcSessionTest
         kActive);
     int num_sim_layer = 1;
     if (send_stream_1_ && send_video) {
-      media_desc_options.AddVideoSender(kVideoTrack1, kStream1,
+      media_desc_options.AddVideoSender(kVideoTrack1, {kStream1},
                                         num_sim_layer);
    }
    if (send_stream_2_ && send_video) {
-      media_desc_options.AddVideoSender(kVideoTrack2, kStream2,
+      media_desc_options.AddVideoSender(kVideoTrack2, {kStream2},
                                         num_sim_layer);
     }
     session_options->media_description_options.push_back(media_desc_options);
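A side note on the call-site churn in the test hunks above: passing `{kMediaStream1}` or `{stream->label()}` where a `const std::vector<std::string>&` is expected works because the braced list invokes std::vector's initializer_list constructor. A small standalone illustration, using a hypothetical helper name rather than anything from this CL:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for the updated sender-registration signatures,
// which now take a vector of stream ids instead of a single id.
void AddSender(const std::string& track_id,
               const std::vector<std::string>& stream_ids) {
  std::cout << track_id << " -> " << stream_ids.size() << " stream(s)\n";
}

int main() {
  // A single id wrapped in braces builds a one-element vector, matching the
  // CL's "exactly one stream for now" constraint.
  AddSender("audio_track_1", {"stream_1"});
  return 0;
}
```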