Replaced temporal_layer_thresholds_bps[] field with num_temporal_layers.

temporal_layer_thresholds_bps served only one purpose: its size was used
to infer the number of temporal layers. I replaced it with
num_temporal_layers, which does what it says.

The practical reason for this change is the need to be able to
distinguish between cases where VP9 SVC temporal layering was or was not
set through a field trial. That was not possible with
temporal_layer_thresholds_bps[] because an empty vector means 1 temporal
layer.

Bug: webrtc:8518
Change-Id: I275ec3a8c74e8ba409eb049878199f132a20ec51
Reviewed-on: https://webrtc-review.googlesource.com/58084
Commit-Queue: Sergey Silkin <ssilkin@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22230}
This commit is contained in:
Sergey Silkin 2018-02-28 13:40:08 +01:00 committed by Commit Bot
parent 56da2f7868
commit d2ed0a4c9e
14 changed files with 83 additions and 109 deletions

View File

@ -38,17 +38,10 @@ std::string VideoStream::ToString() const {
ss << ", target_bitrate_bps:" << target_bitrate_bps;
ss << ", max_bitrate_bps:" << max_bitrate_bps;
ss << ", max_qp: " << max_qp;
ss << ", num_temporal_layers: " << num_temporal_layers.value_or(0);
ss << ", bitrate_priority: " << bitrate_priority.value_or(0);
ss << ", active: " << active;
ss << ", temporal_layer_thresholds_bps: [";
for (size_t i = 0; i < temporal_layer_thresholds_bps.size(); ++i) {
ss << temporal_layer_thresholds_bps[i];
if (i != temporal_layer_thresholds_bps.size() - 1)
ss << ", ";
}
ss << ']';
ss << '}';
return ss.str();
}

View File

@ -36,24 +36,14 @@ struct VideoStream {
int min_bitrate_bps;
int target_bitrate_bps;
int max_bitrate_bps;
rtc::Optional<double> bitrate_priority;
int max_qp;
rtc::Optional<size_t> num_temporal_layers;
rtc::Optional<double> bitrate_priority;
// TODO(bugs.webrtc.org/8653): Support active per-simulcast layer.
bool active;
// Bitrate thresholds for enabling additional temporal layers. Since these are
// thresholds in between layers, we have one additional layer. One threshold
// gives two temporal layers, one below the threshold and one above, two give
// three, and so on.
// The VideoEncoder may redistribute bitrates over the temporal layers so a
// bitrate threshold of 100k and an estimate of 105k does not imply that we
// get 100k in one temporal layer and 5k in the other, just that the bitrate
// in the first temporal layer should not exceed 100k.
// TODO(kthelgason): Apart from a special case for two-layer screencast these
// thresholds are not propagated to the VideoEncoder. To be implemented.
std::vector<int> temporal_layer_thresholds_bps;
};
class VideoEncoderConfig {

View File

@ -227,17 +227,17 @@ void FakeVideoSendStream::ReconfigureVideoEncoder(
video_streams_ = config.video_stream_factory->CreateEncoderStreams(
width, height, config);
if (config.encoder_specific_settings != NULL) {
const unsigned char num_temporal_layers = static_cast<unsigned char>(
video_streams_.back().num_temporal_layers.value_or(1));
if (config_.encoder_settings.payload_name == "VP8") {
config.encoder_specific_settings->FillVideoCodecVp8(&vpx_settings_.vp8);
if (!video_streams_.empty()) {
vpx_settings_.vp8.numberOfTemporalLayers = static_cast<unsigned char>(
video_streams_.back().temporal_layer_thresholds_bps.size() + 1);
vpx_settings_.vp8.numberOfTemporalLayers = num_temporal_layers;
}
} else if (config_.encoder_settings.payload_name == "VP9") {
config.encoder_specific_settings->FillVideoCodecVp9(&vpx_settings_.vp9);
if (!video_streams_.empty()) {
vpx_settings_.vp9.numberOfTemporalLayers = static_cast<unsigned char>(
video_streams_.back().temporal_layer_thresholds_bps.size() + 1);
vpx_settings_.vp9.numberOfTemporalLayers = num_temporal_layers;
}
} else {
ADD_FAILURE() << "Unsupported encoder payload: "

View File

@ -196,8 +196,7 @@ std::vector<webrtc::VideoStream> GetNormalSimulcastLayers(
layers[s].height = height;
// TODO(pbos): Fill actual temporal-layer bitrate thresholds.
layers[s].max_qp = max_qp;
layers[s].temporal_layer_thresholds_bps.resize(
kDefaultConferenceNumberOfTemporalLayers[s] - 1);
layers[s].num_temporal_layers = kDefaultConferenceNumberOfTemporalLayers[s];
layers[s].max_bitrate_bps = FindSimulcastMaxBitrateBps(width, height);
layers[s].target_bitrate_bps = FindSimulcastTargetBitrateBps(width, height);
layers[s].min_bitrate_bps = FindSimulcastMinBitrateBps(width, height);
@ -247,9 +246,7 @@ std::vector<webrtc::VideoStream> GetScreenshareLayers(
layers[0].min_bitrate_bps = kMinVideoBitrateBps;
layers[0].target_bitrate_bps = config.tl0_bitrate_kbps * 1000;
layers[0].max_bitrate_bps = config.tl1_bitrate_kbps * 1000;
layers[0].temporal_layer_thresholds_bps.clear();
layers[0].temporal_layer_thresholds_bps.push_back(config.tl0_bitrate_kbps *
1000);
layers[0].num_temporal_layers = 2;
// With simulcast enabled, add another spatial layer. This one will have a
// more normal layout, with the regular 3 temporal layer pattern and no fps
@ -271,7 +268,7 @@ std::vector<webrtc::VideoStream> GetScreenshareLayers(
layers[1].max_qp = max_qp;
layers[1].max_framerate = max_framerate;
// Three temporal layers means two thresholds.
layers[1].temporal_layer_thresholds_bps.resize(2);
layers[1].num_temporal_layers = 2;
layers[1].min_bitrate_bps = layers[0].target_bitrate_bps * 2;
layers[1].target_bitrate_bps = max_bitrate_bps;
layers[1].max_bitrate_bps = max_bitrate_bps;

View File

@ -462,9 +462,12 @@ WebRtcVideoChannel::WebRtcVideoSendStream::ConfigureVideoEncoderSettings(
// TODO(asapersson): Set to 2 for now since there is a DCHECK in
// VideoSendStream::ReconfigureVideoEncoder.
vp9_settings.numberOfSpatialLayers = 2;
vp9_settings.numberOfTemporalLayers = 1;
} else {
vp9_settings.numberOfSpatialLayers = GetDefaultVp9SpatialLayers();
vp9_settings.numberOfTemporalLayers = GetDefaultVp9TemporalLayers();
}
// VP9 denoising is disabled by default.
vp9_settings.denoisingOn = codec_default_denoising ? true : denoising;
vp9_settings.frameDroppingOn = frame_dropping;
@ -2675,9 +2678,13 @@ std::vector<webrtc::VideoStream> EncoderStreamFactory::CreateEncoderStreams(
layer.max_qp = max_qp_;
layer.bitrate_priority = encoder_config.bitrate_priority;
if (CodecNamesEq(codec_name_, kVp9CodecName) && !is_screenshare_) {
layer.temporal_layer_thresholds_bps.resize(GetDefaultVp9TemporalLayers() -
1);
if (CodecNamesEq(codec_name_, kVp9CodecName)) {
RTC_DCHECK(encoder_config.encoder_specific_settings);
// Use VP9 SVC layering from codec settings which might be initialized
// though field trial in ConfigureVideoEncoderSettings.
webrtc::VideoCodecVP9 vp9_settings;
encoder_config.encoder_specific_settings->FillVideoCodecVp9(&vp9_settings);
layer.num_temporal_layers = vp9_settings.numberOfTemporalLayers;
}
layers.push_back(layer);

View File

@ -1968,7 +1968,7 @@ TEST_F(WebRtcVideoChannelTest, UsesCorrectSettingsForScreencast) {
streams = send_stream->GetVideoStreams();
EXPECT_EQ(capture_format_hd.width, streams.front().width);
EXPECT_EQ(capture_format_hd.height, streams.front().height);
EXPECT_TRUE(streams[0].temporal_layer_thresholds_bps.empty());
EXPECT_FALSE(streams[0].num_temporal_layers.has_value());
EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, true, nullptr, nullptr));
}
@ -2004,9 +2004,9 @@ TEST_F(WebRtcVideoChannelTest,
std::vector<webrtc::VideoStream> streams = send_stream->GetVideoStreams();
ASSERT_EQ(1u, streams.size());
ASSERT_EQ(1u, streams[0].temporal_layer_thresholds_bps.size());
ASSERT_EQ(2u, streams[0].num_temporal_layers);
EXPECT_EQ(kConferenceScreencastTemporalBitrateBps,
streams[0].temporal_layer_thresholds_bps[0]);
streams[0].target_bitrate_bps);
EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, true, nullptr, nullptr));
}
@ -5022,10 +5022,13 @@ class WebRtcVideoChannelSimulcastTest : public testing::Test {
EXPECT_GT(video_streams[i].max_qp, 0);
EXPECT_EQ(expected_streams[i].max_qp, video_streams[i].max_qp);
EXPECT_EQ(!conference_mode,
expected_streams[i].temporal_layer_thresholds_bps.empty());
EXPECT_EQ(expected_streams[i].temporal_layer_thresholds_bps,
video_streams[i].temporal_layer_thresholds_bps);
EXPECT_EQ(conference_mode,
expected_streams[i].num_temporal_layers.has_value());
if (conference_mode) {
EXPECT_EQ(expected_streams[i].num_temporal_layers,
video_streams[i].num_temporal_layers);
}
if (i == num_streams - 1) {
total_max_bitrate_bps += video_streams[i].max_bitrate_bps;

View File

@ -149,8 +149,8 @@ void TestConfig::ConfigureSimulcast() {
SimulcastStream* ss = &codec_settings.simulcastStream[i];
ss->width = static_cast<uint16_t>(stream[i].width);
ss->height = static_cast<uint16_t>(stream[i].height);
ss->numberOfTemporalLayers = static_cast<unsigned char>(
stream[i].temporal_layer_thresholds_bps.size() + 1);
ss->numberOfTemporalLayers =
static_cast<unsigned char>(*stream[i].num_temporal_layers);
ss->maxBitrate = stream[i].max_bitrate_bps / 1000;
ss->targetBitrate = stream[i].target_bitrate_bps / 1000;
ss->minBitrate = stream[i].min_bitrate_bps / 1000;

View File

@ -23,15 +23,6 @@
#include "system_wrappers/include/clock.h"
namespace webrtc {
namespace {
bool TemporalLayersConfigured(const std::vector<VideoStream>& streams) {
for (const VideoStream& stream : streams) {
if (stream.temporal_layer_thresholds_bps.size() > 0)
return true;
}
return false;
}
} // namespace
bool VideoCodecInitializer::SetupCodec(
const VideoEncoderConfig& config,
@ -129,10 +120,8 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
break;
case VideoEncoderConfig::ContentType::kScreen:
video_codec.mode = kScreensharing;
if (!streams.empty() &&
streams[0].temporal_layer_thresholds_bps.size() == 1) {
video_codec.targetBitrate =
streams[0].temporal_layer_thresholds_bps[0] / 1000;
if (!streams.empty() && streams[0].num_temporal_layers == 2) {
video_codec.targetBitrate = streams[0].target_bitrate_bps / 1000;
}
break;
}
@ -142,12 +131,16 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
switch (video_codec.codecType) {
case kVideoCodecVP8: {
if (!config.encoder_specific_settings)
if (!config.encoder_specific_settings) {
*video_codec.VP8() = VideoEncoder::GetDefaultVp8Settings();
video_codec.VP8()->numberOfTemporalLayers = static_cast<unsigned char>(
streams.back().temporal_layer_thresholds_bps.size() + 1);
}
if (nack_enabled && !TemporalLayersConfigured(streams)) {
video_codec.VP8()->numberOfTemporalLayers = static_cast<unsigned char>(
streams.back().num_temporal_layers.value_or(
video_codec.VP8()->numberOfTemporalLayers));
RTC_DCHECK_GE(video_codec.VP8()->numberOfTemporalLayers, 1);
if (nack_enabled && video_codec.VP8()->numberOfTemporalLayers == 1) {
RTC_LOG(LS_INFO)
<< "No temporal layers and nack enabled -> resilience off";
video_codec.VP8()->resilience = kResilienceOff;
@ -155,8 +148,10 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
break;
}
case kVideoCodecVP9: {
if (!config.encoder_specific_settings)
if (!config.encoder_specific_settings) {
*video_codec.VP9() = VideoEncoder::GetDefaultVp9Settings();
}
if (video_codec.mode == kScreensharing &&
config.encoder_specific_settings) {
video_codec.VP9()->flexibleMode = true;
@ -164,10 +159,13 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
RTC_DCHECK_EQ(1, video_codec.VP9()->numberOfTemporalLayers);
RTC_DCHECK_EQ(2, video_codec.VP9()->numberOfSpatialLayers);
}
video_codec.VP9()->numberOfTemporalLayers = static_cast<unsigned char>(
streams.back().temporal_layer_thresholds_bps.size() + 1);
if (nack_enabled && !TemporalLayersConfigured(streams) &&
video_codec.VP9()->numberOfTemporalLayers = static_cast<unsigned char>(
streams.back().num_temporal_layers.value_or(
video_codec.VP9()->numberOfTemporalLayers));
RTC_DCHECK_GE(video_codec.VP9()->numberOfTemporalLayers, 1);
if (nack_enabled && video_codec.VP9()->numberOfTemporalLayers == 1 &&
video_codec.VP9()->numberOfSpatialLayers == 1) {
RTC_LOG(LS_INFO) << "No temporal or spatial layers and nack enabled -> "
<< "resilience off";
@ -239,8 +237,8 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
sim_stream->targetBitrate = streams[i].target_bitrate_bps / 1000;
sim_stream->maxBitrate = streams[i].max_bitrate_bps / 1000;
sim_stream->qpMax = streams[i].max_qp;
sim_stream->numberOfTemporalLayers = static_cast<unsigned char>(
streams[i].temporal_layer_thresholds_bps.size() + 1);
sim_stream->numberOfTemporalLayers =
static_cast<unsigned char>(streams[i].num_temporal_layers.value_or(1));
sim_stream->active = streams[i].active;
video_codec.width =

View File

@ -103,7 +103,7 @@ class VideoCodecInitializerTest : public ::testing::Test {
for (int i = 0; i < codec_out_.numberOfSimulcastStreams; ++i) {
temporal_layers_.emplace_back(codec_out_.VP8()->tl_factory->Create(
i, streams_[i].temporal_layer_thresholds_bps.size() + 1, 0));
i, *streams_[i].num_temporal_layers, 0));
}
}
return true;
@ -118,6 +118,7 @@ class VideoCodecInitializerTest : public ::testing::Test {
stream.target_bitrate_bps = kDefaultTargetBitrateBps;
stream.max_bitrate_bps = kDefaultMaxBitrateBps;
stream.max_qp = kDefaultMaxQp;
stream.num_temporal_layers = 1;
stream.active = true;
return stream;
}
@ -128,7 +129,7 @@ class VideoCodecInitializerTest : public ::testing::Test {
stream.target_bitrate_bps = kScreenshareTl0BitrateBps;
stream.max_bitrate_bps = 1000000;
stream.max_framerate = kScreenshareDefaultFramerate;
stream.temporal_layer_thresholds_bps.push_back(kScreenshareTl0BitrateBps);
stream.num_temporal_layers = 2;
stream.active = true;
return stream;
}
@ -236,8 +237,7 @@ TEST_F(VideoCodecInitializerTest, HighFpsSimulcastVp8Screenshare) {
SetUpFor(VideoCodecType::kVideoCodecVP8, 2, 3, true);
streams_.push_back(DefaultScreenshareStream());
VideoStream video_stream = DefaultStream();
video_stream.temporal_layer_thresholds_bps.push_back(kHighScreenshareTl0Bps);
video_stream.temporal_layer_thresholds_bps.push_back(kHighScreenshareTl1Bps);
video_stream.num_temporal_layers = 3;
streams_.push_back(video_stream);
EXPECT_TRUE(InitializeCodec());

View File

@ -285,8 +285,7 @@ class VideoStreamFactory
streams[i].min_bitrate_bps = encoder_stream_bps;
streams[i].target_bitrate_bps = encoder_stream_bps;
streams[i].max_bitrate_bps = encoder_stream_bps;
streams[i].temporal_layer_thresholds_bps.resize(num_of_temporal_layers_ -
1);
streams[i].num_temporal_layers = num_of_temporal_layers_;
// test::CreateVideoStreams does not return frame sizes for the lower
// streams that are accepted by VP8Impl::InitEncode.
// TODO(brandtr): Fix the problem in test::CreateVideoStreams, rather

View File

@ -1232,21 +1232,8 @@ VideoStream VideoQualityTest::DefaultVideoStream(const Params& params,
stream.target_bitrate_bps = params.video[video_idx].target_bitrate_bps;
stream.max_bitrate_bps = params.video[video_idx].max_bitrate_bps;
stream.max_qp = kDefaultMaxQp;
stream.num_temporal_layers = params.video[video_idx].num_temporal_layers;
stream.active = true;
// TODO(sprang): Can we make this less of a hack?
if (params.video[video_idx].num_temporal_layers == 2) {
stream.temporal_layer_thresholds_bps.push_back(stream.target_bitrate_bps);
} else if (params.video[video_idx].num_temporal_layers == 3) {
stream.temporal_layer_thresholds_bps.push_back(stream.max_bitrate_bps / 4);
stream.temporal_layer_thresholds_bps.push_back(stream.target_bitrate_bps);
} else {
RTC_CHECK_LE(params.video[video_idx].num_temporal_layers,
kMaxTemporalStreams);
for (int i = 0; i < params.video[video_idx].num_temporal_layers - 1; ++i) {
stream.temporal_layer_thresholds_bps.push_back(static_cast<int>(
stream.max_bitrate_bps * kVp8LayerRateAlloction[0][i] + 0.5));
}
}
return stream;
}
@ -1320,10 +1307,8 @@ void VideoQualityTest::FillScalabilitySettings(
stream.max_bitrate_bps = v[5];
if (v.size() > 6 && v[6] != -1)
stream.max_qp = v[6];
if (v.size() > 7) {
stream.temporal_layer_thresholds_bps.clear();
stream.temporal_layer_thresholds_bps.insert(
stream.temporal_layer_thresholds_bps.end(), v.begin() + 7, v.end());
if (v.size() > 7 && v[7] != -1) {
stream.num_temporal_layers = v[7];
} else {
// Automatic TL thresholds for more than two layers not supported.
RTC_CHECK_LE(params->video[video_idx].num_temporal_layers, 2);

View File

@ -1014,10 +1014,10 @@ void VideoSendStreamImpl::OnEncoderConfigurationChanged(
stats_proxy_->OnInactiveSsrc(config_->rtp.ssrcs[i]);
}
size_t number_of_temporal_layers =
streams.back().temporal_layer_thresholds_bps.size() + 1;
const size_t num_temporal_layers =
streams.back().num_temporal_layers.value_or(1);
fec_controller_->SetEncodingData(streams[0].width, streams[0].height,
number_of_temporal_layers,
num_temporal_layers,
config_->rtp.max_packet_size);
if (payload_router_.IsActive()) {

View File

@ -1082,8 +1082,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
std::vector<VideoStream> streams =
test::CreateVideoStreams(width, height, encoder_config);
for (VideoStream& stream : streams) {
stream.temporal_layer_thresholds_bps.resize(num_temporal_layers_ -
1);
stream.num_temporal_layers = num_temporal_layers_;
}
return streams;
}
@ -2462,8 +2461,8 @@ class VideoCodecConfigObserver : public test::SendTest,
std::vector<VideoStream> streams =
test::CreateVideoStreams(width, height, encoder_config);
for (size_t i = 0; i < streams.size(); ++i) {
streams[i].temporal_layer_thresholds_bps.resize(
kVideoCodecConfigObserverNumberOfTemporalLayers - 1);
streams[i].num_temporal_layers =
kVideoCodecConfigObserverNumberOfTemporalLayers;
}
return streams;
}
@ -2673,7 +2672,7 @@ TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) {
}
TEST_F(VideoSendStreamTest, TranslatesTwoLayerScreencastToTargetBitrate) {
static const int kScreencastTargetBitrateKbps = 200;
static const int kScreencastMaxTargetBitrateDeltaKbps = 1;
class VideoStreamFactory
: public VideoEncoderConfig::VideoStreamFactoryInterface {
@ -2687,9 +2686,13 @@ TEST_F(VideoSendStreamTest, TranslatesTwoLayerScreencastToTargetBitrate) {
const VideoEncoderConfig& encoder_config) override {
std::vector<VideoStream> streams =
test::CreateVideoStreams(width, height, encoder_config);
EXPECT_TRUE(streams[0].temporal_layer_thresholds_bps.empty());
streams[0].temporal_layer_thresholds_bps.push_back(
kScreencastTargetBitrateKbps * 1000);
EXPECT_FALSE(streams[0].num_temporal_layers.has_value());
streams[0].num_temporal_layers = 2;
RTC_CHECK_GT(streams[0].max_bitrate_bps,
kScreencastMaxTargetBitrateDeltaKbps);
streams[0].target_bitrate_bps =
streams[0].max_bitrate_bps -
kScreencastMaxTargetBitrateDeltaKbps * 1000;
return streams;
}
};
@ -2705,8 +2708,8 @@ TEST_F(VideoSendStreamTest, TranslatesTwoLayerScreencastToTargetBitrate) {
int32_t InitEncode(const VideoCodec* config,
int32_t number_of_cores,
size_t max_payload_size) override {
EXPECT_EQ(static_cast<unsigned int>(kScreencastTargetBitrateKbps),
config->targetBitrate);
EXPECT_EQ(static_cast<unsigned int>(kScreencastMaxTargetBitrateDeltaKbps),
config->maxBitrate - config->targetBitrate);
observation_complete_.Set();
return test::FakeEncoder::InitEncode(
config, number_of_cores, max_payload_size);
@ -3034,8 +3037,7 @@ class Vp9HeaderObserver : public test::SendTest {
const VideoEncoderConfig& encoder_config) override {
std::vector<VideoStream> streams =
test::CreateVideoStreams(width, height, encoder_config);
streams[0].temporal_layer_thresholds_bps.resize(
number_of_temporal_layers_ - 1);
streams.back().num_temporal_layers = number_of_temporal_layers_;
return streams;
}

View File

@ -150,7 +150,7 @@ class VideoStreamFactory
std::vector<VideoStream> streams =
test::CreateVideoStreams(width, height, encoder_config);
for (VideoStream& stream : streams) {
stream.temporal_layer_thresholds_bps.resize(num_temporal_layers_ - 1);
stream.num_temporal_layers = num_temporal_layers_;
stream.max_framerate = framerate_;
}
return streams;
@ -3166,7 +3166,7 @@ TEST_F(VideoStreamEncoderTest, AcceptsFullHdAdaptedDownSimulcastFrames) {
test::CreateVideoStreams(width - width % 4, height - height % 4,
encoder_config);
for (VideoStream& stream : streams) {
stream.temporal_layer_thresholds_bps.resize(num_temporal_layers_ - 1);
stream.num_temporal_layers = num_temporal_layers_;
stream.max_framerate = framerate_;
}
return streams;