This CL does a major cleanup of the VideoAdapter and makes sure it respects VideoSinkWants.max_pixel_count and VideoSinkWants.max_pixel_count_step_up.

Unit tests are updated to verify that screen share is not adapted. However, this change does not update the VideoSinkWants in WebRtcVideoEngine2::SendStream when switching to screen share, because the current behavior already works, and sprang is investigating how to adapt based on frame rate as well so that the adapter can be used for screen share too.

BUG=webrtc:5688, webrtc:5426
R=nisse@webrtc.org, pbos@webrtc.org, sprang@google.com

Review URL: https://codereview.webrtc.org/1836043004 .

Cr-Commit-Position: refs/heads/master@{#12240}
This commit is contained in:
Per 2016-04-05 15:23:49 +02:00
parent 9fdb6cf255
commit 766ad3b989
10 changed files with 493 additions and 1711 deletions

View File

@ -189,9 +189,8 @@ void AndroidVideoCapturer::OnIncomingFrame(
void AndroidVideoCapturer::OnOutputFormatRequest(
int width, int height, int fps) {
RTC_CHECK(thread_checker_.CalledOnValidThread());
const cricket::VideoFormat& current = video_adapter()->output_format();
cricket::VideoFormat format(
width, height, cricket::VideoFormat::FpsToInterval(fps), current.fourcc);
cricket::VideoFormat format(width, height,
cricket::VideoFormat::FpsToInterval(fps), 0);
video_adapter()->OnOutputFormatRequest(format);
}

View File

@ -10,161 +10,108 @@
#include "webrtc/media/base/videoadapter.h"
#include <limits.h> // For INT_MAX
#include <algorithm>
#include <limits>
#include "webrtc/base/logging.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/media/base/mediaconstants.h"
#include "webrtc/media/base/videocommon.h"
#include "webrtc/media/base/videoframe.h"
namespace cricket {
namespace {
// TODO(fbarchard): Make downgrades settable
static const int kMaxCpuDowngrades = 2; // Downgrade at most 2 times for CPU.
// The number of cpu samples to require before adapting. This value depends on
// the cpu monitor sampling frequency being 2000ms.
static const int kCpuLoadMinSamples = 3;
// The amount of weight to give to each new cpu load sample. The lower the
// value, the slower we'll adapt to changing cpu conditions.
static const float kCpuLoadWeightCoefficient = 0.4f;
// The seed value for the cpu load moving average.
static const float kCpuLoadInitialAverage = 0.5f;
// Desktop needs 1/8 scale for HD (1280 x 720) to QQVGA (160 x 90)
static const float kScaleFactors[] = {
1.f / 1.f, // Full size.
3.f / 4.f, // 3/4 scale.
1.f / 2.f, // 1/2 scale.
3.f / 8.f, // 3/8 scale.
1.f / 4.f, // 1/4 scale.
3.f / 16.f, // 3/16 scale.
1.f / 8.f, // 1/8 scale.
0.f // End of table.
// Scale factors that we accept, chosen to match scaling paths optimized in
// libYUV. Must be sorted in decreasing order for FindScaleLargerThan to work
// (it relies on early-exit once candidates drop below the target).
const float kScaleFactors[] = {
    1.f / 1.f,   // Full size.
    3.f / 4.f,   // 3/4 scale.
    1.f / 2.f,   // 1/2 scale.
    3.f / 8.f,   // 3/8 scale.
    1.f / 4.f,   // 1/4 scale.
    3.f / 16.f,  // 3/16 scale.
};
// TODO(fbarchard): Use this table (optionally) for CPU and GD as well.
static const float kViewScaleFactors[] = {
1.f / 1.f, // Full size.
3.f / 4.f, // 3/4 scale.
2.f / 3.f, // 2/3 scale. // Allow 1080p to 720p.
1.f / 2.f, // 1/2 scale.
3.f / 8.f, // 3/8 scale.
1.f / 3.f, // 1/3 scale. // Allow 1080p to 360p.
1.f / 4.f, // 1/4 scale.
3.f / 16.f, // 3/16 scale.
1.f / 8.f, // 1/8 scale.
0.f // End of table.
};
const float* VideoAdapter::GetViewScaleFactors() const {
return scale_third_ ? kViewScaleFactors : kScaleFactors;
}
// For resolutions that would scale down a little instead of up a little,
// bias toward scaling up a little. This will tend to choose 3/4 scale instead
// of 2/3 scale, when the 2/3 is not an exact match.
static const float kUpBias = -0.9f;
// Find the scale factor that, when applied to width and height, is closest
// to num_pixels.
float VideoAdapter::FindScale(const float* scale_factors,
const float upbias,
int width, int height,
int target_num_pixels) {
const float kMinNumPixels = 160 * 90;
if (!target_num_pixels) {
return 0.f;
}
float best_distance = static_cast<float>(INT_MAX);
float best_scale = 1.f; // Default to unscaled if nothing matches.
float pixels = static_cast<float>(width * height);
for (int i = 0; ; ++i) {
float scale = scale_factors[i];
float FindScaleLessThanOrEqual(int width,
int height,
int target_num_pixels,
int* resulting_number_of_pixels) {
float best_distance = std::numeric_limits<float>::max();
float best_scale = 0.0f; // Default to 0 if nothing matches.
float pixels = width * height;
float best_number_of_pixels = 0.0f;
for (const auto& scale : kScaleFactors) {
float test_num_pixels = pixels * scale * scale;
// Do not consider scale factors that produce too small images.
// Scale factor of 0 at end of table will also exit here.
if (test_num_pixels < kMinNumPixels) {
break;
}
float diff = target_num_pixels - test_num_pixels;
// If resolution is higher than desired, bias the difference based on
// preference for slightly larger for nearest, or avoid completely if
// looking for lower resolutions only.
if (diff < 0) {
diff = diff * kUpBias;
continue;
}
if (diff < best_distance) {
best_distance = diff;
best_scale = scale;
best_number_of_pixels = test_num_pixels;
if (best_distance == 0) { // Found exact match.
break;
}
}
}
if (resulting_number_of_pixels) {
*resulting_number_of_pixels = static_cast<int>(best_number_of_pixels + .5f);
}
return best_scale;
}
// Find the closest scale factor.
float VideoAdapter::FindClosestScale(int width, int height,
int target_num_pixels) {
return FindScale(kScaleFactors, kUpBias,
width, height, target_num_pixels);
// Finds the smallest scale factor in kScaleFactors that, applied to
// |width| x |height|, still produces strictly more pixels than
// |target_num_pixels|. Returns 1.f (unscaled) if no table entry exceeds the
// target. If |resulting_number_of_pixels| is non-null it receives the pixel
// count produced by the returned scale, rounded to the nearest integer.
float FindScaleLargerThan(int width,
                          int height,
                          int target_num_pixels,
                          int* resulting_number_of_pixels) {
  float best_distance = std::numeric_limits<float>::max();
  float best_scale = 1.f;  // Default to unscaled if nothing matches.
  float pixels = width * height;
  float best_number_of_pixels = pixels;  // Default to input number of pixels.
  for (const auto& scale : kScaleFactors) {
    float test_num_pixels = pixels * scale * scale;
    float diff = test_num_pixels - target_num_pixels;
    // kScaleFactors is sorted in decreasing order, so once a candidate no
    // longer exceeds the target, every later one is smaller too.
    if (diff <= 0) {
      break;
    }
    if (diff < best_distance) {
      best_distance = diff;
      best_scale = scale;
      best_number_of_pixels = test_num_pixels;
    }
  }
  // Null-check for consistency with FindScaleLessThanOrEqual, which lets
  // callers skip the output pixel count.
  if (resulting_number_of_pixels) {
    *resulting_number_of_pixels =
        static_cast<int>(best_number_of_pixels + .5f);
  }
  return best_scale;
}
// Find the closest view scale factor.
float VideoAdapter::FindClosestViewScale(int width, int height,
int target_num_pixels) {
return FindScale(GetViewScaleFactors(), kUpBias,
width, height, target_num_pixels);
}
} // namespace
// Finds the scale factor that, when applied to width and height, produces
// fewer than num_pixels.
static const float kUpAvoidBias = -1000000000.f;
float VideoAdapter::FindLowerScale(int width, int height,
int target_num_pixels) {
return FindScale(GetViewScaleFactors(), kUpAvoidBias,
width, height, target_num_pixels);
}
namespace cricket {
// There are several frame sizes used by Adapter. This explains them
// input_format - set once by server to frame size expected from the camera.
// The input frame size is also updated in AdaptFrameResolution.
// output_format - size that output would like to be. Includes framerate.
// The output frame size is also updated in AdaptFrameResolution.
// output_num_pixels - size that output should be constrained to. Used to
// compute output_format from in_frame.
// in_frame - actual camera captured frame size, which is typically the same
// as input_format. This can also be rotated or cropped for aspect ratio.
// out_frame - actual frame output by adapter. Should be a direct scale of
// in_frame maintaining rotation and aspect ratio.
// OnOutputFormatRequest - server requests you send this resolution based on
// view requests.
// OnEncoderResolutionRequest - encoder requests you send this resolution based
// on bandwidth
// OnCpuLoadUpdated - cpu monitor requests you send this resolution based on
// cpu load.
///////////////////////////////////////////////////////////////////////
// Implementation of VideoAdapter
VideoAdapter::VideoAdapter()
: output_num_pixels_(INT_MAX),
scale_third_(false),
: output_num_pixels_(std::numeric_limits<int>::max()),
frames_in_(0),
frames_out_(0),
frames_scaled_(0),
adaption_changes_(0),
previous_width_(0),
previous_height_(0),
interval_next_frame_(0) {
}
interval_next_frame_(0),
format_request_max_pixel_count_(std::numeric_limits<int>::max()),
resolution_request_max_pixel_count_(std::numeric_limits<int>::max()) {}
VideoAdapter::~VideoAdapter() {
VideoAdapter::~VideoAdapter() {}
// Records the interval at which input frames are expected to arrive. This
// feeds the frame-dropping logic in AdaptFrameResolution, which accumulates
// input_format_.interval per frame to decide which frames to drop.
void VideoAdapter::SetExpectedInputFrameInterval(int64_t interval) {
  // TODO(perkj): Consider measuring input frame rate instead.
  // Frame rate typically varies depending on lighting.
  rtc::CritScope cs(&critical_section_);  // Guards input_format_.
  input_format_.interval = interval;
}
void VideoAdapter::SetInputFormat(const VideoFormat& format) {
rtc::CritScope cs(&critical_section_);
bool is_resolution_change = (input_format().width != format.width ||
input_format().height != format.height);
int64_t old_input_interval = input_format_.interval;
input_format_ = format;
output_format_.interval =
@ -173,73 +120,21 @@ void VideoAdapter::SetInputFormat(const VideoFormat& format) {
LOG(LS_INFO) << "VAdapt input interval changed from "
<< old_input_interval << " to " << input_format_.interval;
}
}
void CoordinatedVideoAdapter::SetInputFormat(const VideoFormat& format) {
int previous_width = input_format().width;
int previous_height = input_format().height;
bool is_resolution_change = previous_width > 0 && format.width > 0 &&
(previous_width != format.width ||
previous_height != format.height);
VideoAdapter::SetInputFormat(format);
if (is_resolution_change) {
int width, height;
// Trigger the adaptation logic again, to potentially reset the adaptation
// state for things like view requests that may not longer be capping
// output (or may now cap output).
AdaptToMinimumFormat(&width, &height);
LOG(LS_INFO) << "VAdapt Input Resolution Change: "
<< "Previous input resolution: "
<< previous_width << "x" << previous_height
<< " New input resolution: "
<< format.width << "x" << format.height
<< " New output resolution: "
<< width << "x" << height;
Adapt(std::min(format_request_max_pixel_count_,
resolution_request_max_pixel_count_),
0);
}
}
// Enables or disables exponential smoothing of system CPU load samples and
// logs the new state.
void CoordinatedVideoAdapter::set_cpu_smoothing(bool enable) {
  cpu_smoothing_ = enable;
  LOG(LS_INFO) << "CPU smoothing is now "
               << (cpu_smoothing_ ? "enabled" : "disabled");
}
void VideoAdapter::SetOutputFormat(const VideoFormat& format) {
rtc::CritScope cs(&critical_section_);
int64_t old_output_interval = output_format_.interval;
output_format_ = format;
output_num_pixels_ = output_format_.width * output_format_.height;
output_format_.interval =
std::max(output_format_.interval, input_format_.interval);
if (old_output_interval != output_format_.interval) {
LOG(LS_INFO) << "VAdapt output interval changed from "
<< old_output_interval << " to " << output_format_.interval;
}
}
const VideoFormat& VideoAdapter::input_format() {
const VideoFormat& VideoAdapter::input_format() const {
rtc::CritScope cs(&critical_section_);
return input_format_;
}
// True if the adapter is configured to drop every frame, i.e. the requested
// output has been constrained to zero pixels.
bool VideoAdapter::drops_all_frames() const {
  return output_num_pixels_ == 0;
}
// Returns the current output format under the adapter lock.
const VideoFormat& VideoAdapter::output_format() {
  rtc::CritScope cs(&critical_section_);
  return output_format_;
}
// Constrain output resolution to this many pixels overall
// NOTE(review): unlike SetOutputFormat this does not take critical_section_;
// confirm callers already hold the lock or are single-threaded.
void VideoAdapter::SetOutputNumPixels(int num_pixels) {
  output_num_pixels_ = num_pixels;
}
// Returns the current overall output pixel constraint.
int VideoAdapter::GetOutputNumPixels() const {
  return output_num_pixels_;
}
VideoFormat VideoAdapter::AdaptFrameResolution(int in_width, int in_height) {
rtc::CritScope cs(&critical_section_);
++frames_in_;
@ -255,8 +150,6 @@ VideoFormat VideoAdapter::AdaptFrameResolution(int in_width, int in_height) {
} else {
// Drop some frames based on input fps and output fps.
// Normally output fps is less than input fps.
// TODO(fbarchard): Consider adjusting interval to reflect the adjusted
// interval between frames after dropping some frames.
interval_next_frame_ += input_format_.interval;
if (output_format_.interval > 0) {
if (interval_next_frame_ >= output_format_.interval) {
@ -284,44 +177,24 @@ VideoFormat VideoAdapter::AdaptFrameResolution(int in_width, int in_height) {
return VideoFormat(); // Drop frame.
}
const float scale = VideoAdapter::FindClosestViewScale(
in_width, in_height, output_num_pixels_);
const size_t output_width = static_cast<size_t>(in_width * scale + .5f);
const size_t output_height = static_cast<size_t>(in_height * scale + .5f);
const float scale = FindScaleLessThanOrEqual(in_width, in_height,
output_num_pixels_, nullptr);
const int output_width = static_cast<int>(in_width * scale + .5f);
const int output_height = static_cast<int>(in_height * scale + .5f);
++frames_out_;
if (scale != 1)
++frames_scaled_;
// Show VAdapt log every 90 frames output. (3 seconds)
// TODO(fbarchard): Consider GetLogSeverity() to change interval to less
// for LS_VERBOSE and more for LS_INFO.
bool show = (frames_out_) % 90 == 0;
// TODO(fbarchard): LOG the previous output resolution and track input
// resolution changes as well. Consider dropping the statistics into their
// own class which could be queried publically.
bool changed = false;
if (previous_width_ && (previous_width_ != output_width ||
previous_height_ != output_height)) {
show = true;
++adaption_changes_;
changed = true;
}
if (show) {
// TODO(fbarchard): Reduce to LS_VERBOSE when adapter info is not needed
// in default calls.
LOG(LS_INFO) << "VAdapt Frame: scaled " << frames_scaled_
<< " / out " << frames_out_
<< " / in " << frames_in_
<< " Changes: " << adaption_changes_
<< " Input: " << in_width
<< "x" << in_height
<< " i" << input_format_.interval
<< " Scale: " << scale
<< " Output: " << output_width
<< "x" << output_height
<< " i" << output_format_.interval
<< " Changed: " << (changed ? "true" : "false");
LOG(LS_INFO) << "Frame size changed: scaled " << frames_scaled_ << " / out "
<< frames_out_ << " / in " << frames_in_
<< " Changes: " << adaption_changes_ << " Input: " << in_width
<< "x" << in_height << " i" << input_format_.interval
<< " Scale: " << scale << " Output: " << output_width << "x"
<< output_height << " i" << output_format_.interval;
}
output_format_.width = output_width;
@ -332,385 +205,57 @@ VideoFormat VideoAdapter::AdaptFrameResolution(int in_width, int in_height) {
return output_format_;
}
void VideoAdapter::set_scale_third(bool enable) {
LOG(LS_INFO) << "Video Adapter third scaling is now "
<< (enable ? "enabled" : "disabled");
scale_third_ = enable;
// Handles a request that output frames be no larger than |format| and no more
// frequent than |format|.interval (see the header: output from
// AdaptFrameResolution must not exceed this format).
void VideoAdapter::OnOutputFormatRequest(const VideoFormat& format) {
  rtc::CritScope cs(&critical_section_);
  format_request_max_pixel_count_ = format.width * format.height;
  output_format_.interval = format.interval;
  // Re-run adaptation with the more restrictive of the two pixel caps
  // (format request vs. resolution request); no step-up hint here.
  Adapt(std::min(format_request_max_pixel_count_,
                 resolution_request_max_pixel_count_),
        0);
}
///////////////////////////////////////////////////////////////////////
// Implementation of CoordinatedVideoAdapter
CoordinatedVideoAdapter::CoordinatedVideoAdapter()
: cpu_adaptation_(true),
cpu_smoothing_(false),
gd_adaptation_(true),
view_adaptation_(true),
view_switch_(false),
cpu_downgrade_count_(0),
cpu_load_min_samples_(kCpuLoadMinSamples),
cpu_load_num_samples_(0),
high_system_threshold_(kHighSystemCpuThreshold),
low_system_threshold_(kLowSystemCpuThreshold),
process_threshold_(kProcessCpuThreshold),
view_desired_num_pixels_(INT_MAX),
view_desired_interval_(0),
encoder_desired_num_pixels_(INT_MAX),
cpu_desired_num_pixels_(INT_MAX),
adapt_reason_(ADAPTREASON_NONE),
system_load_average_(kCpuLoadInitialAverage) {
}
// Steps |num_pixels| once in the requested direction: halves it on DOWNGRADE,
// doubles it on UPGRADE, and leaves it untouched for any other request.
void CoordinatedVideoAdapter::StepPixelCount(
    CoordinatedVideoAdapter::AdaptRequest request,
    int* num_pixels) {
  if (request == CoordinatedVideoAdapter::DOWNGRADE) {
    *num_pixels /= 2;
  } else if (request == CoordinatedVideoAdapter::UPGRADE) {
    *num_pixels *= 2;
  }
  // KEEP (or anything else) leaves the pixel count unchanged.
}
// Maps the current process/system CPU load onto an adaptation request:
// DOWNGRADE when the system is heavily loaded and this process contributes at
// least a mid-range share, UPGRADE when the system is lightly loaded, and
// KEEP otherwise. Thresholds are fractions scaled by the CPU counts.
CoordinatedVideoAdapter::AdaptRequest CoordinatedVideoAdapter::FindCpuRequest(
    int current_cpus, int max_cpus,
    float process_load, float system_load) {
  const bool system_high = system_load >= high_system_threshold_ * max_cpus;
  const bool process_high = process_load >= process_threshold_ * current_cpus;
  if (system_high && process_high)
    return CoordinatedVideoAdapter::DOWNGRADE;
  if (system_load < low_system_threshold_ * max_cpus)
    return CoordinatedVideoAdapter::UPGRADE;
  return CoordinatedVideoAdapter::KEEP;
}
// A remote view request for a new resolution. Records the view's desired
// pixel count and frame interval, then re-runs adaptation. No-op when view
// adaptation is disabled.
void CoordinatedVideoAdapter::OnOutputFormatRequest(const VideoFormat& format) {
  rtc::CritScope cs(&request_critical_section_);
  if (!view_adaptation_) {
    return;
  }
  // Set output for initial aspect ratio in mediachannel unittests.
  // SetOutputFormat overwrites output_num_pixels_, so save and restore it so
  // that only AdaptToMinimumFormat below decides the final constraint.
  int old_num_pixels = GetOutputNumPixels();
  SetOutputFormat(format);
  SetOutputNumPixels(old_num_pixels);
  view_desired_num_pixels_ = format.width * format.height;
  view_desired_interval_ = format.interval;
  int new_width, new_height;
  // Recompute the output resolution from all constraints (view/GD/CPU).
  bool changed = AdaptToMinimumFormat(&new_width, &new_height);
  LOG(LS_INFO) << "VAdapt View Request: "
               << format.width << "x" << format.height
               << " Pixels: " << view_desired_num_pixels_
               << " Changed: " << (changed ? "true" : "false")
               << " To: " << new_width << "x" << new_height;
}
// Sets how many CPU load samples must accumulate before adaptation may react;
// logs only when the value actually changes.
void CoordinatedVideoAdapter::set_cpu_load_min_samples(
    int cpu_load_min_samples) {
  if (cpu_load_min_samples_ == cpu_load_min_samples)
    return;
  LOG(LS_INFO) << "VAdapt Change Cpu Adapt Min Samples from: "
               << cpu_load_min_samples_ << " to "
               << cpu_load_min_samples;
  cpu_load_min_samples_ = cpu_load_min_samples;
}
// Sets the system-load fraction above which a CPU DOWNGRADE is considered.
// |high_system_threshold| must lie in [0, 1]; logs only on an actual change.
void CoordinatedVideoAdapter::set_high_system_threshold(
    float high_system_threshold) {
  ASSERT(high_system_threshold <= 1.0f);
  ASSERT(high_system_threshold >= 0.0f);
  if (high_system_threshold == high_system_threshold_)
    return;
  LOG(LS_INFO) << "VAdapt Change High System Threshold from: "
               << high_system_threshold_ << " to " << high_system_threshold;
  high_system_threshold_ = high_system_threshold;
}
// Sets the system-load fraction below which a CPU UPGRADE is considered.
// |low_system_threshold| must lie in [0, 1]; logs only on an actual change.
void CoordinatedVideoAdapter::set_low_system_threshold(
    float low_system_threshold) {
  ASSERT(low_system_threshold <= 1.0f);
  ASSERT(low_system_threshold >= 0.0f);
  if (low_system_threshold == low_system_threshold_)
    return;
  LOG(LS_INFO) << "VAdapt Change Low System Threshold from: "
               << low_system_threshold_ << " to " << low_system_threshold;
  low_system_threshold_ = low_system_threshold;
}
// Sets the per-process load fraction that must also be exceeded (along with
// the high system threshold) before downgrading. Must lie in [0, 1]; logs
// only on an actual change.
void CoordinatedVideoAdapter::set_process_threshold(float process_threshold) {
  ASSERT(process_threshold <= 1.0f);
  ASSERT(process_threshold >= 0.0f);
  if (process_threshold == process_threshold_)
    return;
  LOG(LS_INFO) << "VAdapt Change High Process Threshold from: "
               << process_threshold_ << " to " << process_threshold;
  process_threshold_ = process_threshold;
}
// A Bandwidth GD request for new resolution
// Steps the encoder-desired pixel count up or down, then re-runs adaptation.
// Requests are ignored when GD adaptation is disabled, when the request is
// stale (computed from a resolution that no longer matches the current
// output), or — with view_switch_ — when an up/keep request causes no change.
void CoordinatedVideoAdapter::OnEncoderResolutionRequest(
    int width, int height, AdaptRequest request) {
  rtc::CritScope cs(&request_critical_section_);
  if (!gd_adaptation_) {
    return;
  }
  // Remember the previous value so the step can be rolled back below.
  int old_encoder_desired_num_pixels = encoder_desired_num_pixels_;
  if (KEEP != request) {
    int new_encoder_desired_num_pixels = width * height;
    int old_num_pixels = GetOutputNumPixels();
    // Only act if the request refers to the resolution we currently output.
    if (new_encoder_desired_num_pixels != old_num_pixels) {
      LOG(LS_VERBOSE) << "VAdapt GD resolution stale. Ignored";
    } else {
      // Update the encoder desired format based on the request.
      encoder_desired_num_pixels_ = new_encoder_desired_num_pixels;
      StepPixelCount(request, &encoder_desired_num_pixels_);
    }
  }
  int new_width, new_height;
  bool changed = AdaptToMinimumFormat(&new_width, &new_height);
  // Ignore up or keep if no change.
  if (DOWNGRADE != request && view_switch_ && !changed) {
    encoder_desired_num_pixels_ = old_encoder_desired_num_pixels;
    LOG(LS_VERBOSE) << "VAdapt ignoring GD request.";
  }
  LOG(LS_INFO) << "VAdapt GD Request: "
               << (DOWNGRADE == request ? "down" :
                   (UPGRADE == request ? "up" : "keep"))
               << " From: " << width << "x" << height
               << " Pixels: " << encoder_desired_num_pixels_
               << " Changed: " << (changed ? "true" : "false")
               << " To: " << new_width << "x" << new_height;
}
void CoordinatedVideoAdapter::OnCpuResolutionRequest(
void VideoAdapter::OnResolutionRequest(
rtc::Optional<int> max_pixel_count,
rtc::Optional<int> max_pixel_count_step_up) {
rtc::CritScope cs(&request_critical_section_);
// TODO(perkj): We should support taking larger steps up and down and
// actually look at the values set in max_pixel_count and
// max_pixel_count_step_up.
if (max_pixel_count && *max_pixel_count < GetOutputNumPixels()) {
OnCpuResolutionRequest(DOWNGRADE);
} else if (max_pixel_count_step_up &&
*max_pixel_count_step_up >= GetOutputNumPixels()) {
OnCpuResolutionRequest(UPGRADE);
}
rtc::CritScope cs(&critical_section_);
resolution_request_max_pixel_count_ =
max_pixel_count.value_or(std::numeric_limits<int>::max());
Adapt(std::min(format_request_max_pixel_count_,
resolution_request_max_pixel_count_),
max_pixel_count_step_up.value_or(0));
}
// A Bandwidth GD request for new resolution
void CoordinatedVideoAdapter::OnCpuResolutionRequest(AdaptRequest request) {
rtc::CritScope cs(&request_critical_section_);
if (!cpu_adaptation_) {
return;
}
bool VideoAdapter::Adapt(int max_num_pixels, int max_pixel_count_step_up) {
float scale_lower =
FindScaleLessThanOrEqual(input_format_.width, input_format_.height,
max_num_pixels, &max_num_pixels);
float scale_upper =
max_pixel_count_step_up > 0
? FindScaleLargerThan(input_format_.width, input_format_.height,
max_pixel_count_step_up,
&max_pixel_count_step_up)
: 1.f;
// Update how many times we have downgraded due to the cpu load.
switch (request) {
case DOWNGRADE:
// Ignore downgrades if we have downgraded the maximum times.
if (cpu_downgrade_count_ < kMaxCpuDowngrades) {
++cpu_downgrade_count_;
} else {
LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade "
"because maximum downgrades reached";
SignalCpuAdaptationUnable();
}
break;
case UPGRADE:
if (cpu_downgrade_count_ > 0) {
bool is_min = IsMinimumFormat(cpu_desired_num_pixels_);
if (is_min) {
--cpu_downgrade_count_;
} else {
LOG(LS_VERBOSE) << "VAdapt CPU load low but do not upgrade "
"because cpu is not limiting resolution";
}
} else {
LOG(LS_VERBOSE) << "VAdapt CPU load low but do not upgrade "
"because minimum downgrades reached";
}
break;
case KEEP:
default:
break;
}
if (KEEP != request) {
// TODO(fbarchard): compute stepping up/down from OutputNumPixels but
// clamp to inputpixels / 4 (2 steps)
cpu_desired_num_pixels_ = cpu_downgrade_count_ == 0 ? INT_MAX :
static_cast<int>(input_format().width * input_format().height >>
cpu_downgrade_count_);
}
int new_width, new_height;
bool changed = AdaptToMinimumFormat(&new_width, &new_height);
LOG(LS_INFO) << "VAdapt CPU Request: "
<< (DOWNGRADE == request ? "down" :
(UPGRADE == request ? "up" : "keep"))
<< " Steps: " << cpu_downgrade_count_
<< " Changed: " << (changed ? "true" : "false")
<< " To: " << new_width << "x" << new_height;
}
bool use_max_pixel_count_step_up =
max_pixel_count_step_up > 0 && max_num_pixels > max_pixel_count_step_up;
// A CPU request for new resolution
// TODO(fbarchard): Move outside adapter.
void CoordinatedVideoAdapter::OnCpuLoadUpdated(
int current_cpus, int max_cpus, float process_load, float system_load) {
rtc::CritScope cs(&request_critical_section_);
if (!cpu_adaptation_) {
return;
}
// Update the moving average of system load. Even if we aren't smoothing,
// we'll still calculate this information, in case smoothing is later enabled.
system_load_average_ = kCpuLoadWeightCoefficient * system_load +
(1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
++cpu_load_num_samples_;
if (cpu_smoothing_) {
system_load = system_load_average_;
}
AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
process_load, system_load);
// Make sure we're not adapting too quickly.
if (request != KEEP) {
if (cpu_load_num_samples_ < cpu_load_min_samples_) {
LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
<< (cpu_load_min_samples_ - cpu_load_num_samples_)
<< " more samples";
request = KEEP;
}
}
int old_num_pixels = output_num_pixels_;
output_num_pixels_ =
use_max_pixel_count_step_up ? max_pixel_count_step_up : max_num_pixels;
// Log the new size.
float scale = use_max_pixel_count_step_up ? scale_upper : scale_lower;
int new_width = static_cast<int>(input_format_.width * scale + .5f);
int new_height = static_cast<int>(input_format_.height * scale + .5f);
OnCpuResolutionRequest(request);
}
// Called by cpu adapter on up requests.
// Returns true if snapping |pixels| to the closest scale of the input
// resolution yields no more pixels than the current output constraint —
// i.e. the given pixel target is at or below what we currently output.
bool CoordinatedVideoAdapter::IsMinimumFormat(int pixels) {
  // Find closest scale factor that matches input resolution to min_num_pixels
  // and set that for output resolution. This is not needed for VideoAdapter,
  // but provides feedback to unittests and users on expected resolution.
  // Actual resolution is based on input frame.
  VideoFormat new_output = output_format();
  VideoFormat input = input_format();
  // Fall back to the output format when no input format is known yet.
  if (input_format().IsSize0x0()) {
    input = new_output;
  }
  float scale = 1.0f;
  if (!input.IsSize0x0()) {
    scale = FindClosestScale(input.width,
                             input.height,
                             pixels);
  }
  // Round to the concrete resolution the scale would produce.
  new_output.width = static_cast<int>(input.width * scale + .5f);
  new_output.height = static_cast<int>(input.height * scale + .5f);
  int new_pixels = new_output.width * new_output.height;
  int num_pixels = GetOutputNumPixels();
  return new_pixels <= num_pixels;
}
// Called by all coordinators when there is a change.
bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
int* new_height) {
VideoFormat new_output = output_format();
VideoFormat input = input_format();
if (input_format().IsSize0x0()) {
input = new_output;
}
int old_num_pixels = GetOutputNumPixels();
int min_num_pixels = INT_MAX;
adapt_reason_ = ADAPTREASON_NONE;
// Reduce resolution based on encoder bandwidth (GD).
if (encoder_desired_num_pixels_ &&
(encoder_desired_num_pixels_ < min_num_pixels)) {
adapt_reason_ |= ADAPTREASON_BANDWIDTH;
min_num_pixels = encoder_desired_num_pixels_;
}
// Reduce resolution based on CPU.
if (cpu_adaptation_ && cpu_desired_num_pixels_ &&
(cpu_desired_num_pixels_ <= min_num_pixels)) {
if (cpu_desired_num_pixels_ < min_num_pixels) {
adapt_reason_ = ADAPTREASON_CPU;
} else {
adapt_reason_ |= ADAPTREASON_CPU;
}
min_num_pixels = cpu_desired_num_pixels_;
}
// Round resolution for GD or CPU to allow 1/2 to map to 9/16.
if (!input.IsSize0x0() && min_num_pixels != INT_MAX) {
float scale = FindClosestScale(input.width, input.height, min_num_pixels);
min_num_pixels = static_cast<int>(input.width * scale + .5f) *
static_cast<int>(input.height * scale + .5f);
}
// Reduce resolution based on View Request.
if (view_desired_num_pixels_ <= min_num_pixels) {
if (view_desired_num_pixels_ < min_num_pixels) {
adapt_reason_ = ADAPTREASON_VIEW;
} else {
adapt_reason_ |= ADAPTREASON_VIEW;
}
min_num_pixels = view_desired_num_pixels_;
}
// Snap to a scale factor.
float scale = 1.0f;
if (!input.IsSize0x0()) {
scale = FindLowerScale(input.width, input.height, min_num_pixels);
min_num_pixels = static_cast<int>(input.width * scale + .5f) *
static_cast<int>(input.height * scale + .5f);
}
if (scale == 1.0f) {
adapt_reason_ = ADAPTREASON_NONE;
}
*new_width = new_output.width = static_cast<int>(input.width * scale + .5f);
*new_height = new_output.height = static_cast<int>(input.height * scale +
.5f);
SetOutputNumPixels(min_num_pixels);
new_output.interval = view_desired_interval_;
SetOutputFormat(new_output);
int new_num_pixels = GetOutputNumPixels();
bool changed = new_num_pixels != old_num_pixels;
static const char* kReasons[8] = {
"None",
"CPU",
"BANDWIDTH",
"CPU+BANDWIDTH",
"VIEW",
"CPU+VIEW",
"BANDWIDTH+VIEW",
"CPU+BANDWIDTH+VIEW",
};
LOG(LS_VERBOSE) << "VAdapt Status View: " << view_desired_num_pixels_
<< " GD: " << encoder_desired_num_pixels_
<< " CPU: " << cpu_desired_num_pixels_
<< " Pixels: " << min_num_pixels
<< " Input: " << input.width
<< "x" << input.height
<< " Scale: " << scale
<< " Resolution: " << new_output.width
<< "x" << new_output.height
<< " Changed: " << (changed ? "true" : "false")
<< " Reason: " << kReasons[adapt_reason_];
if (changed) {
// When any adaptation occurs, historic CPU load levels are no longer
// accurate. Clear out our state so we can re-learn at the new normal.
cpu_load_num_samples_ = 0;
system_load_average_ = kCpuLoadInitialAverage;
}
bool changed = output_num_pixels_ != old_num_pixels;
LOG(LS_INFO) << "OnResolutionRequest: "
<< " Max pixels: " << max_num_pixels
<< " Max pixels step up: " << max_pixel_count_step_up
<< " Output Pixels: " << output_num_pixels_
<< " Input: " << input_format_.width << "x"
<< input_format_.height << " Scale: " << scale
<< " Resolution: " << new_width << "x" << new_height
<< " Changed: " << (changed ? "true" : "false");
return changed;
}

View File

@ -8,188 +8,72 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MEDIA_BASE_VIDEOADAPTER_H_ // NOLINT
#ifndef WEBRTC_MEDIA_BASE_VIDEOADAPTER_H_
#define WEBRTC_MEDIA_BASE_VIDEOADAPTER_H_
#include "webrtc/base/common.h" // For ASSERT
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/optional.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/media/base/videocommon.h"
namespace cricket {
class VideoFrame;
// VideoAdapter adapts an input video frame to an output frame based on the
// specified input and output formats. The adaptation includes dropping frames
// to reduce frame rate and scaling frames. VideoAdapter is thread safe.
// to reduce frame rate and scaling frames.
// VideoAdapter is thread safe.
class VideoAdapter {
public:
VideoAdapter();
virtual ~VideoAdapter();
virtual void SetInputFormat(const VideoFormat& format);
void SetOutputFormat(const VideoFormat& format);
// Constrain output resolution to this many pixels overall
void SetOutputNumPixels(int num_pixels);
int GetOutputNumPixels() const;
const VideoFormat& input_format();
// Returns true if the adapter will always return zero size from
// AdaptFrameResolution.
bool drops_all_frames() const;
const VideoFormat& output_format();
// Sets the expected frame interval. This controls how often frames should
// be dropped if |OnOutputFormatRequest| is called with a lower frame
// interval.
void SetExpectedInputFrameInterval(int64_t interval);
// Return the adapted resolution given the input resolution. The returned
// resolution will be 0x0 if the frame should be dropped.
VideoFormat AdaptFrameResolution(int in_width, int in_height);
void set_scale_third(bool enable);
bool scale_third() const { return scale_third_; }
// Requests the output frame size and frame interval from
// |AdaptFrameResolution| to not be larger than |format|.
void OnOutputFormatRequest(const VideoFormat& format);
int adaptation_changes() const { return adaption_changes_; }
// Requests the output frame size from |AdaptFrameResolution| to not have
// more than |max_pixel_count| pixels and have "one step" up more pixels than
// max_pixel_count_step_up.
void OnResolutionRequest(rtc::Optional<int> max_pixel_count,
rtc::Optional<int> max_pixel_count_step_up);
protected:
float FindClosestScale(int width, int height, int target_num_pixels);
float FindClosestViewScale(int width, int height, int target_num_pixels);
float FindLowerScale(int width, int height, int target_num_pixels);
const VideoFormat& input_format() const;
private:
const float* GetViewScaleFactors() const;
float FindScale(const float* scale_factors,
const float upbias, int width, int height,
int target_num_pixels);
void SetInputFormat(const VideoFormat& format);
bool Adapt(int max_num_pixels, int max_pixel_count_step_up);
VideoFormat input_format_;
VideoFormat output_format_;
int output_num_pixels_;
bool scale_third_; // True if adapter allows scaling to 1/3 and 2/3.
int frames_in_; // Number of input frames.
int frames_out_; // Number of output frames.
int frames_scaled_; // Number of frames scaled.
int frames_in_; // Number of input frames.
int frames_out_; // Number of output frames.
int frames_scaled_; // Number of frames scaled.
int adaption_changes_; // Number of changes in scale factor.
size_t previous_width_; // Previous adapter output width.
size_t previous_height_; // Previous adapter output height.
int previous_width_; // Previous adapter output width.
int previous_height_; // Previous adapter output height.
int64_t interval_next_frame_;
// Max number of pixels requested via calls to OnOutputFormatRequest,
// OnResolutionRequest respectively.
// The adapted output format is the minimum of these.
int format_request_max_pixel_count_;
int resolution_request_max_pixel_count_;
// The critical section to protect the above variables.
rtc::CriticalSection critical_section_;
RTC_DISALLOW_COPY_AND_ASSIGN(VideoAdapter);
};
// CoordinatedVideoAdapter adapts the video input to the encoder by coordinating
// the format request from the server, the resolution request from the encoder,
// and the CPU load.
class CoordinatedVideoAdapter
: public VideoAdapter, public sigslot::has_slots<> {
public:
enum AdaptRequest { UPGRADE, KEEP, DOWNGRADE };
enum AdaptReasonEnum {
ADAPTREASON_NONE = 0,
ADAPTREASON_CPU = 1,
ADAPTREASON_BANDWIDTH = 2,
ADAPTREASON_VIEW = 4
};
typedef int AdaptReason;
CoordinatedVideoAdapter();
virtual ~CoordinatedVideoAdapter() {}
virtual void SetInputFormat(const VideoFormat& format);
// Enable or disable video adaptation due to the change of the CPU load.
void set_cpu_adaptation(bool enable) { cpu_adaptation_ = enable; }
bool cpu_adaptation() const { return cpu_adaptation_; }
// Enable or disable smoothing when doing CPU adaptation. When smoothing is
// enabled, system CPU load is tracked using an exponential weighted
// average.
void set_cpu_smoothing(bool enable);
bool cpu_smoothing() const { return cpu_smoothing_; }
// Enable or disable video adaptation due to the change of the GD
void set_gd_adaptation(bool enable) { gd_adaptation_ = enable; }
bool gd_adaptation() const { return gd_adaptation_; }
// Enable or disable video adaptation due to the change of the View
void set_view_adaptation(bool enable) { view_adaptation_ = enable; }
bool view_adaptation() const { return view_adaptation_; }
// Enable or disable video adaptation to fast switch View
void set_view_switch(bool enable) { view_switch_ = enable; }
bool view_switch() const { return view_switch_; }
CoordinatedVideoAdapter::AdaptReason adapt_reason() const {
return adapt_reason_;
}
// When the video is decreased, set the waiting time for CPU adaptation to
// decrease video again.
void set_cpu_load_min_samples(int cpu_load_min_samples);
int cpu_load_min_samples() const { return cpu_load_min_samples_; }
// CPU system load high threshold for reducing resolution. e.g. 0.85f
void set_high_system_threshold(float high_system_threshold);
float high_system_threshold() const { return high_system_threshold_; }
// CPU system load low threshold for increasing resolution. e.g. 0.70f
void set_low_system_threshold(float low_system_threshold);
float low_system_threshold() const { return low_system_threshold_; }
// CPU process load threshold for reducing resolution. e.g. 0.10f
void set_process_threshold(float process_threshold);
float process_threshold() const { return process_threshold_; }
// Handle the format request from the server via Jingle update message.
void OnOutputFormatRequest(const VideoFormat& format);
// Handle the resolution request from the encoder due to bandwidth changes.
void OnEncoderResolutionRequest(int width, int height, AdaptRequest request);
// Handle the resolution request for CPU overuse.
void OnCpuResolutionRequest(AdaptRequest request);
void OnCpuResolutionRequest(rtc::Optional<int> max_pixel_count,
rtc::Optional<int> max_pixel_count_step_up);
// Handle the CPU load provided by a CPU monitor.
void OnCpuLoadUpdated(int current_cpus, int max_cpus,
float process_load, float system_load);
sigslot::signal0<> SignalCpuAdaptationUnable;
private:
// Adapt to the minimum of the formats the server requests, the CPU wants, and
// the encoder wants. Returns true if resolution changed.
bool AdaptToMinimumFormat(int* new_width, int* new_height);
bool IsMinimumFormat(int pixels);
void StepPixelCount(CoordinatedVideoAdapter::AdaptRequest request,
int* num_pixels);
CoordinatedVideoAdapter::AdaptRequest FindCpuRequest(
int current_cpus, int max_cpus,
float process_load, float system_load);
bool cpu_adaptation_; // True if cpu adaptation is enabled.
bool cpu_smoothing_; // True if cpu smoothing is enabled (with adaptation).
bool gd_adaptation_; // True if gd adaptation is enabled.
bool view_adaptation_; // True if view adaptation is enabled.
bool view_switch_; // True if view switch is enabled.
int cpu_downgrade_count_;
int cpu_load_min_samples_;
int cpu_load_num_samples_;
// cpu system load thresholds relative to max cpus.
float high_system_threshold_;
float low_system_threshold_;
// cpu process load thresholds relative to current cpus.
float process_threshold_;
// Video formats that the server view requests, the CPU wants, and the encoder
// wants respectively. The adapted output format is the minimum of these.
int view_desired_num_pixels_;
int64_t view_desired_interval_;
int encoder_desired_num_pixels_;
int cpu_desired_num_pixels_;
CoordinatedVideoAdapter::AdaptReason adapt_reason_;
// The critical section to protect handling requests.
rtc::CriticalSection request_critical_section_;
// The weighted average of cpu load over time. It's always updated (if cpu
// adaptation is on), but only used if cpu_smoothing_ is set.
float system_load_average_;
RTC_DISALLOW_COPY_AND_ASSIGN(CoordinatedVideoAdapter);
};
} // namespace cricket
#endif // WEBRTC_MEDIA_BASE_VIDEOADAPTER_H_ // NOLINT
#endif // WEBRTC_MEDIA_BASE_VIDEOADAPTER_H_

File diff suppressed because it is too large Load Diff

View File

@ -213,8 +213,8 @@ void VideoCapturer::OnSinkWantsChanged(const rtc::VideoSinkWants& wants) {
}
if (video_adapter()) {
video_adapter()->OnCpuResolutionRequest(wants.max_pixel_count,
wants.max_pixel_count_step_up);
video_adapter()->OnResolutionRequest(wants.max_pixel_count,
wants.max_pixel_count_step_up);
}
}

View File

@ -230,7 +230,7 @@ class VideoCapturer : public sigslot::has_slots<>,
// SignalFrameCaptured or OnFrameCaptured.
void OnFrame(VideoCapturer* capturer, const VideoFrame* frame);
CoordinatedVideoAdapter* video_adapter() { return &video_adapter_; }
VideoAdapter* video_adapter() { return &video_adapter_; }
void SetCaptureState(CaptureState state);
@ -248,8 +248,7 @@ class VideoCapturer : public sigslot::has_slots<>,
if (capture_format_) {
ASSERT(capture_format_->interval > 0 &&
"Capture format expected to have positive interval.");
// Video adapter really only cares about capture format interval.
video_adapter_.SetInputFormat(*capture_format_);
video_adapter_.SetExpectedInputFrameInterval(capture_format_->interval);
}
}
@ -293,7 +292,7 @@ class VideoCapturer : public sigslot::has_slots<>,
rtc::VideoBroadcaster broadcaster_;
bool enable_video_adapter_;
CoordinatedVideoAdapter video_adapter_;
VideoAdapter video_adapter_;
rtc::CriticalSection frame_stats_crit_;
// The captured frame size before potential adapation.

View File

@ -256,9 +256,11 @@ TEST_F(VideoCapturerTest, SinkWantsMaxPixelAndMaxPixelCountStepUp) {
EXPECT_EQ(1280, renderer_.width());
EXPECT_EQ(720, renderer_.height());
// Request a lower resolution.
// Request a lower resolution. The output resolution will have a resolution
// with less than or equal to |wants.max_pixel_count| depending on how the
// capturer can scale the input frame size.
rtc::VideoSinkWants wants;
wants.max_pixel_count = rtc::Optional<int>(1280 * 720 / 2);
wants.max_pixel_count = rtc::Optional<int>(1280 * 720 * 3 / 5);
capturer_->AddOrUpdateSink(&renderer_, wants);
EXPECT_TRUE(capturer_->CaptureFrame());
EXPECT_EQ(2, renderer_.num_rendered_frames());
@ -267,7 +269,7 @@ TEST_F(VideoCapturerTest, SinkWantsMaxPixelAndMaxPixelCountStepUp) {
// Request a lower resolution.
wants.max_pixel_count =
rtc::Optional<int>(renderer_.width() * renderer_.height() / 2);
rtc::Optional<int>(renderer_.width() * renderer_.height() * 3 / 5);
capturer_->AddOrUpdateSink(&renderer_, wants);
EXPECT_TRUE(capturer_->CaptureFrame());
EXPECT_EQ(3, renderer_.num_rendered_frames());
@ -306,6 +308,17 @@ TEST_F(VideoCapturerTest, SinkWantsMaxPixelAndMaxPixelCountStepUp) {
EXPECT_EQ(3, renderer2.num_rendered_frames());
EXPECT_EQ(960, renderer2.width());
EXPECT_EQ(540, renderer2.height());
// But resetting the wants should reset the resolution to what the camera is
// opened with.
capturer_->AddOrUpdateSink(&renderer_, rtc::VideoSinkWants());
EXPECT_TRUE(capturer_->CaptureFrame());
EXPECT_EQ(7, renderer_.num_rendered_frames());
EXPECT_EQ(1280, renderer_.width());
EXPECT_EQ(720, renderer_.height());
EXPECT_EQ(4, renderer2.num_rendered_frames());
EXPECT_EQ(1280, renderer2.width());
EXPECT_EQ(720, renderer2.height());
}
TEST_F(VideoCapturerTest, ScreencastScaledSuperLarge) {

View File

@ -371,6 +371,9 @@ static const int kDefaultQpMax = 56;
static const int kDefaultRtcpReceiverReportSsrc = 1;
// Down grade resolution at most 2 times for CPU reasons.
static const int kMaxCpuDowngrades = 2;
std::vector<VideoCodec> DefaultVideoCodecList() {
std::vector<VideoCodec> codecs;
codecs.push_back(MakeVideoCodecWithDefaultFeedbackParams(kDefaultVp8PlType,
@ -1835,7 +1838,7 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::SetSendParameters(
<< "RecreateWebRtcStream (send) because of SetSendParameters";
RecreateWebRtcStream();
}
} // release |lock_|
} // release |lock_|
// |capturer_->AddOrUpdateSink| may not be called while holding |lock_| since
// that might cause a lock order inversion.
@ -2026,8 +2029,14 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::OnLoadUpdate(Load load) {
rtc::Optional<int> max_pixel_count;
rtc::Optional<int> max_pixel_count_step_up;
if (load == kOveruse) {
max_pixel_count = rtc::Optional<int>(
(last_dimensions_.height * last_dimensions_.width) / 2);
if (cpu_restricted_counter_ >= kMaxCpuDowngrades) {
return;
}
// The input video frame size will have a resolution with less than or
// equal to |max_pixel_count| depending on how the capturer can scale the
// input frame size.
max_pixel_count = rtc::Optional<int>(
(last_dimensions_.height * last_dimensions_.width * 3) / 5);
// Increase |number_of_cpu_adapt_changes_| if
// sink_wants_.max_pixel_count will be changed since
// last time |capturer_->AddOrUpdateSink| was called. That is, this will
@ -2039,6 +2048,9 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::OnLoadUpdate(Load load) {
}
} else {
RTC_DCHECK(load == kUnderuse);
// The input video frame size will have a resolution with "one step up"
// pixels than |max_pixel_count_step_up| where "one step up" depends on
// how the capturer can scale the input frame size.
max_pixel_count_step_up = rtc::Optional<int>(last_dimensions_.height *
last_dimensions_.width);
// Increase |number_of_cpu_adapt_changes_| if
@ -2088,16 +2100,15 @@ WebRtcVideoChannel2::WebRtcVideoSendStream::GetVideoSenderInfo() {
stats = stream_->GetStats();
}
info.adapt_changes = number_of_cpu_adapt_changes_;
info.adapt_reason = cpu_restricted_counter_ <= 0
? CoordinatedVideoAdapter::ADAPTREASON_NONE
: CoordinatedVideoAdapter::ADAPTREASON_CPU;
info.adapt_reason =
cpu_restricted_counter_ <= 0 ? ADAPTREASON_NONE : ADAPTREASON_CPU;
// Get bandwidth limitation info from stream_->GetStats().
// Input resolution (output from video_adapter) can be further scaled down or
// higher video layer(s) can be dropped due to bitrate constraints.
// Note, adapt_changes only include changes from the video_adapter.
if (stats.bw_limited_resolution)
info.adapt_reason |= CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH;
info.adapt_reason |= ADAPTREASON_BANDWIDTH;
info.encoder_implementation_name = stats.encoder_implementation_name;
info.ssrc_groups = ssrc_groups_;

View File

@ -175,6 +175,15 @@ class WebRtcVideoChannel2 : public VideoMediaChannel, public webrtc::Transport {
// Implemented for VideoMediaChannelTest.
bool sending() const { return sending_; }
// AdaptReason is used for expressing why a WebRtcVideoSendStream request
// a lower input frame size than the currently configured camera input frame
// size. There can be more than one reason OR:ed together.
enum AdaptReason {
ADAPTREASON_NONE = 0,
ADAPTREASON_CPU = 1,
ADAPTREASON_BANDWIDTH = 2,
};
private:
class WebRtcVideoReceiveStream;
struct VideoCodecSettings {

View File

@ -2036,29 +2036,99 @@ TEST_F(WebRtcVideoChannel2Test, AdaptsOnOveruseAndChangeResolution) {
EXPECT_EQ(1280 * 2 / 4, send_stream->GetLastWidth());
EXPECT_EQ(720 * 2 / 4, send_stream->GetLastHeight());
// Trigger overuse again. This should not decrease the resolution since we
// should only adapt the resolution down max two steps.
overuse_callback->OnLoadUpdate(webrtc::LoadObserver::kOveruse);
EXPECT_TRUE(capturer.CaptureCustomFrame(1280, 720, cricket::FOURCC_I420));
EXPECT_EQ(4, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(1280 * 2 / 4, send_stream->GetLastWidth());
EXPECT_EQ(720 * 2 / 4, send_stream->GetLastHeight());
// Change input resolution.
EXPECT_TRUE(capturer.CaptureCustomFrame(1284, 724, cricket::FOURCC_I420));
EXPECT_EQ(4, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(5, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(1284 / 2, send_stream->GetLastWidth());
EXPECT_EQ(724 / 2, send_stream->GetLastHeight());
// Trigger underuse which should go back up in resolution.
overuse_callback->OnLoadUpdate(webrtc::LoadObserver::kUnderuse);
EXPECT_TRUE(capturer.CaptureCustomFrame(1284, 724, cricket::FOURCC_I420));
EXPECT_EQ(5, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(6, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(1284 * 3 / 4, send_stream->GetLastWidth());
EXPECT_EQ(724 * 3 / 4, send_stream->GetLastHeight());
// Trigger underuse which should go back up in resolution.
overuse_callback->OnLoadUpdate(webrtc::LoadObserver::kUnderuse);
EXPECT_TRUE(capturer.CaptureCustomFrame(1284, 724, cricket::FOURCC_I420));
EXPECT_EQ(6, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(7, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(1284, send_stream->GetLastWidth());
EXPECT_EQ(724, send_stream->GetLastHeight());
EXPECT_TRUE(channel_->SetCapturer(last_ssrc_, NULL));
}
TEST_F(WebRtcVideoChannel2Test, PreviousAdaptationDoesNotApplyToScreenshare) {
cricket::VideoCodec codec = kVp8Codec720p;
cricket::VideoSendParameters parameters;
parameters.codecs.push_back(codec);
MediaConfig media_config = MediaConfig();
channel_.reset(
engine_.CreateChannel(fake_call_.get(), media_config, VideoOptions()));
ASSERT_TRUE(channel_->SetSendParameters(parameters));
AddSendStream();
cricket::FakeVideoCapturer capturer;
ASSERT_TRUE(channel_->SetCapturer(last_ssrc_, &capturer));
ASSERT_EQ(cricket::CS_RUNNING,
capturer.Start(capturer.GetSupportedFormats()->front()));
ASSERT_TRUE(channel_->SetSend(true));
cricket::VideoOptions camera_options;
channel_->SetVideoSend(last_ssrc_, true /* enable */, &camera_options);
ASSERT_EQ(1u, fake_call_->GetVideoSendStreams().size());
FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
webrtc::LoadObserver* overuse_callback =
send_stream->GetConfig().overuse_callback;
ASSERT_TRUE(overuse_callback != NULL);
EXPECT_TRUE(capturer.CaptureCustomFrame(1280, 720, cricket::FOURCC_I420));
EXPECT_EQ(1, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(1280, send_stream->GetLastWidth());
EXPECT_EQ(720, send_stream->GetLastHeight());
// Trigger overuse.
overuse_callback->OnLoadUpdate(webrtc::LoadObserver::kOveruse);
EXPECT_TRUE(capturer.CaptureCustomFrame(1280, 720, cricket::FOURCC_I420));
EXPECT_EQ(2, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(1280 * 3 / 4, send_stream->GetLastWidth());
EXPECT_EQ(720 * 3 / 4, send_stream->GetLastHeight());
// Switch to screen share. Expect no CPU adaptation.
cricket::FakeVideoCapturer screen_share(true);
ASSERT_EQ(cricket::CS_RUNNING,
screen_share.Start(screen_share.GetSupportedFormats()->front()));
ASSERT_TRUE(channel_->SetCapturer(last_ssrc_, &screen_share));
cricket::VideoOptions screenshare_options;
screenshare_options.is_screencast = rtc::Optional<bool>(true);
channel_->SetVideoSend(last_ssrc_, true /* enable */, &screenshare_options);
EXPECT_TRUE(screen_share.CaptureCustomFrame(1284, 724, cricket::FOURCC_I420));
EXPECT_EQ(3, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(1284, send_stream->GetLastWidth());
EXPECT_EQ(724, send_stream->GetLastHeight());
// Switch back to the normal capturer. Expect the frame to be CPU adapted.
ASSERT_TRUE(channel_->SetCapturer(last_ssrc_, &capturer));
channel_->SetVideoSend(last_ssrc_, true /* enable */, &camera_options);
EXPECT_TRUE(capturer.CaptureCustomFrame(1280, 720, cricket::FOURCC_I420));
EXPECT_EQ(4, send_stream->GetNumberOfSwappedFrames());
EXPECT_EQ(1280 * 3 / 4, send_stream->GetLastWidth());
EXPECT_EQ(720 * 3 / 4, send_stream->GetLastHeight());
EXPECT_TRUE(channel_->SetCapturer(last_ssrc_, NULL));
}
void WebRtcVideoChannel2Test::TestCpuAdaptation(bool enable_overuse,
bool is_screenshare) {
cricket::VideoCodec codec = kVp8Codec720p;
@ -2783,8 +2853,7 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationStats) {
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(1, info.senders[0].adapt_changes);
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_CPU,
info.senders[0].adapt_reason);
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_CPU, info.senders[0].adapt_reason);
// Trigger upgrade and verify that we adapt back up to VGA.
overuse_callback->OnLoadUpdate(webrtc::LoadObserver::kUnderuse);
@ -2793,7 +2862,7 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationStats) {
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(2, info.senders[0].adapt_changes);
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_NONE,
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_NONE,
info.senders[0].adapt_reason);
// No capturer (no adapter). Adapt changes from old adapter should be kept.
@ -2802,7 +2871,7 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationStats) {
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(2, info.senders[0].adapt_changes);
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_NONE,
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_NONE,
info.senders[0].adapt_reason);
// Set new capturer, capture format HD.
@ -2819,8 +2888,7 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationStats) {
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(3, info.senders[0].adapt_changes);
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_CPU,
info.senders[0].adapt_reason);
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_CPU, info.senders[0].adapt_reason);
EXPECT_TRUE(channel_->SetCapturer(kSsrcs3[0], NULL));
}
@ -2855,8 +2923,7 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationAndBandwidthStats) {
cricket::VideoMediaInfo info;
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_CPU,
info.senders[0].adapt_reason);
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_CPU, info.senders[0].adapt_reason);
// Set bandwidth limitation stats for the stream -> adapt CPU + BW.
webrtc::VideoSendStream::Stats stats;
@ -2865,8 +2932,8 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationAndBandwidthStats) {
info.Clear();
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_CPU +
CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH,
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_CPU |
WebRtcVideoChannel2::ADAPTREASON_BANDWIDTH,
info.senders[0].adapt_reason);
// Trigger upgrade -> adapt BW.
@ -2875,7 +2942,7 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationAndBandwidthStats) {
info.Clear();
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH,
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_BANDWIDTH,
info.senders[0].adapt_reason);
// Reset bandwidth limitation state -> adapt NONE.
@ -2884,7 +2951,7 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationAndBandwidthStats) {
info.Clear();
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_NONE,
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_NONE,
info.senders[0].adapt_reason);
EXPECT_TRUE(channel_->SetCapturer(kSsrcs3[0], NULL));
@ -2900,7 +2967,7 @@ TEST_F(WebRtcVideoChannel2Test,
cricket::VideoMediaInfo info;
EXPECT_TRUE(channel_->GetStats(&info));
ASSERT_EQ(1U, info.senders.size());
EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH,
EXPECT_EQ(WebRtcVideoChannel2::ADAPTREASON_BANDWIDTH,
info.senders[0].adapt_reason);
}