Use backticks not vertical bars to denote variables in comments for /video

Bug: webrtc:12338
Change-Id: I47958800407482894ff6f17c1887dce907fdf35a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227030
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34585}
Author: Artem Titov <titovartem@webrtc.org>
Date: 2021-07-27 16:22:11 +02:00 (committed by WebRTC LUCI CQ)
Commit: ab30d72b72 (parent: 4727071506)
62 changed files with 195 additions and 195 deletions


@@ -435,8 +435,8 @@ TEST_F(OveruseFrameDetectorTest, RunOnTqNormalUsage) {
RTC_FROM_HERE);
rtc::Event event;
// Expect NormalUsage(). When called, stop the |overuse_detector_| and then
// set |event| to end the test.
// Expect NormalUsage(). When called, stop the `overuse_detector_` and then
// set `event` to end the test.
EXPECT_CALL(mock_observer_, AdaptUp())
.WillOnce(InvokeWithoutArgs([this, &event] {
overuse_detector_->StopCheckForOveruse();
@@ -920,8 +920,8 @@ TEST_F(OveruseFrameDetectorTest2, RunOnTqNormalUsage) {
RTC_FROM_HERE);
rtc::Event event;
// Expect NormalUsage(). When called, stop the |overuse_detector_| and then
// set |event| to end the test.
// Expect NormalUsage(). When called, stop the `overuse_detector_` and then
// set `event` to end the test.
EXPECT_CALL(mock_observer_, AdaptUp())
.WillOnce(InvokeWithoutArgs([this, &event] {
overuse_detector_->StopCheckForOveruse();


@@ -78,9 +78,9 @@ void PixelLimitResource::SetResourceListener(ResourceListener* listener) {
int current_pixels = frame_size_pixels.value();
int target_pixel_upper_bounds = max_pixels_.value();
// To avoid toggling, we allow any resolutions between
// |target_pixel_upper_bounds| and video_stream_adapter.h's
// `target_pixel_upper_bounds` and video_stream_adapter.h's
// GetLowerResolutionThan(). These are the pixels we end up with if we adapt down
// from |target_pixel_upper_bounds|.
// from `target_pixel_upper_bounds`.
int target_pixels_lower_bounds =
GetLowerResolutionThan(target_pixel_upper_bounds);
if (current_pixels > target_pixel_upper_bounds) {
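For context, a minimal sketch of the hysteresis band this comment describes (names follow the hunk; the listener call and the `resource` argument are assumptions based on the ResourceListener interface, not the actual implementation):

// Report overuse above the upper bound, underuse below the lower bound, and
// stay silent in between so the adaptation does not toggle.
if (current_pixels > target_pixel_upper_bounds) {
  listener->OnResourceUsageStateMeasured(resource, ResourceUsageState::kOveruse);
} else if (current_pixels < target_pixels_lower_bounds) {
  listener->OnResourceUsageStateMeasured(resource, ResourceUsageState::kUnderuse);
}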


@@ -455,7 +455,7 @@ void VideoStreamEncoderResourceManager::OnEncodeCompleted(
int64_t time_sent_in_us,
absl::optional<int> encode_duration_us) {
RTC_DCHECK_RUN_ON(encoder_queue_);
// Inform |encode_usage_resource_| of the encode completed event.
// Inform `encode_usage_resource_` of the encode completed event.
uint32_t timestamp = encoded_image.Timestamp();
int64_t capture_time_us =
encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec;


@@ -133,7 +133,7 @@ class VideoStreamEncoderResourceManager
absl::optional<uint32_t> UseBandwidthAllocationBps() const;
// VideoSourceRestrictionsListener implementation.
// Updates |video_source_restrictions_|.
// Updates `video_source_restrictions_`.
void OnVideoSourceRestrictionsUpdated(
VideoSourceRestrictions restrictions,
const VideoAdaptationCounters& adaptation_counters,
@@ -159,7 +159,7 @@ class VideoStreamEncoderResourceManager
int LastFrameSizeOrDefault() const;
// Calculates an up-to-date value of the target frame rate and informs the
// |encode_usage_resource_| of the new value.
// `encode_usage_resource_` of the new value.
void MaybeUpdateTargetFrameRate();
// Use nullopt to disable quality scaling.


@@ -19,8 +19,8 @@
namespace webrtc {
namespace {
// Round each scale factor to the closest rational in the form alignment/i where i
// is a multiple of |requested_alignment|. Each resolution divisible by
// |alignment| will be divisible by |requested_alignment| after the scale factor
// is a multiple of `requested_alignment`. Each resolution divisible by
// `alignment` will be divisible by `requested_alignment` after the scale factor
// is applied.
double RoundToMultiple(int alignment,
int requested_alignment,
@@ -56,7 +56,7 @@ double RoundToMultiple(int alignment,
// Output:
// If B is false, returns K and does not adjust scaling factors.
// Otherwise, returns the adjusted alignment (A); adjusted scaling factors (S'[i])
// are written in |config| such that:
// are written in `config` such that:
//
// A / S'[i] are integers divisible by K
// sum abs(S'[i] - S[i]) -> min
@@ -94,7 +94,7 @@ int AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
}
// Get alignment for downscaled layers.
// Adjust |scale_resolution_down_by| to a common multiple to limit the
// Adjust `scale_resolution_down_by` to a common multiple to limit the
// alignment value (to avoid largely cropped frames and possibly an
// aspect ratio far from the original).
const int kMaxAlignment = 16;
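For illustration, a hypothetical standalone helper showing the rounding rule the comment at the top of this hunk describes; this is a sketch under assumed names, not the actual RoundToMultiple() signature:

#include <algorithm>
#include <cmath>

// Replace `scale` by alignment / i, where i is a multiple of
// requested_alignment chosen so that alignment / i is closest to `scale`.
double RoundScaleFactor(int alignment, int requested_alignment, double scale) {
  const double ideal_divisor = alignment / scale;
  const int rounded_divisor = std::max(
      requested_alignment,
      static_cast<int>(std::round(ideal_divisor / requested_alignment)) *
          requested_alignment);
  // A resolution divisible by `alignment`, scaled by the returned factor, is
  // divided by rounded_divisor, which requested_alignment divides evenly.
  return static_cast<double>(alignment) / rounded_divisor;
}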


@@ -24,12 +24,12 @@ class AlignmentAdjuster {
//
// If |EncoderInfo::apply_alignment_to_all_simulcast_layers| is enabled, the
// alignment will be adjusted to ensure that each simulcast layer also is
// divisible by |requested_resolution_alignment|. The configured scale factors
// |scale_resolution_down_by| may be adjusted to a common multiple to limit
// divisible by `requested_resolution_alignment`. The configured scale factors
// `scale_resolution_down_by` may be adjusted to a common multiple to limit
// the alignment value to avoid largely cropped frames and possibly an
// aspect ratio far from the original.
// Note: |max_layers| is currently only taken into account when using default
// Note: `max_layers` is currently only taken into account when using default
// scale factors.
static int GetAlignmentAndMaybeAdjustScaleFactors(
const VideoEncoder::EncoderInfo& info,


@@ -64,7 +64,7 @@ int64_t GetNewAvgRttMs(const std::list<CallStats::RttTime>& reports,
// This class is used to de-register a Module from a ProcessThread to satisfy
// threading requirements of the Module (CallStats).
// The guarantee offered by TemporaryDeregistration is that while it's in scope,
// no calls to |TimeUntilNextProcess| or |Process()| will occur and therefore
// no calls to `TimeUntilNextProcess` or `Process()` will occur and therefore
// synchronization with those methods is not necessary.
class TemporaryDeregistration {
public:
@@ -122,7 +122,7 @@ void CallStats::Process() {
int64_t now = clock_->TimeInMilliseconds();
last_process_time_ = now;
// |avg_rtt_ms_| is allowed to be read on the process thread since that's the
// `avg_rtt_ms_` is allowed to be read on the process thread since that's the
// only thread that modifies the value.
int64_t avg_rtt_ms = avg_rtt_ms_;
RemoveOldReports(now, &reports_);
@@ -150,7 +150,7 @@ void CallStats::ProcessThreadAttached(ProcessThread* process_thread) {
process_thread_running_ = process_thread != nullptr;
// Whether we just got attached or detached, we clear the
// |process_thread_checker_| so that it can be used to protect variables
// `process_thread_checker_` so that it can be used to protect variables
// in either the process thread when it starts again, or UpdateHistograms()
// (mutually exclusive).
process_thread_checker_.Detach();


@@ -40,7 +40,7 @@ class CallStats : public Module, public RtcpRttStats {
void RegisterStatsObserver(CallStatsObserver* observer);
void DeregisterStatsObserver(CallStatsObserver* observer);
// Expose |LastProcessedRtt()| from RtcpRttStats to the public interface, as
// Expose `LastProcessedRtt()` from RtcpRttStats to the public interface, as
// it is the part of the API that is needed by direct users of CallStats.
// TODO(tommi): Threading or lifetime guarantees are not explicit in how
// CallStats is used as RtcpRttStats or how pointers are cached in a
@@ -84,15 +84,15 @@ class CallStats : public Module, public RtcpRttStats {
int64_t max_rtt_ms_ RTC_GUARDED_BY(process_thread_checker_);
// Accessed from random threads (seemingly). Consider atomic.
// |avg_rtt_ms_| is allowed to be read on the process thread without a lock.
// |avg_rtt_ms_lock_| must be held elsewhere for reading.
// |avg_rtt_ms_lock_| must be held on the process thread for writing.
// `avg_rtt_ms_` is allowed to be read on the process thread without a lock.
// `avg_rtt_ms_lock_` must be held elsewhere for reading.
// `avg_rtt_ms_lock_` must be held on the process thread for writing.
int64_t avg_rtt_ms_;
// Protects |avg_rtt_ms_|.
// Protects `avg_rtt_ms_`.
mutable Mutex avg_rtt_ms_lock_;
// |sum_avg_rtt_ms_|, |num_avg_rtt_| and |time_of_first_rtt_ms_| are only used
// `sum_avg_rtt_ms_`, `num_avg_rtt_` and `time_of_first_rtt_ms_` are only used
// on the ProcessThread when running. When the Process Thread is not running
// (and only then) they can be used in UpdateHistograms(), usually called from
// the dtor.
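A short sketch of the locking discipline those three comment lines describe (the accessor names below are assumptions, not the actual CallStats API):

int64_t CallStats::avg_rtt_ms() const {
  MutexLock lock(&avg_rtt_ms_lock_);  // readers off the process thread take the lock
  return avg_rtt_ms_;
}

void CallStats::UpdateAvgRttMs(int64_t rtt_ms) {
  RTC_DCHECK_RUN_ON(&process_thread_checker_);  // writes stay on one thread,
  MutexLock lock(&avg_rtt_ms_lock_);            // and happen under the lock
  avg_rtt_ms_ = rtt_ms;
}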


@@ -50,7 +50,7 @@ class CallStats {
void RegisterStatsObserver(CallStatsObserver* observer);
void DeregisterStatsObserver(CallStatsObserver* observer);
// Expose |LastProcessedRtt()| from RtcpRttStats to the public interface, as
// Expose `LastProcessedRtt()` from RtcpRttStats to the public interface, as
// it is the part of the API that is needed by direct users of CallStats.
int64_t LastProcessedRtt() const;


@@ -87,7 +87,7 @@ void CpuOveruseTest::RunTestAndCheckForAdaptation(
case DegradationPreference::BALANCED:
if (wants.max_pixel_count == std::numeric_limits<int>::max() &&
wants.max_framerate_fps == std::numeric_limits<int>::max()) {
// |adapt_counters_| map in VideoStreamEncoder is reset when
// `adapt_counters_` map in VideoStreamEncoder is reset when
// balanced mode is set.
break;
}


@@ -16,7 +16,7 @@ namespace webrtc {
namespace {
// The buffer level for media-rate utilization is allowed to go below zero,
// down to
// -(|kMaxMediaUnderrunFrames| / |target_framerate_fps_|) * |target_bitrate_|.
// -(`kMaxMediaUnderrunFrames` / `target_framerate_fps_`) * `target_bitrate_`.
static constexpr double kMaxMediaUnderrunFrames = 5.0;
} // namespace
@@ -173,7 +173,7 @@ void EncoderOvershootDetector::LeakBits(int64_t time_ms) {
network_buffer_level_bits_ =
std::max<int64_t>(0, network_buffer_level_bits_ - leaked_bits);
// Media buffer may go down to minus |kMaxMediaUnderrunFrames| frames worth
// Media buffer may go down to minus `kMaxMediaUnderrunFrames` frames worth
// of data.
const double max_underrun_seconds =
std::min(kMaxMediaUnderrunFrames, target_framerate_fps_) /
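Worked out, the lower bound quoted at the top of this file comes to, e.g., -(5 / 30) * 1,000,000 ≈ -166,667 bits at 30 fps and 1 Mbps. A hedged sketch of that bound as a helper (a hypothetical function mirroring the expressions in the hunk):

#include <algorithm>
#include <cstdint>

int64_t MediaBufferLowerBoundBits(double target_framerate_fps,
                                  int64_t target_bitrate_bps) {
  constexpr double kMaxMediaUnderrunFrames = 5.0;
  // The std::min() caps the underrun at one second of data for very low rates.
  const double max_underrun_seconds =
      std::min(kMaxMediaUnderrunFrames, target_framerate_fps) /
      target_framerate_fps;
  return static_cast<int64_t>(-max_underrun_seconds * target_bitrate_bps);
}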


@@ -26,15 +26,15 @@ class EncoderOvershootDetector {
void SetTargetRate(DataRate target_bitrate,
double target_framerate_fps,
int64_t time_ms);
// A frame has been encoded or dropped. |bytes| == 0 indicates a drop.
// A frame has been encoded or dropped. `bytes` == 0 indicates a drop.
void OnEncodedFrame(size_t bytes, int64_t time_ms);
// This utilization factor reaches 1.0 only if the encoder produces encoded
// frames in such a way that they can be sent onto the network at
// |target_bitrate| without building growing queues.
// `target_bitrate` without building growing queues.
absl::optional<double> GetNetworkRateUtilizationFactor(int64_t time_ms);
// This utilization factor is based just on actual encoded frame sizes in
// relation to ideal sizes. An undershoot may be compensated by an
// overshoot so that the average over time is close to |target_bitrate|.
// overshoot so that the average over time is close to `target_bitrate`.
absl::optional<double> GetMediaRateUtilizationFactor(int64_t time_ms);
void Reset();


@@ -94,7 +94,7 @@ class BandwidthStatsTest : public test::EndToEndTest {
~BandwidthStatsTest() override {
// Block until all already posted tasks run to avoid races when such a task
// accesses |this|.
// accesses `this`.
SendTask(RTC_FROM_HERE, task_queue_, [] {});
}
@@ -206,7 +206,7 @@ TEST_F(BandwidthEndToEndTest, RembWithSendSideBwe) {
~BweObserver() override {
// Block until all already posted tasks run to avoid races when such a task
// accesses |this|. Also make sure we free |rtp_rtcp_| on the correct
// accesses `this`. Also make sure we free `rtp_rtcp_` on the correct
// thread/task queue.
SendTask(RTC_FROM_HERE, task_queue_, [this]() { rtp_rtcp_ = nullptr; });
}


@@ -412,7 +412,7 @@ TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) {
return DROP_PACKET;
// Pass one media packet after the dropped packet after the last FEC,
// otherwise the receiver might never see a seq_no after
// |ulpfec_sequence_number_|
// `ulpfec_sequence_number_`
state_ = kVerifyUlpfecPacketNotInNackList;
break;
case kVerifyUlpfecPacketNotInNackList:


@@ -54,9 +54,9 @@ void HistogramTest::VerifyHistogramStats(bool use_rtx,
private:
void OnFrame(const VideoFrame& video_frame) override {
// The RTT is needed to estimate |ntp_time_ms| which is used by
// The RTT is needed to estimate `ntp_time_ms` which is used by
// end-to-end delay stats. Therefore, start counting received frames once
// |ntp_time_ms| is valid.
// `ntp_time_ms` is valid.
if (video_frame.ntp_time_ms() > 0 &&
Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
video_frame.ntp_time_ms()) {


@@ -474,9 +474,9 @@ TEST_F(StatsEndToEndTest, MAYBE_ContentTypeSwitches) {
bool ShouldCreateReceivers() const override { return true; }
void OnFrame(const VideoFrame& video_frame) override {
// The RTT is needed to estimate |ntp_time_ms| which is used by
// The RTT is needed to estimate `ntp_time_ms` which is used by
// end-to-end delay stats. Therefore, start counting received frames once
// |ntp_time_ms| is valid.
// `ntp_time_ms` is valid.
if (video_frame.ntp_time_ms() > 0 &&
Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
video_frame.ntp_time_ms()) {


@@ -135,7 +135,7 @@ void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx,
int64_t encode_done_ms = rtc::TimeMillis();
// Encoders with internal sources do not call OnEncodeStarted, so
// |timing_frames_info_| may not be filled here.
// `timing_frames_info_` may not be filled here.
if (!internal_source_) {
encode_start_ms =
ExtractEncodeStartTimeAndFillMetadata(simulcast_svc_idx, encoded_image);
@@ -174,7 +174,7 @@ void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx,
}
// Workaround for chromoting encoder: it passes encode start and finished
// timestamps in |timing_| field, but they (together with capture timestamp)
// timestamps in `timing_` field, but they (together with capture timestamp)
// are not in the WebRTC clock.
if (internal_source_ && encoded_image->timing_.encode_finish_ms > 0 &&
encoded_image->timing_.encode_start_ms > 0) {


@@ -61,9 +61,9 @@ bool IsTimingFrame(const EncodedImage& image) {
image.timing_.flags != VideoSendTiming::kNotTriggered;
}
// Emulates |num_frames| frames on |num_streams| streams with capture timestamps
// Emulates `num_frames` frames on `num_streams` streams with capture timestamps
// increased by 1 from 0. Size of each frame is between
// |min_frame_size| and |max_frame_size|, outliers are counted relatively to
// `min_frame_size` and `max_frame_size`, outliers are counted relatively to
// |average_frame_sizes[]| for each stream.
std::vector<std::vector<FrameType>> GetTimingFrames(
const int64_t delay_ms,


@@ -65,7 +65,7 @@ class PictureIdObserver : public test::RtpRtcpObserver {
void SetMaxExpectedPictureIdGap(int max_expected_picture_id_gap) {
MutexLock lock(&mutex_);
max_expected_picture_id_gap_ = max_expected_picture_id_gap;
// Expect smaller gap for |tl0_pic_idx| (running index for temporal_idx 0).
// Expect smaller gap for `tl0_pic_idx` (running index for temporal_idx 0).
max_expected_tl0_idx_gap_ = max_expected_picture_id_gap_ / 2;
}
@@ -155,8 +155,8 @@ class PictureIdObserver : public test::RtpRtcpObserver {
return;
}
// New frame with |temporal_idx| 0.
// |tl0_pic_idx| should be increasing.
// New frame with `temporal_idx` 0.
// `tl0_pic_idx` should be increasing.
EXPECT_TRUE(AheadOf<uint8_t>(current.tl0_pic_idx, last.tl0_pic_idx));
// Expect continuously increasing idx.


@@ -32,7 +32,7 @@ namespace webrtc {
// https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges
class QualityLimitationReasonTracker {
public:
// The caller is responsible for making sure |clock| outlives the tracker.
// The caller is responsible for making sure `clock` outlives the tracker.
explicit QualityLimitationReasonTracker(Clock* clock);
// The current reason defaults to QualityLimitationReason::kNone.
@@ -45,9 +45,9 @@ class QualityLimitationReasonTracker {
QualityLimitationReason current_reason_;
int64_t current_reason_updated_timestamp_ms_;
// The total amount of time spent in each reason at time
// |current_reason_updated_timestamp_ms_|. To get the total duration
// so far, including the time spent in |current_reason_| elapsed since the
// last time |current_reason_| was updated, see DurationsMs().
// `current_reason_updated_timestamp_ms_`. To get the total duration
// so far, including the time spent in `current_reason_` elapsed since the
// last time `current_reason_` was updated, see DurationsMs().
std::map<QualityLimitationReason, int64_t> durations_ms_;
};
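A plausible sketch of the DurationsMs() read path implied by that comment (the body below is an assumption drawn from the member names in the hunk, not the actual implementation):

std::map<QualityLimitationReason, int64_t>
QualityLimitationReasonTracker::DurationsMs() const {
  std::map<QualityLimitationReason, int64_t> durations = durations_ms_;
  // Top up the currently active reason with the time elapsed since the last
  // SetReason() call; the stored map only covers time up to that point.
  durations[current_reason_] +=
      clock_->TimeInMilliseconds() - current_reason_updated_timestamp_ms_;
  return durations;
}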


@@ -83,19 +83,19 @@ TEST_P(QualityLimitationReasonTrackerTestWithParamReason,
TEST_P(QualityLimitationReasonTrackerTestWithParamReason,
SwitchBetweenReasonsBackAndForth) {
int64_t initial_duration_ms = tracker_.DurationsMs()[reason_];
// Spend 100 ms in |different_reason_|.
// Spend 100 ms in `different_reason_`.
tracker_.SetReason(different_reason_);
fake_clock_.AdvanceTimeMilliseconds(100);
EXPECT_EQ(initial_duration_ms, tracker_.DurationsMs()[reason_]);
// Spend 50 ms in |reason_|.
// Spend 50 ms in `reason_`.
tracker_.SetReason(reason_);
fake_clock_.AdvanceTimeMilliseconds(50);
EXPECT_EQ(initial_duration_ms + 50, tracker_.DurationsMs()[reason_]);
// Spend another 1000 ms in |different_reason_|.
// Spend another 1000 ms in `different_reason_`.
tracker_.SetReason(different_reason_);
fake_clock_.AdvanceTimeMilliseconds(1000);
EXPECT_EQ(initial_duration_ms + 50, tracker_.DurationsMs()[reason_]);
// Spend another 100 ms in |reason_|.
// Spend another 100 ms in `reason_`.
tracker_.SetReason(reason_);
fake_clock_.AdvanceTimeMilliseconds(100);
EXPECT_EQ(initial_duration_ms + 150, tracker_.DurationsMs()[reason_]);


@@ -25,7 +25,7 @@
namespace webrtc {
namespace {
// Periodic time interval for processing samples for |freq_offset_counter_|.
// Periodic time interval for processing samples for `freq_offset_counter_`.
const int64_t kFreqOffsetProcessIntervalMs = 40000;
// Configuration for bad call detection.
@@ -129,7 +129,7 @@ void ReceiveStatisticsProxy::UpdateHistograms(
const StreamDataCounters* rtx_stats) {
// Not actually running on the decoder thread, but must be called after
// DecoderThreadStopped, which detaches the thread checker. It is therefore
// safe to access |qp_counters_|, which were updated on the decode thread
// safe to access `qp_counters_`, which were updated on the decode thread
// earlier.
RTC_DCHECK_RUN_ON(&decode_thread_);


@@ -140,7 +140,7 @@ class ReceiveStatisticsProxy : public VCMReceiveStatisticsCallback,
Clock* const clock_;
// Ownership of this object lies with the owner of the ReceiveStatisticsProxy
// instance. Lifetime is guaranteed to outlive |this|.
// instance. Lifetime is guaranteed to outlive `this`.
// TODO(tommi): In practice the config_ reference is only used for accessing
// config_.rtp.ulpfec.ulpfec_payload_type. Instead of holding a pointer back,
// we could just store the value of ulpfec_payload_type and change the


@@ -29,7 +29,7 @@
namespace webrtc {
namespace internal {
namespace {
// Periodic time interval for processing samples for |freq_offset_counter_|.
// Periodic time interval for processing samples for `freq_offset_counter_`.
const int64_t kFreqOffsetProcessIntervalMs = 40000;
// Configuration for bad call detection.
@@ -676,7 +676,7 @@ VideoReceiveStream::Stats ReceiveStatisticsProxy::GetStats() const {
interframe_delay_max_moving_.Max(*last_decoded_frame_time_ms_)
.value_or(-1);
} else {
// We're paused. Avoid changing the state of |interframe_delay_max_moving_|.
// We're paused. Avoid changing the state of `interframe_delay_max_moving_`.
stats_.interframe_delay_max_ms = -1;
}
@@ -790,8 +790,8 @@ void ReceiveStatisticsProxy::RtcpPacketTypesCounterUpdated(
// [main] worker thread.
// So until the sender implementation has been updated, we work around this
// here by posting the update to the expected thread. We make a by-value
// copy of the |task_safety_| to handle the case where the queued task
// runs after the |ReceiveStatisticsProxy| has been deleted. In such a
// copy of the `task_safety_` to handle the case where the queued task
// runs after the `ReceiveStatisticsProxy` has been deleted. In such a
// case the packet_counter update won't be recorded.
worker_thread_->PostTask(
ToQueuedTask(task_safety_, [ssrc, packet_counter, this]() {


@@ -1290,7 +1290,7 @@ TEST_P(ReceiveStatisticsProxy2TestWithContent,
fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
}
// |kMinRequiredSamples| samples, and thereby intervals, are required. That
// `kMinRequiredSamples` samples, and thereby intervals, are required. That
// means we're one frame short of having a valid data set.
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);


@@ -1257,7 +1257,7 @@ TEST_P(ReceiveStatisticsProxyTestWithContent,
fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
}
// |kMinRequiredSamples| samples, and thereby intervals, are required. That
// `kMinRequiredSamples` samples, and thereby intervals, are required. That
// means we're one frame short of having a valid data set.
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);


@@ -38,9 +38,9 @@ class RtpStreamsSynchronizer : public Module {
void Process() override;
// Gets the estimated playout NTP timestamp for the video frame with
// |rtp_timestamp| and the sync offset between the currently played out audio
// `rtp_timestamp` and the sync offset between the currently played out audio
// frame and the video frame. Returns true on success, false otherwise.
// The |estimated_freq_khz| is the frequency used in the RTP to NTP timestamp
// The `estimated_freq_khz` is the frequency used in the RTP to NTP timestamp
// conversion.
bool GetStreamSyncOffsetInMs(uint32_t rtp_timestamp,
int64_t render_time_ms,


@@ -35,9 +35,9 @@ class RtpStreamsSynchronizer {
void ConfigureSync(Syncable* syncable_audio);
// Gets the estimated playout NTP timestamp for the video frame with
// |rtp_timestamp| and the sync offset between the currently played out audio
// `rtp_timestamp` and the sync offset between the currently played out audio
// frame and the video frame. Returns true on success, false otherwise.
// The |estimated_freq_khz| is the frequency used in the RTP to NTP timestamp
// The `estimated_freq_khz` is the frequency used in the RTP to NTP timestamp
// conversion.
bool GetStreamSyncOffsetInMs(uint32_t rtp_timestamp,
int64_t render_time_ms,


@@ -268,8 +268,8 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
config_.rtp.local_ssrc)),
complete_frame_callback_(complete_frame_callback),
keyframe_request_sender_(keyframe_request_sender),
// TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate
// directly with |rtp_rtcp_|.
// TODO(bugs.webrtc.org/10336): Let `rtcp_feedback_buffer_` communicate
// directly with `rtp_rtcp_`.
rtcp_feedback_buffer_(this, nack_sender, this),
packet_buffer_(kPacketBufferStartSize, PacketBufferMaxSize()),
reference_finder_(std::make_unique<RtpFrameReferenceFinder>()),
@@ -862,7 +862,7 @@ void RtpVideoStreamReceiver::OnAssembledFrame(
// In that case, request a key frame ASAP.
if (!has_received_frame_) {
if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
// |loss_notification_controller_|, if present, would have already
// `loss_notification_controller_`, if present, would have already
// requested a key frame when the first packet for the non-key frame
// had arrived, so no need to replicate the request.
if (!loss_notification_controller_) {
@@ -873,16 +873,16 @@ void RtpVideoStreamReceiver::OnAssembledFrame(
}
MutexLock lock(&reference_finder_lock_);
// Reset |reference_finder_| if |frame| is new and the codec has changed.
// Reset `reference_finder_` if `frame` is new and the codec has changed.
if (current_codec_) {
bool frame_is_newer =
AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);
if (frame->codec_type() != current_codec_) {
if (frame_is_newer) {
// When we reset the |reference_finder_| we don't want new picture ids
// When we reset the `reference_finder_` we don't want new picture ids
// to overlap with old picture ids. To ensure that doesn't happen we
// start from the |last_completed_picture_id_| and add an offset in
// start from the `last_completed_picture_id_` and add an offset in
// case of reordering.
reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(
last_completed_picture_id_ + std::numeric_limits<uint16_t>::max());


@@ -317,7 +317,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
RTC_RUN_ON(worker_task_checker_);
Clock* const clock_;
// Ownership of this object lies with VideoReceiveStream, which owns |this|.
// Ownership of this object lies with VideoReceiveStream, which owns `this`.
const VideoReceiveStream::Config& config_;
PacketRouter* const packet_router_;
ProcessThread* const process_thread_;


@@ -242,8 +242,8 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2(
config_.rtp.local_ssrc)),
complete_frame_callback_(complete_frame_callback),
keyframe_request_sender_(keyframe_request_sender),
// TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate
// directly with |rtp_rtcp_|.
// TODO(bugs.webrtc.org/10336): Let `rtcp_feedback_buffer_` communicate
// directly with `rtp_rtcp_`.
rtcp_feedback_buffer_(this, nack_sender, this),
nack_module_(MaybeConstructNackModule(current_queue,
nack_periodic_processor,
@@ -810,7 +810,7 @@ void RtpVideoStreamReceiver2::OnAssembledFrame(
// In that case, request a key frame ASAP.
if (!has_received_frame_) {
if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
// |loss_notification_controller_|, if present, would have already
// `loss_notification_controller_`, if present, would have already
// requested a key frame when the first packet for the non-key frame
// had arrived, so no need to replicate the request.
if (!loss_notification_controller_) {
@@ -820,16 +820,16 @@ void RtpVideoStreamReceiver2::OnAssembledFrame(
has_received_frame_ = true;
}
// Reset |reference_finder_| if |frame| is new and the codec has changed.
// Reset `reference_finder_` if `frame` is new and the codec has changed.
if (current_codec_) {
bool frame_is_newer =
AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);
if (frame->codec_type() != current_codec_) {
if (frame_is_newer) {
// When we reset the |reference_finder_| we don't want new picture ids
// When we reset the `reference_finder_` we don't want new picture ids
// to overlap with old picture ids. To ensure that doesn't happen we
// start from the |last_completed_picture_id_| and add an offset in case
// start from the `last_completed_picture_id_` and add an offset in case
// of reordering.
reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(
last_completed_picture_id_ + std::numeric_limits<uint16_t>::max());


@@ -284,7 +284,7 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
RTC_RUN_ON(packet_sequence_checker_);
Clock* const clock_;
// Ownership of this object lies with VideoReceiveStream, which owns |this|.
// Ownership of this object lies with VideoReceiveStream, which owns `this`.
const VideoReceiveStream::Config& config_;
PacketRouter* const packet_router_;


@@ -773,8 +773,8 @@ TEST_F(RtpVideoStreamReceiver2Test, SinkGetsRtpNotifications) {
}
TEST_F(RtpVideoStreamReceiver2Test, NonStartedStreamGetsNoRtpCallbacks) {
// Explicitly showing that the stream is not in the |started| state,
// regardless of whether streams start out |started| or |stopped|.
// Explicitly showing that the stream is not in the `started` state,
// regardless of whether streams start out `started` or `stopped`.
rtp_video_stream_receiver_->StopReceive();
MockRtpPacketSink test_sink;
@@ -811,7 +811,7 @@ TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorOnePacket) {
uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
memcpy(payload, data.data(), data.size());
// The first byte is the header, so we ignore the first byte of |data|.
// The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
@@ -852,7 +852,7 @@ TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorTwoPackets) {
uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
memcpy(first_packet_payload, data.data(), data.size());
// The first byte is the header, so we ignore the first byte of |data|.
// The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
@@ -873,7 +873,7 @@ TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorTwoPackets) {
uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
memcpy(second_packet_payload, data.data(), data.size());
// The first byte is the header, so we ignore the first byte of |data|.
// The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);


@@ -32,7 +32,7 @@ class RtpVideoFrameReceiver {
};
// Delegates calls to FrameTransformerInterface to transform frames, and to
// RtpVideoStreamReceiver to manage transformed frames on the |network_thread_|.
// RtpVideoStreamReceiver to manage transformed frames on the `network_thread_`.
class RtpVideoStreamReceiverFrameTransformerDelegate
: public TransformedFrameCallback {
public:
@@ -49,12 +49,12 @@ class RtpVideoStreamReceiverFrameTransformerDelegate
void TransformFrame(std::unique_ptr<RtpFrameObject> frame);
// Implements TransformedFrameCallback. Can be called on any thread. Posts
// the transformed frame to be managed on the |network_thread_|.
// the transformed frame to be managed on the `network_thread_`.
void OnTransformedFrame(
std::unique_ptr<TransformableFrameInterface> frame) override;
// Delegates the call to RtpVideoFrameReceiver::ManageFrame on the
// |network_thread_|.
// `network_thread_`.
void ManageFrame(std::unique_ptr<TransformableFrameInterface> frame);
protected:


@@ -797,8 +797,8 @@ TEST_F(RtpVideoStreamReceiverTest,
TEST_F(RtpVideoStreamReceiverTest,
SecondariesOfNonStartedStreamGetNoNotifications) {
// Explicitly showing that the stream is not in the |started| state,
// regardless of whether streams start out |started| or |stopped|.
// Explicitly showing that the stream is not in the `started` state,
// regardless of whether streams start out `started` or `stopped`.
rtp_video_stream_receiver_->StopReceive();
MockRtpPacketSink secondary_sink;
@@ -836,7 +836,7 @@ TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorOnePacket) {
uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
memcpy(payload, data.data(), data.size());
// The first byte is the header, so we ignore the first byte of |data|.
// The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
@@ -877,7 +877,7 @@ TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorTwoPackets) {
uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
memcpy(first_packet_payload, data.data(), data.size());
// The first byte is the header, so we ignore the first byte of |data|.
// The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
@@ -898,7 +898,7 @@ TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorTwoPackets) {
uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
memcpy(second_packet_payload, data.data(), data.size());
// The first byte is the header, so we ignore the first byte of |data|.
// The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);


@@ -871,7 +871,7 @@ void SendStatisticsProxy::UpdateEncoderFallbackStats(
return;
}
if (is_active && (pixels > *fallback_max_pixels_)) {
// Pixels should not be above |fallback_max_pixels_|. If above, skip to
// Pixels should not be above `fallback_max_pixels_`. If above, skip to
// avoid fallbacks due to failure.
fallback_info->is_possible = false;
return;
@@ -882,7 +882,7 @@ void SendStatisticsProxy::UpdateEncoderFallbackStats(
if (fallback_info->last_update_ms) {
int64_t diff_ms = now_ms - *(fallback_info->last_update_ms);
// If the time diff since last update is greater than |max_frame_diff_ms|,
// If the time diff since last update is greater than `max_frame_diff_ms`,
// video is considered paused/muted and the change is not included.
if (diff_ms < fallback_info->max_frame_diff_ms) {
uma_container_->fallback_active_counter_.Add(fallback_info->is_active,
@@ -1229,7 +1229,7 @@ void SendStatisticsProxy::OnBitrateAllocationUpdated(
}
// Informs the observer whether an internal encoder scaler has reduced video
// resolution or not. |is_scaled| is a flag indicating if the video is scaled
// resolution or not. `is_scaled` is a flag indicating if the video is scaled
// down.
void SendStatisticsProxy::OnEncoderInternalScalerUpdate(bool is_scaled) {
MutexLock lock(&mutex_);


@@ -411,7 +411,7 @@ TEST_F(SendStatisticsProxyTest, TotalEncodedBytesTargetFirstFrame) {
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
// On the first frame we don't know the frame rate yet, so calculation yields
// zero. Our estimate assumes at least 1 FPS, so we expect the frame size to
// increment by a full |kTargetBytesPerSecond|.
// increment by a full `kTargetBytesPerSecond`.
EXPECT_EQ(kTargetBytesPerSecond,
statistics_proxy_->GetStats().total_encoded_bytes_target);
}
@@ -422,7 +422,7 @@ TEST_F(SendStatisticsProxyTest,
const int kInterframeDelayMs = 100;
// SendStatisticsProxy uses a RateTracker internally. SendStatisticsProxy uses
// |fake_clock_| for testing, but the RateTracker relies on a global clock.
// `fake_clock_` for testing, but the RateTracker relies on a global clock.
// This test relies on rtc::ScopedFakeClock to synchronize these two clocks.
// TODO(https://crbug.com/webrtc/10640): When the RateTracker uses a Clock
// this test can stop relying on rtc::ScopedFakeClock.
@@ -447,7 +447,7 @@ TEST_F(SendStatisticsProxyTest,
auto stats = statistics_proxy_->GetStats();
// By the time the second frame arrives, one frame has previously arrived
// during a |kInterframeDelayMs| interval. The estimated encode frame rate at
// during a `kInterframeDelayMs` interval. The estimated encode frame rate at
// the second frame's arrival should be 10 FPS.
uint64_t delta_encoded_bytes_target =
stats.total_encoded_bytes_target - first_total_encoded_bytes_target;
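Spelling out the arithmetic the comment relies on (a sketch; the exact assertion in the test may differ): one frame every kInterframeDelayMs = 100 ms gives an estimated rate of 1000 / 100 = 10 FPS, so the second frame should add one tenth of a second's worth of target bytes:

// Hypothetical expectation matching the 10 FPS estimate described above.
const uint64_t expected_delta = kTargetBytesPerSecond / 10;  // 100 ms worth
EXPECT_EQ(expected_delta, delta_encoded_bytes_target);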


@@ -228,7 +228,7 @@ bool StatsCounter::TimeToProcess(int* elapsed_intervals) {
if (diff_ms < process_intervals_ms_)
return false;
// Advance by the number of complete |process_intervals_ms_| intervals passed.
// Advance by the number of complete `process_intervals_ms_` intervals passed.
int64_t num_intervals = diff_ms / process_intervals_ms_;
last_process_time_ms_ += num_intervals * process_intervals_ms_;
@@ -338,7 +338,7 @@ MaxCounter::MaxCounter(Clock* clock,
int64_t process_intervals_ms)
: StatsCounter(clock,
process_intervals_ms,
false, // |include_empty_intervals|
false, // `include_empty_intervals`
observer) {}
void MaxCounter::Add(int sample) {
@@ -361,7 +361,7 @@ int MaxCounter::GetValueForEmptyInterval() const {
PercentCounter::PercentCounter(Clock* clock, StatsCounterObserver* observer)
: StatsCounter(clock,
kDefaultProcessIntervalMs,
false, // |include_empty_intervals|
false, // `include_empty_intervals`
observer) {}
void PercentCounter::Add(bool sample) {
@@ -385,7 +385,7 @@ int PercentCounter::GetValueForEmptyInterval() const {
PermilleCounter::PermilleCounter(Clock* clock, StatsCounterObserver* observer)
: StatsCounter(clock,
kDefaultProcessIntervalMs,
false, // |include_empty_intervals|
false, // `include_empty_intervals`
observer) {}
void PermilleCounter::Add(bool sample) {


@@ -22,7 +22,7 @@ class AggregatedCounter;
class Clock;
class Samples;
// |StatsCounterObserver| is called periodically when a metric is updated.
// `StatsCounterObserver` is called periodically when a metric is updated.
class StatsCounterObserver {
public:
virtual void OnMetricUpdated(int sample) = 0;
@@ -43,13 +43,13 @@ struct AggregatedStats {
// Classes which periodically compute a metric.
//
// During a period, |kProcessIntervalMs|, different metrics can be computed, e.g.:
// - |AvgCounter|: average of samples
// - |PercentCounter|: percentage of samples
// - |PermilleCounter|: permille of samples
// During a period, `kProcessIntervalMs`, different metrics can be computed, e.g.:
// - `AvgCounter`: average of samples
// - `PercentCounter`: percentage of samples
// - `PermilleCounter`: permille of samples
//
// Each periodic metric can be either:
// - reported to an |observer| each period
// - reported to an `observer` each period
// - aggregated during the call (e.g. min, max, average)
//
// periodically computed
@@ -76,7 +76,7 @@ struct AggregatedStats {
// stats: {min:4, max:15, avg:8}
//
// Note: StatsCounter takes ownership of |observer|.
// Note: StatsCounter takes ownership of `observer`.
class StatsCounter {
public:
@@ -145,7 +145,7 @@ class StatsCounter {
// | Add(5) Add(1) Add(6) | Add(5) Add(5) |
// GetMetric | (5 + 1 + 6) / 3 | (5 + 5) / 2 |
//
// |include_empty_intervals|: If set, intervals without samples will be included
// `include_empty_intervals`: If set, intervals without samples will be included
// in the stats. The value for an interval is
// determined by GetValueForEmptyInterval().
//
@@ -236,7 +236,7 @@ class PermilleCounter : public StatsCounter {
// |<------ 2 sec ------->| |
// GetMetric | (5 + 1 + 6) / 2 | (5 + 5) / 2 |
//
// |include_empty_intervals|: If set, intervals without samples will be included
// `include_empty_intervals`: If set, intervals without samples will be included
// in the stats. The value for an interval is
// determined by GetValueForEmptyInterval().
//
@@ -263,7 +263,7 @@ class RateCounter : public StatsCounter {
// |<------ 2 sec ------->| |
// GetMetric | (8 - 0) / 2 | (13 - 8) / 2 |
//
// |include_empty_intervals|: If set, intervals without samples will be included
// `include_empty_intervals`: If set, intervals without samples will be included
// in the stats. The value for an interval is
// determined by GetValueForEmptyInterval().
//
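A hedged usage sketch tying these pieces together (LogObserver is hypothetical, and the AvgCounter constructor arguments are assumptions based on the comments above, not a confirmed signature):

class LogObserver : public StatsCounterObserver {
 public:
  void OnMetricUpdated(int sample) override {
    RTC_LOG(LS_INFO) << "interval metric: " << sample;
  }
};

// The counter takes ownership of the observer, per the note above.
// AvgCounter counter(clock, new LogObserver(), /*include_empty_intervals=*/false);
// counter.Add(5); counter.Add(1); counter.Add(6);
// After one process interval the observer would see (5 + 1 + 6) / 3 = 4.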


@@ -33,15 +33,15 @@ class StreamSynchronization {
int* total_audio_delay_target_ms,
int* total_video_delay_target_ms);
// On success |relative_delay_ms| contains the number of milliseconds later
// On success `relative_delay_ms` contains the number of milliseconds later
// video is rendered relative to audio. If audio is played back later than video
// |relative_delay_ms| will be negative.
// `relative_delay_ms` will be negative.
static bool ComputeRelativeDelay(const Measurements& audio_measurement,
const Measurements& video_measurement,
int* relative_delay_ms);
// Set target buffering delay. Audio and video will be delayed by at least
// |target_delay_ms|.
// `target_delay_ms`.
void SetTargetBufferingDelay(int target_delay_ms);
// Lowers the audio delay by 10%. Can be used to recover from errors.
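The sign convention in ComputeRelativeDelay(), as a self-contained sketch (parameter names are assumptions; capture times are taken on the shared NTP clock):

bool ComputeRelativeDelayMs(int64_t audio_capture_ntp_ms,
                            int64_t audio_arrival_ms,
                            int64_t video_capture_ntp_ms,
                            int64_t video_arrival_ms,
                            int* relative_delay_ms) {
  // (video latency) - (audio latency); positive means video renders later
  // than audio, negative means audio is the later stream.
  *relative_delay_ms = (video_arrival_ms - audio_arrival_ms) -
                       (video_capture_ntp_ms - audio_capture_ntp_ms);
  return true;
}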


@@ -32,9 +32,9 @@ class StreamSynchronizationTest : public ::testing::Test {
protected:
// Generates the necessary RTCP measurements and RTP timestamps and computes
// the audio and video delays needed to get the two streams in sync.
// |audio_delay_ms| and |video_delay_ms| are the number of milliseconds after
// `audio_delay_ms` and `video_delay_ms` are the number of milliseconds after
// capture at which the frames are received.
// |current_audio_delay_ms| is the number of milliseconds by which audio is
// `current_audio_delay_ms` is the number of milliseconds by which audio is
// currently being delayed by the receiver.
bool DelayedStreams(int audio_delay_ms,
int video_delay_ms,


@@ -458,10 +458,10 @@ bool VideoAnalyzer::IsInSelectedSpatialAndTemporalLayer(
}
void VideoAnalyzer::PollStats() {
// Do not grab |comparison_lock_| before |GetStats()| completes.
// Do not grab `comparison_lock_` before `GetStats()` completes.
// Otherwise a deadlock may occur:
// 1) |comparison_lock_| is acquired after |lock_|
// 2) |lock_| is acquired after internal pacer lock in SendRtp()
// 1) `comparison_lock_` is acquired after `lock_`
// 2) `lock_` is acquired after internal pacer lock in SendRtp()
// 3) internal pacer lock is acquired by GetStats().
Call::Stats call_stats = call_->GetStats();
@@ -490,8 +490,8 @@ void VideoAnalyzer::PollStats() {
if (receive_stream_ != nullptr) {
VideoReceiveStream::Stats receive_stats = receive_stream_->GetStats();
// |total_decode_time_ms| gives a good estimate of the mean decode time, while
// |decode_ms| is used to keep track of the standard deviation.
// `total_decode_time_ms` gives a good estimate of the mean decode time, while
// `decode_ms` is used to keep track of the standard deviation.
if (receive_stats.frames_decoded > 0)
mean_decode_time_ms_ =
static_cast<double>(receive_stats.total_decode_time_ms) /
@@ -504,8 +504,8 @@ void VideoAnalyzer::PollStats() {
pixels_.AddSample(receive_stats.width * receive_stats.height);
}
// |frames_decoded| and |frames_rendered| are used because they are more
// accurate than |decode_frame_rate| and |render_frame_rate|.
// `frames_decoded` and `frames_rendered` are used because they are more
// accurate than `decode_frame_rate` and `render_frame_rate`.
// The latter two are calculated on a momentary basis.
const double total_frames_duration_sec_double =
static_cast<double>(receive_stats.total_frames_duration_ms) / 1000.0;


@@ -162,7 +162,7 @@ class VideoAnalyzer : public PacketReceiver,
const rtc::VideoSinkWants& wants)
RTC_LOCKS_EXCLUDED(lock_) override;
// Called by |send_stream_| when |send_stream_.SetSource()| is called.
// Called by `send_stream_` when |send_stream_.SetSource()| is called.
void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink)
RTC_LOCKS_EXCLUDED(lock_) override;


@@ -51,7 +51,7 @@ class VideoQualityObserver {
uint32_t TotalFramesDurationMs() const;
double SumSquaredFrameDurationsSec() const;
// Set |screenshare| to true if the last decoded frame was for screenshare.
// Set `screenshare` to true if the last decoded frame was for screenshare.
void UpdateHistograms(bool screenshare);
static const uint32_t kMinFrameSamplesToDetectFreeze;


@@ -380,7 +380,7 @@ void VideoReceiveStream::Start() {
new VideoStreamDecoder(&video_receiver_, &stats_proxy_, renderer));
// Make sure we register as a stats observer *after* we've prepared the
// |video_stream_decoder_|.
// `video_stream_decoder_`.
call_stats_->RegisterStatsObserver(this);
// Start decoding on task queue.


@@ -210,9 +210,9 @@ class VideoReceiveStream
mutable Mutex playout_delay_lock_;
// All of them try to change current min_playout_delay on |timing_| but
// All of them try to change current min_playout_delay on `timing_` but
// source of the change request is different in each case. Among them the
// biggest delay is used. -1 means use default value from the |timing_|.
// biggest delay is used. -1 means use default value from the `timing_`.
//
// Minimum delay as decided by the RTP playout delay extension.
int frame_minimum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = -1;


@@ -400,7 +400,7 @@ void VideoReceiveStream2::Start() {
new VideoStreamDecoder(&video_receiver_, &stats_proxy_, renderer));
// Make sure we register as a stats observer *after* we've prepared the
// |video_stream_decoder_|.
// `video_stream_decoder_`.
call_stats_->RegisterStatsObserver(this);
// Start decoding on task queue.
@@ -739,7 +739,7 @@ void VideoReceiveStream2::StartNextDecode() {
void VideoReceiveStream2::HandleEncodedFrame(
std::unique_ptr<EncodedFrame> frame) {
// Running on |decode_queue_|.
// Running on `decode_queue_`.
int64_t now_ms = clock_->TimeInMilliseconds();
// Current OnPreDecode only cares about QP for VP8.
@@ -810,7 +810,7 @@ int VideoReceiveStream2::DecodeAndMaybeDispatchEncodedFrame(
std::unique_ptr<EncodedFrame> frame) {
// Running on decode_queue_.
// If |buffered_encoded_frames_| grows out of control (=60 queued frames),
// If `buffered_encoded_frames_` grows out of control (=60 queued frames),
// maybe due to a stuck decoder, we just halt the process here and log the
// error.
const bool encoded_frame_output_enabled =
@@ -841,7 +841,7 @@ int VideoReceiveStream2::DecodeAndMaybeDispatchEncodedFrame(
absl::optional<RecordableEncodedFrame::EncodedResolution>
pending_resolution;
{
// Fish out |pending_resolution_| to avoid taking the mutex on every lap
// Fish out `pending_resolution_` to avoid taking the mutex on every lap
// or dispatching under the mutex in the flush loop.
webrtc::MutexLock lock(&pending_resolution_mutex_);
if (pending_resolution_.has_value())


@@ -263,9 +263,9 @@ class VideoReceiveStream2
const int max_wait_for_keyframe_ms_;
const int max_wait_for_frame_ms_;
// All of them try to change current min_playout_delay on |timing_| but
// All of them try to change current min_playout_delay on `timing_` but
// source of the change request is different in each case. Among them the
// biggest delay is used. -1 means use default value from the |timing_|.
// biggest delay is used. -1 means use default value from the `timing_`.
//
// Minimum delay as decided by the RTP playout delay extension.
int frame_minimum_playout_delay_ms_ RTC_GUARDED_BY(worker_sequence_checker_) =
@@ -298,17 +298,17 @@ class VideoReceiveStream2
std::vector<std::unique_ptr<EncodedFrame>> buffered_encoded_frames_
RTC_GUARDED_BY(decode_queue_);
// Set by the field trial WebRTC-LowLatencyRenderer. The parameter |enabled|
// Set by the field trial WebRTC-LowLatencyRenderer. The parameter `enabled`
// determines if the low-latency renderer algorithm should be used for the
// case min playout delay=0 and max playout delay>0.
FieldTrialParameter<bool> low_latency_renderer_enabled_;
// Set by the field trial WebRTC-LowLatencyRenderer. The parameter
// |include_predecode_buffer| determines if the predecode buffer should be
// `include_predecode_buffer` determines if the predecode buffer should be
// taken into account when calculating maximum number of frames in composition
// queue.
FieldTrialParameter<bool> low_latency_renderer_include_predecode_buffer_;
// Set by the field trial WebRTC-PreStreamDecoders. The parameter |max|
// Set by the field trial WebRTC-PreStreamDecoders. The parameter `max`
// determines the maximum number of decoders that are created up front before
// any video frame has been received.
FieldTrialParameter<int> maximum_pre_stream_decoders_;


@@ -228,7 +228,7 @@ TEST_F(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMaxValue) {
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
// Ensure that -1 preserves default maximum value from |timing_|.
// Ensure that -1 preserves default maximum value from `timing_`.
EXPECT_EQ(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
EXPECT_NE(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
EXPECT_EQ(default_max_playout_latency, timing_->max_playout_delay());
@@ -244,7 +244,7 @@ TEST_F(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMinValue) {
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
// Ensure that -1 preserves default minimum value from |timing_|.
// Ensure that -1 preserves default minimum value from `timing_`.
EXPECT_NE(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
EXPECT_EQ(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
EXPECT_EQ(default_min_playout_latency, timing_->min_playout_delay());
@@ -430,7 +430,7 @@ TEST_F(VideoReceiveStream2TestWithFakeDecoder, RenderedFrameUpdatesGetSources) {
// Verify that the per-packet information is passed to the renderer.
EXPECT_THAT(fake_renderer_.packet_infos(), ElementsAreArray(packet_infos));
// Verify that the per-packet information also updates |GetSources()|.
// Verify that the per-packet information also updates `GetSources()`.
std::vector<RtpSource> sources = video_receive_stream_->GetSources();
ASSERT_THAT(sources, SizeIs(2));
{


@@ -205,7 +205,7 @@ TEST_F(VideoReceiveStreamTest, PlayoutDelayPreservesDefaultMaxValue) {
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
// Ensure that -1 preserves default maximum value from |timing_|.
// Ensure that -1 preserves default maximum value from `timing_`.
EXPECT_EQ(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
EXPECT_NE(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
EXPECT_EQ(default_max_playout_latency, timing_->max_playout_delay());
@@ -221,7 +221,7 @@ TEST_F(VideoReceiveStreamTest, PlayoutDelayPreservesDefaultMinValue) {
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
// Ensure that -1 preserves default minimum value from |timing_|.
// Ensure that -1 preserves default minimum value from `timing_`.
EXPECT_NE(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
EXPECT_EQ(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
EXPECT_EQ(default_min_playout_latency, timing_->min_playout_delay());
@@ -363,7 +363,7 @@ TEST_F(VideoReceiveStreamTestWithFakeDecoder, RenderedFrameUpdatesGetSources) {
// Verify that the per-packet information is passed to the renderer.
EXPECT_THAT(fake_renderer_.packet_infos(), ElementsAreArray(packet_infos));
// Verify that the per-packet information also updates |GetSources()|.
// Verify that the per-packet information also updates `GetSources()`.
std::vector<RtpSource> sources = video_receive_stream_->GetSources();
ASSERT_THAT(sources, SizeIs(2));
{


@@ -102,7 +102,7 @@ int CalculateMaxPadBitrateBps(const std::vector<VideoStream>& streams,
if (is_svc) {
// For SVC, since there is only one "stream", the padding bitrate
// needed to enable the top spatial layer is stored in the
// |target_bitrate_bps| field.
// `target_bitrate_bps` field.
// TODO(sprang): This behavior needs to die.
pad_up_to_bitrate_bps = static_cast<int>(
hysteresis_factor * active_streams[0].target_bitrate_bps + 0.5);


@@ -107,7 +107,7 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver,
VideoLayersAllocation allocation) override;
// Implements EncodedImageCallback. The implementation routes encoded frames
// to the |payload_router_| and |config.pre_encode_callback| if set.
// to the `payload_router_` and |config.pre_encode_callback| if set.
// Called on an arbitrary encoder callback thread.
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,


@@ -1322,7 +1322,7 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) {
(last_packet_time_ms_ &&
clock_->TimeInMilliseconds() - last_packet_time_ms_.value() >
kNoPacketsThresholdMs)) {
// No packets seen for |kNoPacketsThresholdMs|, restart camera.
// No packets seen for `kNoPacketsThresholdMs`, restart camera.
capturer_->Start();
test_state_ = kWaitingForMediaAfterCameraRestart;
}
@@ -1461,7 +1461,7 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
bitrate_capped_(false) {}
~BitrateObserver() override {
// Make sure we free |rtp_rtcp_| in the same context as we constructed it.
// Make sure we free `rtp_rtcp_` in the same context as we constructed it.
SendTask(RTC_FROM_HERE, task_queue_, [this]() { rtp_rtcp_ = nullptr; });
}
@@ -1551,7 +1551,7 @@ TEST_F(VideoSendStreamTest, ChangingNetworkRoute) {
~ChangingNetworkRouteTest() {
// Block until all already posted tasks run to avoid 'use after free'
// when such a task accesses |this|.
// when such a task accesses `this`.
SendTask(RTC_FROM_HERE, task_queue_, [] {});
}
@@ -1677,7 +1677,7 @@ TEST_F(VideoSendStreamTest, RelayToDirectRoute) {
~RelayToDirectRouteTest() {
// Block until all already posted tasks run to avoid 'use after free'
// when such a task accesses |this|.
// when such a task accesses `this`.
SendTask(RTC_FROM_HERE, task_queue_, [] {});
}
@@ -1848,7 +1848,7 @@ class MaxPaddingSetTest : public test::SendTest {
~MaxPaddingSetTest() {
// Block until all already posted tasks run to avoid 'use after free'
// when such a task accesses |this|.
// when such a task accesses `this`.
SendTask(RTC_FROM_HERE, task_queue_, [] {});
}
@@ -1889,7 +1889,7 @@ class MaxPaddingSetTest : public test::SendTest {
RTC_DCHECK_RUN_ON(&task_queue_thread_);
// In case we get a callback during teardown.
// When this happens, OnStreamsStopped() has been called already,
// |call_| is null and the streams are being torn down.
// `call_` is null and the streams are being torn down.
if (!call_)
return;
@@ -1925,7 +1925,7 @@ class MaxPaddingSetTest : public test::SendTest {
return SEND_PACKET;
}
// Called on |task_queue_|
// Called on `task_queue_`
void OnStreamsStopped() override {
RTC_DCHECK_RUN_ON(&task_queue_thread_);
RTC_DCHECK(task_queue_->IsCurrent());
@@ -3788,7 +3788,7 @@ const float kAlrProbingExperimentPaceMultiplier = 1.0f;
TEST_F(VideoSendStreamTest, AlrConfiguredWhenSendSideOn) {
test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString());
// Send-side bwe on, use pacing factor from |kAlrProbingExperiment| above.
// Send-side bwe on, use pacing factor from `kAlrProbingExperiment` above.
PacingFactorObserver test_with_send_side(true,
kAlrProbingExperimentPaceMultiplier);
RunBaseTest(&test_with_send_side);


@@ -74,8 +74,8 @@ class VideoSourceSinkController {
// Pixel and frame rate restrictions.
VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&sequence_checker_);
// Ensures that even if we are not restricted, the sink is never configured
// above this limit. Example: We are not CPU limited (no |restrictions_|) but
// our encoder is capped at 30 fps (= |frame_rate_upper_limit_|).
// above this limit. Example: We are not CPU limited (no `restrictions_`) but
// our encoder is capped at 30 fps (= `frame_rate_upper_limit_`).
absl::optional<size_t> pixels_per_frame_upper_limit_
RTC_GUARDED_BY(&sequence_checker_);
absl::optional<double> frame_rate_upper_limit_


@@ -80,11 +80,11 @@ TEST(VideoSourceSinkControllerTest, VideoRestrictionsToSinkWants) {
VideoSourceSinkController controller(&sink, &source);
VideoSourceRestrictions restrictions = controller.restrictions();
// max_pixels_per_frame() maps to |max_pixel_count|.
// max_pixels_per_frame() maps to `max_pixel_count`.
restrictions.set_max_pixels_per_frame(42u);
// target_pixels_per_frame() maps to |target_pixel_count|.
// target_pixels_per_frame() maps to `target_pixel_count`.
restrictions.set_target_pixels_per_frame(200u);
// max_frame_rate() maps to |max_framerate_fps|.
// max_frame_rate() maps to `max_framerate_fps`.
restrictions.set_max_frame_rate(30.0);
controller.SetRestrictions(restrictions);
EXPECT_CALL(source, AddOrUpdateSink(_, _))
@@ -96,9 +96,9 @@ TEST(VideoSourceSinkControllerTest, VideoRestrictionsToSinkWants) {
});
controller.PushSourceSinkSettings();
// pixels_per_frame_upper_limit() caps |max_pixel_count|.
// pixels_per_frame_upper_limit() caps `max_pixel_count`.
controller.SetPixelsPerFrameUpperLimit(24);
// frame_rate_upper_limit() caps |max_framerate_fps|.
// frame_rate_upper_limit() caps `max_framerate_fps`.
controller.SetFrameRateUpperLimit(10.0);
EXPECT_CALL(source, AddOrUpdateSink(_, _))


@@ -37,7 +37,7 @@ VideoStreamDecoder::~VideoStreamDecoder() {
video_receiver_->RegisterReceiveCallback(nullptr);
}
// Do not acquire the lock of |video_receiver_| in this function. Decode
// Do not acquire the lock of `video_receiver_` in this function. Decode
// callback won't necessarily be called from the decoding thread. The decoding
// thread may have held the lock when calling VideoDecoder::Decode, Reset, or
// Release. Acquiring the same lock in the path of decode callback can deadlock.


@@ -38,7 +38,7 @@ VideoStreamDecoder::~VideoStreamDecoder() {
video_receiver_->RegisterReceiveCallback(nullptr);
}
// Do not acquire the lock of |video_receiver_| in this function. Decode
// Do not acquire the lock of `video_receiver_` in this function. Decode
// callback won't necessarily be called from the decoding thread. The decoding
// thread may have held the lock when calling VideoDecoder::Decode, Reset, or
// Release. Acquiring the same lock in the path of decode callback can deadlock.


@@ -185,7 +185,7 @@ void VideoStreamDecoderImpl::OnNextFrameCallback(
}
case video_coding::FrameBuffer::kTimeout: {
callbacks_->OnNonDecodableState();
// The |frame_buffer_| requires the frame callback function to complete
// The `frame_buffer_` requires the frame callback function to complete
// before NextFrame is called again. For this reason we call
// StartNextDecode in a later task to allow this task to complete first.
bookkeeping_queue_.PostTask([this]() {
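The pattern the comment describes, returning from the callback before kicking off the next decode, can be sketched with a plain single-threaded task queue (hypothetical names; the real code posts to `bookkeeping_queue_`, an rtc::TaskQueue):

#include <cstdio>
#include <functional>
#include <queue>

// Hypothetical single-threaded task queue standing in for
// `bookkeeping_queue_`.
std::queue<std::function<void()>> task_queue;

void StartNextDecode() { std::puts("StartNextDecode runs as a new task"); }

void OnNextFrameTimeout() {
  // Wrong: calling StartNextDecode() here would re-enter the frame buffer
  // before this callback has completed. Right: defer it as a separate task.
  task_queue.push([] { StartNextDecode(); });
  std::puts("timeout callback completes first");
}

int main() {
  OnNextFrameTimeout();
  while (!task_queue.empty()) {  // Drain the queue; ordering is preserved.
    task_queue.front()();
    task_queue.pop();
  }
  return 0;
}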

View File

@ -102,14 +102,14 @@ class VideoStreamDecoderImpl : public VideoStreamDecoderInterface {
std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings_
RTC_GUARDED_BY(decode_queue_);
// The |bookkeeping_queue_| use the |frame_buffer_| and also posts tasks to
// the |decode_queue_|. The |decode_queue_| in turn use the |decoder_| to
// decode frames. When the |decoder_| is done it will post back to the
// |bookkeeping_queue_| with the decoded frame. During shutdown we start by
// isolating the |bookkeeping_queue_| from the |decode_queue_|, so now it's
// safe for the |decode_queue_| to be destructed. After that the |decoder_|
// can be destructed, and then the |bookkeeping_queue_|. Finally the
// |frame_buffer_| can be destructed.
// The `bookkeeping_queue_` uses the `frame_buffer_` and also posts tasks to
// the `decode_queue_`. The `decode_queue_` in turn uses the `decoder_` to
// decode frames. When the `decoder_` is done it will post back to the
// `bookkeeping_queue_` with the decoded frame. During shutdown we start by
// isolating the `bookkeeping_queue_` from the `decode_queue_`, so now it's
// safe for the `decode_queue_` to be destructed. After that the `decoder_`
// can be destructed, and then the `bookkeeping_queue_`. Finally the
// `frame_buffer_` can be destructed.
Mutex shut_down_mutex_;
bool shut_down_ RTC_GUARDED_BY(shut_down_mutex_);
video_coding::FrameBuffer frame_buffer_ RTC_GUARDED_BY(bookkeeping_queue_);
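Because C++ destroys data members in reverse declaration order, the teardown sequence the comment spells out falls out of declaring the members in the opposite order. A minimal sketch with hypothetical logging members:

#include <cstdio>

// Hypothetical members whose destructors log, to show that C++ destroys
// members in reverse declaration order.
struct Member {
  const char* name;
  explicit Member(const char* n) : name(n) {}
  ~Member() { std::printf("destroying %s\n", name); }
};

struct VideoStreamDecoderSketch {
  // Declared in the reverse of the desired teardown order:
  Member frame_buffer_{"frame_buffer_"};            // destroyed last
  Member bookkeeping_queue_{"bookkeeping_queue_"};
  Member decoder_{"decoder_"};
  Member decode_queue_{"decode_queue_"};            // destroyed first
};

int main() {
  VideoStreamDecoderSketch d;
  return 0;  // Destructor output: decode_queue_, decoder_,
             // bookkeeping_queue_, frame_buffer_.
}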

View File

@ -877,7 +877,7 @@ void VideoStreamEncoder::ReconfigureEncoder() {
encoder_reset_required = true;
}
// Possibly adjusts scale_resolution_down_by in |encoder_config_| to limit the
// Possibly adjusts scale_resolution_down_by in `encoder_config_` to limit the
// alignment value.
AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
encoder_->GetEncoderInfo(), &encoder_config_, absl::nullopt);
@ -1437,7 +1437,7 @@ void VideoStreamEncoder::SetEncoderRates(
return;
}
// |bitrate_allocation| is 0 it means that the network is down or the send
// If `bitrate_allocation` is 0 it means that the network is down or the send
// pacer is full. We currently only report this if the encoder has an internal
// source. If the encoder does not have an internal source, higher levels
// are expected to not call AddVideoFrame. We do this since it is unclear
@ -1524,7 +1524,7 @@ void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame,
if (last_encoder_rate_settings_) {
// Clone rate settings before update, so that SetEncoderRates() will
// actually detect the change between the input and
// |last_encoder_rate_setings_|, triggering the call to SetRate() on the
// `last_encoder_rate_settings_`, triggering the call to SetRate() on the
// encoder.
EncoderRateSettings new_rate_settings = *last_encoder_rate_settings_;
new_rate_settings.rate_control.framerate_fps =
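The reason for cloning is that the setter only forwards rates that differ from the cached settings; mutating the cached object in place would make the comparison see no change. A sketch of the idiom with hypothetical, simplified types:

#include <cstdio>
#include <optional>

// Hypothetical stand-in for EncoderRateSettings.
struct RateSettings {
  double framerate_fps = 0;
  bool operator==(const RateSettings& o) const {
    return framerate_fps == o.framerate_fps;
  }
};

std::optional<RateSettings> last_settings;

void SetEncoderRates(const RateSettings& s) {
  // Change detection: only push rates that actually differ.
  if (last_settings && *last_settings == s) return;
  last_settings = s;
  std::printf("SetRate(framerate=%.1f)\n", s.framerate_fps);
}

int main() {
  SetEncoderRates({30.0});
  // Clone the cached settings, then modify the clone; comparing the clone
  // against the (unchanged) cache lets the setter detect the difference.
  RateSettings updated = *last_settings;
  updated.framerate_fps = 15.0;
  SetEncoderRates(updated);  // Detected: prints SetRate(framerate=15.0).
  return 0;
}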
@ -1869,7 +1869,7 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
// Currently the internal quality scaler is used for VP9 instead of the
// webrtc qp scaler (in the no-svc case or if only a single spatial layer is
// encoded).
// It has to be explicitly detected and reported to adaptation metrics.
// Post a task because |send_codec_| requires |encoder_queue_| lock.
// Post a task because `send_codec_` requires `encoder_queue_` lock.
unsigned int image_width = image_copy._encodedWidth;
unsigned int image_height = image_copy._encodedHeight;
encoder_queue_.PostTask([this, codec_type, image_width, image_height] {

View File

@ -115,8 +115,8 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
double cwnd_reduce_ratio);
protected:
// Used for testing. For example the |ScalingObserverInterface| methods must
// be called on |encoder_queue_|.
// Used for testing. For example, the `ScalingObserverInterface` methods must
// be called on `encoder_queue_`.
rtc::TaskQueue* encoder_queue() { return &encoder_queue_; }
void OnVideoSourceRestrictionsUpdated(
@ -159,7 +159,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
VideoEncoder::RateControlParameters rate_control;
// This is the scalar target bitrate before the VideoBitrateAllocator, i.e.
// the |target_bitrate| argument of the OnBitrateUpdated() method. This is
// the `target_bitrate` argument of the OnBitrateUpdated() method. This is
// needed because the bitrate allocator may truncate the total bitrate and a
// later call to the same allocator instance, e.g.
// using `last_encoder_rate_settings_->bitrate.get_sum_bps()`, may trick it
@ -197,7 +197,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
void TraceFrameDropStart();
void TraceFrameDropEnd();
// Returns a copy of |rate_settings| with the |bitrate| field updated using
// Returns a copy of `rate_settings` with the `bitrate` field updated using
// the current VideoBitrateAllocator.
EncoderRateSettings UpdateBitrateAllocation(
const EncoderRateSettings& rate_settings) RTC_RUN_ON(&encoder_queue_);
@ -212,7 +212,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
DataSize frame_size);
bool HasInternalSource() const RTC_RUN_ON(&encoder_queue_);
void ReleaseEncoder() RTC_RUN_ON(&encoder_queue_);
// After calling this function |resource_adaptation_processor_| will be null.
// After calling this function `resource_adaptation_processor_` will be null.
void ShutdownResourceAdaptationQueue();
void CheckForAnimatedContent(const VideoFrame& frame,
@ -323,7 +323,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
// encoder behavior might dynamically change.
bool force_disable_frame_dropper_ RTC_GUARDED_BY(&encoder_queue_);
RateStatistics input_framerate_ RTC_GUARDED_BY(&encoder_queue_);
// Incremented on worker thread whenever |frame_dropper_| determines that a
// Incremented on worker thread whenever `frame_dropper_` determines that a
// frame should be dropped. Decremented on whichever thread runs
// OnEncodedImage(), which is only called by one thread but not necessarily
// the worker thread.
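A counter written on one thread and decremented on another needs atomic (or lock-protected) access; a minimal sketch under that assumption, with hypothetical names:

#include <atomic>
#include <cstdio>
#include <thread>

// Hypothetical counterpart of the dropped-frame counter: incremented on the
// worker thread, decremented on whichever thread runs the encode callback.
std::atomic<int> pending_drops{0};

int main() {
  std::thread worker([] {
    for (int i = 0; i < 100; ++i)
      pending_drops.fetch_add(1, std::memory_order_relaxed);
  });
  std::thread encoder_callback([] {
    for (int i = 0; i < 100; ++i)
      pending_drops.fetch_sub(1, std::memory_order_relaxed);
  });
  worker.join();
  encoder_callback.join();
  std::printf("pending drops: %d\n", pending_drops.load());  // 0
  return 0;
}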
@ -339,7 +339,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
RTC_GUARDED_BY(&encoder_queue_);
// TODO(sprang): Change to actually support keyframe per simulcast stream, or
// turn this into a simple bool |pending_keyframe_request_|.
// turn this into a simple bool `pending_keyframe_request_`.
std::vector<VideoFrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);
FrameEncodeMetadataWriter frame_encode_metadata_writer_;
@ -387,7 +387,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
// specific resources, such as "encode usage percent" measurements and "QP
// scaling". Also involved with various mitigations such as initial frame
// dropping.
// The manager primarily operates on the |encoder_queue_| but its lifetime is
// The manager primarily operates on the `encoder_queue_` but its lifetime is
// tied to the VideoStreamEncoder (which is destroyed off the encoder queue)
// and its resource list is accessible from any thread.
VideoStreamEncoderResourceManager stream_resource_manager_

View File

@ -5698,7 +5698,7 @@ TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenSVCLayersChange) {
video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrateBps;
video_encoder_config.content_type =
VideoEncoderConfig::ContentType::kRealtimeVideo;
// Currently simulcast layers |active| flags are used to inidicate
// Currently simulcast layers' `active` flags are used to indicate
// which SVC layers are active.
video_encoder_config.simulcast_layers.resize(3);
@ -6167,7 +6167,7 @@ TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) {
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
max_bitrate, max_bitrate, max_bitrate, 0, 0, 0);
// Insert frames and advance |min_duration_ms|.
// Insert frames and advance `min_duration_ms`.
for (size_t i = 1; i <= 10; i++) {
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
@ -7649,8 +7649,8 @@ TEST_F(VideoStreamEncoderTest, EncoderSelectorCurrentEncoderIsSignaled) {
// The encoders produced by the VideoEncoderProxyFactory have a pointer back
// to its factory, so in order for the encoder instance in the
// |video_stream_encoder_| to be destroyed before the |encoder_factory| we
// reset the |video_stream_encoder_| here.
// `video_stream_encoder_` to be destroyed before the `encoder_factory`, we
// reset the `video_stream_encoder_` here.
video_stream_encoder_.reset();
}
@ -7736,8 +7736,8 @@ TEST_F(VideoStreamEncoderTest, EncoderSelectorBrokenEncoderSwitch) {
// The encoders produced by the VideoEncoderProxyFactory have a pointer back
// to its factory, so in order for the encoder instance in the
// |video_stream_encoder_| to be destroyed before the |encoder_factory| we
// reset the |video_stream_encoder_| here.
// `video_stream_encoder_` to be destroyed before the `encoder_factory`, we
// reset the `video_stream_encoder_` here.
video_stream_encoder_.reset();
}
@ -8020,7 +8020,7 @@ TEST_F(VideoStreamEncoderTest, EncoderResolutionsExposedInSinglecast) {
TEST_F(VideoStreamEncoderTest, EncoderResolutionsExposedInSimulcast) {
// Pick downscale factors such that we never encode at full resolution - this
// is an interesting use case. The frame resolution influences the encoder
// resolutions, but if no layer has |scale_resolution_down_by| == 1 then the
// resolutions, but if no layer has `scale_resolution_down_by` == 1 then the
// encoder should not ask for the frame resolution. This allows video frames
// to have the appearance of one resolution while the encoder optimizes its
// internal buffers for what is actually encoded.
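The derivation the comment implies can be sketched as follows (hypothetical helper, not the actual encoder code): each layer encodes at the frame resolution divided by its scale_resolution_down_by, so if no factor is 1, the full frame size never shows up among the encoder resolutions.

#include <cstdio>
#include <vector>

struct Resolution { int width; int height; };

// Hypothetical helper: derive per-layer encode resolutions from the frame
// size and each simulcast layer's scale_resolution_down_by.
std::vector<Resolution> EncoderResolutions(
    Resolution frame, const std::vector<double>& scale_down_by) {
  std::vector<Resolution> out;
  for (double s : scale_down_by) {
    out.push_back({static_cast<int>(frame.width / s),
                   static_cast<int>(frame.height / s)});
  }
  return out;
}

int main() {
  // No layer has scale factor 1, so 1280x720 itself is never encoded.
  for (const auto& r : EncoderResolutions({1280, 720}, {8.0, 4.0, 2.0}))
    std::printf("%dx%d\n", r.width, r.height);  // 160x90 320x180 640x360
  return 0;
}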
@ -8268,8 +8268,8 @@ class VideoStreamEncoderWithRealEncoderTest
void TearDown() override {
video_stream_encoder_->Stop();
// Ensure |video_stream_encoder_| is destroyed before
// |encoder_proxy_factory_|.
// Ensure `video_stream_encoder_` is destroyed before
// `encoder_proxy_factory_`.
video_stream_encoder_.reset();
VideoStreamEncoderTest::TearDown();
}