Use backticks not vertical bars to denote variables in comments for /modules/audio_coding

Bug: webrtc:12338
Change-Id: I02613d9fca45d00e2477f334b7a0416e7912e26b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227037
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34621}
Artem Titov 2021-07-28 20:00:17 +02:00 committed by WebRTC LUCI CQ
parent 0146a34b3f
commit d00ce747c7
143 changed files with 809 additions and 809 deletions
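
The rewrite itself is mechanical: every `|identifier|` in a comment becomes a backticked `identifier`. A minimal sketch of that substitution, assuming a plain regex pass (the actual tooling used for this CL is not shown here):

```cpp
#include <iostream>
#include <regex>
#include <string>

// Rewrites |identifier| to `identifier` inside a comment line. Matching only
// \w+ deliberately skips spans such as |initial delay| or |*ptr_out|, which
// is consistent with the few vertical bars left untouched in the hunks below.
std::string BacktickVariables(const std::string& comment) {
  static const std::regex kVar(R"(\|(\w+)\|)");
  return std::regex_replace(comment, kVar, "`$1`");
}

int main() {
  std::cout << BacktickVariables(
                   "// This test toggles the output frequency every "
                   "|toggle_period_ms|.")
            << std::endl;  // ... every `toggle_period_ms`.
}
```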


@ -71,8 +71,8 @@ class AcmReceiveTestOldApi {
RTC_DISALLOW_COPY_AND_ASSIGN(AcmReceiveTestOldApi);
};
// This test toggles the output frequency every |toggle_period_ms|. The test
// starts with |output_freq_hz_1|. Except for the toggling, it does the same
// This test toggles the output frequency every `toggle_period_ms`. The test
// starts with `output_freq_hz_1`. Except for the toggling, it does the same
// thing as AcmReceiveTestOldApi.
class AcmReceiveTestToggleOutputFreqOldApi : public AcmReceiveTestOldApi {
public:


@ -131,7 +131,7 @@ int AcmReceiver::InsertPacket(const RTPHeader& rtp_header,
/*num_channels=*/format->num_channels,
/*sdp_format=*/std::move(format->sdp_format)};
}
} // |mutex_| is released.
} // `mutex_` is released.
if (neteq_->InsertPacket(rtp_header, incoming_payload) < 0) {
RTC_LOG(LERROR) << "AcmReceiver::InsertPacket "
@ -201,7 +201,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz,
// We might end up here ONLY if codec is changed.
}
// Store current audio in |last_audio_buffer_| for next time.
// Store current audio in `last_audio_buffer_` for next time.
memcpy(last_audio_buffer_.get(), audio_frame->data(),
sizeof(int16_t) * audio_frame->samples_per_channel_ *
audio_frame->num_channels_);


@ -177,9 +177,9 @@ class AcmReceiver {
// enabled then the maximum NACK list size is modified accordingly.
//
// If the sequence number of last received packet is N, the sequence numbers
// of NACK list are in the range of [N - |max_nack_list_size|, N).
// of NACK list are in the range of [N - `max_nack_list_size`, N).
//
// |max_nack_list_size| should be positive (none zero) and less than or
// `max_nack_list_size` should be positive (non-zero) and less than or
// equal to |Nack::kNackListSizeLimit|. Otherwise, no change is applied and -1
// is returned. 0 is returned at success.
//
@ -189,12 +189,12 @@ class AcmReceiver {
void DisableNack();
//
// Get a list of packets to be retransmitted. |round_trip_time_ms| is an
// Get a list of packets to be retransmitted. `round_trip_time_ms` is an
// estimate of the round-trip-time (in milliseconds). Missing packets which
// will be played out in a shorter time than the round-trip-time (with respect
// to the time this API is called) will not be included in the list.
//
// Negative |round_trip_time_ms| results is an error message and empty list
// A negative `round_trip_time_ms` results in an error message, and an empty list
// is returned.
//
std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
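
For illustration, the documented range semantics in a standalone sketch; `NackCandidates` is a hypothetical helper, not the real `Nack` class:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// With last received sequence number N, NACK candidates lie in
// [N - max_nack_list_size, N). uint16_t arithmetic wraps modulo 2^16.
std::vector<uint16_t> NackCandidates(uint16_t last_received_seq_num,
                                     size_t max_nack_list_size) {
  std::vector<uint16_t> candidates;
  candidates.reserve(max_nack_list_size);
  for (size_t i = max_nack_list_size; i > 0; --i)
    candidates.push_back(static_cast<uint16_t>(last_received_seq_num - i));
  return candidates;
}

int main() {
  // Wrap-around case: N = 2, size = 4 -> 65534 65535 0 1.
  for (uint16_t seq : NackCandidates(/*last_received_seq_num=*/2, 4))
    std::printf("%u ", seq);
}
```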


@ -125,7 +125,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
// TODO(bugs.webrtc.org/10739): change |absolute_capture_timestamp_ms| to
// TODO(bugs.webrtc.org/10739): change `absolute_capture_timestamp_ms` to
// int64_t when it always receives a valid value.
int Encode(const InputData& input_data,
absl::optional<int64_t> absolute_capture_timestamp_ms)
@ -141,8 +141,8 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
//
// in_frame: input audio-frame
// ptr_out: pointer to output audio_frame. If no preprocessing is required
// |ptr_out| will be pointing to |in_frame|, otherwise pointing to
// |preprocess_frame_|.
// `ptr_out` will be pointing to `in_frame`, otherwise pointing to
// `preprocess_frame_`.
//
// Return value:
// -1: if encountering an error.
@ -152,7 +152,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
// Change required states after starting to receive the codec corresponding
// to |index|.
// to `index`.
int UpdateUponReceivingCodec(int index);
mutable Mutex acm_mutex_;
@ -397,7 +397,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
// output data if needed.
ReMixFrame(*ptr_frame, current_num_channels, &input_data->buffer);
// For pushing data to primary, point the |ptr_audio| to correct buffer.
// For pushing data to primary, point the `ptr_audio` to correct buffer.
input_data->audio = input_data->buffer.data();
RTC_DCHECK_GE(input_data->buffer.size(),
input_data->length_per_channel * input_data->audio_channel);
@ -414,7 +414,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
// encoder is mono and input is stereo. In case of dual-streaming, both
// encoders have to be mono for down-mix to take place.
// |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
// is required, |*ptr_out| points to |in_frame|.
// is required, |*ptr_out| points to `in_frame`.
// TODO(yujo): Make this more efficient for muted frames.
int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
const AudioFrame** ptr_out) {


@ -342,7 +342,7 @@ TEST_F(AudioCodingModuleTestOldApi, TimestampSeriesContinuesWhenCodecChanges) {
// Introduce this class to set different expectations on the number of encoded
// bytes. This class expects all encoded packets to be 9 bytes (matching one
// CNG SID frame) or 0 bytes. This test depends on |input_frame_| containing
// CNG SID frame) or 0 bytes. This test depends on `input_frame_` containing
// (near-)zero values. It also introduces a way to register comfort noise with
// a custom payload type.
class AudioCodingModuleTestWithComfortNoiseOldApi
@ -593,7 +593,7 @@ class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
InsertAudio();
ASSERT_LT(loop_counter++, 10);
}
// Set |last_packet_number_| to one less that |num_calls| so that the packet
// Set `last_packet_number_` to one less than `num_calls` so that the packet
// will be fetched in the next InsertPacket() call.
last_packet_number_ = packet_cb_.num_calls() - 1;
@ -617,7 +617,7 @@ class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
if (num_calls > last_packet_number_) {
// Get the new payload out from the callback handler.
// Note that since we swap buffers here instead of directly inserting
// a pointer to the data in |packet_cb_|, we avoid locking the callback
// a pointer to the data in `packet_cb_`, we avoid locking the callback
// for the duration of the IncomingPacket() call.
packet_cb_.SwapBuffers(&last_payload_vec_);
ASSERT_GT(last_payload_vec_.size(), 0u);
@ -1140,8 +1140,8 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
// Sets up the test::AcmSendTest object. Returns true on success, otherwise
// false.
bool SetUpSender(std::string input_file_name, int source_rate) {
// Note that |audio_source_| will loop forever. The test duration is set
// explicitly by |kTestDurationMs|.
// Note that `audio_source_` will loop forever. The test duration is set
// explicitly by `kTestDurationMs`.
audio_source_.reset(new test::InputAudioFile(input_file_name));
send_test_.reset(new test::AcmSendTestOldApi(audio_source_.get(),
source_rate, kTestDurationMs));
@ -1243,7 +1243,7 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
VerifyPacket(packet.get());
// TODO(henrik.lundin) Save the packet to file as well.
// Pass it on to the caller. The caller becomes the owner of |packet|.
// Pass it on to the caller. The caller becomes the owner of `packet`.
return packet;
}
@ -1631,8 +1631,8 @@ class AcmSetBitRateTest : public ::testing::Test {
bool SetUpSender() {
const std::string input_file_name =
webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
// Note that |audio_source_| will loop forever. The test duration is set
// explicitly by |kTestDurationMs|.
// Note that `audio_source_` will loop forever. The test duration is set
// explicitly by `kTestDurationMs`.
audio_source_.reset(new test::InputAudioFile(input_file_name));
static const int kSourceRateHz = 32000;
send_test_.reset(new test::AcmSendTestOldApi(
@ -1859,7 +1859,7 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
// This test fixture is implemented to run ACM and change the desired output
// frequency during the call. The input packets are simply PCM16b-wb encoded
// payloads with a constant value of |kSampleValue|. The test fixture itself
// payloads with a constant value of `kSampleValue`. The test fixture itself
// acts as PacketSource in between the receive test class and the constant-
// payload packet source class. The output is both written to file, and analyzed
// in this test fixture.


@ -44,7 +44,7 @@ void CallStatistics::DecodedByNetEq(AudioFrame::SpeechType speech_type,
break;
}
case AudioFrame::kUndefined: {
// If the audio is decoded by NetEq, |kUndefined| is not an option.
// If the audio is decoded by NetEq, `kUndefined` is not an option.
RTC_NOTREACHED();
}
}


@ -36,8 +36,8 @@ class CallStatistics {
CallStatistics() {}
~CallStatistics() {}
// Call this method to indicate that NetEq engaged in decoding. |speech_type|
// is the audio-type according to NetEq, and |muted| indicates if the decoded
// Call this method to indicate that NetEq engaged in decoding. `speech_type`
// is the audio-type according to NetEq, and `muted` indicates if the decoded
// frame was produced in muted state.
void DecodedByNetEq(AudioFrame::SpeechType speech_type, bool muted);


@ -50,7 +50,7 @@ void BitrateController::UpdateNetworkMetrics(
}
void BitrateController::MakeDecision(AudioEncoderRuntimeConfig* config) {
// Decision on |bitrate_bps| should not have been made.
// Decision on `bitrate_bps` should not have been made.
RTC_DCHECK(!config->bitrate_bps);
if (target_audio_bitrate_bps_ && overhead_bytes_per_packet_) {
if (config->frame_length_ms)


@ -28,7 +28,7 @@ ChannelController::Config::Config(size_t num_encoder_channels,
ChannelController::ChannelController(const Config& config)
: config_(config), channels_to_encode_(config_.intial_channels_to_encode) {
RTC_DCHECK_GT(config_.intial_channels_to_encode, 0lu);
// Currently, we require |intial_channels_to_encode| to be <= 2.
// Currently, we require `intial_channels_to_encode` to be <= 2.
RTC_DCHECK_LE(config_.intial_channels_to_encode, 2lu);
RTC_DCHECK_GE(config_.num_encoder_channels,
config_.intial_channels_to_encode);
@ -43,7 +43,7 @@ void ChannelController::UpdateNetworkMetrics(
}
void ChannelController::MakeDecision(AudioEncoderRuntimeConfig* config) {
// Decision on |num_channels| should not have been made.
// Decision on `num_channels` should not have been made.
RTC_DCHECK(!config->num_channels);
if (uplink_bandwidth_bps_) {


@ -23,8 +23,8 @@ message FecController {
optional float high_bandwidth_packet_loss = 4;
}
// |fec_enabling_threshold| defines a curve, above which FEC should be
// enabled. |fec_disabling_threshold| defines a curve, under which FEC
// `fec_enabling_threshold` defines a curve, above which FEC should be
// enabled. `fec_disabling_threshold` defines a curve, under which FEC
// should be disabled. See below
//
// packet-loss ^ | |
@ -36,7 +36,7 @@ message FecController {
optional Threshold fec_enabling_threshold = 1;
optional Threshold fec_disabling_threshold = 2;
// |time_constant_ms| is the time constant for an exponential filter, which
// `time_constant_ms` is the time constant for an exponential filter, which
// is used for smoothing the packet loss fraction.
optional int32 time_constant_ms = 3;
}
@ -62,8 +62,8 @@ message FecControllerRplrBased {
optional float high_bandwidth_recoverable_packet_loss = 4;
}
// |fec_enabling_threshold| defines a curve, above which FEC should be
// enabled. |fec_disabling_threshold| defines a curve, under which FEC
// `fec_enabling_threshold` defines a curve, above which FEC should be
// enabled. `fec_disabling_threshold` defines a curve, under which FEC
// should be disabled. See below
//
// packet-loss ^ | |
@ -122,7 +122,7 @@ message FrameLengthControllerV2 {
// FrameLengthControllerV2 chooses the frame length by taking the target
// bitrate and subtracting the overhead bitrate to obtain the remaining
// bitrate for the payload. The chosen frame length is the shortest possible
// where the payload bitrate is more than |min_payload_bitrate_bps|.
// where the payload bitrate is more than `min_payload_bitrate_bps`.
optional int32 min_payload_bitrate_bps = 1;
// If true, uses the stable target bitrate to decide the frame length. This
@ -158,17 +158,17 @@ message BitrateController {
message Controller {
message ScoringPoint {
// |ScoringPoint| is a subspace of network condition. It is used for
// `ScoringPoint` is a subspace of network condition. It is used for
// comparing the significance of controllers.
optional int32 uplink_bandwidth_bps = 1;
optional float uplink_packet_loss_fraction = 2;
}
// The distance from |scoring_point| to a given network condition defines
// The distance from `scoring_point` to a given network condition defines
// the significance of this controller with respect that network condition.
// Shorter distance means higher significance. The significances of
// controllers determine their order in the processing pipeline. Controllers
// without |scoring_point| follow their default order in
// without `scoring_point` follow their default order in
// |ControllerManager::controllers|.
optional ScoringPoint scoring_point = 1;
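
Reading `time_constant_ms` as the time constant of a standard first-order exponential smoother gives the following sketch; the coefficient form `1 - exp(-delta_ms / tau)` is the usual convention, assumed here rather than quoted from the adaptor code:

```cpp
#include <cmath>
#include <cstdio>

// Exponentially smoothed packet loss fraction with time constant tau (ms).
// A sample arriving delta_ms after the previous one gets weight
// 1 - exp(-delta_ms / tau); older history decays accordingly.
class SmoothedPacketLoss {
 public:
  explicit SmoothedPacketLoss(double time_constant_ms)
      : tau_ms_(time_constant_ms) {}

  double Update(double x, double delta_ms) {
    const double alpha = 1.0 - std::exp(-delta_ms / tau_ms_);
    y_ += alpha * (x - y_);
    return y_;
  }

 private:
  const double tau_ms_;
  double y_ = 0.0;
};

int main() {
  SmoothedPacketLoss filter(/*time_constant_ms=*/1000.0);
  for (int i = 0; i < 5; ++i)
    std::printf("%.4f\n", filter.Update(/*x=*/0.1, /*delta_ms=*/100.0));
}
```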


@ -373,14 +373,14 @@ std::vector<Controller*> ControllerManagerImpl::GetSortedControllers(
config_.min_reordering_squared_distance)
return sorted_controllers_;
// Sort controllers according to the distances of |scoring_point| to the
// Sort controllers according to the distances of `scoring_point` to the
// scoring points of controllers.
//
// Controllers that do not associate with any scoring point
// are treated as if
// 1) they are less important than any controller that has a scoring point,
// 2) they are equally important to any controller that has no scoring point,
// and their relative order will follow |default_sorted_controllers_|.
// and their relative order will follow `default_sorted_controllers_`.
std::vector<Controller*> sorted_controllers(default_sorted_controllers_);
std::stable_sort(
sorted_controllers.begin(), sorted_controllers.end(),
@ -430,7 +430,7 @@ float NormalizeUplinkBandwidth(int uplink_bandwidth_bps) {
}
float NormalizePacketLossFraction(float uplink_packet_loss_fraction) {
// |uplink_packet_loss_fraction| is seldom larger than 0.3, so we scale it up
// `uplink_packet_loss_fraction` is seldom larger than 0.3, so we scale it up
// by 3.3333f.
return std::min(uplink_packet_loss_fraction * 3.3333f, 1.0f);
}
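
Putting the two normalizations together, a sketch of how a distance to a scoring point could be formed; `NormalizePacketLossFraction` is copied from the hunk above, while the clamping in `NormalizeUplinkBandwidth` and the Euclidean combination are assumptions for illustration (the bandwidth constants are the ones quoted in the test file below):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

constexpr int kMinUplinkBandwidthBps = 0;
constexpr int kMaxUplinkBandwidthBps = 120000;

// Assumed: clamp to [kMin, kMax] and scale to [0, 1].
float NormalizeUplinkBandwidth(int uplink_bandwidth_bps) {
  uplink_bandwidth_bps =
      std::min(kMaxUplinkBandwidthBps,
               std::max(kMinUplinkBandwidthBps, uplink_bandwidth_bps));
  return static_cast<float>(uplink_bandwidth_bps - kMinUplinkBandwidthBps) /
         (kMaxUplinkBandwidthBps - kMinUplinkBandwidthBps);
}

// As in the hunk above: the fraction is seldom larger than 0.3, hence the
// 3.3333 scale-up before capping at 1.
float NormalizePacketLossFraction(float uplink_packet_loss_fraction) {
  return std::min(uplink_packet_loss_fraction * 3.3333f, 1.0f);
}

int main() {
  const float db =
      NormalizeUplinkBandwidth(60000) - NormalizeUplinkBandwidth(30000);
  const float dl =
      NormalizePacketLossFraction(0.05f) - NormalizePacketLossFraction(0.02f);
  std::printf("distance = %.4f\n", std::sqrt(db * db + dl * dl));
}
```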


@ -111,7 +111,7 @@ class ControllerManagerImpl final : public ControllerManager {
std::vector<Controller*> sorted_controllers_;
// |scoring_points_| saves the scoring points of various
// `scoring_points_` saves the scoring points of various
// controllers.
std::map<const Controller*, ScoringPoint> controller_scoring_points_;


@ -43,7 +43,7 @@ constexpr int kMinReorderingTimeMs = 200;
constexpr int kFactor = 100;
constexpr float kMinReorderingSquareDistance = 1.0f / kFactor / kFactor;
// |kMinUplinkBandwidthBps| and |kMaxUplinkBandwidthBps| are copied from
// `kMinUplinkBandwidthBps` and `kMaxUplinkBandwidthBps` are copied from
// controller_manager.cc
constexpr int kMinUplinkBandwidthBps = 0;
constexpr int kMaxUplinkBandwidthBps = 120000;
@ -82,7 +82,7 @@ ControllerManagerStates CreateControllerManager() {
return states;
}
// |expected_order| contains the expected indices of all controllers in the
// `expected_order` contains the expected indices of all controllers in the
// vector of controllers returned by GetSortedControllers(). A negative index
// means that we do not care about its exact place, but we do check that it
// exists in the vector.
@ -112,8 +112,8 @@ void CheckControllersOrder(
TEST(ControllerManagerTest, GetControllersReturnAllControllers) {
auto states = CreateControllerManager();
auto check = states.controller_manager->GetControllers();
// Verify that controllers in |check| are one-to-one mapped to those in
// |mock_controllers_|.
// Verify that controllers in `check` are one-to-one mapped to those in
// `mock_controllers_`.
EXPECT_EQ(states.mock_controllers.size(), check.size());
for (auto& controller : check)
EXPECT_NE(states.mock_controllers.end(),
@ -123,7 +123,7 @@ TEST(ControllerManagerTest, GetControllersReturnAllControllers) {
TEST(ControllerManagerTest, ControllersInDefaultOrderOnEmptyNetworkMetrics) {
auto states = CreateControllerManager();
// |network_metrics| are empty, and the controllers are supposed to follow the
// `network_metrics` are empty, and the controllers are supposed to follow the
// default order.
CheckControllersOrder(&states, absl::nullopt, absl::nullopt, {0, 1, 2, 3});
}
@ -304,7 +304,7 @@ void CheckControllersOrder(const std::vector<Controller*>& controllers,
for (size_t i = 0; i < controllers.size(); ++i) {
AudioEncoderRuntimeConfig encoder_config;
// We check the order of |controllers| by judging their decisions.
// We check the order of `controllers` by judging their decisions.
controllers[i]->MakeDecision(&encoder_config);
// Since controllers are not provided with network metrics, they give the


@ -21,7 +21,7 @@ message EncoderRuntimeConfig {
optional bool enable_fec = 4;
optional bool enable_dtx = 5;
// Some encoders can encode fewer channels than the actual input to make
// better use of the bandwidth. |num_channels| sets the number of channels
// better use of the bandwidth. `num_channels` sets the number of channels
// to encode.
optional uint32 num_channels = 6;
}


@ -33,7 +33,7 @@ void DtxController::UpdateNetworkMetrics(
}
void DtxController::MakeDecision(AudioEncoderRuntimeConfig* config) {
// Decision on |enable_dtx| should not have been made.
// Decision on `enable_dtx` should not have been made.
RTC_DCHECK(!config->enable_dtx);
if (uplink_bandwidth_bps_) {


@ -25,8 +25,8 @@ namespace webrtc {
class FecControllerPlrBased final : public Controller {
public:
struct Config {
// |fec_enabling_threshold| defines a curve, above which FEC should be
// enabled. |fec_disabling_threshold| defines a curve, under which FEC
// `fec_enabling_threshold` defines a curve, above which FEC should be
// enabled. `fec_disabling_threshold` defines a curve, under which FEC
// should be disabled. See below
//
// packet-loss ^ | |


@ -100,9 +100,9 @@ void UpdateNetworkMetrics(FecControllerPlrBasedTestStates* states,
}
}
// Checks that the FEC decision and |uplink_packet_loss_fraction| given by
// |states->controller->MakeDecision| matches |expected_enable_fec| and
// |expected_uplink_packet_loss_fraction|, respectively.
// Checks that the FEC decision and `uplink_packet_loss_fraction` given by
// |states->controller->MakeDecision| match `expected_enable_fec` and
// `expected_uplink_packet_loss_fraction`, respectively.
void CheckDecision(FecControllerPlrBasedTestStates* states,
bool expected_enable_fec,
float expected_uplink_packet_loss_fraction) {
@ -221,7 +221,7 @@ TEST(FecControllerPlrBasedTest, MaintainFecOffForLowBandwidth) {
TEST(FecControllerPlrBasedTest, MaintainFecOffForVeryLowBandwidth) {
auto states = CreateFecControllerPlrBased(false);
// Below |kEnablingBandwidthLow|, no packet loss fraction can cause FEC to
// Below `kEnablingBandwidthLow`, no packet loss fraction can cause FEC to
// turn on.
UpdateNetworkMetrics(&states, kEnablingBandwidthLow - 1, 1.0);
CheckDecision(&states, false, 1.0);
@ -272,7 +272,7 @@ TEST(FecControllerPlrBasedTest, DisableFecForLowBandwidth) {
TEST(FecControllerPlrBasedTest, DisableFecForVeryLowBandwidth) {
auto states = CreateFecControllerPlrBased(true);
// Below |kEnablingBandwidthLow|, any packet loss fraction can cause FEC to
// Below `kEnablingBandwidthLow`, any packet loss fraction can cause FEC to
// turn off.
UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0);
CheckDecision(&states, false, 1.0);


@ -54,7 +54,7 @@ FrameLengthController::FrameLengthController(const Config& config)
frame_length_ms_ = std::find(config_.encoder_frame_lengths_ms.begin(),
config_.encoder_frame_lengths_ms.end(),
config_.initial_frame_length_ms);
// |encoder_frame_lengths_ms| must contain |initial_frame_length_ms|.
// `encoder_frame_lengths_ms` must contain `initial_frame_length_ms`.
RTC_DCHECK(frame_length_ms_ != config_.encoder_frame_lengths_ms.end());
}
@ -71,7 +71,7 @@ void FrameLengthController::UpdateNetworkMetrics(
}
void FrameLengthController::MakeDecision(AudioEncoderRuntimeConfig* config) {
// Decision on |frame_length_ms| should not have been made.
// Decision on `frame_length_ms` should not have been made.
RTC_DCHECK(!config->frame_length_ms);
if (FrameLengthIncreasingDecision(*config)) {
@ -99,12 +99,12 @@ bool FrameLengthController::Config::FrameLengthChange::operator<(
bool FrameLengthController::FrameLengthIncreasingDecision(
const AudioEncoderRuntimeConfig& config) {
// Increase frame length if
// 1. |uplink_bandwidth_bps| is known to be smaller or equal than
// |min_encoder_bitrate_bps| plus |prevent_overuse_margin_bps| plus the
// 1. `uplink_bandwidth_bps` is known to be smaller than or equal to
// `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the
// current overhead rate OR all the following:
// 2. longer frame length is available AND
// 3. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
// 4. |uplink_packet_loss_fraction| is known to be smaller than a threshold.
// 3. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
// 4. `uplink_packet_loss_fraction` is known to be smaller than a threshold.
// Find next frame length to which a criterion is defined to shift from
// current frame length.
@ -156,12 +156,12 @@ bool FrameLengthController::FrameLengthDecreasingDecision(
const AudioEncoderRuntimeConfig& config) {
// Decrease frame length if
// 1. shorter frame length is available AND
// 2. |uplink_bandwidth_bps| is known to be bigger than
// |min_encoder_bitrate_bps| plus |prevent_overuse_margin_bps| plus the
// 2. `uplink_bandwidth_bps` is known to be bigger than
// `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the
// overhead which would be produced with the shorter frame length AND
// one or more of the following:
// 3. |uplink_bandwidth_bps| is known to be larger than a threshold,
// 4. |uplink_packet_loss_fraction| is known to be larger than a threshold,
// 3. `uplink_bandwidth_bps` is known to be larger than a threshold,
// 4. `uplink_packet_loss_fraction` is known to be larger than a threshold,
// Find next frame length to which a criterion is defined to shift from
// current frame length.
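
The increase rule reads as a disjunction of criterion 1 with the conjunction of criteria 2-4; a condensed sketch, with parameter and threshold names that are illustrative (the real controller walks a table of per-transition criteria rather than taking them as arguments):

```cpp
#include <cstdio>
#include <optional>

bool ShouldIncreaseFrameLength(std::optional<int> uplink_bandwidth_bps,
                               std::optional<float> uplink_packet_loss_fraction,
                               bool longer_frame_length_available,
                               int min_encoder_bitrate_bps,
                               int prevent_overuse_margin_bps,
                               int overhead_bps,
                               int bandwidth_threshold_bps,
                               float packet_loss_threshold) {
  // Criterion 1: bandwidth known to be at or below the minimum the encoder
  // needs plus the overuse margin plus the current overhead rate.
  if (uplink_bandwidth_bps &&
      *uplink_bandwidth_bps <=
          min_encoder_bitrate_bps + prevent_overuse_margin_bps + overhead_bps) {
    return true;
  }
  // Criteria 2-4: a longer frame length exists AND both metrics are known to
  // be below their thresholds.
  return longer_frame_length_available && uplink_bandwidth_bps &&
         *uplink_bandwidth_bps < bandwidth_threshold_bps &&
         uplink_packet_loss_fraction &&
         *uplink_packet_loss_fraction < packet_loss_threshold;
}

int main() {
  std::printf("%d\n", ShouldIncreaseFrameLength(24000, 0.01f, true, 6000, 3000,
                                                8000, 30000, 0.05f));  // 1
}
```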


@ -184,8 +184,8 @@ TEST(FrameLengthControllerTest,
TEST(FrameLengthControllerTest, IncreaseTo40MsOnMultipleConditions) {
// Increase to 40ms frame length if
// 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
// 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold
// 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
// 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold
// AND
// 3. FEC is not decided or OFF.
auto controller = CreateController(CreateChangeCriteriaFor20msAnd40ms(),
@ -206,8 +206,8 @@ TEST(FrameLengthControllerTest, DecreaseTo40MsOnHighUplinkBandwidth) {
TEST(FrameLengthControllerTest, Maintain60MsOnMultipleConditions) {
// Maintain 60ms frame length if
// 1. |uplink_bandwidth_bps| is at medium level,
// 2. |uplink_packet_loss_fraction| is at medium,
// 1. `uplink_bandwidth_bps` is at medium level,
// 2. `uplink_packet_loss_fraction` is at medium,
// 3. FEC is not decided ON.
auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
kDefaultEncoderFrameLengthsMs, 60);
@ -218,8 +218,8 @@ TEST(FrameLengthControllerTest, Maintain60MsOnMultipleConditions) {
TEST(FrameLengthControllerTest, IncreaseTo60MsOnMultipleConditions) {
// Increase to 60ms frame length if
// 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
// 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold
// 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
// 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold
// AND
// 3. FEC is not decided or OFF.
auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
@ -365,8 +365,8 @@ TEST(FrameLengthControllerTest, From60MsTo120MsOnVeryLowUplinkBandwidth) {
TEST(FrameLengthControllerTest, From20MsTo120MsOnMultipleConditions) {
// Increase to 120ms frame length if
// 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
// 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold.
// 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
// 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold.
auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
kDefaultEncoderFrameLengthsMs, 20);
// It takes two steps for frame length to go from 20ms to 120ms.


@ -32,7 +32,7 @@ struct AudioEncoderRuntimeConfig {
absl::optional<bool> enable_dtx;
// Some encoders can encode fewer channels than the actual input to make
// better use of the bandwidth. |num_channels| sets the number of channels
// better use of the bandwidth. `num_channels` sets the number of channels
// to encode.
absl::optional<size_t> num_channels;


@ -92,8 +92,8 @@ class AudioEncoderCngTest : public ::testing::Test {
timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
}
// Expect |num_calls| calls to the encoder, all successful. The last call
// claims to have encoded |kMockReturnEncodedBytes| bytes, and all the
// Expect `num_calls` calls to the encoder, all successful. The last call
// claims to have encoded `kMockReturnEncodedBytes` bytes, and all the
// preceding ones 0 bytes.
void ExpectEncodeCalls(size_t num_calls) {
InSequence s;
@ -108,7 +108,7 @@ class AudioEncoderCngTest : public ::testing::Test {
}
// Verifies that the cng_ object waits until it has collected
// |blocks_per_frame| blocks of audio, and then dispatches all of them to
// `blocks_per_frame` blocks of audio, and then dispatches all of them to
// the underlying codec (speech or cng).
void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
@ -169,7 +169,7 @@ class AudioEncoderCngTest : public ::testing::Test {
.WillOnce(Return(Vad::kPassive));
}
// With this call to Encode(), |mock_vad_| should be called according to the
// With this call to Encode(), `mock_vad_` should be called according to the
// above expectations.
Encode();
}
@ -201,7 +201,7 @@ class AudioEncoderCngTest : public ::testing::Test {
std::unique_ptr<AudioEncoder> cng_;
std::unique_ptr<MockAudioEncoder> mock_encoder_owner_;
MockAudioEncoder* mock_encoder_;
MockVad* mock_vad_; // Ownership is transferred to |cng_|.
MockVad* mock_vad_; // Ownership is transferred to `cng_`.
uint32_t timestamp_;
int16_t audio_[kMaxNumSamples];
size_t num_audio_samples_10ms_;
@ -294,7 +294,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
for (size_t i = 0; i < 100; ++i) {
Encode();
// Check if it was time to call the cng encoder. This is done once every
// |kBlocksPerFrame| calls.
// `kBlocksPerFrame` calls.
if ((i + 1) % kBlocksPerFrame == 0) {
// Now check if a SID interval has elapsed.
if ((i % (sid_frame_interval_ms / 10)) < kBlocksPerFrame) {
@ -334,7 +334,7 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) {
EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
EXPECT_TRUE(encoded_info_.speech);
// All of the frame is passive speech. Expect no calls to |mock_encoder_|.
// All of the frame is passive speech. Expect no calls to `mock_encoder_`.
EXPECT_FALSE(CheckMixedActivePassive(Vad::kPassive, Vad::kPassive));
EXPECT_FALSE(encoded_info_.speech);
}
@ -442,7 +442,7 @@ class AudioEncoderCngDeathTest : public AudioEncoderCngTest {
}
// Override AudioEncoderCngTest::TearDown, since that one expects a call to
// the destructor of |mock_vad_|. In this case, that object is already
// the destructor of `mock_vad_`. In this case, that object is already
// deleted.
void TearDown() override { cng_.reset(); }


@ -193,10 +193,10 @@ bool ComfortNoiseDecoder::Generate(rtc::ArrayView<int16_t> out_data,
WebRtcSpl_ScaleVector(excitation, excitation, dec_used_scale_factor_,
num_samples, 13);
/* |lpPoly| - Coefficients in Q12.
* |excitation| - Speech samples.
/* `lpPoly` - Coefficients in Q12.
* `excitation` - Speech samples.
* |nst->dec_filtstate| - State preservation.
* |out_data| - Filtered speech samples. */
* `out_data` - Filtered speech samples. */
WebRtcSpl_FilterAR(lpPoly, WEBRTC_CNG_MAX_LPC_ORDER + 1, excitation,
num_samples, dec_filtstate_, WEBRTC_CNG_MAX_LPC_ORDER,
dec_filtstateLow_, WEBRTC_CNG_MAX_LPC_ORDER,
@ -395,7 +395,7 @@ size_t ComfortNoiseEncoder::Encode(rtc::ArrayView<const int16_t> speech,
}
namespace {
/* Values in |k| are Q15, and |a| Q12. */
/* Values in `k` are Q15, and `a` Q12. */
void WebRtcCng_K2a16(int16_t* k, int useOrder, int16_t* a) {
int16_t any[WEBRTC_SPL_MAX_LPC_ORDER + 1];
int16_t* aptr;
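
The Q-format notation used throughout these fixed-point comments (Q12, Q15, Q31, "Q0*Q31=Q31 shifted 16 gives Q15") means a signed integer with that many fractional bits. A small self-contained illustration of the convention, not code from the CNG implementation:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Qn: the integer x represents x / 2^n, so Q15 covers [-1, 1) in an int16_t
// and Q12 covers [-8, 8).
int16_t FloatToQ15(float x) {
  return static_cast<int16_t>(std::lround(x * (1 << 15)));
}

float Q12ToFloat(int32_t x) {
  return static_cast<float>(x) / (1 << 12);
}

int main() {
  // Multiplying Q15 by Q12 yields Q27; shifting right by 15 returns to Q12,
  // the pattern behind macros like WEBRTC_SPL_MUL_16_32_RSFT15 in the iSAC
  // hunks below.
  const int16_t k_q15 = FloatToQ15(0.5f);  // 16384
  const int32_t a_q12 = 1 << 12;           // 1.0 in Q12
  const int32_t product_q12 = (static_cast<int32_t>(k_q15) * a_q12) >> 15;
  std::printf("%.3f\n", Q12ToFloat(product_q12));  // 0.500
}
```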


@ -33,13 +33,13 @@ class ComfortNoiseDecoder {
void Reset();
// Updates the CN state when a new SID packet arrives.
// |sid| is a view of the SID packet without the headers.
// `sid` is a view of the SID packet without the headers.
void UpdateSid(rtc::ArrayView<const uint8_t> sid);
// Generates comfort noise.
// |out_data| will be filled with samples - its size determines the number of
// samples generated. When |new_period| is true, CNG history will be reset
// before any audio is generated. Returns |false| if outData is too large -
// `out_data` will be filled with samples - its size determines the number of
// samples generated. When `new_period` is true, CNG history will be reset
// before any audio is generated. Returns `false` if `out_data` is too large -
// currently 640 bytes (equalling 10ms at 64kHz).
// TODO(ossu): Specify better limits for the size of out_data. Either let it
// be unbounded or limit to 10ms in the current sample rate.
@ -61,9 +61,9 @@ class ComfortNoiseDecoder {
class ComfortNoiseEncoder {
public:
// Creates a comfort noise encoder.
// |fs| selects sample rate: 8000 for narrowband or 16000 for wideband.
// |interval| sets the interval at which to generate SID data (in ms).
// |quality| selects the number of refl. coeffs. Maximum allowed is 12.
// `fs` selects sample rate: 8000 for narrowband or 16000 for wideband.
// `interval` sets the interval at which to generate SID data (in ms).
// `quality` selects the number of refl. coeffs. Maximum allowed is 12.
ComfortNoiseEncoder(int fs, int interval, int quality);
~ComfortNoiseEncoder() = default;
@ -74,8 +74,8 @@ class ComfortNoiseEncoder {
// Parameters are set as during construction.
void Reset(int fs, int interval, int quality);
// Analyzes background noise from |speech| and appends coefficients to
// |output|. Returns the number of coefficients generated. If |force_sid| is
// Analyzes background noise from `speech` and appends coefficients to
// `output`. Returns the number of coefficients generated. If `force_sid` is
// true, a SID frame is forced and the internal sid interval counter is reset.
// Will fail if the input size is too large (> 640 samples, see
// ComfortNoiseDecoder::Generate).


@ -60,11 +60,11 @@ class AudioDecoderG722StereoImpl final : public AudioDecoder {
SpeechType* speech_type) override;
private:
// Splits the stereo-interleaved payload in |encoded| into separate payloads
// Splits the stereo-interleaved payload in `encoded` into separate payloads
// for left and right channels. The separated payloads are written to
// |encoded_deinterleaved|, which must hold at least |encoded_len| samples.
// `encoded_deinterleaved`, which must hold at least `encoded_len` samples.
// The left channel starts at offset 0, while the right channel starts at
// offset encoded_len / 2 into |encoded_deinterleaved|.
// offset encoded_len / 2 into `encoded_deinterleaved`.
void SplitStereoPacket(const uint8_t* encoded,
size_t encoded_len,
uint8_t* encoded_deinterleaved);


@ -39,7 +39,7 @@ void WebRtcIlbcfix_CreateAugmentedVec(
const int16_t *ppo, *ppi;
int16_t cbVecTmp[4];
/* Interpolation starts 4 elements before cbVec+index, but must not start
outside |cbVec|; clamping interp_len to stay within |cbVec|.
outside `cbVec`; clamping interp_len to stay within `cbVec`.
*/
size_t interp_len = WEBRTC_SPL_MIN(index, 4);
@ -69,12 +69,12 @@ void WebRtcIlbcfix_CreateAugmentedVec(
/* copy the second noninterpolated part */
ppo = buffer - index;
/* |tempbuff2| is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements
long. |buffer| points one element past the end of that vector, i.e., at
/* `tempbuff2` is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements
long. `buffer` points one element past the end of that vector, i.e., at
tempbuff2+SUBL+5. Since ppo=buffer-index, we cannot read any more than
|index| elements from |ppo|.
`index` elements from `ppo`.
|cbVec| is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct.
`cbVec` is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct.
Therefore, we can only write SUBL-index elements to cbVec+index.
These two conditions limit the number of elements to copy.


@ -99,7 +99,7 @@ bool WebRtcIlbcfix_GetCbVec(
// We're going to fill in cbveclen + 5 elements of tempbuff2 in
// WebRtcSpl_FilterMAFastQ12, less than the SUBL + 5 elements we'll be
// using in WebRtcIlbcfix_CreateAugmentedVec. This error is caused by
// bad values in |index| (which come from the encoded stream). Tell the
// bad values in `index` (which come from the encoded stream). Tell the
// caller that things went south, and that the decoder state is now
// corrupt (because it's half-way through an update that we can't
// complete).


@ -831,15 +831,15 @@ It first shifts input data of one matrix, determines the right indexes for the
two matrices, multiplies them, and writes the results into an output buffer.
Note that two factors (or multipliers) determine the initialization values of
the variable |matrix1_index| in the code. The relationship is
|matrix1_index| = |matrix1_index_factor1| * |matrix1_index_factor2|, where
|matrix1_index_factor1| is given by the argument while |matrix1_index_factor2|
is determined by the value of argument |matrix1_index_init_case|;
|matrix1_index_factor2| is the value of the outmost loop counter j (when
|matrix1_index_init_case| is 0), or the value of the middle loop counter k (when
|matrix1_index_init_case| is non-zero).
the variable `matrix1_index` in the code. The relationship is
`matrix1_index` = `matrix1_index_factor1` * `matrix1_index_factor2`, where
`matrix1_index_factor1` is given by the argument while `matrix1_index_factor2`
is determined by the value of argument `matrix1_index_init_case`;
`matrix1_index_factor2` is the value of the outermost loop counter j (when
`matrix1_index_init_case` is 0), or the value of the middle loop counter k (when
`matrix1_index_init_case` is non-zero).
|matrix0_index| is determined the same way.
`matrix0_index` is determined the same way.
Arguments:
matrix0[]: matrix0 data in Q15 domain.


@ -75,7 +75,7 @@ static void AllpassFilterForDec32(int16_t *InOut16, //Q0
a = WEBRTC_SPL_MUL_16_32_RSFT16(InOut16[n], APSectionFactors[j]); //Q0*Q31=Q31 shifted 16 gives Q15
a <<= 1; // Q15 -> Q16
b = WebRtcSpl_AddSatW32(a, FilterState[j]); //Q16+Q16=Q16
// |a| in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
// `a` in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
a = WEBRTC_SPL_MUL_16_32_RSFT16(b >> 16, -APSectionFactors[j]);
// FilterState[j]: Q15<<1 + Q0<<16 = Q16 + Q16 = Q16
FilterState[j] = WebRtcSpl_AddSatW32(a << 1, (uint32_t)InOut16[n] << 16);


@ -34,7 +34,7 @@ Time2Spec WebRtcIsacfix_Time2Spec;
MatrixProduct1 WebRtcIsacfix_MatrixProduct1;
MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
/* This method assumes that |stream_size_bytes| is in valid range,
/* This method assumes that `stream_size_bytes` is in valid range,
* i.e. >= 0 && <= STREAM_MAXW16_60MS
*/
static void InitializeDecoderBitstream(size_t stream_size_bytes,
@ -294,8 +294,8 @@ int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst,
return statusInit;
}
/* Read the given number of bytes of big-endian 16-bit integers from |src| and
write them to |dest| in host endian. If |nbytes| is odd, the number of
/* Read the given number of bytes of big-endian 16-bit integers from `src` and
write them to `dest` in host endian. If `nbytes` is odd, the number of
output elements is rounded up, and the least significant byte of the last
element is set to 0. */
static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
@ -306,8 +306,8 @@ static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
dest[nbytes / 2] = src[nbytes - 1] << 8;
}
/* Read the given number of bytes of host-endian 16-bit integers from |src| and
write them to |dest| in big endian. If |nbytes| is odd, the number of source
/* Read the given number of bytes of host-endian 16-bit integers from `src` and
write them to `dest` in big endian. If `nbytes` is odd, the number of source
elements is rounded up (but only the most significant byte of the last
element is used), and the number of output bytes written will be
nbytes + 1. */
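
A sketch of `read_be16` consistent with the comment and with the odd-byte line retained in the hunk above (the even-byte loop body is assumed):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Read `nbytes` of big-endian 16-bit integers from `src` into host-endian
// `dest`. For odd `nbytes`, the element count rounds up and the least
// significant byte of the last element is 0.
void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
  for (size_t i = 0; i + 1 < nbytes; i += 2)
    dest[i / 2] = static_cast<uint16_t>((src[i] << 8) | src[i + 1]);
  if (nbytes % 2 == 1)
    dest[nbytes / 2] = static_cast<uint16_t>(src[nbytes - 1] << 8);
}

int main() {
  const uint8_t bytes[3] = {0x12, 0x34, 0x56};
  uint16_t out[2];
  read_be16(bytes, 3, out);
  std::printf("%04x %04x\n", out[0], out[1]);  // 1234 5600
}
```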


@ -663,7 +663,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
/* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
/* Calculate corrlo2[0] = tmpQQlo * corrlo[0] - 2.0*tmpQQlo * corrlo[1];*/
// |corrlo2QQ| in Q(QdomLO-5).
// `corrlo2QQ` in Q(QdomLO-5).
corrlo2QQ[0] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[0]) >> 1) -
(WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, corrloQQ[1]) >> 2);
@ -721,12 +721,12 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
} else if ((sh-shMem)<7){
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible
// Shift |alpha| the number of times required to get |tmp| in QdomLO.
// Shift `alpha` the number of times required to get `tmp` in QdomLO.
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
} else {
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
// Shift |alpha| as much as possible without overflow the number of
// times required to get |tmp| in QdomLO.
// Shift `alpha` as much as possible without overflow, up to the number of
// times required to get `tmp` in QdomLO.
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
tmpCorr = corrloQQ[n] >> (sh - shMem - 6);
tmp = tmp + tmpCorr;
@ -774,7 +774,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
maskdata->CorrBufHiQdom[n] = QdomHI;
} else if ((sh-shMem)<7) {
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
// Shift |alpha| the number of times required to get |tmp| in QdomHI.
// Shift `alpha` the number of times required to get `tmp` in QdomHI.
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
tmpCorr = corrhiQQ[n];
tmp = tmp + tmpCorr;
@ -782,8 +782,8 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
maskdata->CorrBufHiQdom[n] = QdomHI;
} else {
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
// Shift |alpha| as much as possible without overflow the number of
// times required to get |tmp| in QdomHI.
// Shift `alpha` as much as possible without overflow, up to the number of
// times required to get `tmp` in QdomHI.
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
tmpCorr = corrhiQQ[n] >> (sh - shMem - 6);
tmp = tmp + tmpCorr;
@ -919,7 +919,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1)
ssh = sh_hi >> 1; // |sqrt_nrg| is in Qssh.
ssh = sh_hi >> 1; // `sqrt_nrg` is in Qssh.
sh = ssh - 14;
tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator)


@ -203,7 +203,7 @@ TEST_P(EncoderTest, DoNotOvershootTargetBitrate) {
e->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &encoded);
num_bytes += encoded.size();
}
// Inverse of the duration of |kNumFrames| 10 ms frames (unit: seconds^-1).
// Inverse of the duration of `kNumFrames` 10 ms frames (unit: seconds^-1).
constexpr float kAudioDurationInv = 100.f / kNumFrames;
const int measured_bitrate_bps = 8 * num_bytes * kAudioDurationInv;
EXPECT_LT(measured_bitrate_bps, bitrate_bps + 2000); // Max 2 kbps extra.


@ -606,7 +606,7 @@ int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
int16_t* decoded,
int16_t* speechType);
/* If |inst| is a decoder but not an encoder: tell it what sample rate the
/* If `inst` is a decoder but not an encoder: tell it what sample rate the
encoder is using, for bandwidth estimation purposes. */
void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz);


@ -1446,7 +1446,7 @@ void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata) {
index[k] = WebRtcIsac_kQArRcInitIndex[k];
// The safe-guards in following while conditions are to suppress gcc 4.8.3
// warnings, Issue 2888. Otherwise, first and last elements of
// |WebRtcIsac_kQArBoundaryLevels| are such that the following search
// `WebRtcIsac_kQArBoundaryLevels` are such that the following search
// *never* causes an out-of-boundary read.
if (RCQ15[k] > WebRtcIsac_kQArBoundaryLevels[index[k]]) {
while (index[k] + 1 < NUM_AR_RC_QUANT_BAUNDARY &&


@ -25,8 +25,8 @@
* Post-filtering:
* y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z ^ (-lag);
*
* Note that |lag| is a floating number so we perform an interpolation to
* obtain the correct |lag|.
* Note that `lag` is a floating-point number, so we perform an interpolation to
* obtain the correct `lag`.
*
*/
@ -86,7 +86,7 @@ typedef enum {
* buffer : a buffer where the sum of previous inputs and outputs
* are stored.
* damper_state : the state of the damping filter. The filter is defined by
* |kDampFilter|.
* `kDampFilter`.
* interpol_coeff : pointer to a set of coefficients which are used to utilize
* fractional pitch by interpolation.
* gain : pitch-gain to be applied to the current segment of input.
@ -353,7 +353,7 @@ static void FilterFrame(const double* in_data, PitchFiltstr* filter_state,
if ((mode == kPitchFilterPreGain) || (mode == kPitchFilterPreLa)) {
/* Filter the lookahead segment, this is treated as the last sub-frame. So
* set |pf_param| to last sub-frame. */
* set `pf_param` to last sub-frame. */
filter_parameters.sub_frame = PITCH_SUBFRAMES - 1;
filter_parameters.num_samples = QLOOKAHEAD;
FilterSegment(in_data, &filter_parameters, out_data, out_dg);


@ -59,7 +59,7 @@ std::vector<AudioDecoder::ParseResult> LegacyEncodedAudioFrame::SplitBySamples(
new LegacyEncodedAudioFrame(decoder, std::move(payload)));
results.emplace_back(timestamp, 0, std::move(frame));
} else {
// Reduce the split size by half as long as |split_size_bytes| is at least
// Reduce the split size by half as long as `split_size_bytes` is at least
// twice the minimum chunk size (so that the resulting size is at least as
// large as the minimum chunk size).
while (split_size_bytes >= 2 * min_chunk_size) {


@ -564,9 +564,9 @@ void AudioEncoderOpusImpl::OnReceivedOverhead(
void AudioEncoderOpusImpl::SetReceiverFrameLengthRange(
int min_frame_length_ms,
int max_frame_length_ms) {
// Ensure that |SetReceiverFrameLengthRange| is called before
// |EnableAudioNetworkAdaptor|, otherwise we need to recreate
// |audio_network_adaptor_|, which is not a needed use case.
// Ensure that `SetReceiverFrameLengthRange` is called before
// `EnableAudioNetworkAdaptor`, otherwise we need to recreate
// `audio_network_adaptor_`, which is not a needed use case.
RTC_DCHECK(!audio_network_adaptor_);
FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms,
&config_.supported_frame_lengths_ms);


@ -228,8 +228,8 @@ TEST_P(AudioEncoderOpusTest,
TEST_P(AudioEncoderOpusTest, SetReceiverFrameLengthRange) {
auto states = CreateCodec(sample_rate_hz_, 2);
// Before calling to |SetReceiverFrameLengthRange|,
// |supported_frame_lengths_ms| should contain only the frame length being
// Before calling `SetReceiverFrameLengthRange`,
// `supported_frame_lengths_ms` should contain only the frame length being
// used.
using ::testing::ElementsAre;
EXPECT_THAT(states->encoder->supported_frame_lengths_ms(),
@ -348,7 +348,7 @@ TEST_P(AudioEncoderOpusTest,
// will fail.
constexpr float kPacketLossFraction_1 = 0.02f;
constexpr float kPacketLossFraction_2 = 0.198f;
// |kSecondSampleTimeMs| is chosen to ease the calculation since
// `kSecondSampleTimeMs` is chosen to ease the calculation since
// 0.9999 ^ 6931 = 0.5.
constexpr int64_t kSecondSampleTimeMs = 6931;
@ -380,7 +380,7 @@ TEST_P(AudioEncoderOpusTest, DoNotInvokeSetTargetBitrateIfOverheadUnknown) {
states->encoder->OnReceivedUplinkBandwidth(kDefaultOpusRate * 2,
absl::nullopt);
// Since |OnReceivedOverhead| has not been called, the codec bitrate should
// Since `OnReceivedOverhead` has not been called, the codec bitrate should
// not change.
EXPECT_EQ(kDefaultOpusRate, states->encoder->GetTargetBitrate());
}
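
The arithmetic behind `kSecondSampleTimeMs`: 0.9999^6931 = exp(6931 * ln 0.9999) ~ exp(-0.6931) = 1/2, since ln 0.9999 ~ -1.0001e-4 and ln 2 ~ 0.6931. A one-line check:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // 6931 * ln(0.9999) ~= -ln(2), so the smoothed estimate has decayed to
  // one half after kSecondSampleTimeMs.
  std::printf("%.6f\n", std::pow(0.9999, 6931));  // ~0.500006
}
```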


@ -218,8 +218,8 @@ TEST_P(OpusFecTest, RandomPacketLossTest) {
time_now_ms += block_duration_ms_;
// |data_pointer_| is incremented and wrapped across
// |loop_length_samples_|.
// `data_pointer_` is incremented and wrapped across
// `loop_length_samples_`.
data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
loop_length_samples_;
}


@ -574,8 +574,8 @@ void WebRtcOpus_DecoderInit(OpusDecInst* inst) {
/* For decoder to determine if it is to output speech or comfort noise. */
static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
// Audio type becomes comfort noise if |encoded_byte| is 1 and keeps
// to be so if the following |encoded_byte| are 0 or 1.
// Audio type becomes comfort noise if `encoded_byte` is 1 and remains
// so if the following `encoded_byte` are 0 or 1.
if (encoded_bytes == 0 && inst->in_dtx_mode) {
return 2; // Comfort noise.
} else if (encoded_bytes == 1 || encoded_bytes == 2) {
@ -595,7 +595,7 @@ static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
}
}
/* |frame_size| is set to maximum Opus frame size in the normal case, and
/* `frame_size` is set to maximum Opus frame size in the normal case, and
* is set to the number of samples needed for PLC in case of losses.
* It is up to the caller to make sure the value is correct. */
static int DecodeNative(OpusDecInst* inst,
@ -632,9 +632,9 @@ static int DecodePlc(OpusDecInst* inst, int16_t* decoded) {
FrameSizePerChannel(kWebRtcOpusPlcFrameSizeMs, inst->sample_rate_hz);
if (inst->plc_use_prev_decoded_samples) {
/* The number of samples we ask for is |number_of_lost_frames| times
* |prev_decoded_samples_|. Limit the number of samples to maximum
* |MaxFrameSizePerChannel()|. */
/* The number of samples we ask for is `number_of_lost_frames` times
* `prev_decoded_samples_`. Limit the number of samples to maximum
* `MaxFrameSizePerChannel()`. */
plc_samples = inst->prev_decoded_samples;
const int max_samples_per_channel =
MaxFrameSizePerChannel(inst->sample_rate_hz);
@ -729,9 +729,9 @@ int WebRtcOpus_DurationEst(OpusDecInst* inst,
int WebRtcOpus_PlcDuration(OpusDecInst* inst) {
if (inst->plc_use_prev_decoded_samples) {
/* The number of samples we ask for is |number_of_lost_frames| times
* |prev_decoded_samples_|. Limit the number of samples to maximum
* |MaxFrameSizePerChannel()|. */
/* The number of samples we ask for is `number_of_lost_frames` times
* `prev_decoded_samples_`. Limit the number of samples to maximum
* `MaxFrameSizePerChannel()`. */
const int plc_samples = inst->prev_decoded_samples;
const int max_samples_per_channel =
MaxFrameSizePerChannel(inst->sample_rate_hz);
@ -826,8 +826,8 @@ int WebRtcOpus_PacketHasFec(const uint8_t* payload,
// as binary values with uniform probability, they can be extracted directly
// from the most significant bits of the first byte of compressed data.
for (int n = 0; n < channels; n++) {
// The LBRR bit for channel 1 is on the (|silk_frames| + 1)-th bit, and
// that of channel 2 is on the |(|silk_frames| + 1) * 2 + 1|-th bit.
// The LBRR bit for channel 1 is on the (`silk_frames` + 1)-th bit, and
// that of channel 2 is on the |(`silk_frames` + 1) * 2 + 1|-th bit.
if (frame_data[0][0] & (0x80 >> ((n + 1) * (silk_frames + 1) - 1)))
return 1;
}
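
A sketch of the PLC clamping described in `DecodePlc` and `WebRtcOpus_PlcDuration` above, following the comment's wording (names illustrative; the retained code sets `plc_samples` from `prev_decoded_samples` directly):

```cpp
#include <algorithm>
#include <cstdio>

int PlcSamples(int number_of_lost_frames,
               int prev_decoded_samples,
               int max_frame_size_per_channel) {
  // Ask for `number_of_lost_frames` times the previously decoded frame size,
  // but never more than the maximum frame size per channel.
  return std::min(number_of_lost_frames * prev_decoded_samples,
                  max_frame_size_per_channel);
}

int main() {
  // Two lost 20 ms frames at 48 kHz, capped at 120 ms worth of samples.
  std::printf("%d\n", PlcSamples(2, 960, 5760));  // 1920
}
```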


@ -115,10 +115,10 @@ class OpusTest
void TestCbrEffect(bool dtx, int block_length_ms);
// Prepare |speech_data_| for encoding, read from a hard-coded file.
// Prepare `speech_data_` for encoding, read from a hard-coded file.
// After preparation, |speech_data_.GetNextBlock()| returns a pointer to a
// block of |block_length_ms| milliseconds. The data is looped every
// |loop_length_ms| milliseconds.
// block of `block_length_ms` milliseconds. The data is looped every
// `loop_length_ms` milliseconds.
void PrepareSpeechData(int block_length_ms, int loop_length_ms);
int EncodeDecode(WebRtcOpusEncInst* encoder,
@ -310,24 +310,24 @@ void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) {
// one with an arbitrary size and the other of 1-byte, then stops sending for
// a certain number of frames.
// |max_dtx_frames| is the maximum number of frames Opus can stay in DTX.
// `max_dtx_frames` is the maximum number of frames Opus can stay in DTX.
// TODO(kwiberg): Why does this number depend on the encoding sample rate?
const int max_dtx_frames =
(encoder_sample_rate_hz_ == 16000 ? 800 : 400) / block_length_ms + 1;
// We run |kRunTimeMs| milliseconds of pure silence.
// We run `kRunTimeMs` milliseconds of pure silence.
const int kRunTimeMs = 4500;
// We check that, after a |kCheckTimeMs| milliseconds (given that the CNG in
// We check that, after `kCheckTimeMs` milliseconds (given that the CNG in
// Opus needs time to adapt), the absolute values of DTX decoded signal are
// bounded by |kOutputValueBound|.
// bounded by `kOutputValueBound`.
const int kCheckTimeMs = 4000;
#if defined(OPUS_FIXED_POINT)
// Fixed-point Opus generates random (comfort) noise, which has a less
// predictable value bound than floating-point Opus. This value depends on the
// input signal, and the time window for checking the output values (between
// |kCheckTimeMs| and |kRunTimeMs|).
// `kCheckTimeMs` and `kRunTimeMs`).
const uint16_t kOutputValueBound = 30;
#else
@ -336,7 +336,7 @@ void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) {
int time = 0;
while (time < kRunTimeMs) {
// DTX mode is maintained for maximum |max_dtx_frames| frames.
// DTX mode is maintained for at most `max_dtx_frames` frames.
int i = 0;
for (; i < max_dtx_frames; ++i) {
time += block_length_ms;


@ -29,11 +29,11 @@ class AudioRingBuffer final {
AudioRingBuffer(size_t channels, size_t max_frames);
~AudioRingBuffer();
// Copies |data| to the buffer and advances the write pointer. |channels| must
// Copies `data` to the buffer and advances the write pointer. `channels` must
// be the same as at creation time.
void Write(const float* const* data, size_t channels, size_t frames);
// Copies from the buffer to |data| and advances the read pointer. |channels|
// Copies from the buffer to `data` and advances the read pointer. `channels`
// must be the same as at creation time.
void Read(float* const* data, size_t channels, size_t frames);


@ -16,7 +16,7 @@
namespace {
// Adds |a| and |b| frame by frame into |result| (basically matrix addition).
// Adds `a` and `b` frame by frame into `result` (basically matrix addition).
void AddFrames(const float* const* a,
size_t a_start_index,
const float* const* b,
@ -33,7 +33,7 @@ void AddFrames(const float* const* a,
}
}
// Copies |src| into |dst| channel by channel.
// Copies `src` into `dst` channel by channel.
void CopyFrames(const float* const* src,
size_t src_start_index,
size_t num_frames,
@ -46,7 +46,7 @@ void CopyFrames(const float* const* src,
}
}
// Moves |src| into |dst| channel by channel.
// Moves `src` into `dst` channel by channel.
void MoveFrames(const float* const* src,
size_t src_start_index,
size_t num_frames,
@ -69,8 +69,8 @@ void ZeroOut(float* const* buffer,
}
}
// Pointwise multiplies each channel of |frames| with |window|. Results are
// stored in |frames|.
// Pointwise multiplies each channel of `frames` with `window`. Results are
// stored in `frames`.
void ApplyWindow(const float* window,
size_t num_frames,
size_t num_channels,
@ -134,7 +134,7 @@ Blocker::~Blocker() = default;
// On each call to ProcessChunk():
// 1. New input gets read into sections _b_ and _c_ of the input buffer.
// 2. We block starting from frame_offset.
// 3. We block until we reach a block |bl| that doesn't contain any frames
// 3. We block until we reach a block `bl` that doesn't contain any frames
// from sections _a_ or _b_ of the input buffer.
// 4. We window the current block, fire the callback for processing, window
// again, and overlap/add to the output buffer.
@ -142,7 +142,7 @@ Blocker::~Blocker() = default;
// 6. For both the input and the output buffers, we copy section _c_ into
// section _a_.
// 7. We set the new frame_offset to be the difference between the first frame
// of |bl| and the border between sections _b_ and _c_.
// of `bl` and the border between sections _b_ and _c_.
//
// When block_size > chunk_size the input and output buffers look like this:
//
@ -153,13 +153,13 @@ Blocker::~Blocker() = default;
// On each call to ProcessChunk():
// The procedure is the same as above, except for:
// 1. New input gets read into section _c_ of the input buffer.
// 3. We block until we reach a block |bl| that doesn't contain any frames
// 3. We block until we reach a block `bl` that doesn't contain any frames
// from section _a_ of the input buffer.
// 5. We copy section _a_ of the output buffer into output.
// 6. For both the input and the output buffers, we copy sections _b_ and _c_
// into section _a_ and _b_.
// 7. We set the new frame_offset to be the difference between the first frame
// of |bl| and the border between sections _a_ and _b_.
// of `bl` and the border between sections _a_ and _b_.
//
// * delay here refers to initial_delay_
//


@ -39,7 +39,7 @@ class BlockerCallback {
// of audio, which is not a power of 2. Blocker allows us to specify the
// transform and all other necessary processing via the Process() callback
// function without any constraints on the transform-size
// (read: |block_size_|) or received-audio-size (read: |chunk_size_|).
// (read: `block_size_`) or received-audio-size (read: `chunk_size_`).
// We handle this for the multichannel audio case, allowing for different
// numbers of input and output channels (for example, beamforming takes 2 or
// more input channels and returns 1 output channel). Audio signals are
@ -53,8 +53,8 @@ class BlockerCallback {
// sending back a processed chunk
//
// To use blocker:
// 1. Impelment a BlockerCallback object |bc|.
// 2. Instantiate a Blocker object |b|, passing in |bc|.
// 1. Implement a BlockerCallback object `bc`.
// 2. Instantiate a Blocker object `b`, passing in `bc`.
// 3. As you receive audio, call b.ProcessChunk() to get processed audio.
//
// A small amount of delay is added to the first received chunk to deal with
@ -101,7 +101,7 @@ class Blocker {
// input and output buffers are responsible for saving those frames between
// calls to ProcessChunk().
//
// Both contain |initial delay| + |chunk_size| frames. The input is a fairly
// Both contain |initial delay| + `chunk_size` frames. The input is a fairly
// standard FIFO, but due to the overlap-add it's harder to use an
// AudioRingBuffer for the output.
AudioRingBuffer input_buffer_;
@ -116,7 +116,7 @@ class Blocker {
std::unique_ptr<float[]> window_;
// The amount of frames between the start of contiguous blocks. For example,
// |shift_amount_| = |block_size_| / 2 for a Hann window.
// `shift_amount_` = `block_size_` / 2 for a Hann window.
size_t shift_amount_;
BlockerCallback* callback_;
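
Why `shift_amount_` = `block_size_` / 2 suits a Hann window: copies of a periodic Hann window shifted by half the block length sum to one, so overlap-added windowed blocks keep unit gain. A standalone check of that identity, not code from Blocker itself:

```cpp
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t block_size = 8;
  const size_t shift = block_size / 2;
  const double kPi = std::acos(-1.0);
  std::vector<double> w(block_size);
  for (size_t n = 0; n < block_size; ++n)
    w[n] = 0.5 - 0.5 * std::cos(2.0 * kPi * n / block_size);
  for (size_t n = 0; n < shift; ++n)
    std::printf("%.3f\n", w[n] + w[n + shift]);  // 1.000 for every n
}
```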


@ -84,11 +84,11 @@ class LappedTransform {
std::complex<float>* const* out_block) = 0;
};
// Construct a transform instance. |chunk_length| is the number of samples in
// each channel. |window| defines the window, owned by the caller (a copy is
// made internally); |window| should have length equal to |block_length|.
// |block_length| defines the length of a block, in samples.
// |shift_amount| is in samples. |callback| is the caller-owned audio
// Construct a transform instance. `chunk_length` is the number of samples in
// each channel. `window` defines the window, owned by the caller (a copy is
// made internally); `window` should have length equal to `block_length`.
// `block_length` defines the length of a block, in samples.
// `shift_amount` is in samples. `callback` is the caller-owned audio
// processing function called for each block of the input chunk.
LappedTransform(size_t num_in_channels,
size_t num_out_channels,
@ -99,10 +99,10 @@ class LappedTransform {
Callback* callback);
~LappedTransform();
// Main audio processing helper method. Internally slices |in_chunk| into
// Main audio processing helper method. Internally slices `in_chunk` into
// blocks, transforms them to frequency domain, calls the callback for each
// block and returns a de-blocked time domain chunk of audio through
// |out_chunk|. Both buffers are caller-owned.
// `out_chunk`. Both buffers are caller-owned.
void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
// Get the chunk length.
@ -132,8 +132,8 @@ class LappedTransform {
// Returns the initial delay.
//
// This is the delay introduced by the |blocker_| to be able to get and return
// chunks of |chunk_length|, but process blocks of |block_length|.
// This is the delay introduced by the `blocker_` to be able to get and return
// chunks of `chunk_length`, but process blocks of `block_length`.
size_t initial_delay() const { return blocker_.initial_delay(); }
private:
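The kind of per-block frequency-domain processing a Callback implementation performs can be sketched as a free function over the std::complex<float> block layout used above; the crude low-pass here is purely illustrative:

#include <complex>
#include <cstddef>

// Zeroes all bins at or above `cutoff_bin` in each channel's spectrum.
// `block` holds `num_channels` pointers, each to `num_bins` complex bins.
void ZeroHighBins(std::complex<float>* const* block,
                  size_t num_channels,
                  size_t num_bins,
                  size_t cutoff_bin) {
  for (size_t ch = 0; ch < num_channels; ++ch)
    for (size_t bin = cutoff_bin; bin < num_bins; ++bin)
      block[ch][bin] = 0.0f;
}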

View File

@ -145,7 +145,7 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeImpl(
info.redundant.push_back(it->first);
}
// |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
// `info` will be implicitly cast to an EncodedInfoLeaf struct, effectively
// discarding the (empty) vector of redundant information. This is
// intentional.
if (header_length_bytes > 0) {

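The implicit cast the comment mentions is plain C++ object slicing; a stand-alone illustration with stand-in types (not the real EncodedInfo definitions):

#include <vector>

struct InfoLeaf {
  int encoded_bytes = 0;
};
struct Info : InfoLeaf {
  std::vector<InfoLeaf> redundant;  // Sliced away on conversion to InfoLeaf.
};

InfoLeaf ToLeaf(const Info& info) {
  return info;  // Copies only the InfoLeaf part; `redundant` is discarded.
}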
View File

@ -31,9 +31,9 @@ class AudioCodecSpeedTest : public ::testing::TestWithParam<coding_param> {
virtual void TearDown();
// EncodeABlock(...) does the following:
// 1. encodes a block of audio, saved in |in_data|,
// 2. saves the bit stream to |bit_stream| of |max_bytes| bytes in size,
// 3. assigns |encoded_bytes| the length of the bit stream (in bytes),
// 1. encodes a block of audio, saved in `in_data`,
// 2. saves the bit stream to `bit_stream` of `max_bytes` bytes in size,
// 3. assigns `encoded_bytes` the length of the bit stream (in bytes),
// 4. returns the time (in milliseconds) spent on actual encoding.
virtual float EncodeABlock(int16_t* in_data,
uint8_t* bit_stream,
@ -41,15 +41,15 @@ class AudioCodecSpeedTest : public ::testing::TestWithParam<coding_param> {
size_t* encoded_bytes) = 0;
// DecodeABlock(...) does the following:
// 1. decodes the bit stream in |bit_stream| with a length of |encoded_bytes|
// 1. decodes the bit stream in `bit_stream` with a length of `encoded_bytes`
// (in bytes),
// 2. saves the decoded audio in |out_data|,
// 2. saves the decoded audio in `out_data`,
// 3. returns the time (in milliseconds) spent on actual decoding.
virtual float DecodeABlock(const uint8_t* bit_stream,
size_t encoded_bytes,
int16_t* out_data) = 0;
// Encodes and decodes audio of |audio_duration| (in seconds) and
// Encodes and decodes audio of `audio_duration` (in seconds) and
// records the runtime for encoding and decoding separately.
void EncodeDecode(size_t audio_duration);
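One way an EncodeABlock() override can return the encoding cost is to bracket the codec call with a steady clock; a sketch with a stub standing in for a concrete codec API:

#include <chrono>
#include <cstddef>
#include <cstdint>

// Stub standing in for a real codec's encode call.
size_t StubEncode(const int16_t* /*in_data*/, uint8_t* /*out*/,
                  size_t max_bytes) {
  return max_bytes / 2;  // Pretend half the budget was used.
}

float TimedEncodeABlock(int16_t* in_data,
                        uint8_t* bit_stream,
                        size_t max_bytes,
                        size_t* encoded_bytes) {
  const auto start = std::chrono::steady_clock::now();
  *encoded_bytes = StubEncode(in_data, bit_stream, max_bytes);
  const auto stop = std::chrono::steady_clock::now();
  // Milliseconds spent on the actual encoding, matching the contract above.
  return std::chrono::duration<float, std::milli>(stop - start).count();
}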

View File

@ -83,9 +83,9 @@ class AudioCodingModule {
// Sender
//
// |modifier| is called exactly once with one argument: a pointer to the
// `modifier` is called exactly once with one argument: a pointer to the
// unique_ptr that holds the current encoder (which is null if there is no
// current encoder). For the duration of the call, |modifier| has exclusive
// current encoder). For the duration of the call, `modifier` has exclusive
// access to the unique_ptr; it may call the encoder, steal the encoder and
// replace it with another encoder or with nullptr, etc.
virtual void ModifyEncoder(

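A hedged call-site sketch of the contract described above (include paths assumed): the modifier has exclusive access to the unique_ptr for the duration of the call and may replace its contents.

#include <memory>
#include <utility>

#include "api/audio_codecs/audio_encoder.h"                    // Assumed path.
#include "modules/audio_coding/include/audio_coding_module.h"  // Assumed path.

// Swaps in `new_encoder`; passing nullptr removes the current encoder.
void ReplaceEncoder(webrtc::AudioCodingModule* acm,
                    std::unique_ptr<webrtc::AudioEncoder> new_encoder) {
  acm->ModifyEncoder([&](std::unique_ptr<webrtc::AudioEncoder>* encoder) {
    *encoder = std::move(new_encoder);  // Exclusive access during the call.
  });
}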
View File

@ -57,12 +57,12 @@ Accelerate::ReturnCodes Accelerate::CheckCriteriaAndStretch(
if ((best_correlation > correlation_threshold) || !active_speech) {
// Do accelerate operation by overlap add.
// Pre-calculate common multiplication with |fs_mult_|.
// Pre-calculate common multiplication with `fs_mult_`.
// 120 corresponds to 15 ms.
size_t fs_mult_120 = fs_mult_ * 120;
if (fast_mode) {
// Fit as many multiples of |peak_index| as possible in fs_mult_120.
// Fit as many multiples of `peak_index` as possible in fs_mult_120.
// TODO(henrik.lundin) Consider finding multiple correlation peaks and
// pick the one with the longest correlation lag in this case.
peak_index = (fs_mult_120 / peak_index) * peak_index;
@ -72,11 +72,11 @@ Accelerate::ReturnCodes Accelerate::CheckCriteriaAndStretch(
// Copy first part; 0 to 15 ms.
output->PushBackInterleaved(
rtc::ArrayView<const int16_t>(input, fs_mult_120 * num_channels_));
// Copy the |peak_index| starting at 15 ms to |temp_vector|.
// Copy the `peak_index` starting at 15 ms to `temp_vector`.
AudioMultiVector temp_vector(num_channels_);
temp_vector.PushBackInterleaved(rtc::ArrayView<const int16_t>(
&input[fs_mult_120 * num_channels_], peak_index * num_channels_));
// Cross-fade |temp_vector| onto the end of |output|.
// Cross-fade `temp_vector` onto the end of `output`.
output->CrossFade(temp_vector, peak_index);
// Copy the last unmodified part, 15 ms + pitch period until the end.
output->PushBackInterleaved(rtc::ArrayView<const int16_t>(

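A worked example of the multiple-fitting line above, under assumed values: at 48 kHz, fs_mult_ = 48000 / 8000 = 6, so fs_mult_120 = 720 samples (15 ms).

#include <cassert>
#include <cstddef>

int main() {
  const size_t fs_mult_120 = 6 * 120;  // 15 ms at 48 kHz (fs_mult_ = 6).
  size_t peak_index = 300;             // Assumed pitch period in samples.
  peak_index = (fs_mult_120 / peak_index) * peak_index;
  assert(peak_index == 600);  // Two whole pitch periods fit in 720 samples.
  return 0;
}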
View File

@ -34,10 +34,10 @@ class Accelerate : public TimeStretch {
: TimeStretch(sample_rate_hz, num_channels, background_noise) {}
// This method performs the actual Accelerate operation. The samples are
// read from |input|, of length |input_length| elements, and are written to
// |output|. The number of samples removed through time-stretching is
// provided in the output |length_change_samples|. The method returns
// the outcome of the operation as an enumerator value. If |fast_accelerate|
// read from `input`, of length `input_length` elements, and are written to
// `output`. The number of samples removed through time-stretching is
// provided in the output `length_change_samples`. The method returns
// the outcome of the operation as an enumerator value. If `fast_accelerate`
// is true, the algorithm will relax the requirements on finding strong
// correlations, and may remove multiple pitch periods if possible.
ReturnCodes Process(const int16_t* input,
@ -47,7 +47,7 @@ class Accelerate : public TimeStretch {
size_t* length_change_samples);
protected:
// Sets the parameters |best_correlation| and |peak_index| to suitable
// Sets the parameters `best_correlation` and `peak_index` to suitable
// values when the signal contains no active speech.
void SetParametersForPassiveSpeech(size_t len,
int16_t* best_correlation,

View File

@ -40,7 +40,7 @@ namespace {
constexpr int kOverheadBytesPerPacket = 50;
// The absolute difference between the input and output (the first channel) is
// compared vs |tolerance|. The parameter |delay| is used to correct for codec
// compared vs `tolerance`. The parameter `delay` is used to correct for codec
// delays.
void CompareInputOutput(const std::vector<int16_t>& input,
const std::vector<int16_t>& output,
@ -56,8 +56,8 @@ void CompareInputOutput(const std::vector<int16_t>& input,
}
}
// The absolute difference between the first two channels in |output| is
// compared vs |tolerance|.
// The absolute difference between the first two channels in `output` is
// compared vs `tolerance`.
void CompareTwoChannels(const std::vector<int16_t>& output,
size_t samples_per_channel,
size_t channels,
@ -70,7 +70,7 @@ void CompareTwoChannels(const std::vector<int16_t>& output,
}
// Calculates mean-squared error between input and output (the first channel).
// The parameter |delay| is used to correct for codec delays.
// The parameter `delay` is used to correct for codec delays.
double MseInputOutput(const std::vector<int16_t>& input,
const std::vector<int16_t>& output,
size_t num_samples,
@ -152,10 +152,10 @@ class AudioDecoderTest : public ::testing::Test {
}
// Encodes and decodes audio. The absolute difference between the input and
// output is compared vs |tolerance|, and the mean-squared error is compared
// with |mse|. The encoded stream should contain |expected_bytes|. For stereo
// output is compared vs `tolerance`, and the mean-squared error is compared
// with `mse`. The encoded stream should contain `expected_bytes`. For stereo
// audio, the absolute difference between the two channels is compared vs
// |channel_diff_tolerance|.
// `channel_diff_tolerance`.
void EncodeDecodeTest(size_t expected_bytes,
int tolerance,
double mse,
@ -170,7 +170,7 @@ class AudioDecoderTest : public ::testing::Test {
std::vector<int16_t> input;
std::vector<int16_t> decoded;
while (processed_samples + frame_size_ <= data_length_) {
// Extend the input vector by |frame_size_| samples.
// Extend the input vector by `frame_size_` samples.
input.resize(input.size() + frame_size_, 0);
// Read from input file.
ASSERT_GE(input.size() - processed_samples, frame_size_);
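A single-channel reference sketch of the delay-corrected error measure these helpers describe, assuming the decoded output lags the input by `delay` samples (names and the exact alignment convention are illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

double MseWithDelay(const std::vector<int16_t>& input,
                    const std::vector<int16_t>& output,
                    size_t num_samples,
                    size_t delay) {
  double sum_sq = 0.0;
  for (size_t i = 0; i < num_samples; ++i) {
    const double diff = input[i] - output[i + delay];  // Align by `delay`.
    sum_sq += diff * diff;
  }
  return sum_sq / num_samples;
}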

View File

@ -77,8 +77,8 @@ void AudioMultiVector::PushBackInterleaved(
size_t length_per_channel = append_this.size() / num_channels_;
int16_t* temp_array = new int16_t[length_per_channel]; // Temporary storage.
for (size_t channel = 0; channel < num_channels_; ++channel) {
// Copy elements to |temp_array|.
// Set |source_ptr| to first element of this channel.
// Copy elements to `temp_array`.
// Set `source_ptr` to first element of this channel.
const int16_t* source_ptr = &append_this[channel];
for (size_t i = 0; i < length_per_channel; ++i) {
temp_array[i] = *source_ptr;
@ -132,7 +132,7 @@ size_t AudioMultiVector::ReadInterleavedFromIndex(size_t start_index,
size_t length,
int16_t* destination) const {
RTC_DCHECK(destination);
size_t index = 0; // Number of elements written to |destination| so far.
size_t index = 0; // Number of elements written to `destination` so far.
RTC_DCHECK_LE(start_index, Size());
start_index = std::min(start_index, Size());
if (length + start_index > Size()) {
@ -162,7 +162,7 @@ void AudioMultiVector::OverwriteAt(const AudioMultiVector& insert_this,
size_t length,
size_t position) {
RTC_DCHECK_EQ(num_channels_, insert_this.num_channels_);
// Cap |length| at the length of |insert_this|.
// Cap `length` at the length of `insert_this`.
RTC_DCHECK_LE(length, insert_this.Size());
length = std::min(length, insert_this.Size());
if (num_channels_ == insert_this.num_channels_) {

View File

@ -24,12 +24,12 @@ namespace webrtc {
class AudioMultiVector {
public:
// Creates an empty AudioMultiVector with |N| audio channels. |N| must be
// Creates an empty AudioMultiVector with `N` audio channels. `N` must be
// larger than 0.
explicit AudioMultiVector(size_t N);
// Creates an AudioMultiVector with |N| audio channels, each channel having
// an initial size. |N| must be larger than 0.
// Creates an AudioMultiVector with `N` audio channels, each channel having
// an initial size. `N` must be larger than 0.
AudioMultiVector(size_t N, size_t initial_size);
virtual ~AudioMultiVector();
@ -37,47 +37,47 @@ class AudioMultiVector {
// Deletes all values and makes the vector empty.
virtual void Clear();
// Clears the vector and inserts |length| zeros into each channel.
// Clears the vector and inserts `length` zeros into each channel.
virtual void Zeros(size_t length);
// Copies all values from this vector to |copy_to|. Any contents in |copy_to|
// are deleted. After the operation is done, |copy_to| will be an exact
// Copies all values from this vector to `copy_to`. Any contents in `copy_to`
// are deleted. After the operation is done, `copy_to` will be an exact
// replica of this object. The source and the destination must have the same
// number of channels.
virtual void CopyTo(AudioMultiVector* copy_to) const;
// Appends the contents of |append_this| to the end of this object. The array
// Appends the contents of `append_this` to the end of this object. The array
// is assumed to be channel-interleaved. The length must be an even multiple
// of this object's number of channels. The length of this object is increased
// with the length of the array divided by the number of channels.
void PushBackInterleaved(rtc::ArrayView<const int16_t> append_this);
// Appends the contents of AudioMultiVector |append_this| to this object. The
// length of this object is increased with the length of |append_this|.
// Appends the contents of AudioMultiVector `append_this` to this object. The
// length of this object is increased with the length of `append_this`.
virtual void PushBack(const AudioMultiVector& append_this);
// Appends the contents of AudioMultiVector |append_this| to this object,
// taken from |index| up until the end of |append_this|. The length of this
// Appends the contents of AudioMultiVector `append_this` to this object,
// taken from `index` up until the end of `append_this`. The length of this
// object is increased.
virtual void PushBackFromIndex(const AudioMultiVector& append_this,
size_t index);
// Removes |length| elements from the beginning of this object, from each
// Removes `length` elements from the beginning of this object, from each
// channel.
virtual void PopFront(size_t length);
// Removes |length| elements from the end of this object, from each
// Removes `length` elements from the end of this object, from each
// channel.
virtual void PopBack(size_t length);
// Reads |length| samples from each channel and writes them interleaved to
// |destination|. The total number of elements written to |destination| is
// returned, i.e., |length| * number of channels. If the AudioMultiVector
// contains less than |length| samples per channel, this is reflected in the
// Reads `length` samples from each channel and writes them interleaved to
// `destination`. The total number of elements written to `destination` is
// returned, i.e., `length` * number of channels. If the AudioMultiVector
// contains less than `length` samples per channel, this is reflected in the
// return value.
virtual size_t ReadInterleaved(size_t length, int16_t* destination) const;
// Like ReadInterleaved() above, but reads from |start_index| instead of from
// Like ReadInterleaved() above, but reads from `start_index` instead of from
// the beginning.
virtual size_t ReadInterleavedFromIndex(size_t start_index,
size_t length,
@ -89,18 +89,18 @@ class AudioMultiVector {
int16_t* destination) const;
// Overwrites each channel in this AudioMultiVector with values taken from
// |insert_this|. The values are taken from the beginning of |insert_this| and
// are inserted starting at |position|. |length| values are written into each
// channel. If |length| and |position| are selected such that the new data
// `insert_this`. The values are taken from the beginning of `insert_this` and
// are inserted starting at `position`. `length` values are written into each
// channel. If `length` and `position` are selected such that the new data
// extends beyond the end of the current AudioVector, the vector is extended
// to accommodate the new data. |length| is limited to the length of
// |insert_this|.
// to accommodate the new data. `length` is limited to the length of
// `insert_this`.
virtual void OverwriteAt(const AudioMultiVector& insert_this,
size_t length,
size_t position);
// Appends |append_this| to the end of the current vector. Lets the two
// vectors overlap by |fade_length| samples (per channel), and cross-fade
// Appends `append_this` to the end of the current vector. Lets the two
// vectors overlap by `fade_length` samples (per channel), and cross-fade
// linearly in this region.
virtual void CrossFade(const AudioMultiVector& append_this,
size_t fade_length);
@ -111,14 +111,14 @@ class AudioMultiVector {
// Returns the number of elements per channel in this AudioMultiVector.
virtual size_t Size() const;
// Verify that each channel can hold at least |required_size| elements. If
// Verify that each channel can hold at least `required_size` elements. If
// not, extend accordingly.
virtual void AssertSize(size_t required_size);
virtual bool Empty() const;
// Copies the data between two channels in the AudioMultiVector. The method
// does not add any new channel. Thus, |from_channel| and |to_channel| must
// does not add any new channel. Thus, `from_channel` and `to_channel` must
// both be valid channel numbers.
virtual void CopyChannel(size_t from_channel, size_t to_channel);
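A short usage sketch of the interleaved API documented above (include path assumed):

#include <cstdint>

#include "modules/audio_coding/neteq/audio_multi_vector.h"  // Assumed path.

void InterleavedRoundtrip() {
  webrtc::AudioMultiVector vec(2);             // Two channels.
  const int16_t interleaved[] = {1, 2, 3, 4};  // L R L R.
  vec.PushBackInterleaved(interleaved);        // Size() becomes 2 per channel.
  int16_t out[4] = {0};
  vec.ReadInterleaved(2, out);                 // Writes 4 elements, L R L R.
}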

View File

@ -94,7 +94,7 @@ TEST_P(AudioMultiVectorTest, PushBackInterleavedAndCopy) {
AudioMultiVector vec(num_channels_);
vec.PushBackInterleaved(array_interleaved_);
AudioMultiVector vec_copy(num_channels_);
vec.CopyTo(&vec_copy); // Copy from |vec| to |vec_copy|.
vec.CopyTo(&vec_copy); // Copy from `vec` to `vec_copy`.
ASSERT_EQ(num_channels_, vec.Channels());
ASSERT_EQ(array_length(), vec.Size());
ASSERT_EQ(num_channels_, vec_copy.Channels());
@ -106,7 +106,7 @@ TEST_P(AudioMultiVectorTest, PushBackInterleavedAndCopy) {
}
}
// Clear |vec| and verify that it is empty.
// Clear `vec` and verify that it is empty.
vec.Clear();
EXPECT_TRUE(vec.Empty());
@ -208,7 +208,7 @@ TEST_P(AudioMultiVectorTest, PopFront) {
vec.PushBackInterleaved(array_interleaved_);
vec.PopFront(1); // Remove one element from each channel.
ASSERT_EQ(array_length() - 1u, vec.Size());
// Let |ptr| point to the second element of the first channel in the
// Let `ptr` point to the second element of the first channel in the
// interleaved array.
int16_t* ptr = &array_interleaved_[num_channels_];
for (size_t i = 0; i < array_length() - 1; ++i) {
@ -227,7 +227,7 @@ TEST_P(AudioMultiVectorTest, PopBack) {
vec.PushBackInterleaved(array_interleaved_);
vec.PopBack(1); // Remove one element from each channel.
ASSERT_EQ(array_length() - 1u, vec.Size());
// Let |ptr| point to the first element of the first channel in the
// Let `ptr` point to the first element of the first channel in the
// interleaved array.
int16_t* ptr = array_interleaved_.data();
for (size_t i = 0; i < array_length() - 1; ++i) {

View File

@ -245,14 +245,14 @@ void AudioVector::OverwriteAt(const int16_t* insert_this,
void AudioVector::CrossFade(const AudioVector& append_this,
size_t fade_length) {
// Fade length cannot be longer than the current vector or |append_this|.
// Fade length cannot be longer than the current vector or `append_this`.
RTC_DCHECK_LE(fade_length, Size());
RTC_DCHECK_LE(fade_length, append_this.Size());
fade_length = std::min(fade_length, Size());
fade_length = std::min(fade_length, append_this.Size());
size_t position = Size() - fade_length + begin_index_;
// Cross fade the overlapping regions.
// |alpha| is the mixing factor in Q14.
// `alpha` is the mixing factor in Q14.
// TODO(hlundin): Consider skipping +1 in the denominator to produce a
// smoother cross-fade, in particular at the end of the fade.
int alpha_step = 16384 / (static_cast<int>(fade_length) + 1);
@ -265,7 +265,7 @@ void AudioVector::CrossFade(const AudioVector& append_this,
14;
}
RTC_DCHECK_GE(alpha, 0); // Verify that the slope was correct.
// Append what is left of |append_this|.
// Append what is left of `append_this`.
size_t samples_to_push_back = append_this.Size() - fade_length;
if (samples_to_push_back > 0)
PushBack(append_this, samples_to_push_back, fade_length);
@ -286,8 +286,8 @@ void AudioVector::Reserve(size_t n) {
return;
const size_t length = Size();
// Reserve one more sample to remove the ambiguity between empty vector and
// full vector. Therefore |begin_index_| == |end_index_| indicates empty
// vector, and |begin_index_| == (|end_index_| + 1) % capacity indicates
// full vector. Therefore `begin_index_` == `end_index_` indicates empty
// vector, and `begin_index_` == (`end_index_` + 1) % capacity indicates
// full vector.
std::unique_ptr<int16_t[]> temp_array(new int16_t[n + 1]);
CopyTo(length, 0, temp_array.get());
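The Q14 cross-fade above fits in a small stand-alone reference sketch: `alpha` ramps down from 16384 (unity in Q14), and adding 8192 before the shift rounds to nearest. Names mirror the comments, not the exact implementation.

#include <cstddef>
#include <cstdint>
#include <vector>

void CrossFadeQ14(std::vector<int16_t>* a,
                  const std::vector<int16_t>& b,
                  size_t fade_length) {
  const size_t start = a->size() - fade_length;
  const int alpha_step = 16384 / (static_cast<int>(fade_length) + 1);
  int alpha = 16384;  // Unity gain in Q14.
  for (size_t i = 0; i < fade_length; ++i) {
    alpha -= alpha_step;
    (*a)[start + i] = static_cast<int16_t>(
        (alpha * (*a)[start + i] + (16384 - alpha) * b[i] + 8192) >> 14);
  }
}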

View File

@ -34,27 +34,27 @@ class AudioVector {
// Deletes all values and makes the vector empty.
virtual void Clear();
// Copies all values from this vector to |copy_to|. Any contents in |copy_to|
// Copies all values from this vector to `copy_to`. Any contents in `copy_to`
// are deleted before the copy operation. After the operation is done,
// |copy_to| will be an exact replica of this object.
// `copy_to` will be an exact replica of this object.
virtual void CopyTo(AudioVector* copy_to) const;
// Copies |length| values from |position| in this vector to |copy_to|.
// Copies `length` values from `position` in this vector to `copy_to`.
virtual void CopyTo(size_t length, size_t position, int16_t* copy_to) const;
// Prepends the contents of AudioVector |prepend_this| to this object. The
// length of this object is increased with the length of |prepend_this|.
// Prepends the contents of AudioVector `prepend_this` to this object. The
// length of this object is increased with the length of `prepend_this`.
virtual void PushFront(const AudioVector& prepend_this);
// Same as above, but with an array |prepend_this| with |length| elements as
// Same as above, but with an array `prepend_this` with `length` elements as
// source.
virtual void PushFront(const int16_t* prepend_this, size_t length);
// Same as PushFront but will append to the end of this object.
virtual void PushBack(const AudioVector& append_this);
// Appends a segment of |append_this| to the end of this object. The segment
// starts from |position| and has |length| samples.
// Appends a segment of `append_this` to the end of this object. The segment
// starts from `position` and has `length` samples.
virtual void PushBack(const AudioVector& append_this,
size_t length,
size_t position);
@ -62,47 +62,47 @@ class AudioVector {
// Same as PushFront but will append to the end of this object.
virtual void PushBack(const int16_t* append_this, size_t length);
// Removes |length| elements from the beginning of this object.
// Removes `length` elements from the beginning of this object.
virtual void PopFront(size_t length);
// Removes |length| elements from the end of this object.
// Removes `length` elements from the end of this object.
virtual void PopBack(size_t length);
// Extends this object with |extra_length| elements at the end. The new
// Extends this object with `extra_length` elements at the end. The new
// elements are initialized to zero.
virtual void Extend(size_t extra_length);
// Inserts |length| elements taken from the array |insert_this| and insert
// them at |position|. The length of the AudioVector is increased by |length|.
// |position| = 0 means that the new values are prepended to the vector.
// |position| = Size() means that the new values are appended to the vector.
// Inserts `length` elements taken from the array `insert_this` and insert
// them at `position`. The length of the AudioVector is increased by `length`.
// `position` = 0 means that the new values are prepended to the vector.
// `position` = Size() means that the new values are appended to the vector.
virtual void InsertAt(const int16_t* insert_this,
size_t length,
size_t position);
// Like InsertAt, but inserts |length| zero elements at |position|.
// Like InsertAt, but inserts `length` zero elements at `position`.
virtual void InsertZerosAt(size_t length, size_t position);
// Overwrites |length| elements of this AudioVector starting from |position|
// with the first values in |insert_this|. The definition of |position|
// is the same as for InsertAt(). If |length| and |position| are selected
// Overwrites `length` elements of this AudioVector starting from `position`
// with the first values in `insert_this`. The definition of `position`
// is the same as for InsertAt(). If `length` and `position` are selected
// such that the new data extends beyond the end of the current AudioVector,
// the vector is extended to accommodate the new data.
virtual void OverwriteAt(const AudioVector& insert_this,
size_t length,
size_t position);
// Overwrites |length| elements of this AudioVector with values taken from the
// array |insert_this|, starting at |position|. The definition of |position|
// is the same as for InsertAt(). If |length| and |position| are selected
// Overwrites `length` elements of this AudioVector with values taken from the
// array `insert_this`, starting at `position`. The definition of `position`
// is the same as for InsertAt(). If `length` and `position` are selected
// such that the new data extends beyond the end of the current AudioVector,
// the vector is extended to accommodate the new data.
virtual void OverwriteAt(const int16_t* insert_this,
size_t length,
size_t position);
// Appends |append_this| to the end of the current vector. Lets the two
// vectors overlap by |fade_length| samples, and cross-fade linearly in this
// Appends `append_this` to the end of the current vector. Lets the two
// vectors overlap by `fade_length` samples, and cross-fade linearly in this
// region.
virtual void CrossFade(const AudioVector& append_this, size_t fade_length);
@ -158,11 +158,11 @@ class AudioVector {
size_t capacity_; // Allocated number of samples in the array.
// The index of the first sample in |array_|, except when
// The index of the first sample in `array_`, except when
// |begin_index_ == end_index_|, which indicates an empty buffer.
size_t begin_index_;
// The index of the sample after the last sample in |array_|.
// The index of the sample after the last sample in `array_`.
size_t end_index_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioVector);
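The empty/full disambiguation described for `begin_index_` and `end_index_` is the classic one-spare-slot ring buffer; a minimal sketch of just the index bookkeeping:

#include <cstddef>

struct RingIndices {
  size_t capacity;   // Allocated slots; one more than the usable samples.
  size_t begin = 0;  // Index of the first sample.
  size_t end = 0;    // Index one past the last sample.
  bool Empty() const { return begin == end; }
  bool Full() const { return (end + 1) % capacity == begin; }
  size_t Size() const { return (end + capacity - begin) % capacity; }
};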

View File

@ -62,7 +62,7 @@ TEST_F(AudioVectorTest, PushBackAndCopy) {
AudioVector vec;
AudioVector vec_copy;
vec.PushBack(array_, array_length());
vec.CopyTo(&vec_copy); // Copy from |vec| to |vec_copy|.
vec.CopyTo(&vec_copy); // Copy from `vec` to `vec_copy`.
ASSERT_EQ(array_length(), vec.Size());
ASSERT_EQ(array_length(), vec_copy.Size());
for (size_t i = 0; i < array_length(); ++i) {
@ -70,7 +70,7 @@ TEST_F(AudioVectorTest, PushBackAndCopy) {
EXPECT_EQ(array_[i], vec_copy[i]);
}
// Clear |vec| and verify that it is empty.
// Clear `vec` and verify that it is empty.
vec.Clear();
EXPECT_TRUE(vec.Empty());
@ -178,8 +178,8 @@ TEST_F(AudioVectorTest, InsertAt) {
int insert_position = 5;
vec.InsertAt(new_array, kNewLength, insert_position);
// Verify that the vector looks as follows:
// {0, 1, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
// |insert_position|, |insert_position| + 1, ..., kLength - 1}.
// {0, 1, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
// `insert_position`, `insert_position` + 1, ..., kLength - 1}.
size_t pos = 0;
for (int i = 0; i < insert_position; ++i) {
EXPECT_EQ(array_[i], vec[pos]);
@ -309,8 +309,8 @@ TEST_F(AudioVectorTest, OverwriteAt) {
size_t insert_position = 2;
vec.OverwriteAt(new_array, kNewLength, insert_position);
// Verify that the vector looks as follows:
// {0, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
// |insert_position|, |insert_position| + 1, ..., kLength - 1}.
// {0, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
// `insert_position`, `insert_position` + 1, ..., kLength - 1}.
size_t pos = 0;
for (pos = 0; pos < insert_position; ++pos) {
EXPECT_EQ(array_[pos], vec[pos]);
@ -340,8 +340,8 @@ TEST_F(AudioVectorTest, OverwriteBeyondEnd) {
vec.OverwriteAt(new_array, kNewLength, insert_position);
ASSERT_EQ(array_length() - 2u + kNewLength, vec.Size());
// Verify that the vector looks as follows:
// {0, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
// |insert_position|, |insert_position| + 1, ..., kLength - 1}.
// {0, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
// `insert_position`, `insert_position` + 1, ..., kLength - 1}.
int pos = 0;
for (pos = 0; pos < insert_position; ++pos) {
EXPECT_EQ(array_[pos], vec[pos]);
@ -350,7 +350,7 @@ TEST_F(AudioVectorTest, OverwriteBeyondEnd) {
EXPECT_EQ(new_array[i], vec[pos]);
++pos;
}
// Verify that we checked to the end of |vec|.
// Verify that we checked to the end of `vec`.
EXPECT_EQ(vec.Size(), static_cast<size_t>(pos));
}
@ -359,7 +359,7 @@ TEST_F(AudioVectorTest, CrossFade) {
static const size_t kFadeLength = 10;
AudioVector vec1(kLength);
AudioVector vec2(kLength);
// Set all vector elements to 0 in |vec1| and 100 in |vec2|.
// Set all vector elements to 0 in `vec1` and 100 in `vec2`.
for (size_t i = 0; i < kLength; ++i) {
vec1[i] = 0;
vec2[i] = 100;

View File

@ -108,8 +108,8 @@ bool BackgroundNoise::Update(const AudioMultiVector& input,
if ((sample_energy > 0) &&
(int64_t{5} * residual_energy >= int64_t{16} * sample_energy)) {
// Spectrum is flat enough; save filter parameters.
// |temp_signal| + |kVecLen| - |kMaxLpcOrder| points at the first of the
// |kMaxLpcOrder| samples in the residual signal, which will form the
// `temp_signal` + `kVecLen` - `kMaxLpcOrder` points at the first of the
// `kMaxLpcOrder` samples in the residual signal, which will form the
// filter state for the next noise generation.
SaveParameters(channel_ix, lpc_coefficients,
temp_signal + kVecLen - kMaxLpcOrder, sample_energy,
@ -117,7 +117,7 @@ bool BackgroundNoise::Update(const AudioMultiVector& input,
filter_params_saved = true;
}
} else {
// Will only happen if post-decode VAD is disabled and |sample_energy| is
// Will only happen if post-decode VAD is disabled and `sample_energy` is
// not low enough. Increase the threshold for update so that it increases
// by a factor 4 in 4 seconds.
IncrementEnergyThreshold(channel_ix, sample_energy);
@ -264,8 +264,8 @@ void BackgroundNoise::IncrementEnergyThreshold(size_t channel,
parameters.max_energy = sample_energy;
}
// Set |energy_update_threshold| to no less than 60 dB lower than
// |max_energy_|. Adding 524288 assures proper rounding.
// Set `energy_update_threshold` to no less than 60 dB lower than
// `max_energy_`. Adding 524288 assures proper rounding.
int32_t energy_update_threshold = (parameters.max_energy + 524288) >> 20;
if (energy_update_threshold > parameters.energy_update_threshold) {
parameters.energy_update_threshold = energy_update_threshold;
@ -297,9 +297,9 @@ void BackgroundNoise::SaveParameters(size_t channel,
// Calculate scale and shift factor.
parameters.scale = static_cast<int16_t>(WebRtcSpl_SqrtFloor(residual_energy));
// Add 13 to the |scale_shift_|, since the random numbers table is in
// Add 13 to the `scale_shift_`, since the random numbers table is in
// Q13.
// TODO(hlundin): Move the "13" to where the |scale_shift_| is used?
// TODO(hlundin): Move the "13" to where the `scale_shift_` is used?
parameters.scale_shift =
static_cast<int16_t>(13 + ((kLogResidualLength + norm_shift) / 2));
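The constants above can be sanity-checked: shifting right by 20 divides by 2^20, and 10 * log10(2^20) is roughly 60.2 dB, which is where the 60 dB figure comes from; 524288 equals 2^19, half the divisor, so the truncating shift becomes round-to-nearest. A small assumed-value check:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t max_energy = (3 << 20) + (1 << 19);       // Assumed: 3.5 * 2^20.
  const int32_t threshold = (max_energy + 524288) >> 20;  // 524288 == 1 << 19.
  assert(threshold == 4);  // 3.5 rounds to 4 instead of truncating to 3.
  return 0;
}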

View File

@ -37,12 +37,12 @@ class BackgroundNoise {
void Reset();
// Updates the parameter estimates based on the signal currently in the
// |sync_buffer|, and on the latest decision in |vad| if it is running.
// `sync_buffer`, and on the latest decision in `vad` if it is running.
// Returns true if the filter parameters are updated.
bool Update(const AudioMultiVector& sync_buffer, const PostDecodeVad& vad);
// Generates background noise given a random vector and writes the output to
// |buffer|.
// `buffer`.
void GenerateBackgroundNoise(rtc::ArrayView<const int16_t> random_vector,
size_t channel,
int mute_slope,
@ -50,29 +50,29 @@ class BackgroundNoise {
size_t num_noise_samples,
int16_t* buffer);
// Returns |energy_| for |channel|.
// Returns `energy_` for `channel`.
int32_t Energy(size_t channel) const;
// Sets the value of |mute_factor_| for |channel| to |value|.
// Sets the value of `mute_factor_` for `channel` to `value`.
void SetMuteFactor(size_t channel, int16_t value);
// Returns |mute_factor_| for |channel|.
// Returns `mute_factor_` for `channel`.
int16_t MuteFactor(size_t channel) const;
// Returns a pointer to |filter_| for |channel|.
// Returns a pointer to `filter_` for `channel`.
const int16_t* Filter(size_t channel) const;
// Returns a pointer to |filter_state_| for |channel|.
// Returns a pointer to `filter_state_` for `channel`.
const int16_t* FilterState(size_t channel) const;
// Copies |input| to the filter state. Will not copy more than |kMaxLpcOrder|
// Copies `input` to the filter state. Will not copy more than `kMaxLpcOrder`
// elements.
void SetFilterState(size_t channel, rtc::ArrayView<const int16_t> input);
// Returns |scale_| for |channel|.
// Returns `scale_` for `channel`.
int16_t Scale(size_t channel) const;
// Returns |scale_shift_| for |channel|.
// Returns `scale_shift_` for `channel`.
int16_t ScaleShift(size_t channel) const;
// Accessors.
@ -117,7 +117,7 @@ class BackgroundNoise {
size_t length,
int32_t* auto_correlation) const;
// Increments the energy threshold by a factor 1 + |kThresholdIncrement|.
// Increments the energy threshold by a factor 1 + `kThresholdIncrement`.
void IncrementEnergyThreshold(size_t channel, int32_t sample_energy);
// Updates the filter parameters.

View File

@ -30,10 +30,10 @@ void BufferLevelFilter::Reset() {
void BufferLevelFilter::Update(size_t buffer_size_samples,
int time_stretched_samples) {
// Filter:
// |filtered_current_level_| = |level_factor_| * |filtered_current_level_| +
// (1 - |level_factor_|) * |buffer_size_samples|
// |level_factor_| and |filtered_current_level_| are in Q8.
// |buffer_size_samples| is in Q0.
// `filtered_current_level_` = `level_factor_` * `filtered_current_level_` +
// (1 - `level_factor_`) * `buffer_size_samples`
// `level_factor_` and `filtered_current_level_` are in Q8.
// `buffer_size_samples` is in Q0.
const int64_t filtered_current_level =
(level_factor_ * int64_t{filtered_current_level_} >> 8) +
(256 - level_factor_) * rtc::dchecked_cast<int64_t>(buffer_size_samples);
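The equation above is a one-pole leaky integrator in Q8; a reference sketch (with a level factor of 251 in Q8, each update keeps 251/256 of the old level and mixes in 5/256 of the new buffer size):

#include <cstdint>

// Returns the new filtered level (Q8) from the old level (Q8), the smoothing
// factor (Q8) and the current buffer size in samples (Q0).
int64_t UpdateLevelQ8(int64_t filtered_level_q8,
                      int level_factor_q8,
                      int64_t buffer_size_samples) {
  return (level_factor_q8 * filtered_level_q8 >> 8) +
         (256 - level_factor_q8) * buffer_size_samples;
}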

View File

@ -24,8 +24,8 @@ class BufferLevelFilter {
virtual ~BufferLevelFilter() {}
virtual void Reset();
// Updates the filter. Current buffer size is |buffer_size_samples|.
// |time_stretched_samples| is subtracted from the filtered value (thus
// Updates the filter. Current buffer size is `buffer_size_samples`.
// `time_stretched_samples` is subtracted from the filtered value (thus
// bypassing the filter operation).
virtual void Update(size_t buffer_size_samples, int time_stretched_samples);

View File

@ -38,7 +38,7 @@ TEST(BufferLevelFilter, ConvergenceTest) {
filter.Update(value, 0 /* time_stretched_samples */);
}
// Expect the filtered value to be (theoretically)
// (1 - (251/256) ^ |times|) * |value|.
// (1 - (251/256) ^ `times`) * `value`.
double expected_value_double = (1 - pow(251.0 / 256.0, times)) * value;
int expected_value = static_cast<int>(expected_value_double);
@ -62,7 +62,7 @@ TEST(BufferLevelFilter, FilterFactor) {
filter.Update(kValue, 0 /* time_stretched_samples */);
}
// Expect the filtered value to be
// (1 - (252/256) ^ |kTimes|) * |kValue|.
// (1 - (252/256) ^ `kTimes`) * `kValue`.
int expected_value = 15;
EXPECT_EQ(expected_value, filter.filtered_current_level());
@ -72,7 +72,7 @@ TEST(BufferLevelFilter, FilterFactor) {
filter.Update(kValue, 0 /* time_stretched_samples */);
}
// Expect the filtered value to be
// (1 - (253/256) ^ |kTimes|) * |kValue|.
// (1 - (253/256) ^ `kTimes`) * `kValue`.
expected_value = 11;
EXPECT_EQ(expected_value, filter.filtered_current_level());
@ -82,7 +82,7 @@ TEST(BufferLevelFilter, FilterFactor) {
filter.Update(kValue, 0 /* time_stretched_samples */);
}
// Expect the filtered value to be
// (1 - (254/256) ^ |kTimes|) * |kValue|.
// (1 - (254/256) ^ `kTimes`) * `kValue`.
expected_value = 8;
EXPECT_EQ(expected_value, filter.filtered_current_level());
}
@ -98,13 +98,13 @@ TEST(BufferLevelFilter, TimeStretchedSamples) {
filter.Update(kValue, 0);
}
// Expect the filtered value to be
// (1 - (251/256) ^ |kTimes|) * |kValue|.
// (1 - (251/256) ^ `kTimes`) * `kValue`.
const int kExpectedValue = 18;
EXPECT_EQ(kExpectedValue, filter.filtered_current_level());
// Update filter again, now with non-zero value for packet length.
// Set the current filtered value to be the input, in order to isolate the
// impact of |kTimeStretchedSamples|.
// impact of `kTimeStretchedSamples`.
filter.Update(filter.filtered_current_level(), kTimeStretchedSamples);
EXPECT_EQ(kExpectedValue - kTimeStretchedSamples,
filter.filtered_current_level());
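The closed form behind these expectations follows from unrolling the filter recursion: with y(0) = 0 and y(n) = a * y(n-1) + (1 - a) * v, the geometric sum gives y(n) = (1 - a^n) * v. A quick numeric check, assuming a = 251/256 and v = 100:

#include <cmath>
#include <cstdio>

int main() {
  const double a = 251.0 / 256.0;
  const double v = 100.0;  // Assumed steady input level.
  double y = 0.0;
  const int times = 10;
  for (int n = 0; n < times; ++n)
    y = a * y + (1.0 - a) * v;
  // Both values agree: the recursion equals the closed form (1 - a^n) * v.
  std::printf("%f %f\n", y, (1.0 - std::pow(a, times)) * v);
  return 0;
}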

View File

@ -119,8 +119,8 @@ int ComfortNoise::Generate(size_t requested_length, AudioMultiVector* output) {
muting_window += muting_window_increment;
unmuting_window += unmuting_window_increment;
}
// Remove |overlap_length_| samples from the front of |output| since they
// were mixed into |sync_buffer_| above.
// Remove `overlap_length_` samples from the front of `output` since they
// were mixed into `sync_buffer_` above.
output->PopFront(overlap_length_);
}
first_call_ = false;

View File

@ -45,11 +45,11 @@ class ComfortNoise {
// Resets the state. Should be called before each new comfort noise period.
void Reset();
// Update the comfort noise generator with the parameters in |packet|.
// Update the comfort noise generator with the parameters in `packet`.
int UpdateParameters(const Packet& packet);
// Generates |requested_length| samples of comfort noise and writes to
// |output|. If this is the first call after Reset (or first after creating
// Generates `requested_length` samples of comfort noise and writes to
// `output`. If this is the first call after Reset (or first after creating
// the object), it will also mix in comfort noise at the end of the
// SyncBuffer object provided in the constructor.
int Generate(size_t requested_length, AudioMultiVector* output);

View File

@ -23,7 +23,7 @@ TEST(ComfortNoise, CreateAndDestroy) {
MockDecoderDatabase db;
SyncBuffer sync_buffer(1, 1000);
ComfortNoise cn(fs, &db, &sync_buffer);
EXPECT_CALL(db, Die()); // Called when |db| goes out of scope.
EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
}
// TODO(hlundin): Write more tests.

View File

@ -17,19 +17,19 @@
namespace webrtc {
// The function calculates the cross-correlation between two sequences
// |sequence_1| and |sequence_2|. |sequence_1| is taken as reference, with
// |sequence_1_length| as its length. |sequence_2| slides for the calculation of
// cross-correlation. The result will be saved in |cross_correlation|.
// |cross_correlation_length| correlation points are calculated.
// `sequence_1` and `sequence_2`. `sequence_1` is taken as reference, with
// `sequence_1_length` as its length. `sequence_2` slides for the calculation of
// cross-correlation. The result will be saved in `cross_correlation`.
// `cross_correlation_length` correlation points are calculated.
// The corresponding lag starts from 0, and increases with a step of
// |cross_correlation_step|. The result is without normalization. To avoid
// `cross_correlation_step`. The result is without normalization. To avoid
// overflow, the result will be right shifted. The amount of shifts will be
// returned.
//
// Input:
// - sequence_1 : First sequence (reference).
// - sequence_2 : Second sequence (sliding during calculation).
// - sequence_1_length : Length of |sequence_1|.
// - sequence_1_length : Length of `sequence_1`.
// - cross_correlation_length : Number of cross-correlations to calculate.
// - cross_correlation_step : Step in the lag for the cross-correlation.
//
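A plain reference implementation of the correlation described above, without the fixed-point right-shift normalization; the sliding of `sequence_2` by `cross_correlation_step` per output point follows the description:

#include <cstddef>
#include <cstdint>

void CrossCorrelationRef(const int16_t* sequence_1,
                         const int16_t* sequence_2,
                         size_t sequence_1_length,
                         size_t cross_correlation_length,
                         int cross_correlation_step,
                         int64_t* cross_correlation) {
  for (size_t k = 0; k < cross_correlation_length; ++k) {
    const int16_t* shifted =
        sequence_2 + static_cast<std::ptrdiff_t>(k) * cross_correlation_step;
    int64_t sum = 0;
    for (size_t i = 0; i < sequence_1_length; ++i)
      sum += static_cast<int64_t>(sequence_1[i]) * shifted[i];
    cross_correlation[k] = sum;
  }
}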

View File

@ -309,8 +309,8 @@ NetEq::Operation DecisionLogic::ExpectedPacketAvailable(NetEq::Mode prev_mode,
std::max(target_level_samples * 3 / 4,
target_level_samples -
kDecelerationTargetLevelOffsetMs * samples_per_ms);
// |higher_limit| is equal to |target_level|, but should at
// least be 20 ms higher than |lower_limit|.
// `higher_limit` is equal to `target_level`, but should at
// least be 20 ms higher than `lower_limit`.
const int high_limit =
std::max(target_level_samples, low_limit + 20 * samples_per_ms);

View File

@ -47,23 +47,23 @@ class DecisionLogic : public NetEqController {
void SetSampleRate(int fs_hz, size_t output_size_samples) override;
// Given info about the latest received packet, and current jitter buffer
// status, returns the operation. |target_timestamp| and |expand_mutefactor|
// are provided for reference. |last_packet_samples| is the number of samples
// status, returns the operation. `target_timestamp` and `expand_mutefactor`
// are provided for reference. `last_packet_samples` is the number of samples
// obtained from the last decoded frame. If there is a packet available, it
// should be supplied in |packet|; otherwise it should be NULL. The mode
// should be supplied in `packet`; otherwise it should be NULL. The mode
// resulting from the last call to NetEqImpl::GetAudio is supplied in
// |last_mode|. If there is a DTMF event to play, |play_dtmf| should be set to
// true. The output variable |reset_decoder| will be set to true if a reset is
// `last_mode`. If there is a DTMF event to play, `play_dtmf` should be set to
// true. The output variable `reset_decoder` will be set to true if a reset is
// required; otherwise it is left unchanged (i.e., it can remain true if it
// was true before the call).
NetEq::Operation GetDecision(const NetEqController::NetEqStatus& status,
bool* reset_decoder) override;
// These methods test the |cng_state_| for different conditions.
// These methods test the `cng_state_` for different conditions.
bool CngRfc3389On() const override { return cng_state_ == kCngRfc3389On; }
bool CngOff() const override { return cng_state_ == kCngOff; }
// Resets the |cng_state_| to kCngOff.
// Resets the `cng_state_` to kCngOff.
void SetCngOff() override { cng_state_ = kCngOff; }
// Reports back to DecisionLogic whether the decision to do expand remains or
@ -72,7 +72,7 @@ class DecisionLogic : public NetEqController {
// sync buffer.
void ExpandDecision(NetEq::Operation operation) override;
// Adds |value| to |sample_memory_|.
// Adds `value` to `sample_memory_`.
void AddSampleMemory(int32_t value) override { sample_memory_ += value; }
int TargetLevelMs() const override { return delay_manager_->TargetDelayMs(); }
@ -120,8 +120,8 @@ class DecisionLogic : public NetEqController {
enum CngState { kCngOff, kCngRfc3389On, kCngInternalOn };
// Updates the |buffer_level_filter_| with the current buffer level
// |buffer_size_samples|.
// Updates the `buffer_level_filter_` with the current buffer level
// `buffer_size_samples`.
void FilterBufferLevel(size_t buffer_size_samples);
// Returns the operation given that the next available packet is a comfort
@ -132,7 +132,7 @@ class DecisionLogic : public NetEqController {
size_t generated_noise_samples);
// Returns the operation given that no packets are available (except maybe
// a DTMF event, flagged by setting |play_dtmf| true).
// a DTMF event, flagged by setting `play_dtmf` true).
virtual NetEq::Operation NoPacket(bool play_dtmf);
// Returns the operation to do given that the expected packet is available.
@ -160,13 +160,13 @@ class DecisionLogic : public NetEqController {
// Checks if the current (filtered) buffer level is under the target level.
bool UnderTargetLevel() const;
// Checks if |timestamp_leap| is so long into the future that a reset due
// Checks if `timestamp_leap` is so long into the future that a reset due
// to exceeding kReinitAfterExpands will be done.
bool ReinitAfterExpands(uint32_t timestamp_leap) const;
// Checks if we still have not done enough expands to cover the distance from
// the last decoded packet to the next available packet, the distance being
// conveyed in |timestamp_leap|.
// conveyed in `timestamp_leap`.
bool PacketTooEarly(uint32_t timestamp_leap) const;
// Checks if num_consecutive_expands_ >= kMaxWaitForPacket.

View File

@ -161,7 +161,7 @@ int DecoderDatabase::RegisterPayload(int rtp_payload_type,
rtp_payload_type,
DecoderInfo(audio_format, codec_pair_id_, decoder_factory_.get())));
if (ret.second == false) {
// Database already contains a decoder with type |rtp_payload_type|.
// Database already contains a decoder with type `rtp_payload_type`.
return kDecoderExists;
}
return kOK;
@ -169,7 +169,7 @@ int DecoderDatabase::RegisterPayload(int rtp_payload_type,
int DecoderDatabase::Remove(uint8_t rtp_payload_type) {
if (decoders_.erase(rtp_payload_type) == 0) {
// No decoder with that |rtp_payload_type|.
// No decoder with that `rtp_payload_type`.
return kDecoderNotFound;
}
if (active_decoder_type_ == rtp_payload_type) {
@ -199,7 +199,7 @@ const DecoderDatabase::DecoderInfo* DecoderDatabase::GetDecoderInfo(
int DecoderDatabase::SetActiveDecoder(uint8_t rtp_payload_type,
bool* new_decoder) {
// Check that |rtp_payload_type| exists in the database.
// Check that `rtp_payload_type` exists in the database.
const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
if (!info) {
// Decoder not found.
@ -231,7 +231,7 @@ AudioDecoder* DecoderDatabase::GetActiveDecoder() const {
}
int DecoderDatabase::SetActiveCngDecoder(uint8_t rtp_payload_type) {
// Check that |rtp_payload_type| exists in the database.
// Check that `rtp_payload_type` exists in the database.
const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
if (!info) {
// Decoder not found.

View File

@ -80,15 +80,15 @@ class DecoderDatabase {
// Returns true if the decoder's format is RED.
bool IsRed() const { return subtype_ == Subtype::kRed; }
// Returns true if the decoder's format is named |name|.
// Returns true if the decoder's format is named `name`.
bool IsType(const char* name) const;
// Returns true if the decoder's format is named |name|.
// Returns true if the decoder's format is named `name`.
bool IsType(const std::string& name) const;
const std::string& get_name() const { return name_; }
private:
// TODO(ossu): |name_| is kept here while we retain the old external
// TODO(ossu): `name_` is kept here while we retain the old external
// decoder interface. Remove this once using an
// AudioDecoderFactory has supplanted the old functionality.
const std::string name_;
@ -143,26 +143,26 @@ class DecoderDatabase {
virtual int RegisterPayload(int rtp_payload_type,
const SdpAudioFormat& audio_format);
// Removes the entry for |rtp_payload_type| from the database.
// Removes the entry for `rtp_payload_type` from the database.
// Returns kDecoderNotFound or kOK depending on the outcome of the operation.
virtual int Remove(uint8_t rtp_payload_type);
// Remove all entries.
virtual void RemoveAll();
// Returns a pointer to the DecoderInfo struct for |rtp_payload_type|. If
// no decoder is registered with that |rtp_payload_type|, NULL is returned.
// Returns a pointer to the DecoderInfo struct for `rtp_payload_type`. If
// no decoder is registered with that `rtp_payload_type`, NULL is returned.
virtual const DecoderInfo* GetDecoderInfo(uint8_t rtp_payload_type) const;
// Sets the active decoder to be |rtp_payload_type|. If this call results in a
// change of active decoder, |new_decoder| is set to true. The previous active
// Sets the active decoder to be `rtp_payload_type`. If this call results in a
// change of active decoder, `new_decoder` is set to true. The previous active
// decoder's AudioDecoder object is deleted.
virtual int SetActiveDecoder(uint8_t rtp_payload_type, bool* new_decoder);
// Returns the current active decoder, or NULL if no active decoder exists.
virtual AudioDecoder* GetActiveDecoder() const;
// Sets the active comfort noise decoder to be |rtp_payload_type|. If this
// Sets the active comfort noise decoder to be `rtp_payload_type`. If this
// call results in a change of active comfort noise decoder, the previous
// active decoder's AudioDecoder object is deleted.
virtual int SetActiveCngDecoder(uint8_t rtp_payload_type);
@ -176,26 +176,26 @@ class DecoderDatabase {
// exists.
// Returns a pointer to the AudioDecoder object associated with
// |rtp_payload_type|, or NULL if none is registered. If the AudioDecoder
// `rtp_payload_type`, or NULL if none is registered. If the AudioDecoder
// object does not exist for that decoder, the object is created.
AudioDecoder* GetDecoder(uint8_t rtp_payload_type) const;
// Returns if |rtp_payload_type| is registered with a format named |name|.
// Returns if `rtp_payload_type` is registered with a format named `name`.
bool IsType(uint8_t rtp_payload_type, const char* name) const;
// Returns if |rtp_payload_type| is registered with a format named |name|.
// Returns if `rtp_payload_type` is registered with a format named `name`.
bool IsType(uint8_t rtp_payload_type, const std::string& name) const;
// Returns true if |rtp_payload_type| is registered as comfort noise.
// Returns true if `rtp_payload_type` is registered as comfort noise.
bool IsComfortNoise(uint8_t rtp_payload_type) const;
// Returns true if |rtp_payload_type| is registered as DTMF.
// Returns true if `rtp_payload_type` is registered as DTMF.
bool IsDtmf(uint8_t rtp_payload_type) const;
// Returns true if |rtp_payload_type| is registered as RED.
// Returns true if `rtp_payload_type` is registered as RED.
bool IsRed(uint8_t rtp_payload_type) const;
// Returns kOK if all packets in |packet_list| carry payload types that are
// Returns kOK if all packets in `packet_list` carry payload types that are
// registered in the database. Otherwise, returns kDecoderNotFound.
int CheckPayloadTypes(const PacketList& packet_list) const;
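A hedged usage sketch of registration and activation (include paths assumed; error handling elided):

#include "api/audio_codecs/audio_format.h"                // Assumed path.
#include "modules/audio_coding/neteq/decoder_database.h"  // Assumed path.

void RegisterAndActivateOpus(webrtc::DecoderDatabase* db) {
  const int kPayloadType = 111;  // Arbitrary example payload type.
  db->RegisterPayload(kPayloadType, webrtc::SdpAudioFormat("opus", 48000, 2));
  bool new_decoder = false;
  db->SetActiveDecoder(kPayloadType, &new_decoder);  // Returns kOK on success.
}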

View File

@ -148,7 +148,7 @@ TEST(DecoderDatabase, CheckPayloadTypes) {
}
PacketList packet_list;
for (int i = 0; i < kNumPayloads + 1; ++i) {
// Create packet with payload type |i|. The last packet will have a payload
// Create packet with payload type `i`. The last packet will have a payload
// type that is not registered in the decoder database.
Packet packet;
packet.payload_type = i;

View File

@ -191,7 +191,7 @@ absl::optional<int> DelayManager::Update(uint32_t timestamp,
}
}
// Calculate new |target_level_ms_| based on updated statistics.
// Calculate new `target_level_ms_` based on updated statistics.
int bucket_index = histogram_->Quantile(histogram_quantile_);
target_level_ms_ = (1 + bucket_index) * kBucketSizeMs;
target_level_ms_ = std::max(target_level_ms_, effective_minimum_delay_ms_);
@ -293,7 +293,7 @@ bool DelayManager::SetMinimumDelay(int delay_ms) {
}
bool DelayManager::SetMaximumDelay(int delay_ms) {
// If |delay_ms| is zero then it unsets the maximum delay and target level is
// If `delay_ms` is zero then it unsets the maximum delay and target level is
// unconstrained by maximum delay.
if (delay_ms != 0 &&
(delay_ms < minimum_delay_ms_ || delay_ms < packet_len_ms_)) {
@ -321,7 +321,7 @@ int DelayManager::GetBaseMinimumDelay() const {
}
void DelayManager::UpdateEffectiveMinimumDelay() {
// Clamp |base_minimum_delay_ms_| into the range which can be effectively
// Clamp `base_minimum_delay_ms_` into the range which can be effectively
// used.
const int base_minimum_delay_ms =
rtc::SafeClamp(base_minimum_delay_ms_, 0, MinimumDelayUpperBound());

View File

@ -34,9 +34,9 @@ class DelayManager {
std::unique_ptr<Histogram> histogram);
// Create a DelayManager object. Notify the delay manager that the packet
// buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
// buffer can hold no more than `max_packets_in_buffer` packets (i.e., this
// is the number of packet slots in the buffer) and that the target delay
// should be greater than or equal to |base_minimum_delay_ms|. Supply a
// should be greater than or equal to `base_minimum_delay_ms`. Supply a
// PeakDetector object to the DelayManager.
static std::unique_ptr<DelayManager> Create(int max_packets_in_buffer,
int base_minimum_delay_ms,
@ -44,10 +44,10 @@ class DelayManager {
virtual ~DelayManager();
// Updates the delay manager with a new incoming packet, with |timestamp| from
// Updates the delay manager with a new incoming packet, with `timestamp` from
// the RTP header. This updates the statistics and a new target buffer level
// is calculated. Returns the relative delay if it can be calculated. If
// |reset| is true, restarts the relative arrival delay calculation from this
// `reset` is true, restarts the relative arrival delay calculation from this
// packet.
virtual absl::optional<int> Update(uint32_t timestamp,
int sample_rate_hz,
@ -63,7 +63,7 @@ class DelayManager {
virtual int SetPacketAudioLength(int length_ms);
// Accessors and mutators.
// Assuming |delay| is in valid range.
// Assuming `delay` is in valid range.
virtual bool SetMinimumDelay(int delay_ms);
virtual bool SetMaximumDelay(int delay_ms);
virtual bool SetBaseMinimumDelay(int delay_ms);
@ -78,25 +78,25 @@ class DelayManager {
private:
// Provides the value which the minimum delay can't exceed based on current
// buffer size and given |maximum_delay_ms_|. The lower bound is a constant 0.
// buffer size and given `maximum_delay_ms_`. The lower bound is a constant 0.
int MinimumDelayUpperBound() const;
// Updates |delay_history_|.
// Updates `delay_history_`.
void UpdateDelayHistory(int iat_delay_ms,
uint32_t timestamp,
int sample_rate_hz);
// Calculate relative packet arrival delay from |delay_history_|.
// Calculate relative packet arrival delay from `delay_history_`.
int CalculateRelativePacketArrivalDelay() const;
// Updates |effective_minimum_delay_ms_| delay based on current
// |minimum_delay_ms_|, |base_minimum_delay_ms_| and |maximum_delay_ms_|
// Updates `effective_minimum_delay_ms_` delay based on current
// `minimum_delay_ms_`, `base_minimum_delay_ms_` and `maximum_delay_ms_`
// and buffer size.
void UpdateEffectiveMinimumDelay();
// Makes sure that |delay_ms| is less than the maximum delay, if any maximum
// is set. Also, if possible, checks that |delay_ms| is less than 75% of
// |max_packets_in_buffer_|.
// Makes sure that `delay_ms` is less than the maximum delay, if any maximum
// is set. Also, if possible, checks that `delay_ms` is less than 75% of
// `max_packets_in_buffer_`.
bool IsValidMinimumDelay(int delay_ms) const;
bool IsValidBaseMinimumDelay(int delay_ms) const;

View File

@ -94,7 +94,7 @@ int DspHelper::RampSignal(AudioMultiVector* signal,
return factor;
}
int end_factor = 0;
// Loop over the channels, starting at the same |factor| each time.
// Loop over the channels, starting at the same `factor` each time.
for (size_t channel = 0; channel < signal->Channels(); ++channel) {
end_factor =
RampSignal(&(*signal)[channel], start_index, length, factor, increment);
@ -116,7 +116,7 @@ void DspHelper::PeakDetection(int16_t* data,
// Single peak. The parabola fit assumes that an extra point is
// available; worst case it gets a zero on the high end of the signal.
// TODO(hlundin): This can potentially get much worse. It breaks the
// API contract that the length of |data| is |data_length|.
// API contract that the length of `data` is `data_length`.
data_length++;
}
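A hedged sketch of the gain ramp behind RampSignal(): the gain is applied in Q14 while the increment accumulates in Q20, so the accumulator is shifted down by 6 bits to recover the Q14 factor (details may differ from the real routine):

#include <algorithm>
#include <cstddef>
#include <cstdint>

int RampSignalSketch(const int16_t* input,
                     size_t length,
                     int factor,     // Q14 start gain.
                     int increment,  // Q20 per-sample step; may be negative.
                     int16_t* output) {
  int factor_q20 = factor << 6;  // Q14 -> Q20.
  for (size_t i = 0; i < length; ++i) {
    output[i] = static_cast<int16_t>((factor * input[i] + 8192) >> 14);
    factor_q20 = std::max(factor_q20 + increment, 0);  // Keep non-negative.
    factor = factor_q20 >> 6;  // Back to Q14 for the next sample.
  }
  return factor;  // Scale factor after the last increment, per the contract.
}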

View File

@ -51,8 +51,8 @@ class DspHelper {
static const int kUnmuteFactorIncrement48kHz = 1057;
// Multiplies the signal with a gradually changing factor.
// The first sample is multiplied with |factor| (in Q14). For each sample,
// |factor| is increased (additive) by the |increment| (in Q20), which can
// The first sample is multiplied with `factor` (in Q14). For each sample,
// `factor` is increased (additive) by the `increment` (in Q20), which can
// be negative. Returns the scale factor after the last increment.
static int RampSignal(const int16_t* input,
size_t length,
@ -60,14 +60,14 @@ class DspHelper {
int increment,
int16_t* output);
// Same as above, but with the samples of |signal| being modified in-place.
// Same as above, but with the samples of `signal` being modified in-place.
static int RampSignal(int16_t* signal,
size_t length,
int factor,
int increment);
// Same as above, but processes |length| samples from |signal|, starting at
// |start_index|.
// Same as above, but processes `length` samples from `signal`, starting at
// `start_index`.
static int RampSignal(AudioVector* signal,
size_t start_index,
size_t length,
@ -81,10 +81,10 @@ class DspHelper {
int factor,
int increment);
// Peak detection with parabolic fit. Looks for |num_peaks| maxima in |data|,
// having length |data_length| and sample rate multiplier |fs_mult|. The peak
// locations and values are written to the arrays |peak_index| and
// |peak_value|, respectively. Both arrays must hold at least |num_peaks|
// Peak detection with parabolic fit. Looks for `num_peaks` maxima in `data`,
// having length `data_length` and sample rate multiplier `fs_mult`. The peak
// locations and values are written to the arrays `peak_index` and
// `peak_value`, respectively. Both arrays must hold at least `num_peaks`
// elements.
static void PeakDetection(int16_t* data,
size_t data_length,
@ -94,30 +94,30 @@ class DspHelper {
int16_t* peak_value);
// Estimates the height and location of a maximum. The three values in the
// array |signal_points| are used as basis for a parabolic fit, which is then
// used to find the maximum in an interpolated signal. The |signal_points| are
// array `signal_points` are used as basis for a parabolic fit, which is then
// used to find the maximum in an interpolated signal. The `signal_points` are
// assumed to be from a 4 kHz signal, while the maximum, written to
// |peak_index| and |peak_value| is given in the full sample rate, as
// indicated by the sample rate multiplier |fs_mult|.
// `peak_index` and `peak_value` is given in the full sample rate, as
// indicated by the sample rate multiplier `fs_mult`.
static void ParabolicFit(int16_t* signal_points,
int fs_mult,
size_t* peak_index,
int16_t* peak_value);
// Calculates the sum-abs-diff for |signal| when compared to a displaced
// Calculates the sum-abs-diff for `signal` when compared to a displaced
// version of itself. Returns the displacement lag that results in the minimum
// distortion. The resulting distortion is written to |distortion_value|.
// The values of |min_lag| and |max_lag| are boundaries for the search.
// distortion. The resulting distortion is written to `distortion_value`.
// The values of `min_lag` and `max_lag` are boundaries for the search.
static size_t MinDistortion(const int16_t* signal,
size_t min_lag,
size_t max_lag,
size_t length,
int32_t* distortion_value);
// Mixes |length| samples from |input1| and |input2| together and writes the
// result to |output|. The gain for |input1| starts at |mix_factor| (Q14) and
// is decreased by |factor_decrement| (Q14) for each sample. The gain for
// |input2| is the complement 16384 - mix_factor.
// Mixes `length` samples from `input1` and `input2` together and writes the
// result to `output`. The gain for `input1` starts at `mix_factor` (Q14) and
// is decreased by `factor_decrement` (Q14) for each sample. The gain for
// `input2` is the complement 16384 - mix_factor.
static void CrossFade(const int16_t* input1,
const int16_t* input2,
size_t length,
@ -125,24 +125,24 @@ class DspHelper {
int16_t factor_decrement,
int16_t* output);
// Scales |input| with an increasing gain. Applies |factor| (Q14) to the first
// sample and increases the gain by |increment| (Q20) for each sample. The
// result is written to |output|. |length| samples are processed.
// Scales `input` with an increasing gain. Applies `factor` (Q14) to the first
// sample and increases the gain by `increment` (Q20) for each sample. The
// result is written to `output`. `length` samples are processed.
static void UnmuteSignal(const int16_t* input,
size_t length,
int16_t* factor,
int increment,
int16_t* output);
// Starts at unity gain and gradually fades out |signal|. For each sample,
// the gain is reduced by |mute_slope| (Q14). |length| samples are processed.
// Starts at unity gain and gradually fades out `signal`. For each sample,
// the gain is reduced by `mute_slope` (Q14). `length` samples are processed.
static void MuteSignal(int16_t* signal, int mute_slope, size_t length);
// Downsamples |input| from |sample_rate_hz| to 4 kHz sample rate. The input
// has |input_length| samples, and the method will write |output_length|
// samples to |output|. Compensates for the phase delay of the downsampling
// filters if |compensate_delay| is true. Returns -1 if the input is too short
// to produce |output_length| samples, otherwise 0.
// Downsamples `input` from `sample_rate_hz` to 4 kHz sample rate. The input
// has `input_length` samples, and the method will write `output_length`
// samples to `output`. Compensates for the phase delay of the downsampling
// filters if `compensate_delay` is true. Returns -1 if the input is too short
// to produce `output_length` samples, otherwise 0.
static int DownsampleTo4kHz(const int16_t* input,
size_t input_length,
size_t output_length,
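The complement-gain mixing that the CrossFade comment above describes can be sketched as follows; this is a minimal sketch assuming Q14 gains, where the clamp and the rounding behavior are assumptions and `CrossFadeSketch` is not the real API.

#include <cstddef>
#include <cstdint>

void CrossFadeSketch(const int16_t* input1, const int16_t* input2,
                     size_t length, int16_t mix_factor,
                     int16_t factor_decrement, int16_t* output) {
  int factor = mix_factor;  // Q14 gain applied to `input1`.
  for (size_t i = 0; i < length; ++i) {
    // `input2` gets the Q14 complement, so the two gains always sum to 1.0.
    output[i] = static_cast<int16_t>(
        (input1[i] * factor + input2[i] * (16384 - factor)) >> 14);
    factor -= factor_decrement;
    if (factor < 0) factor = 0;  // Assumed clamp; the comment leaves this open.
  }
}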

View File

@ -24,7 +24,7 @@ TEST(DspHelper, RampSignalArray) {
input[i] = 1000;
}
int start_factor = 0;
// Ramp from 0 to 1 (in Q14) over the array. Note that |increment| is in Q20,
// Ramp from 0 to 1 (in Q14) over the array. Note that `increment` is in Q20,
// while the factor is in Q14, hence the shift by 6.
int increment = (16384 << 6) / kLen;
@ -36,7 +36,7 @@ TEST(DspHelper, RampSignalArray) {
EXPECT_EQ(1000 * i / kLen, output[i]);
}
// Test second method. (Note that this modifies |input|.)
// Test second method. (Note that this modifies `input`.)
stop_factor = DspHelper::RampSignal(input, kLen, start_factor, increment);
EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14.
for (int i = 0; i < kLen; ++i) {
@ -54,31 +54,31 @@ TEST(DspHelper, RampSignalAudioMultiVector) {
input[channel][i] = 1000;
}
}
// We want to start ramping at |start_index| and keep ramping for |kLen|
// We want to start ramping at `start_index` and keep ramping for `kLen`
// samples.
int start_index = kLen;
int start_factor = 0;
// Ramp from 0 to 1 (in Q14) in |kLen| samples. Note that |increment| is in
// Ramp from 0 to 1 (in Q14) in `kLen` samples. Note that `increment` is in
// Q20, while the factor is in Q14, hence the shift by 6.
int increment = (16384 << 6) / kLen;
int stop_factor =
DspHelper::RampSignal(&input, start_index, kLen, start_factor, increment);
EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14.
// Verify that the first |kLen| samples are left untouched.
// Verify that the first `kLen` samples are left untouched.
int i;
for (i = 0; i < kLen; ++i) {
for (int channel = 0; channel < kChannels; ++channel) {
EXPECT_EQ(1000, input[channel][i]);
}
}
// Verify that the next block of |kLen| samples are ramped.
// Verify that the next block of `kLen` samples are ramped.
for (; i < 2 * kLen; ++i) {
for (int channel = 0; channel < kChannels; ++channel) {
EXPECT_EQ(1000 * (i - kLen) / kLen, input[channel][i]);
}
}
// Verify the last |kLen| samples are left untouched.
// Verify the last `kLen` samples are left untouched.
for (; i < 3 * kLen; ++i) {
for (int channel = 0; channel < kChannels; ++channel) {
EXPECT_EQ(1000, input[channel][i]);
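The `increment` arithmetic in these tests follows directly from the Q formats: 16384 is 1.0 in Q14, and the extra shift by 6 promotes it to Q20 before dividing by the ramp length. A hypothetical stand-alone use of the sketch shown earlier:

#include <cstdint>

void RampUsageExampleSketch() {
  int16_t in[100];
  int16_t out[100];
  for (int i = 0; i < 100; ++i) in[i] = 1000;
  // Ramp from 0 towards 1.0 (Q14) across all 100 samples.
  int end_factor = RampSignalSketch(in, 100, 0, (16384 << 6) / 100, out);
  // Integer division leaves `end_factor` just below 16384; the same effect is
  // why the tests above expect 16383 instead of exactly 1.0 in Q14.
  (void)end_factor;
}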

View File

@ -32,7 +32,7 @@ void DtmfBuffer::Flush() {
buffer_.clear();
}
// The ParseEvent method parses 4 bytes from |payload| according to this format
// The ParseEvent method parses 4 bytes from `payload` according to this format
// from RFC 4733:
//
// 0 1 2 3
@ -119,8 +119,8 @@ int DtmfBuffer::InsertEvent(const DtmfEvent& event) {
bool DtmfBuffer::GetEvent(uint32_t current_timestamp, DtmfEvent* event) {
DtmfList::iterator it = buffer_.begin();
while (it != buffer_.end()) {
// |event_end| is an estimate of where the current event ends. If the end
// bit is set, we know that the event ends at |timestamp| + |duration|.
// `event_end` is an estimate of where the current event ends. If the end
// bit is set, we know that the event ends at `timestamp` + `duration`.
uint32_t event_end = it->timestamp + it->duration;
#ifdef LEGACY_BITEXACT
bool next_available = false;
@ -226,7 +226,7 @@ bool DtmfBuffer::MergeEvents(DtmfList::iterator it, const DtmfEvent& event) {
}
}
// Returns true if |a| goes before |b| in the sorting order ("|a| < |b|").
// Returns true if `a` goes before `b` in the sorting order ("`a` < `b`").
// The events are ranked using their start timestamp (taking wrap-around into
// account). In the unlikely situation that two events share the same start
// timestamp, the event number is used to rank the two. Note that packets
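For reference while reading the ParseEvent diff above, the 4-byte RFC 4733 payload packs the event code, end bit, volume and duration as below. This is an illustrative decoder; the struct and function names are hypothetical, not the DtmfBuffer API.

#include <cstddef>
#include <cstdint>

struct ParsedDtmfEventSketch {
  uint32_t rtp_timestamp;  // Copied through unchanged.
  int event_no;            // Byte 0: event code, 0-15 for DTMF digits.
  bool end_bit;            // Byte 1, bit 7: set on the final packet of an event.
  int volume;              // Byte 1, bits 0-5: level in -dBm0.
  int duration;            // Bytes 2-3: duration in RTP timestamp units.
};

bool ParseEventSketch(uint32_t rtp_timestamp, const uint8_t* payload,
                      size_t payload_length_bytes,
                      ParsedDtmfEventSketch* event) {
  if (payload_length_bytes < 4) return false;  // Need all four bytes.
  event->rtp_timestamp = rtp_timestamp;
  event->event_no = payload[0];
  event->end_bit = (payload[1] & 0x80) != 0;
  event->volume = payload[1] & 0x3f;
  event->duration = (payload[2] << 8) | payload[3];  // Network byte order.
  return true;
}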

View File

@ -45,7 +45,7 @@ class DtmfBuffer {
kInvalidSampleRate
};
// Set up the buffer for use at sample rate |fs_hz|.
// Set up the buffer for use at sample rate `fs_hz`.
explicit DtmfBuffer(int fs_hz);
virtual ~DtmfBuffer();
@ -53,21 +53,21 @@ class DtmfBuffer {
// Flushes the buffer.
virtual void Flush();
// Static method to parse 4 bytes from |payload| as a DTMF event (RFC 4733)
// and write the parsed information into the struct |event|. Input variable
// |rtp_timestamp| is simply copied into the struct.
// Static method to parse 4 bytes from `payload` as a DTMF event (RFC 4733)
// and write the parsed information into the struct `event`. Input variable
// `rtp_timestamp` is simply copied into the struct.
static int ParseEvent(uint32_t rtp_timestamp,
const uint8_t* payload,
size_t payload_length_bytes,
DtmfEvent* event);
// Inserts |event| into the buffer. The method looks for a matching event and
// Inserts `event` into the buffer. The method looks for a matching event and
// merges the two if a match is found.
virtual int InsertEvent(const DtmfEvent& event);
// Checks if a DTMF event should be played at time |current_timestamp|. If so,
// Checks if a DTMF event should be played at time `current_timestamp`. If so,
// the method returns true; otherwise false. The parameters of the event to
// play will be written to |event|.
// play will be written to `event`.
virtual bool GetEvent(uint32_t current_timestamp, DtmfEvent* event);
// Number of events in the buffer.
@ -87,7 +87,7 @@ class DtmfBuffer {
// Compares two events and returns true if they are the same.
static bool SameEvent(const DtmfEvent& a, const DtmfEvent& b);
// Merges |event| to the event pointed out by |it|. The method checks that
// Merges `event` to the event pointed out by `it`. The method checks that
// the two events are the same (using the SameEvent method), and merges them
// if that was the case, returning true. If the events are not the same, false
// is returned.

View File

@ -208,12 +208,12 @@ TEST(DtmfBuffer, ExtrapolationTime) {
DtmfEvent event2(timestamp, event_no, volume, duration, end_bit);
EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2));
EXPECT_EQ(2u, buffer.Length());
// Now we expect to get the new event when supplying |timestamp_now|.
// Now we expect to get the new event when supplying `timestamp_now`.
EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event));
EXPECT_TRUE(EqualEvents(event2, out_event));
// Expect the first event to be erased now.
EXPECT_EQ(1u, buffer.Length());
// Move |timestamp_now| to more than 560 samples after the end of the second
// Move `timestamp_now` to more than 560 samples after the end of the second
// event. Expect that event to be erased.
timestamp_now = timestamp + duration + 600;
#ifdef LEGACY_BITEXACT

View File

@ -167,7 +167,7 @@ void DtmfToneGenerator::Reset() {
initialized_ = false;
}
// Generate num_samples of DTMF signal and write to |output|.
// Generate num_samples of DTMF signal and write to `output`.
int DtmfToneGenerator::Generate(size_t num_samples, AudioMultiVector* output) {
if (!initialized_) {
return kNotInitialized;

View File

@ -167,7 +167,7 @@ int Expand::Process(AudioMultiVector* output) {
}
// Smooth the expanded signal if it has not been muted to a low amplitude and
// |current_voice_mix_factor| is larger than 0.5.
// `current_voice_mix_factor` is larger than 0.5.
if ((parameters.mute_factor > 819) &&
(parameters.current_voice_mix_factor > 8192)) {
size_t start_ix = sync_buffer_->Size() - overlap_length_;
@ -197,7 +197,7 @@ int Expand::Process(AudioMultiVector* output) {
}
// Unvoiced part.
// Filter |scaled_random_vector| through |ar_filter_|.
// Filter `scaled_random_vector` through `ar_filter_`.
memcpy(unvoiced_vector - kUnvoicedLpcOrder, parameters.ar_filter_state,
sizeof(int16_t) * kUnvoicedLpcOrder);
int32_t add_constant = 0;
@ -402,7 +402,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// Calculate correlation in downsampled domain (4 kHz sample rate).
size_t correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
// If it is decided to break bit-exactness |correlation_length| should be
// If it is decided to break bit-exactness `correlation_length` should be
// initialized to the return value of Correlation().
Correlation(audio_history.get(), signal_length, correlation_vector);
@ -417,7 +417,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
best_correlation_index[1] += fs_mult_20;
best_correlation_index[2] += fs_mult_20;
// Calculate distortion around the |kNumCorrelationCandidates| best lags.
// Calculate distortion around the `kNumCorrelationCandidates` best lags.
int distortion_scale = 0;
for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
size_t min_index =
@ -434,7 +434,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
WebRtcSpl_VectorBitShiftW32ToW16(best_distortion, kNumCorrelationCandidates,
best_distortion_w32, distortion_scale);
// Find the maximizing index |i| of the cost function
// Find the maximizing index `i` of the cost function
// f[i] = best_correlation[i] / best_distortion[i].
int32_t best_ratio = std::numeric_limits<int32_t>::min();
size_t best_index = std::numeric_limits<size_t>::max();
@ -458,7 +458,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
max_lag_ = std::max(distortion_lag, correlation_lag);
// Calculate the exact best correlation in the range between
// |correlation_lag| and |distortion_lag|.
// `correlation_lag` and `distortion_lag`.
correlation_length = std::max(std::min(distortion_lag + 10, fs_mult_120),
static_cast<size_t>(60 * fs_mult));
@ -487,7 +487,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
(31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
correlation_scale = std::max(0, correlation_scale);
// Calculate the correlation, store in |correlation_vector2|.
// Calculate the correlation, store in `correlation_vector2`.
WebRtcSpl_CrossCorrelation(
correlation_vector2,
&(audio_history[signal_length - correlation_length]),
@ -537,7 +537,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
}
// Extract the two vectors expand_vector0 and expand_vector1 from
// |audio_history|.
// `audio_history`.
size_t expansion_length = max_lag_ + overlap_length_;
const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
const int16_t* vector2 = vector1 - distortion_lag;
@ -594,13 +594,13 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
expand_lags_[1] = distortion_lag;
expand_lags_[2] = distortion_lag;
} else {
// |distortion_lag| and |correlation_lag| are not equal; use different
// `distortion_lag` and `correlation_lag` are not equal; use different
// combinations of the two.
// First lag is |distortion_lag| only.
// First lag is `distortion_lag` only.
expand_lags_[0] = distortion_lag;
// Second lag is the average of the two.
expand_lags_[1] = (distortion_lag + correlation_lag) / 2;
// Third lag is the average again, but rounding towards |correlation_lag|.
// Third lag is the average again, but rounding towards `correlation_lag`.
if (distortion_lag > correlation_lag) {
expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2;
} else {
@ -638,7 +638,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
if (stability != 1) {
// Set first coefficient to 4096 (1.0 in Q12).
parameters.ar_filter[0] = 4096;
// Set remaining |kUnvoicedLpcOrder| coefficients to zero.
// Set remaining `kUnvoicedLpcOrder` coefficients to zero.
WebRtcSpl_MemSetW16(parameters.ar_filter + 1, 0, kUnvoicedLpcOrder);
}
}
@ -656,7 +656,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
sizeof(int16_t) * noise_length);
} else {
// Only applies to SWB where length could be larger than
// |kRandomTableSize|.
// `kRandomTableSize`.
memcpy(random_vector, RandomVector::kRandomTable,
sizeof(int16_t) * RandomVector::kRandomTableSize);
RTC_DCHECK_LE(noise_length, kMaxSampleRate / 8000 * 120 + 30);
@ -694,7 +694,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale(
unvoiced_vector, unvoiced_vector, 128, unvoiced_prescale);
// Normalize |unvoiced_energy| to 28 or 29 bits to preserve sqrt() accuracy.
// Normalize `unvoiced_energy` to 28 or 29 bits to preserve sqrt() accuracy.
int16_t unvoiced_scale = WebRtcSpl_NormW32(unvoiced_energy) - 3;
// Make sure we do an odd number of shifts since we already have 7 shifts
// from dividing with 128 earlier. This will make the total scale factor
@ -715,7 +715,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// voice_mix_factor = 0;
if (corr_coefficient > 7875) {
int16_t x1, x2, x3;
// |corr_coefficient| is in Q14.
// `corr_coefficient` is in Q14.
x1 = static_cast<int16_t>(corr_coefficient);
x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14.
x3 = (x1 * x2) >> 14;
@ -733,13 +733,13 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
}
// Calculate muting slope. Reuse value from earlier scaling of
// |expand_vector0| and |expand_vector1|.
// `expand_vector0` and `expand_vector1`.
int16_t slope = amplitude_ratio;
if (slope > 12288) {
// slope > 1.5.
// Calculate (1 - (1 / slope)) / distortion_lag =
// (slope - 1) / (distortion_lag * slope).
// |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before
// `slope` is in Q13, so 1 corresponds to 8192. Shift up to Q25 before
// the division.
// Shift the denominator from Q13 to Q5 before the division. The result of
// the division will then be in Q20.
@ -757,7 +757,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
parameters.onset = true;
} else {
// Calculate (1 - slope) / distortion_lag.
// Shift |slope| by 7 to Q20 before the division. The result is in Q20.
// Shift `slope` by 7 to Q20 before the division. The result is in Q20.
parameters.mute_slope = WebRtcSpl_DivW32W16(
(8192 - slope) * 128, static_cast<int16_t>(distortion_lag));
if (parameters.voice_mix_factor <= 13107) {
@ -826,7 +826,7 @@ void Expand::Correlation(const int16_t* input,
kDownsampledLength, filter_coefficients, num_coefficients,
downsampling_factor, kFilterDelay);
// Normalize |downsampled_input| to using all 16 bits.
// Normalize `downsampled_input` to use all 16 bits.
int16_t max_value =
WebRtcSpl_MaxAbsValueW16(downsampled_input, kDownsampledLength);
int16_t norm_shift = 16 - WebRtcSpl_NormW32(max_value);
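The lag-combination rules spelled out in the comments above (the same lag three times when the two estimates agree; otherwise `distortion_lag`, the plain average, and the average rounded towards `correlation_lag`) amount to the following sketch, mirroring the hunks above rather than the full Expand code:

#include <cstddef>

void SelectExpandLagsSketch(size_t distortion_lag, size_t correlation_lag,
                            size_t expand_lags[3]) {
  if (distortion_lag == correlation_lag) {
    expand_lags[0] = expand_lags[1] = expand_lags[2] = distortion_lag;
  } else {
    expand_lags[0] = distortion_lag;  // First lag is `distortion_lag` only.
    expand_lags[1] = (distortion_lag + correlation_lag) / 2;  // Plain average.
    // Third lag: the average again, but rounded towards `correlation_lag`.
    expand_lags[2] = distortion_lag > correlation_lag
                         ? (distortion_lag + correlation_lag - 1) / 2
                         : (distortion_lag + correlation_lag + 1) / 2;
  }
}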

View File

@ -45,7 +45,7 @@ class Expand {
virtual void Reset();
// The main method to produce concealment data. The data is appended to the
// end of |output|.
// end of `output`.
virtual int Process(AudioMultiVector* output);
// Prepare the object to do extra expansion during normal operation following
@ -56,7 +56,7 @@ class Expand {
// a period of expands.
virtual void SetParametersForMergeAfterExpand();
// Returns the mute factor for |channel|.
// Returns the mute factor for `channel`.
int16_t MuteFactor(size_t channel) const {
RTC_DCHECK_LT(channel, num_channels_);
return channel_parameters_[channel].mute_factor;
@ -81,7 +81,7 @@ class Expand {
bool TooManyExpands();
// Analyzes the signal history in |sync_buffer_|, and set up all parameters
// Analyzes the signal history in `sync_buffer_`, and set up all parameters
// necessary to produce concealment data.
void AnalyzeSignal(int16_t* random_vector);
@ -115,9 +115,9 @@ class Expand {
int mute_slope; /* Q20 */
};
// Calculate the auto-correlation of |input|, with length |input_length|
// Calculate the auto-correlation of `input`, with length `input_length`
// samples. The correlation is calculated from a downsampled version of
// |input|, and is written to |output|.
// `input`, and is written to `output`.
void Correlation(const int16_t* input,
size_t input_length,
int16_t* output) const;

View File

@ -124,7 +124,7 @@ TEST_F(ExpandTest, DelayedPacketOutage) {
EXPECT_EQ(0, statistics_.last_outage_duration_samples());
}
expand_.SetParametersForNormalAfterExpand();
// Convert |sum_output_len_samples| to milliseconds.
// Convert `sum_output_len_samples` to milliseconds.
EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
statistics_.last_outage_duration_samples());
}
@ -164,7 +164,7 @@ TEST_F(ExpandTest, CheckOutageStatsAfterReset) {
EXPECT_EQ(0, statistics_.last_outage_duration_samples());
}
expand_.SetParametersForNormalAfterExpand();
// Convert |sum_output_len_samples| to milliseconds.
// Convert `sum_output_len_samples` to milliseconds.
EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
statistics_.last_outage_duration_samples());
}

View File

@ -34,42 +34,42 @@ Histogram::Histogram(size_t num_buckets,
Histogram::~Histogram() {}
// Each element in the vector is first multiplied by the forgetting factor
// |forget_factor_|. Then the vector element indicated by |iat_packets| is then
// increased (additive) by 1 - |forget_factor_|. This way, the probability of
// |value| is slightly increased, while the sum of the histogram remains
// `forget_factor_`. Then the vector element indicated by `iat_packets` is
// increased (additively) by 1 - `forget_factor_`. This way, the probability of
// `value` is slightly increased, while the sum of the histogram remains
// constant (=1).
// Due to inaccuracies in the fixed-point arithmetic, the histogram may no
// longer sum up to 1 (in Q30) after the update. To correct this, a correction
// term is added or subtracted from the first element (or elements) of the
// vector.
// The forgetting factor |forget_factor_| is also updated. When the DelayManager
// The forgetting factor `forget_factor_` is also updated. When the DelayManager
// is reset, the factor is set to 0 to facilitate rapid convergence in the
// beginning. With each update of the histogram, the factor is increased towards
// the steady-state value |base_forget_factor_|.
// the steady-state value `base_forget_factor_`.
void Histogram::Add(int value) {
RTC_DCHECK(value >= 0);
RTC_DCHECK(value < static_cast<int>(buckets_.size()));
int vector_sum = 0; // Sum up the vector elements as they are processed.
// Multiply each element in |buckets_| with |forget_factor_|.
// Multiply each element in `buckets_` with `forget_factor_`.
for (int& bucket : buckets_) {
bucket = (static_cast<int64_t>(bucket) * forget_factor_) >> 15;
vector_sum += bucket;
}
// Increase the probability for the currently observed inter-arrival time
// by 1 - |forget_factor_|. The factor is in Q15, |buckets_| in Q30.
// by 1 - `forget_factor_`. The factor is in Q15, `buckets_` in Q30.
// Thus, left-shift 15 steps to obtain result in Q30.
buckets_[value] += (32768 - forget_factor_) << 15;
vector_sum += (32768 - forget_factor_) << 15; // Add to vector sum.
// |buckets_| should sum up to 1 (in Q30), but it may not due to
// `buckets_` should sum up to 1 (in Q30), but it may not due to
// fixed-point rounding errors.
vector_sum -= 1 << 30; // Should be zero. Compensate if not.
if (vector_sum != 0) {
// Modify a few values early in |buckets_|.
// Modify a few values early in `buckets_`.
int flip_sign = vector_sum > 0 ? -1 : 1;
for (int& bucket : buckets_) {
// Add/subtract 1/16 of the element, but not more than |vector_sum|.
// Add/subtract 1/16 of the element, but not more than `vector_sum`.
int correction = flip_sign * std::min(std::abs(vector_sum), bucket >> 4);
bucket += correction;
vector_sum += correction;
@ -82,8 +82,8 @@ void Histogram::Add(int value) {
++add_count_;
// Update |forget_factor_| (changes only during the first seconds after a
// reset). The factor converges to |base_forget_factor_|.
// Update `forget_factor_` (changes only during the first seconds after a
// reset). The factor converges to `base_forget_factor_`.
if (start_forget_weight_) {
if (forget_factor_ != base_forget_factor_) {
int old_forget_factor = forget_factor_;
@ -92,7 +92,7 @@ void Histogram::Add(int value) {
forget_factor_ =
std::max(0, std::min(base_forget_factor_, forget_factor));
// The histogram is updated recursively by forgetting the old histogram
// with |forget_factor_| and adding a new sample multiplied by |1 -
// with `forget_factor_` and adding a new sample multiplied by `1 -
// forget_factor_`. We need to make sure that the effective weight on the
// new sample is no smaller than those on the old samples, i.e., to
// satisfy the following DCHECK.
@ -106,21 +106,21 @@ void Histogram::Add(int value) {
int Histogram::Quantile(int probability) {
// Find the bucket for which the probability of observing an
// inter-arrival time larger than or equal to |index| is larger than or
// equal to |probability|. The sought probability is estimated using
// inter-arrival time larger than or equal to `index` is larger than or
// equal to `probability`. The sought probability is estimated using
// the histogram as the reverse cumulative distribution, i.e., the sum of elements from
// the end up until |index|. Now, since the sum of all elements is 1
// the end up until `index`. Now, since the sum of all elements is 1
// (in Q30) by definition, and since the solution is often a low value for
// |iat_index|, it is more efficient to start with |sum| = 1 and subtract
// `iat_index`, it is more efficient to start with `sum` = 1 and subtract
// elements from the start of the histogram.
int inverse_probability = (1 << 30) - probability;
size_t index = 0; // Start from the beginning of |buckets_|.
size_t index = 0; // Start from the beginning of `buckets_`.
int sum = 1 << 30; // Assign to 1 in Q30.
sum -= buckets_[index];
while ((sum > inverse_probability) && (index < buckets_.size() - 1)) {
// Subtract the probabilities one by one until the sum is no longer greater
// than |inverse_probability|.
// than `inverse_probability`.
++index;
sum -= buckets_[index];
}
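A floating-point paraphrase of the update and quantile logic above may help; the real code keeps probabilities in Q30 and the forgetting factor in Q15, but the loop structure is the same. Sketch only, with assumed names:

#include <cstddef>
#include <vector>

// Decay every bucket by `forget_factor` and reinforce the observed one by
// 1 - `forget_factor`; the total mass stays at 1 by construction.
void HistogramAddSketch(std::vector<double>* buckets, int value,
                        double forget_factor) {
  for (double& b : *buckets) b *= forget_factor;
  (*buckets)[value] += 1.0 - forget_factor;
}

// Walk from the front, subtracting bucket mass from 1, until the remaining
// tail mass is no longer greater than 1 - `probability`.
int HistogramQuantileSketch(const std::vector<double>& buckets,
                            double probability) {
  const double inverse_probability = 1.0 - probability;
  size_t index = 0;
  double sum = 1.0 - buckets[index];
  while (sum > inverse_probability && index < buckets.size() - 1) {
    ++index;
    sum -= buckets[index];
  }
  return static_cast<int>(index);
}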

View File

@ -21,7 +21,7 @@ namespace webrtc {
class Histogram {
public:
// Creates histogram with capacity |num_buckets| and |forget_factor| in Q15.
// Creates histogram with capacity `num_buckets` and `forget_factor` in Q15.
Histogram(size_t num_buckets,
int forget_factor,
absl::optional<double> start_forget_weight = absl::nullopt);
@ -31,10 +31,10 @@ class Histogram {
// Resets the histogram to the default start distribution.
virtual void Reset();
// Add entry in bucket |index|.
// Add entry in bucket `index`.
virtual void Add(int index);
// Calculates the quantile at |probability| (in Q30) of the histogram
// Calculates the quantile at `probability` (in Q30) of the histogram
// distribution.
virtual int Quantile(int probability);

View File

@ -149,13 +149,13 @@ size_t Merge::Process(int16_t* input,
(*output)[channel].OverwriteAt(temp_data_.data(), output_length, 0);
}
// Copy back the first part of the data to |sync_buffer_| and remove it from
// |output|.
// Copy back the first part of the data to `sync_buffer_` and remove it from
// `output`.
sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
output->PopFront(old_length);
// Return new added length. |old_length| samples were borrowed from
// |sync_buffer_|.
// Return the newly added length. `old_length` samples were borrowed from
// `sync_buffer_`.
RTC_DCHECK_GE(output_length, old_length);
return output_length - old_length;
}
@ -200,7 +200,7 @@ size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
// Append one more pitch period each time.
expanded_.PushBack(expanded_temp);
}
// Trim the length to exactly |required_length|.
// Trim the length to exactly `required_length`.
expanded_.PopBack(expanded_.Size() - required_length);
}
RTC_DCHECK_GE(expanded_.Size(), required_length);
@ -240,17 +240,17 @@ int16_t Merge::SignalScaling(const int16_t* input,
// Calculate muting factor to use for new frame.
int16_t mute_factor;
if (energy_input > energy_expanded) {
// Normalize |energy_input| to 14 bits.
// Normalize `energy_input` to 14 bits.
int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17;
energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift);
// Put |energy_expanded| in a domain 14 higher, so that
// Put `energy_expanded` in a domain 14 higher, so that
// energy_expanded / energy_input is in Q14.
energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
// Calculate sqrt(energy_expanded / energy_input) in Q14.
mute_factor = static_cast<int16_t>(
WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14));
} else {
// Set to 1 (in Q14) when |expanded| has higher energy than |input|.
// Set to 1 (in Q14) when `expanded` has higher energy than `input`.
mute_factor = 16384;
}
@ -295,7 +295,7 @@ void Merge::Downsample(const int16_t* input,
// there is not much we can do.
const size_t temp_len =
input_length > signal_offset ? input_length - signal_offset : 0;
// TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
// TODO(hlundin): Should `downsamp_temp_len` be corrected for round-off
// errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
size_t downsamp_temp_len = temp_len / decimation_factor;
if (downsamp_temp_len > 0) {
@ -351,8 +351,8 @@ size_t Merge::CorrelateAndPeakSearch(size_t start_position,
// Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
size_t start_index_downsamp = start_index / (fs_mult_ * 2);
// Calculate a modified |stop_position_downsamp| to account for the increased
// start index |start_index_downsamp| and the effective array length.
// Calculate a modified `stop_position_downsamp` to account for the increased
// start index `start_index_downsamp` and the effective array length.
size_t modified_stop_pos =
std::min(stop_position_downsamp,
kMaxCorrelationLength + pad_length - start_index_downsamp);
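In floating point, the muting factor computed in the SignalScaling hunk above reduces to the following; a sketch under the assumption that only the energy ratio matters, whereas the fixed-point code also has to juggle the Q domains shown above.

#include <cmath>
#include <cstdint>

int16_t SignalScalingSketch(double energy_input, double energy_expanded) {
  if (energy_input > energy_expanded && energy_input > 0.0) {
    // sqrt(energy_expanded / energy_input), expressed in Q14.
    return static_cast<int16_t>(
        16384.0 * std::sqrt(energy_expanded / energy_input));
  }
  return 16384;  // 1.0 in Q14 when `expanded` is at least as strong as `input`.
}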

View File

@ -37,10 +37,10 @@ class Merge {
virtual ~Merge();
// The main method to produce the audio data. The decoded data is supplied in
// |input|, having |input_length| samples in total for all channels
// (interleaved). The result is written to |output|. The number of channels
// allocated in |output| defines the number of channels that will be used when
// de-interleaving |input|.
// `input`, having `input_length` samples in total for all channels
// (interleaved). The result is written to `output`. The number of channels
// allocated in `output` defines the number of channels that will be used when
// de-interleaving `input`.
virtual size_t Process(int16_t* input,
size_t input_length,
AudioMultiVector* output);
@ -57,29 +57,29 @@ class Merge {
static const size_t kInputDownsampLength = 40;
static const size_t kMaxCorrelationLength = 60;
// Calls |expand_| to get more expansion data to merge with. The data is
// written to |expanded_signal_|. Returns the length of the expanded data,
// while |expand_period| will be the number of samples in one expansion period
// (typically one pitch period). The value of |old_length| will be the number
// of samples that were taken from the |sync_buffer_|.
// Calls `expand_` to get more expansion data to merge with. The data is
// written to `expanded_signal_`. Returns the length of the expanded data,
// while `expand_period` will be the number of samples in one expansion period
// (typically one pitch period). The value of `old_length` will be the number
// of samples that were taken from the `sync_buffer_`.
size_t GetExpandedSignal(size_t* old_length, size_t* expand_period);
// Analyzes |input| and |expanded_signal| and returns muting factor (Q14) to
// Analyzes `input` and `expanded_signal` and returns muting factor (Q14) to
// be used on the new data.
int16_t SignalScaling(const int16_t* input,
size_t input_length,
const int16_t* expanded_signal) const;
// Downsamples |input| (|input_length| samples) and |expanded_signal| to
// Downsamples `input` (`input_length` samples) and `expanded_signal` to
// 4 kHz sample rate. The downsampled signals are written to
// |input_downsampled_| and |expanded_downsampled_|, respectively.
// `input_downsampled_` and `expanded_downsampled_`, respectively.
void Downsample(const int16_t* input,
size_t input_length,
const int16_t* expanded_signal,
size_t expanded_length);
// Calculates cross-correlation between |input_downsampled_| and
// |expanded_downsampled_|, and finds the correlation maximum. The maximizing
// Calculates cross-correlation between `input_downsampled_` and
// `expanded_downsampled_`, and finds the correlation maximum. The maximizing
// lag is returned.
size_t CorrelateAndPeakSearch(size_t start_position,
size_t input_length,

View File

@ -123,7 +123,7 @@ void NackTracker::AddToList(uint16_t sequence_number_current_received_rtp) {
IsNewerSequenceNumber(sequence_number_current_received_rtp,
sequence_num_last_decoded_rtp_));
// Packets with sequence numbers older than |upper_bound_missing| are
// Packets with sequence numbers older than `upper_bound_missing` are
// considered missing, and the rest are considered late.
uint16_t upper_bound_missing =
sequence_number_current_received_rtp - nack_threshold_packets_;
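The missing/late split above can be sketched with ordinary wrap-aware RTP sequence arithmetic. These are illustrative helpers; `IsNewerSeqSketch` stands in for the IsNewerSequenceNumber call visible in the hunk.

#include <cstdint>

// True if `a` is ahead of `b` modulo 2^16.
bool IsNewerSeqSketch(uint16_t a, uint16_t b) {
  return a != b && static_cast<uint16_t>(a - b) < 0x8000;
}

// A gap packet counts as missing once it trails the newest received packet by
// more than `nack_threshold_packets`; closer gaps are only late.
bool IsMissingSketch(uint16_t gap_seq, uint16_t newest_seq,
                     int nack_threshold_packets) {
  const uint16_t upper_bound_missing =
      static_cast<uint16_t>(newest_seq - nack_threshold_packets);
  return IsNewerSeqSketch(upper_bound_missing, gap_seq);
}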

View File

@ -63,9 +63,9 @@ class NackTracker {
// Set a maximum for the size of the NACK list. If the last received packet
// has a sequence number of N, then the NACK list will not contain any element
// with sequence number earlier than N - |max_nack_list_size|.
// with sequence number earlier than N - `max_nack_list_size`.
//
// The largest maximum size is defined by |kNackListSizeLimit|
// The largest maximum size is defined by `kNackListSizeLimit`.
void SetMaxNackListSize(size_t max_nack_list_size);
// Set the sampling rate.
@ -90,7 +90,7 @@ class NackTracker {
std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
// Reset to default values. The NACK list is cleared.
// |nack_threshold_packets_| & |max_nack_list_size_| preserve their values.
// `nack_threshold_packets_` & `max_nack_list_size_` preserve their values.
void Reset();
private:
@ -110,7 +110,7 @@ class NackTracker {
int64_t time_to_play_ms;
// A guess about the timestamp of the missing packet, it is used for
// estimation of |time_to_play_ms|. The estimate might be slightly wrong if
// estimation of `time_to_play_ms`. The estimate might be slightly wrong if
// there has been frame-size change since the last received packet and the
// missing packet. However, the risk of this is low, and in case of such
// errors, there will be a minor misestimation in time-to-play of missing
@ -139,7 +139,7 @@ class NackTracker {
// computed correctly.
NackList GetNackList() const;
// Given the |sequence_number_current_received_rtp| of currently received RTP,
// Given the `sequence_number_current_received_rtp` of currently received RTP,
// recognize packets which have not arrived and add them to the list.
void AddToList(uint16_t sequence_number_current_received_rtp);
@ -147,23 +147,23 @@ class NackTracker {
// This is called when 10 ms elapsed with no new RTP packet decoded.
void UpdateEstimatedPlayoutTimeBy10ms();
// Given the |sequence_number_current_received_rtp| and
// |timestamp_current_received_rtp| of currently received RTP update number
// Given the `sequence_number_current_received_rtp` and
// `timestamp_current_received_rtp` of the currently received RTP packet,
// update the number of samples per packet.
void UpdateSamplesPerPacket(uint16_t sequence_number_current_received_rtp,
uint32_t timestamp_current_received_rtp);
// Given the |sequence_number_current_received_rtp| of currently received RTP
// Given the `sequence_number_current_received_rtp` of currently received RTP
// update the list. That is, some packets will change from late to missing,
// some packets are inserted as missing and some inserted as late.
void UpdateList(uint16_t sequence_number_current_received_rtp);
// Packets which are considered late for too long (according to
// |nack_threshold_packets_|) are flagged as missing.
// `nack_threshold_packets_`) are flagged as missing.
void ChangeFromLateToMissing(uint16_t sequence_number_current_received_rtp);
// Packets which have a sequence number older than
// |sequence_num_last_received_rtp_| - |max_nack_list_size_| are removed
// `sequence_num_last_received_rtp_` - `max_nack_list_size_` are removed
// from the NACK list.
void LimitNackListSize();
@ -173,9 +173,9 @@ class NackTracker {
// Compute time-to-play given a timestamp.
int64_t TimeToPlay(uint32_t timestamp) const;
// If packet N is arrived, any packet prior to N - |nack_threshold_packets_|
// If packet N has arrived, any packet prior to N - `nack_threshold_packets_`
// which has not arrived is considered missing, and should be in the NACK list.
// Also any packet in the range of N-1 and N - |nack_threshold_packets_|,
// Also any packet in the range of N-1 and N - `nack_threshold_packets_`,
// exclusive, which has not arrived is considered late, and should be
// in the list of late packets.
const int nack_threshold_packets_;
@ -202,7 +202,7 @@ class NackTracker {
NackList nack_list_;
// NACK list will not keep track of missing packets prior to
// |sequence_num_last_received_rtp_| - |max_nack_list_size_|.
// `sequence_num_last_received_rtp_` - `max_nack_list_size_`.
size_t max_nack_list_size_;
};

View File

@ -215,10 +215,10 @@ TEST(NackTrackerTest, EstimateTimestampAndTimeToPlay) {
std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
nack->UpdateSampleRate(kSampleRateHz);
// Sequence number wrap around if |k| is 2 or 3;
// Sequence number wraps around if `k` is 2 or 3.
int seq_num_offset = (k < 2) ? 0 : 65531;
// Timestamp wrap around if |k| is 1 or 3.
// Timestamp wraps around if `k` is 1 or 3.
uint32_t timestamp_offset =
(k & 0x1) ? static_cast<uint32_t>(0xffffffff) - 6 : 0;
@ -283,7 +283,7 @@ TEST(NackTrackerTest, EstimateTimestampAndTimeToPlay) {
TEST(NackTrackerTest,
MissingPacketsPriorToLastDecodedRtpShouldNotBeInNackList) {
for (int m = 0; m < 2; ++m) {
uint16_t seq_num_offset = (m == 0) ? 0 : 65531; // Wrap around if |m| is 1.
uint16_t seq_num_offset = (m == 0) ? 0 : 65531; // Wrap around if `m` is 1.
std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
nack->UpdateSampleRate(kSampleRateHz);
@ -361,7 +361,7 @@ TEST(NackTrackerTest, Reset) {
TEST(NackTrackerTest, ListSizeAppliedFromBeginning) {
const size_t kNackListSize = 10;
for (int m = 0; m < 2; ++m) {
uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if |m| is 1.
uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if `m` is 1.
std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
nack->UpdateSampleRate(kSampleRateHz);
nack->SetMaxNackListSize(kNackListSize);
@ -385,7 +385,7 @@ TEST(NackTrackerTest, ListSizeAppliedFromBeginning) {
TEST(NackTrackerTest, ChangeOfListSizeAppliedAndOldElementsRemoved) {
const size_t kNackListSize = 10;
for (int m = 0; m < 2; ++m) {
uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if |m| is 1.
uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if `m` is 1.
std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
nack->UpdateSampleRate(kSampleRateHz);

View File

@ -608,7 +608,7 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
// Reinitialize NetEq if it's needed (changed SSRC or first call).
if (update_sample_rate_and_channels) {
// Note: |first_packet_| will be cleared further down in this method, once
// Note: `first_packet_` will be cleared further down in this method, once
// the packet has been successfully inserted into the packet buffer.
// Flush the packet buffer and DTMF buffer.
@ -784,8 +784,8 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
}
if (update_sample_rate_and_channels && !packet_buffer_->Empty()) {
// We do not use |current_rtp_payload_type_| to |set payload_type|, but
// get the next RTP header from |packet_buffer_| to obtain the payload type.
// We do not use `current_rtp_payload_type_` to set `payload_type`, but
// get the next RTP header from `packet_buffer_` to obtain the payload type.
// The reason for it is the following corner case. If NetEq receives a
// CNG packet with a sample rate different than the current CNG then it
// flushes its buffer, assuming send codec must have been changed. However,
@ -978,18 +978,18 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
comfort_noise_->Reset();
}
// We treat it as if all packets referenced to by |last_decoded_packet_infos_|
// were mashed together when creating the samples in |algorithm_buffer_|.
// We treat it as if all packets referenced by `last_decoded_packet_infos_`
// were mashed together when creating the samples in `algorithm_buffer_`.
RtpPacketInfos packet_infos(last_decoded_packet_infos_);
// Copy samples from |algorithm_buffer_| to |sync_buffer_|.
// Copy samples from `algorithm_buffer_` to `sync_buffer_`.
//
// TODO(bugs.webrtc.org/10757):
// We would in the future also like to pass |packet_infos| so that we can do
// sample-perfect tracking of that information across |sync_buffer_|.
// We would in the future also like to pass `packet_infos` so that we can do
// sample-perfect tracking of that information across `sync_buffer_`.
sync_buffer_->PushBack(*algorithm_buffer_);
// Extract data from |sync_buffer_| to |output|.
// Extract data from `sync_buffer_` to `output`.
size_t num_output_samples_per_channel = output_size_samples_;
size_t num_output_samples = output_size_samples_ * sync_buffer_->Channels();
if (num_output_samples > AudioFrame::kMaxDataSizeSamples) {
@ -1006,14 +1006,14 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
audio_frame->sample_rate_hz_ = fs_hz_;
// TODO(bugs.webrtc.org/10757):
// We don't have the ability to properly track individual packets once their
// audio samples have entered |sync_buffer_|. So for now, treat it as if
// |packet_infos| from packets decoded by the current |GetAudioInternal()|
// audio samples have entered `sync_buffer_`. So for now, treat it as if
// `packet_infos` from packets decoded by the current `GetAudioInternal()`
// call were all consumed assembling the current audio frame and the current
// audio frame only.
audio_frame->packet_infos_ = std::move(packet_infos);
if (sync_buffer_->FutureLength() < expand_->overlap_length()) {
// The sync buffer should always contain |overlap_length| samples, but now
// too many samples have been extracted. Reinstall the |overlap_length|
// The sync buffer should always contain `overlap_length` samples, but now
// too many samples have been extracted. Reinstall the `overlap_length`
// lookahead by moving the index.
const size_t missing_lookahead_samples =
expand_->overlap_length() - sync_buffer_->FutureLength();
@ -1031,7 +1031,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
return kSampleUnderrun;
}
// Should always have overlap samples left in the |sync_buffer_|.
// Should always have overlap samples left in the `sync_buffer_`.
RTC_DCHECK_GE(sync_buffer_->FutureLength(), expand_->overlap_length());
// TODO(yujo): For muted frames, this can be a copy rather than an addition.
@ -1041,7 +1041,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
}
// Update the background noise parameters if last operation wrote data
// straight from the decoder to the |sync_buffer_|. That is, none of the
// straight from the decoder to the `sync_buffer_`. That is, none of the
// operations that modify the signal can be followed by a parameter update.
if ((last_mode_ == Mode::kNormal) || (last_mode_ == Mode::kAccelerateFail) ||
(last_mode_ == Mode::kPreemptiveExpandFail) ||
@ -1051,14 +1051,14 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
}
if (operation == Operation::kDtmf) {
// DTMF data was written the end of |sync_buffer_|.
// Update index to end of DTMF data in |sync_buffer_|.
// DTMF data was written to the end of `sync_buffer_`.
// Update index to end of DTMF data in `sync_buffer_`.
sync_buffer_->set_dtmf_index(sync_buffer_->Size());
}
if (last_mode_ != Mode::kExpand && last_mode_ != Mode::kCodecPlc) {
// If last operation was not expand, calculate the |playout_timestamp_| from
// the |sync_buffer_|. However, do not update the |playout_timestamp_| if it
// If last operation was not expand, calculate the `playout_timestamp_` from
// the `sync_buffer_`. However, do not update the `playout_timestamp_` if it
// would be moved "backwards".
uint32_t temp_timestamp =
sync_buffer_->end_timestamp() -
@ -1067,7 +1067,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
playout_timestamp_ = temp_timestamp;
}
} else {
// Use dead reckoning to estimate the |playout_timestamp_|.
// Use dead reckoning to estimate the `playout_timestamp_`.
playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
}
// Set the timestamp in the audio frame to zero before the first packet has
@ -1206,7 +1206,7 @@ int NetEqImpl::GetDecision(Operation* operation,
// Use the provided action instead of the decision NetEq decided on.
*operation = *action_override;
}
// Check if we already have enough samples in the |sync_buffer_|. If so,
// Check if we already have enough samples in the `sync_buffer_`. If so,
// change decision to normal, unless the decision was merge, accelerate, or
// preemptive expand.
if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
@ -1245,7 +1245,7 @@ int NetEqImpl::GetDecision(Operation* operation,
*operation = Operation::kNormal;
}
}
// Adjust |sync_buffer_| timestamp before setting |end_timestamp| to the
// Adjust `sync_buffer_` timestamp before setting `end_timestamp` to the
// new value.
sync_buffer_->IncreaseEndTimestamp(timestamp_ - end_timestamp);
end_timestamp = timestamp_;
@ -1535,7 +1535,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list,
while (!packet_list->empty() && !decoder_database_->IsComfortNoise(
packet_list->front().payload_type)) {
RTC_DCHECK(decoder); // At this point, we must have a decoder object.
// The number of channels in the |sync_buffer_| should be the same as the
// The number of channels in the `sync_buffer_` should be the same as the
// number of decoder channels.
RTC_DCHECK_EQ(sync_buffer_->Channels(), decoder->Channels());
RTC_DCHECK_GE(decoded_buffer_length_, kMaxFrameSize * decoder->Channels());
@ -1557,7 +1557,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list,
*speech_type = result.speech_type;
if (result.num_decoded_samples > 0) {
*decoded_length += rtc::dchecked_cast<int>(result.num_decoded_samples);
// Update |decoder_frame_length_| with number of samples per channel.
// Update `decoder_frame_length_` with number of samples per channel.
decoder_frame_length_ =
result.num_decoded_samples / decoder->Channels();
}
@ -1733,7 +1733,7 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer,
size_t num_channels = algorithm_buffer_->Channels();
size_t decoded_length_per_channel = decoded_length / num_channels;
if (decoded_length_per_channel < required_samples) {
// Must move data from the |sync_buffer_| in order to get 30 ms.
// Must move data from the `sync_buffer_` in order to get 30 ms.
borrowed_samples_per_channel =
static_cast<int>(required_samples - decoded_length_per_channel);
memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
@ -1765,7 +1765,7 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer,
}
if (borrowed_samples_per_channel > 0) {
// Copy borrowed samples back to the |sync_buffer_|.
// Copy borrowed samples back to the `sync_buffer_`.
size_t length = algorithm_buffer_->Size();
if (length < borrowed_samples_per_channel) {
// This destroys the beginning of the buffer, but will not cause any
@ -1806,7 +1806,7 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
size_t old_borrowed_samples_per_channel = 0;
size_t decoded_length_per_channel = decoded_length / num_channels;
if (decoded_length_per_channel < required_samples) {
// Must move data from the |sync_buffer_| in order to get 30 ms.
// Must move data from the `sync_buffer_` in order to get 30 ms.
borrowed_samples_per_channel =
required_samples - decoded_length_per_channel;
// Calculate how many of these were already played out.
@ -1843,7 +1843,7 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
}
if (borrowed_samples_per_channel > 0) {
// Copy borrowed samples back to the |sync_buffer_|.
// Copy borrowed samples back to the `sync_buffer_`.
sync_buffer_->ReplaceAtIndex(
*algorithm_buffer_, borrowed_samples_per_channel,
sync_buffer_->Size() - borrowed_samples_per_channel);
@ -1903,10 +1903,10 @@ void NetEqImpl::DoCodecInternalCng(const int16_t* decoded_buffer,
}
int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
// This block of the code and the block further down, handling |dtmf_switch|
// This block of the code and the block further down, handling `dtmf_switch`
// are commented out. Otherwise playing out-of-band DTMF would fail in VoE
// test, DtmfTest.ManualSuccessfullySendsOutOfBandTelephoneEvents. This is
// equivalent to |dtmf_switch| always be false.
// equivalent to `dtmf_switch` always being false.
//
// See http://webrtc-codereview.appspot.com/1195004/ for discussion
// on this issue. This change might cause some glitches at the point of
@ -1916,7 +1916,7 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
// if ((last_mode_ != Modes::kDtmf) &&
// dtmf_tone_generator_->initialized()) {
// // Special case; see below.
// // We must catch this before calling Generate, since |initialized| is
// // We must catch this before calling Generate, since `initialized` is
// // modified in that call.
// dtmf_switch = true;
// }
@ -1948,7 +1948,7 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
// // TODO(hlundin): This code seems incorrect. (Legacy.) Write test and
// // verify correct operation.
// RTC_NOTREACHED();
// // Must generate enough data to replace all of the |sync_buffer_|
// // Must generate enough data to replace all of the `sync_buffer_`
// // "future".
// int required_length = sync_buffer_->FutureLength();
// RTC_DCHECK(dtmf_tone_generator_->initialized());
@ -2033,7 +2033,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
do {
timestamp_ = next_packet->timestamp;
absl::optional<Packet> packet = packet_buffer_->GetNextPacket();
// |next_packet| may be invalid after the |packet_buffer_| operation.
// `next_packet` may be invalid after the `packet_buffer_` operation.
next_packet = nullptr;
if (!packet) {
RTC_LOG(LS_ERROR) << "Should always be able to extract a packet here";
@ -2180,7 +2180,7 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
comfort_noise_.reset(
new ComfortNoise(fs_hz, decoder_database_.get(), sync_buffer_.get()));
// Verify that |decoded_buffer_| is long enough.
// Verify that `decoded_buffer_` is long enough.
if (decoded_buffer_length_ < kMaxFrameSize * channels) {
// Reallocate to larger size.
decoded_buffer_length_ = kMaxFrameSize * channels;
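The sample-borrowing dance in the DoAccelerate / DoPreemptiveExpand hunks above boils down to this, simplified to one channel; the names and the exact copy-back step are assumptions, not the NetEq code.

#include <cstddef>
#include <cstdint>
#include <cstring>

// If the decoded frame is shorter than the 30 ms the time stretcher needs,
// prepend the freshest samples from the sync buffer. The caller must later
// write the first `borrowed` output samples back to the sync buffer.
size_t BorrowFromSyncBufferSketch(int16_t* decoded_buffer,
                                  size_t decoded_samples,
                                  size_t required_samples,
                                  const int16_t* sync_buffer_tail) {
  if (decoded_samples >= required_samples) return 0;
  const size_t borrowed = required_samples - decoded_samples;
  // Make room at the front, then copy the borrowed samples in.
  memmove(decoded_buffer + borrowed, decoded_buffer,
          sizeof(int16_t) * decoded_samples);
  memcpy(decoded_buffer, sync_buffer_tail, sizeof(int16_t) * borrowed);
  return borrowed;
}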

View File

@ -141,7 +141,7 @@ class NetEqImpl : public webrtc::NetEq {
bool RegisterPayloadType(int rtp_payload_type,
const SdpAudioFormat& audio_format) override;
// Removes |rtp_payload_type| from the codec database. Returns 0 on success,
// Removes `rtp_payload_type` from the codec database. Returns 0 on success,
// -1 on failure.
int RemovePayloadType(uint8_t rtp_payload_type) override;
@ -159,7 +159,7 @@ class NetEqImpl : public webrtc::NetEq {
int FilteredCurrentDelayMs() const override;
// Writes the current network statistics to |stats|. The statistics are reset
// Writes the current network statistics to `stats`. The statistics are reset
// after the call.
int NetworkStatistics(NetEqNetworkStatistics* stats) override;
@ -215,7 +215,7 @@ class NetEqImpl : public webrtc::NetEq {
rtc::ArrayView<const uint8_t> payload)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Delivers 10 ms of audio data. The data is written to |audio_frame|.
// Delivers 10 ms of audio data. The data is written to `audio_frame`.
// Returns 0 on success, otherwise an error code.
int GetAudioInternal(AudioFrame* audio_frame,
bool* muted,
@ -223,9 +223,9 @@ class NetEqImpl : public webrtc::NetEq {
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Provides a decision to the GetAudioInternal method. The decision what to
// do is written to |operation|. Packets to decode are written to
// |packet_list|, and a DTMF event to play is written to |dtmf_event|. When
// DTMF should be played, |play_dtmf| is set to true by the method.
// do is written to `operation`. Packets to decode are written to
// `packet_list`, and a DTMF event to play is written to `dtmf_event`. When
// DTMF should be played, `play_dtmf` is set to true by the method.
// Returns 0 on success, otherwise an error code.
int GetDecision(Operation* operation,
PacketList* packet_list,
@ -234,11 +234,11 @@ class NetEqImpl : public webrtc::NetEq {
absl::optional<Operation> action_override)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Decodes the speech packets in |packet_list|, and writes the results to
// |decoded_buffer|, which is allocated to hold |decoded_buffer_length|
// elements. The length of the decoded data is written to |decoded_length|.
// Decodes the speech packets in `packet_list`, and writes the results to
// `decoded_buffer`, which is allocated to hold `decoded_buffer_length`
// elements. The length of the decoded data is written to `decoded_length`.
// The speech type -- speech or (codec-internal) comfort noise -- is written
// to |speech_type|. If |packet_list| contains any SID frames for RFC 3389
// to `speech_type`. If `packet_list` contains any SID frames for RFC 3389
// comfort noise, those are not decoded.
int Decode(PacketList* packet_list,
Operation* operation,
@ -293,7 +293,7 @@ class NetEqImpl : public webrtc::NetEq {
bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Sub-method which calls the ComfortNoise class to generate RFC 3389 comfort
// noise. |packet_list| can either contain one SID frame to update the
// noise. `packet_list` can either contain one SID frame to update the
// noise parameters, or no payload at all, in which case the previously
// received parameters are used.
int DoRfc3389Cng(PacketList* packet_list, bool play_dtmf)
@ -308,20 +308,20 @@ class NetEqImpl : public webrtc::NetEq {
int DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Overdub DTMF on top of |output|.
// Overdub DTMF on top of `output`.
int DtmfOverdub(const DtmfEvent& dtmf_event,
size_t num_channels,
int16_t* output) const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Extracts packets from |packet_buffer_| to produce at least
// |required_samples| samples. The packets are inserted into |packet_list|.
// Extracts packets from `packet_buffer_` to produce at least
// `required_samples` samples. The packets are inserted into `packet_list`.
// Returns the number of samples that the packets in the list will produce, or
// -1 in case of an error.
int ExtractPackets(size_t required_samples, PacketList* packet_list)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Resets various variables and objects to new values based on the sample rate
// |fs_hz| and |channels| number audio channels.
// `fs_hz` and the number of audio channels `channels`.
void SetSampleRateAndChannels(int fs_hz, size_t channels)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

View File

@ -207,8 +207,8 @@ class NetEqImplTest : public ::testing::Test {
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
// DTMF packets are immediately consumed by |InsertPacket()| and won't be
// returned by |GetAudio()|.
// DTMF packets are immediately consumed by `InsertPacket()` and won't be
// returned by `GetAudio()`.
EXPECT_THAT(output.packet_infos_, IsEmpty());
// Verify first 64 samples of actual output.
@ -461,7 +461,7 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
public:
CountingSamplesDecoder() : next_value_(1) {}
// Produce as many samples as input bytes (|encoded_len|).
// Produce as many samples as input bytes (`encoded_len`).
int DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int /* sample_rate_hz */,
@ -578,7 +578,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
.WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
int16_t dummy_output[kPayloadLengthSamples] = {0};
// The below expectation will make the mock decoder write
// |kPayloadLengthSamples| zeros to the output array, and mark it as speech.
// `kPayloadLengthSamples` zeros to the output array, and mark it as speech.
EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes,
kSampleRateHz, _, _))
.WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
@ -1284,7 +1284,7 @@ TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
.WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
int16_t dummy_output[kPayloadLengthSamples] = {0};
// The below expectation will make the mock decoder write
// |kPayloadLengthSamples| - 5 zeros to the output array, and mark it as
// `kPayloadLengthSamples` - 5 zeros to the output array, and mark it as
// speech. That is, the decoded length is 5 samples shorter than the expected.
EXPECT_CALL(mock_decoder,
DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))

View File

@ -188,11 +188,11 @@ class NetEqNetworkStatsTest {
: 0xffffffff);
}
// |stats_ref|
// `stats_ref`
// expects.x = -1, do not care
// expects.x = 0, 'x' in current stats should equal 'x' in |stats_ref|
// expects.x = 1, 'x' in current stats should < 'x' in |stats_ref|
// expects.x = 2, 'x' in current stats should > 'x' in |stats_ref|
// expects.x = 0, 'x' in current stats should equal 'x' in `stats_ref`
// expects.x = 1, 'x' in current stats should < 'x' in `stats_ref`
// expects.x = 2, 'x' in current stats should > 'x' in `stats_ref`
void CheckNetworkStatistics(NetEqNetworkStatsCheck expects) {
NetEqNetworkStatistics stats;
neteq_->NetworkStatistics(&stats);
@ -229,7 +229,7 @@ class NetEqNetworkStatsTest {
uint32_t time_now;
uint32_t next_send_time;
// Initiate |last_lost_time_|.
// Initialize `last_lost_time_`.
time_now = next_send_time = last_lost_time_ = rtp_generator_->GetRtpHeader(
kPayloadType, frame_size_samples_, &rtp_header_);
for (int k = 0; k < num_loops; ++k) {

@ -305,7 +305,7 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
PopulateRtpInfo(0, 0, &rtp_info);
rtp_info.payloadType = 103; // iSAC, but the payload is invalid.
EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
// Set all of |out_data_| to 1, and verify that it was set to 0 by the call
// Set all of `out_data_` to 1, and verify that it was set to 0 by the call
// to GetAudio.
int16_t* out_frame_data = out_frame_.mutable_data();
for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
@ -327,7 +327,7 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
}
TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
// Set all of |out_data_| to 1, and verify that it was set to 0 by the call
// Set all of `out_data_` to 1, and verify that it was set to 0 by the call
// to GetAudio.
int16_t* out_frame_data = out_frame_.mutable_data();
for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
@ -371,7 +371,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
AudioFrame output;
test::AudioLoop input;
// We are using the same 32 kHz input file for all tests, regardless of
// |sampling_rate_hz|. The output may sound weird, but the test is still
// `sampling_rate_hz`. The output may sound weird, but the test is still
// valid.
ASSERT_TRUE(input.Init(
webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
@ -534,7 +534,7 @@ TEST_F(NetEqDecodingTest, DiscardDuplicateCng) {
ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
payload, payload_len)));
// Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
// Pull audio until we have played `kCngPeriodMs` of CNG. Start at 10 ms since
// we have already pulled out CNG once.
for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));

@ -45,7 +45,7 @@ int Normal::Process(const int16_t* input,
const int fs_mult = fs_hz_ / 8000;
RTC_DCHECK_GT(fs_mult, 0);
// fs_shift = log2(fs_mult), rounded down.
// Note that |fs_shift| is not "exact" for 48 kHz.
// Note that `fs_shift` is not "exact" for 48 kHz.
// TODO(hlundin): Investigate this further.
const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
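
To see why `fs_shift` is inexact at 48 kHz: `fs_mult` is 48000 / 8000 = 6, and 30 - WebRtcSpl_NormW32(6) gives floor(log2(6)) = 2, while log2(6) is about 2.585. A self-contained check, with `NormW32` as a stand-in for the SPL routine (assumed behavior for small positive inputs):

    #include <cassert>
    #include <cstdint>

    // Stand-in: number of left shifts until bit 30 is the highest set bit.
    int NormW32(int32_t x) {
      assert(x > 0);
      int n = 0;
      while ((x << n) < (1 << 30)) ++n;  // safe for small positive `x`
      return n;
    }

    int main() {
      const int fs_mult = 48000 / 8000;            // 6
      const int fs_shift = 30 - NormW32(fs_mult);  // NormW32(6) == 28
      assert(fs_shift == 2);  // floor(log2(6)); log2(6) ~= 2.585
    }
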
@ -83,7 +83,7 @@ int Normal::Process(const int16_t* input,
size_t energy_length =
std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
int scaling = 6 + fs_shift - WebRtcSpl_NormW32(decoded_max * decoded_max);
scaling = std::max(scaling, 0); // |scaling| should always be >= 0.
scaling = std::max(scaling, 0); // `scaling` should always be >= 0.
int32_t energy = WebRtcSpl_DotProductWithScale(signal.get(), signal.get(),
energy_length, scaling);
int32_t scaled_energy_length =

@ -49,11 +49,11 @@ class Normal {
virtual ~Normal() {}
// Performs the "Normal" operation. The decoder data is supplied in |input|,
// having |length| samples in total for all channels (interleaved). The
// result is written to |output|. The number of channels allocated in
// |output| defines the number of channels that will be used when
// de-interleaving |input|. |last_mode| contains the mode used in the previous
// Performs the "Normal" operation. The decoder data is supplied in `input`,
// having `length` samples in total for all channels (interleaved). The
// result is written to `output`. The number of channels allocated in
// `output` defines the number of channels that will be used when
// de-interleaving `input`. `last_mode` contains the mode used in the previous
// GetAudio call (i.e., not the current one).
int Process(const int16_t* input,
size_t length,
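
The de-interleaving contract described above (channel count taken from the output, input consumed round-robin) can be sketched as follows; `Deinterleave` is a hypothetical helper, not part of the Normal class:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Splits `length` interleaved samples across `channels` output vectors.
    std::vector<std::vector<int16_t>> Deinterleave(const int16_t* input,
                                                   size_t length,
                                                   size_t channels) {
      std::vector<std::vector<int16_t>> out(channels);
      for (size_t i = 0; i < length; ++i) {
        out[i % channels].push_back(input[i]);
      }
      return out;
    }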

@ -51,7 +51,7 @@ TEST(Normal, CreateAndDestroy) {
StatisticsCalculator statistics;
Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
Normal normal(fs, &db, bgn, &expand, &statistics);
EXPECT_CALL(db, Die()); // Called when |db| goes out of scope.
EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
}
TEST(Normal, AvoidDivideByZero) {
@ -85,8 +85,8 @@ TEST(Normal, AvoidDivideByZero) {
EXPECT_EQ(input_size_samples, normal.Process(input, input_size_samples,
NetEq::Mode::kExpand, &output));
EXPECT_CALL(db, Die()); // Called when |db| goes out of scope.
EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope.
EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope.
}
TEST(Normal, InputLengthAndChannelsDoNotMatch) {
@ -109,8 +109,8 @@ TEST(Normal, InputLengthAndChannelsDoNotMatch) {
EXPECT_EQ(0, normal.Process(input, input_len, NetEq::Mode::kExpand, &output));
EXPECT_EQ(0u, output.Size());
EXPECT_CALL(db, Die()); // Called when |db| goes out of scope.
EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope.
EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope.
}
TEST(Normal, LastModeExpand120msPacket) {
@ -138,8 +138,8 @@ TEST(Normal, LastModeExpand120msPacket) {
EXPECT_EQ(kPacketsizeBytes, output.Size());
EXPECT_CALL(db, Die()); // Called when |db| goes out of scope.
EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope.
EXPECT_CALL(db, Die()); // Called when `db` goes out of scope.
EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope.
}
// TODO(hlundin): Write more tests.
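
The repeated `Die()` expectations in these tests rely on a common gmock pattern: the mock's destructor forwards to a mocked `Die()` method, so the expectation is satisfied exactly when the object leaves scope. A sketch with a hypothetical `MockThing` (standing in for the tests' `db` and `expand` doubles):

    #include <gmock/gmock.h>

    class MockThing {
     public:
      virtual ~MockThing() { Die(); }  // destructor reports via the mock
      MOCK_METHOD(void, Die, ());
    };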

@ -84,8 +84,8 @@ struct Packet {
// Packets should generally be moved around but sometimes it's useful to make
// a copy, for example for testing purposes. NOTE: Will only work for
// un-parsed packets, i.e. |frame| must be unset. The payload will, however,
// be copied. |waiting_time| will also not be copied.
// un-parsed packets, i.e. `frame` must be unset. The payload will, however,
// be copied. `waiting_time` will also not be copied.
Packet Clone() const;
Packet& operator=(Packet&& b);
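
A hedged sketch of the contract `Clone()` documents, using simplified field types (this is an illustration, not the actual implementation):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ToyPacket {
      uint32_t timestamp = 0;
      std::vector<uint8_t> payload;  // deep-copied by Clone()
      void* frame = nullptr;         // must be unset for Clone() to be legal
      int waiting_time = 0;          // deliberately not copied

      ToyPacket Clone() const {
        assert(frame == nullptr);  // only un-parsed packets may be copied
        ToyPacket clone;
        clone.timestamp = timestamp;
        clone.payload = payload;  // the payload is copied...
        return clone;             // ...but `waiting_time` stays default
      }
    };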

@ -33,7 +33,7 @@
namespace webrtc {
namespace {
// Predicate used when inserting packets in the buffer list.
// Operator() returns true when |packet| goes before |new_packet|.
// Operator() returns true when `packet` goes before `new_packet`.
class NewTimestampIsLarger {
public:
explicit NewTimestampIsLarger(const Packet& new_packet)
@ -183,16 +183,16 @@ int PacketBuffer::InsertPacket(Packet&& packet,
PacketList::reverse_iterator rit = std::find_if(
buffer_.rbegin(), buffer_.rend(), NewTimestampIsLarger(packet));
// The new packet is to be inserted to the right of |rit|. If it has the same
// timestamp as |rit|, which has a higher priority, do not insert the new
// The new packet is to be inserted to the right of `rit`. If it has the same
// timestamp as `rit`, which has a higher priority, do not insert the new
// packet to list.
if (rit != buffer_.rend() && packet.timestamp == rit->timestamp) {
LogPacketDiscarded(packet.priority.codec_level, stats);
return return_val;
}
// The new packet is to be inserted to the left of |it|. If it has the same
// timestamp as |it|, which has a lower priority, replace |it| with the new
// The new packet is to be inserted to the left of `it`. If it has the same
// timestamp as `it`, which has a lower priority, replace `it` with the new
// packet.
PacketList::iterator it = rit.base();
if (it != buffer_.end() && packet.timestamp == it->timestamp) {
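
The two equal-timestamp branches above can be modeled on a toy list kept sorted by (timestamp, priority), assuming a smaller priority value wins, which mirrors NetEq's packet ordering; this sketch is not the real code:

    #include <algorithm>
    #include <cstdint>
    #include <list>

    struct ToyPacket {
      uint32_t timestamp;
      int priority;  // smaller value = higher priority
    };

    void Insert(std::list<ToyPacket>* buffer, const ToyPacket& packet) {
      // From the back, find the first packet that goes before (or ties
      // with) the new one, mirroring NewTimestampIsLarger.
      auto rit = std::find_if(
          buffer->rbegin(), buffer->rend(), [&](const ToyPacket& p) {
            return p.timestamp < packet.timestamp ||
                   (p.timestamp == packet.timestamp &&
                    p.priority <= packet.priority);
          });
      if (rit != buffer->rend() && rit->timestamp == packet.timestamp) {
        return;  // same timestamp, existing has higher priority: discard
      }
      auto it = rit.base();  // first element to the right of `rit`
      if (it != buffer->end() && it->timestamp == packet.timestamp) {
        it = buffer->erase(it);  // same timestamp, lower priority: replace
      }
      buffer->insert(it, packet);
    }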

@ -45,7 +45,7 @@ class PacketBuffer {
};
// Constructor creates a buffer which can hold a maximum of
// |max_number_of_packets| packets.
// `max_number_of_packets` packets.
PacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer);
// Deletes all packets in the buffer before destroying the buffer.
@ -63,7 +63,7 @@ class PacketBuffer {
// Returns true for an empty buffer.
virtual bool Empty() const;
// Inserts |packet| into the buffer. The buffer will take over ownership of
// Inserts `packet` into the buffer. The buffer will take over ownership of
// the packet object.
// Returns PacketBuffer::kOK on success, PacketBuffer::kFlushed if the buffer
// was flushed due to overfilling.
@ -93,14 +93,14 @@ class PacketBuffer {
int target_level_ms);
// Gets the timestamp for the first packet in the buffer and writes it to the
// output variable |next_timestamp|.
// output variable `next_timestamp`.
// Returns PacketBuffer::kBufferEmpty if the buffer is empty,
// PacketBuffer::kOK otherwise.
virtual int NextTimestamp(uint32_t* next_timestamp) const;
// Gets the timestamp for the first packet in the buffer with a timestamp no
// lower than the input limit |timestamp|. The result is written to the output
// variable |next_timestamp|.
// lower than the input limit `timestamp`. The result is written to the output
// variable `next_timestamp`.
// Returns PacketBuffer::kBufferEmpty if the buffer is empty,
// PacketBuffer::kOK otherwise.
virtual int NextHigherTimestamp(uint32_t timestamp,
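
Modeling the two getters above on a toy buffer sorted by timestamp (a sketch: `std::optional` stands in for the kOK/kBufferEmpty return codes, and `ToyPacket` is hypothetical):

    #include <cstdint>
    #include <list>
    #include <optional>

    struct ToyPacket {
      uint32_t timestamp;
    };

    // Timestamp of the first packet, or nullopt when the buffer is empty.
    std::optional<uint32_t> NextTimestamp(const std::list<ToyPacket>& buffer) {
      if (buffer.empty()) return std::nullopt;
      return buffer.front().timestamp;
    }

    // First timestamp no lower than `limit`, assuming ascending order.
    std::optional<uint32_t> NextHigherTimestamp(
        const std::list<ToyPacket>& buffer, uint32_t limit) {
      for (const ToyPacket& p : buffer) {
        if (p.timestamp >= limit) return p.timestamp;
      }
      return std::nullopt;
    }
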
@ -154,11 +154,11 @@ class PacketBuffer {
virtual bool ContainsDtxOrCngPacket(
const DecoderDatabase* decoder_database) const;
// Static method returning true if |timestamp| is older than |timestamp_limit|
// but less than |horizon_samples| behind |timestamp_limit|. For instance,
// Static method returning true if `timestamp` is older than `timestamp_limit`
// but less than `horizon_samples` behind `timestamp_limit`. For instance,
// with timestamp_limit = 100 and horizon_samples = 10, a timestamp in the
// range (90, 100) is considered obsolete, and will yield true.
// Setting |horizon_samples| to 0 is the same as setting it to 2^31, i.e.,
// Setting `horizon_samples` to 0 is the same as setting it to 2^31, i.e.,
// half the 32-bit timestamp range.
static bool IsObsoleteTimestamp(uint32_t timestamp,
uint32_t timestamp_limit,
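
The wraparound-aware comparison documented above falls out of unsigned arithmetic. A sketch consistent with the comment (assumed semantics, not the actual implementation):

    #include <cassert>
    #include <cstdint>

    bool IsObsolete(uint32_t timestamp, uint32_t timestamp_limit,
                    uint32_t horizon_samples) {
      // Unsigned subtraction wraps, so `age` measures how far `timestamp`
      // lies behind `timestamp_limit`, modulo 2^32.
      const uint32_t age = timestamp_limit - timestamp;
      if (age == 0) return false;  // equal timestamps are not obsolete
      const uint32_t horizon =
          horizon_samples == 0 ? 0x80000000u : horizon_samples;  // 0 -> 2^31
      return age < horizon;
    }

    int main() {
      assert(IsObsolete(95, 100, 10));    // in (90, 100): obsolete
      assert(!IsObsolete(90, 100, 10));   // exactly at the horizon: kept
      assert(!IsObsolete(105, 100, 10));  // newer than the limit: kept
    }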
