Comment unused variables in implemented functions 13

Increased the number of errors the automation fixes from 75 to 150 in this
commit. An illustrative sketch of the resulting pattern appears after the
commit metadata below.

Bug: webrtc:370878648
Change-Id: If6e6a5f40db7eb54c27c1a85fb7031838e478c70
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/366205
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Commit-Queue: Dor Hen <dorhen@meta.com>
Cr-Commit-Position: refs/heads/main@{#43337}
Authored by Dor Hen on 2024-10-30 11:45:22 +02:00; committed by WebRTC LUCI CQ
parent 297fe1a2d9
commit 3fa21c89c0
53 changed files with 389 additions and 351 deletions
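
For readers unfamiliar with the pattern: the automation comments out the names
of parameters that an implemented function must accept (typically interface
overrides and test stubs) but does not use. Unnamed parameters do not trigger
-Wunused-parameter (enabled by -Wextra), so the warning disappears while each
argument's purpose stays documented in the signature. The sketch below is
illustrative only; the AudioSink interface and FakeAudioSink class are invented
for the example and are not part of this change.

#include <cstdint>

// Hypothetical interface, standing in for the WebRTC interfaces touched below.
class AudioSink {
 public:
  virtual ~AudioSink() = default;
  virtual int32_t OnData(const void* audio_data, int sample_rate) = 0;
};

// Before the automation: named but unused parameters, so -Wunused-parameter
// warns on this stub:
//
//   int32_t OnData(const void* audio_data, int sample_rate) override {
//     return 0;
//   }

// After the automation: the names are moved into comments. The signature is
// unchanged and still documents what each argument means, but the parameters
// are now formally unnamed, so the compiler has nothing to flag as unused.
class FakeAudioSink : public AudioSink {
 public:
  int32_t OnData(const void* /* audio_data */, int /* sample_rate */) override {
    return 0;
  }
};

int main() {
  FakeAudioSink sink;
  return sink.OnData(nullptr, 48000);  // Arguments are accepted and ignored.
}

Where a parameter is referenced only under a preprocessor condition, commenting
out its name would break the conditional body; that is why the ApmDataDumper
changes further down use [[maybe_unused]] instead of the comment form, since
those names are still used when WEBRTC_APM_DEBUG_DUMP == 1.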

View File

@ -195,7 +195,7 @@ TEST_F(AudioEgressTest, SkipAudioEncodingAfterStopSend) {
constexpr int kExpected = 10;
rtc::Event event;
int rtp_count = 0;
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> /* packet */, Unused) {
if (++rtp_count == kExpected) {
event.Set();
}
@ -296,7 +296,7 @@ TEST_F(AudioEgressTest, TestAudioInputLevelAndEnergyDuration) {
constexpr int kExpected = 6;
rtc::Event event;
int rtp_count = 0;
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> /* packet */, Unused) {
if (++rtp_count == kExpected) {
event.Set();
}

View File

@ -194,12 +194,12 @@ FakeVoiceMediaSendChannel::VoiceChannelAudioSink::~VoiceChannelAudioSink() {
}
}
void FakeVoiceMediaSendChannel::VoiceChannelAudioSink::OnData(
const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
std::optional<int64_t> absolute_capture_timestamp_ms) {}
const void* /* audio_data */,
int /* bits_per_sample */,
int /* sample_rate */,
size_t /* number_of_channels */,
size_t /* number_of_frames */,
std::optional<int64_t> /* absolute_capture_timestamp_ms */) {}
void FakeVoiceMediaSendChannel::VoiceChannelAudioSink::OnClose() {
source_ = nullptr;
}
@ -288,7 +288,7 @@ bool FakeVoiceMediaSendChannel::GetOutputVolume(uint32_t ssrc, double* volume) {
*volume = output_scalings_[ssrc];
return true;
}
bool FakeVoiceMediaSendChannel::GetStats(VoiceMediaSendInfo* info) {
bool FakeVoiceMediaSendChannel::GetStats(VoiceMediaSendInfo* /* info */) {
return false;
}
bool FakeVoiceMediaSendChannel::SetSendCodecs(
@ -388,8 +388,8 @@ bool FakeVideoMediaSendChannel::HasSource(uint32_t ssrc) const {
return sources_.find(ssrc) != sources_.end() && sources_.at(ssrc) != nullptr;
}
void FakeVideoMediaSendChannel::FillBitrateInfo(
BandwidthEstimationInfo* bwe_info) {}
bool FakeVideoMediaSendChannel::GetStats(VideoMediaSendInfo* info) {
BandwidthEstimationInfo* /* bwe_info */) {}
bool FakeVideoMediaSendChannel::GetStats(VideoMediaSendInfo* /* info */) {
return false;
}
bool FakeVideoMediaSendChannel::SetSendCodecs(
@ -412,8 +412,8 @@ bool FakeVideoMediaSendChannel::SetMaxSendBandwidth(int bps) {
return true;
}
void FakeVideoMediaSendChannel::GenerateSendKeyFrame(
uint32_t ssrc,
const std::vector<std::string>& rids) {}
uint32_t /* ssrc */,
const std::vector<std::string>& /* rids */) {}
FakeVideoMediaReceiveChannel::FakeVideoMediaReceiveChannel(
const VideoOptions& options,
@ -457,7 +457,7 @@ bool FakeVideoMediaReceiveChannel::SetSink(
return true;
}
void FakeVideoMediaReceiveChannel::SetDefaultSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {}
rtc::VideoSinkInterface<webrtc::VideoFrame>* /* sink */) {}
bool FakeVideoMediaReceiveChannel::HasSink(uint32_t ssrc) const {
return sinks_.find(ssrc) != sinks_.end() && sinks_.at(ssrc) != nullptr;
}
@ -481,7 +481,7 @@ bool FakeVideoMediaReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
return true;
}
std::vector<webrtc::RtpSource> FakeVideoMediaReceiveChannel::GetSources(
uint32_t ssrc) const {
uint32_t /* ssrc */) const {
return {};
}
bool FakeVideoMediaReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
@ -521,15 +521,16 @@ bool FakeVideoMediaReceiveChannel::SetMaxSendBandwidth(int bps) {
}
void FakeVideoMediaReceiveChannel::SetRecordableEncodedFrameCallback(
uint32_t ssrc,
std::function<void(const webrtc::RecordableEncodedFrame&)> callback) {}
uint32_t /* ssrc */,
std::function<void(const webrtc::RecordableEncodedFrame&)> /* callback */) {
}
void FakeVideoMediaReceiveChannel::ClearRecordableEncodedFrameCallback(
uint32_t ssrc) {}
uint32_t /* ssrc */) {}
void FakeVideoMediaReceiveChannel::RequestRecvKeyFrame(uint32_t ssrc) {}
void FakeVideoMediaReceiveChannel::RequestRecvKeyFrame(uint32_t /* ssrc */) {}
bool FakeVideoMediaReceiveChannel::GetStats(VideoMediaReceiveInfo* info) {
bool FakeVideoMediaReceiveChannel::GetStats(VideoMediaReceiveInfo* /* info */) {
return false;
}
@ -543,11 +544,12 @@ rtc::scoped_refptr<webrtc::AudioState> FakeVoiceEngine::GetAudioState() const {
return rtc::scoped_refptr<webrtc::AudioState>();
}
std::unique_ptr<VoiceMediaSendChannelInterface>
FakeVoiceEngine::CreateSendChannel(webrtc::Call* call,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::AudioCodecPairId codec_pair_id) {
FakeVoiceEngine::CreateSendChannel(
webrtc::Call* call,
const MediaConfig& /* config */,
const AudioOptions& options,
const webrtc::CryptoOptions& /* crypto_options */,
webrtc::AudioCodecPairId /* codec_pair_id */) {
std::unique_ptr<FakeVoiceMediaSendChannel> ch =
std::make_unique<FakeVoiceMediaSendChannel>(options,
call->network_thread());
@ -556,10 +558,10 @@ FakeVoiceEngine::CreateSendChannel(webrtc::Call* call,
std::unique_ptr<VoiceMediaReceiveChannelInterface>
FakeVoiceEngine::CreateReceiveChannel(
webrtc::Call* call,
const MediaConfig& config,
const MediaConfig& /* config */,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::AudioCodecPairId codec_pair_id) {
const webrtc::CryptoOptions& /* crypto_options */,
webrtc::AudioCodecPairId /* codec_pair_id */) {
std::unique_ptr<FakeVoiceMediaReceiveChannel> ch =
std::make_unique<FakeVoiceMediaReceiveChannel>(options,
call->network_thread());
@ -584,8 +586,8 @@ void FakeVoiceEngine::SetSendCodecs(const std::vector<Codec>& codecs) {
int FakeVoiceEngine::GetInputLevel() {
return 0;
}
bool FakeVoiceEngine::StartAecDump(webrtc::FileWrapper file,
int64_t max_size_bytes) {
bool FakeVoiceEngine::StartAecDump(webrtc::FileWrapper /* file */,
int64_t /* max_size_bytes */) {
return false;
}
std::optional<webrtc::AudioDeviceModule::Stats>
@ -618,10 +620,11 @@ bool FakeVideoEngine::SetOptions(const VideoOptions& options) {
std::unique_ptr<VideoMediaSendChannelInterface>
FakeVideoEngine::CreateSendChannel(
webrtc::Call* call,
const MediaConfig& config,
const MediaConfig& /* config */,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
const webrtc::CryptoOptions& /* crypto_options */,
webrtc::
VideoBitrateAllocatorFactory* /* video_bitrate_allocator_factory */) {
if (fail_create_channel_) {
return nullptr;
}
@ -634,9 +637,9 @@ FakeVideoEngine::CreateSendChannel(
std::unique_ptr<VideoMediaReceiveChannelInterface>
FakeVideoEngine::CreateReceiveChannel(
webrtc::Call* call,
const MediaConfig& config,
const MediaConfig& /* config */,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options) {
const webrtc::CryptoOptions& /* crypto_options */) {
if (fail_create_channel_) {
return nullptr;
}
@ -646,11 +649,11 @@ FakeVideoEngine::CreateReceiveChannel(
call->network_thread());
return ch;
}
std::vector<Codec> FakeVideoEngine::send_codecs(bool use_rtx) const {
std::vector<Codec> FakeVideoEngine::send_codecs(bool /* use_rtx */) const {
return send_codecs_;
}
std::vector<Codec> FakeVideoEngine::recv_codecs(bool use_rtx) const {
std::vector<Codec> FakeVideoEngine::recv_codecs(bool /* use_rtx */) const {
return recv_codecs_;
}

View File

@ -4128,7 +4128,7 @@ class Vp9SettingsTestWithFieldTrial
void VerifySettings(int num_spatial_layers,
int num_temporal_layers,
webrtc::InterLayerPredMode interLayerPred) {
webrtc::InterLayerPredMode /* interLayerPred */) {
cricket::VideoSenderParameters parameters;
parameters.codecs.push_back(GetEngineCodec("VP9"));
ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));

View File

@ -239,11 +239,11 @@ TEST(WebRtcVoiceEngineTestStubLibrary, StartupShutdown) {
class FakeAudioSink : public webrtc::AudioSinkInterface {
public:
void OnData(const Data& audio) override {}
void OnData(const Data& /* audio */) override {}
};
class FakeAudioSource : public cricket::AudioSource {
void SetSink(Sink* sink) override {}
void SetSink(Sink* /* sink */) override {}
};
class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam<bool> {
@ -714,7 +714,7 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam<bool> {
}
}
void VerifyVoiceSenderInfo(const cricket::VoiceSenderInfo& info,
bool is_sending) {
bool /* is_sending */) {
const auto stats = GetAudioSendStreamStats();
EXPECT_EQ(info.ssrc(), stats.local_ssrc);
EXPECT_EQ(info.payload_bytes_sent, stats.payload_bytes_sent);

View File

@ -124,11 +124,11 @@ class ADMWrapper : public AudioDeviceModule, public AudioTransport {
void PullRenderData(int /* bits_per_sample */,
int /* sample_rate */,
size_t number_of_channels,
size_t number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) override {
size_t /* number_of_channels */,
size_t /* number_of_frames */,
void* /* audio_data */,
int64_t* /* elapsed_time_ms */,
int64_t* /* ntp_time_ms */) override {
RTC_DCHECK_NOTREACHED();
}

View File

@ -159,8 +159,8 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
int RestartPlayoutInternally() override { return -1; }
int RestartRecordingInternally() override { return -1; }
int SetPlayoutSampleRate(uint32_t sample_rate) override { return -1; }
int SetRecordingSampleRate(uint32_t sample_rate) override { return -1; }
int SetPlayoutSampleRate(uint32_t /* sample_rate */) override { return -1; }
int SetRecordingSampleRate(uint32_t /* sample_rate */) override { return -1; }
private:
PlatformType Platform() const;

View File

@ -369,11 +369,11 @@ class MockAudioTransport : public test::MockAudioTransport {
const size_t bytes_per_frame,
const size_t channels,
const uint32_t sample_rate,
const uint32_t total_delay_ms,
const int32_t clock_drift,
const uint32_t current_mic_level,
const bool typing_status,
uint32_t& new_mic_level) {
const uint32_t /* total_delay_ms */,
const int32_t /* clock_drift */,
const uint32_t /* current_mic_level */,
const bool /* typing_status */,
uint32_t& /* new_mic_level */) {
EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
// Store audio parameters once in the first callback. For all other
// callbacks, verify that the provided audio parameters are maintained and
@ -412,8 +412,8 @@ class MockAudioTransport : public test::MockAudioTransport {
const uint32_t sample_rate,
void* audio_buffer,
size_t& samples_out,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) {
int64_t* /* elapsed_time_ms */,
int64_t* /* ntp_time_ms */) {
EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
// Store audio parameters once in the first callback. For all other
// callbacks, verify that the provided audio parameters are maintained and

View File

@ -157,72 +157,73 @@ int32_t AudioDeviceDummy::MicrophoneVolumeIsAvailable(bool& /* available */) {
return -1;
}
int32_t AudioDeviceDummy::SetMicrophoneVolume(uint32_t volume) {
int32_t AudioDeviceDummy::SetMicrophoneVolume(uint32_t /* volume */) {
return -1;
}
int32_t AudioDeviceDummy::MicrophoneVolume(uint32_t& volume) const {
int32_t AudioDeviceDummy::MicrophoneVolume(uint32_t& /* volume */) const {
return -1;
}
int32_t AudioDeviceDummy::MaxMicrophoneVolume(uint32_t& maxVolume) const {
int32_t AudioDeviceDummy::MaxMicrophoneVolume(uint32_t& /* maxVolume */) const {
return -1;
}
int32_t AudioDeviceDummy::MinMicrophoneVolume(uint32_t& minVolume) const {
int32_t AudioDeviceDummy::MinMicrophoneVolume(uint32_t& /* minVolume */) const {
return -1;
}
int32_t AudioDeviceDummy::SpeakerMuteIsAvailable(bool& available) {
int32_t AudioDeviceDummy::SpeakerMuteIsAvailable(bool& /* available */) {
return -1;
}
int32_t AudioDeviceDummy::SetSpeakerMute(bool enable) {
int32_t AudioDeviceDummy::SetSpeakerMute(bool /* enable */) {
return -1;
}
int32_t AudioDeviceDummy::SpeakerMute(bool& enabled) const {
int32_t AudioDeviceDummy::SpeakerMute(bool& /* enabled */) const {
return -1;
}
int32_t AudioDeviceDummy::MicrophoneMuteIsAvailable(bool& available) {
int32_t AudioDeviceDummy::MicrophoneMuteIsAvailable(bool& /* available */) {
return -1;
}
int32_t AudioDeviceDummy::SetMicrophoneMute(bool enable) {
int32_t AudioDeviceDummy::SetMicrophoneMute(bool /* enable */) {
return -1;
}
int32_t AudioDeviceDummy::MicrophoneMute(bool& enabled) const {
int32_t AudioDeviceDummy::MicrophoneMute(bool& /* enabled */) const {
return -1;
}
int32_t AudioDeviceDummy::StereoPlayoutIsAvailable(bool& available) {
int32_t AudioDeviceDummy::StereoPlayoutIsAvailable(bool& /* available */) {
return -1;
}
int32_t AudioDeviceDummy::SetStereoPlayout(bool enable) {
int32_t AudioDeviceDummy::SetStereoPlayout(bool /* enable */) {
return -1;
}
int32_t AudioDeviceDummy::StereoPlayout(bool& enabled) const {
int32_t AudioDeviceDummy::StereoPlayout(bool& /* enabled */) const {
return -1;
}
int32_t AudioDeviceDummy::StereoRecordingIsAvailable(bool& available) {
int32_t AudioDeviceDummy::StereoRecordingIsAvailable(bool& /* available */) {
return -1;
}
int32_t AudioDeviceDummy::SetStereoRecording(bool enable) {
int32_t AudioDeviceDummy::SetStereoRecording(bool /* enable */) {
return -1;
}
int32_t AudioDeviceDummy::StereoRecording(bool& enabled) const {
int32_t AudioDeviceDummy::StereoRecording(bool& /* enabled */) const {
return -1;
}
int32_t AudioDeviceDummy::PlayoutDelay(uint16_t& delayMS) const {
int32_t AudioDeviceDummy::PlayoutDelay(uint16_t& /* delayMS */) const {
return -1;
}
void AudioDeviceDummy::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {}
void AudioDeviceDummy::AttachAudioBuffer(AudioDeviceBuffer* /* audioBuffer */) {
}
} // namespace webrtc

View File

@ -400,7 +400,7 @@ int32_t FileAudioDevice::StereoPlayoutIsAvailable(bool& available) {
available = true;
return 0;
}
int32_t FileAudioDevice::SetStereoPlayout(bool enable) {
int32_t FileAudioDevice::SetStereoPlayout(bool /* enable */) {
return 0;
}
@ -414,7 +414,7 @@ int32_t FileAudioDevice::StereoRecordingIsAvailable(bool& available) {
return 0;
}
int32_t FileAudioDevice::SetStereoRecording(bool enable) {
int32_t FileAudioDevice::SetStereoRecording(bool /* enable */) {
return 0;
}
@ -423,7 +423,7 @@ int32_t FileAudioDevice::StereoRecording(bool& enabled) const {
return 0;
}
int32_t FileAudioDevice::PlayoutDelay(uint16_t& delayMS) const {
int32_t FileAudioDevice::PlayoutDelay(uint16_t& /* delayMS */) const {
return 0;
}

View File

@ -25,50 +25,50 @@ class AudioDeviceModuleDefault : public T {
AudioDeviceModuleDefault() {}
virtual ~AudioDeviceModuleDefault() {}
int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
int32_t RegisterAudioCallback(AudioTransport* /* audioCallback */) override {
return 0;
}
int32_t Init() override { return 0; }
int32_t InitSpeaker() override { return 0; }
int32_t SetPlayoutDevice(uint16_t index) override { return 0; }
int32_t SetPlayoutDevice(uint16_t /* index */) override { return 0; }
int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) override {
AudioDeviceModule::WindowsDeviceType /* device */) override {
return 0;
}
int32_t SetStereoPlayout(bool enable) override { return 0; }
int32_t SetStereoPlayout(bool /* enable */) override { return 0; }
int32_t StopPlayout() override { return 0; }
int32_t InitMicrophone() override { return 0; }
int32_t SetRecordingDevice(uint16_t index) override { return 0; }
int32_t SetRecordingDevice(uint16_t /* index */) override { return 0; }
int32_t SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device) override {
AudioDeviceModule::WindowsDeviceType /* device */) override {
return 0;
}
int32_t SetStereoRecording(bool enable) override { return 0; }
int32_t SetStereoRecording(bool /* enable */) override { return 0; }
int32_t StopRecording() override { return 0; }
int32_t Terminate() override { return 0; }
int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer* audioLayer) const override {
AudioDeviceModule::AudioLayer* /* audioLayer */) const override {
return 0;
}
bool Initialized() const override { return true; }
int16_t PlayoutDevices() override { return 0; }
int16_t RecordingDevices() override { return 0; }
int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
int32_t PlayoutDeviceName(uint16_t /* index */,
char /* name */[kAdmMaxDeviceNameSize],
char /* guid */[kAdmMaxGuidSize]) override {
return 0;
}
int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
int32_t RecordingDeviceName(uint16_t /* index */,
char /* name */[kAdmMaxDeviceNameSize],
char /* guid */[kAdmMaxGuidSize]) override {
return 0;
}
int32_t PlayoutIsAvailable(bool* available) override { return 0; }
int32_t PlayoutIsAvailable(bool* /* available */) override { return 0; }
int32_t InitPlayout() override { return 0; }
bool PlayoutIsInitialized() const override { return true; }
int32_t RecordingIsAvailable(bool* available) override { return 0; }
int32_t RecordingIsAvailable(bool* /* available */) override { return 0; }
int32_t InitRecording() override { return 0; }
bool RecordingIsInitialized() const override { return true; }
int32_t StartPlayout() override { return 0; }
@ -77,40 +77,52 @@ class AudioDeviceModuleDefault : public T {
bool Recording() const override { return false; }
bool SpeakerIsInitialized() const override { return true; }
bool MicrophoneIsInitialized() const override { return true; }
int32_t SpeakerVolumeIsAvailable(bool* available) override { return 0; }
int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
int32_t SpeakerVolume(uint32_t* volume) const override { return 0; }
int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return 0; }
int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return 0; }
int32_t MicrophoneVolumeIsAvailable(bool* available) override { return 0; }
int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
int32_t MicrophoneVolume(uint32_t* volume) const override { return 0; }
int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return 0; }
int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return 0; }
int32_t SpeakerMuteIsAvailable(bool* available) override { return 0; }
int32_t SetSpeakerMute(bool enable) override { return 0; }
int32_t SpeakerMute(bool* enabled) const override { return 0; }
int32_t MicrophoneMuteIsAvailable(bool* available) override { return 0; }
int32_t SetMicrophoneMute(bool enable) override { return 0; }
int32_t MicrophoneMute(bool* enabled) const override { return 0; }
int32_t SpeakerVolumeIsAvailable(bool* /* available */) override { return 0; }
int32_t SetSpeakerVolume(uint32_t /* volume */) override { return 0; }
int32_t SpeakerVolume(uint32_t* /* volume */) const override { return 0; }
int32_t MaxSpeakerVolume(uint32_t* /* maxVolume */) const override {
return 0;
}
int32_t MinSpeakerVolume(uint32_t* /* minVolume */) const override {
return 0;
}
int32_t MicrophoneVolumeIsAvailable(bool* /* available */) override {
return 0;
}
int32_t SetMicrophoneVolume(uint32_t /* volume */) override { return 0; }
int32_t MicrophoneVolume(uint32_t* /* volume */) const override { return 0; }
int32_t MaxMicrophoneVolume(uint32_t* /* maxVolume */) const override {
return 0;
}
int32_t MinMicrophoneVolume(uint32_t* /* minVolume */) const override {
return 0;
}
int32_t SpeakerMuteIsAvailable(bool* /* available */) override { return 0; }
int32_t SetSpeakerMute(bool /* enable */) override { return 0; }
int32_t SpeakerMute(bool* /* enabled */) const override { return 0; }
int32_t MicrophoneMuteIsAvailable(bool* /* available */) override {
return 0;
}
int32_t SetMicrophoneMute(bool /* enable */) override { return 0; }
int32_t MicrophoneMute(bool* /* enabled */) const override { return 0; }
int32_t StereoPlayoutIsAvailable(bool* available) const override {
*available = false;
return 0;
}
int32_t StereoPlayout(bool* enabled) const override { return 0; }
int32_t StereoPlayout(bool* /* enabled */) const override { return 0; }
int32_t StereoRecordingIsAvailable(bool* available) const override {
*available = false;
return 0;
}
int32_t StereoRecording(bool* enabled) const override { return 0; }
int32_t StereoRecording(bool* /* enabled */) const override { return 0; }
int32_t PlayoutDelay(uint16_t* delayMS) const override {
*delayMS = 0;
return 0;
}
bool BuiltInAECIsAvailable() const override { return false; }
int32_t EnableBuiltInAEC(bool enable) override { return -1; }
int32_t EnableBuiltInAEC(bool /* enable */) override { return -1; }
bool BuiltInAGCIsAvailable() const override { return false; }
int32_t EnableBuiltInAGC(bool enable) override { return -1; }
int32_t EnableBuiltInAGC(bool /* enable */) override { return -1; }
bool BuiltInNSIsAvailable() const override { return false; }
int32_t EnableBuiltInNS(bool enable) override { return -1; }

View File

@ -356,17 +356,17 @@ class TestAudioTransport : public AudioTransport {
~TestAudioTransport() override = default;
int32_t RecordedDataIsAvailable(
const void* audioSamples,
const void* /* audioSamples */,
size_t samples_per_channel,
size_t bytes_per_sample,
size_t number_of_channels,
uint32_t samples_per_second,
uint32_t total_delay_ms,
int32_t clock_drift,
uint32_t current_mic_level,
bool key_pressed,
uint32_t /* total_delay_ms */,
int32_t /* clock_drift */,
uint32_t /* current_mic_level */,
bool /* key_pressed */,
uint32_t& new_mic_level,
std::optional<int64_t> estimated_capture_time_ns) override {
std::optional<int64_t> /* estimated_capture_time_ns */) override {
new_mic_level = 1;
if (mode_ != Mode::kRecording) {
@ -411,26 +411,26 @@ class TestAudioTransport : public AudioTransport {
return 0;
}
int32_t RecordedDataIsAvailable(const void* audio_samples,
size_t samples_per_channel,
size_t bytes_per_sample,
size_t number_of_channels,
uint32_t samples_per_second,
uint32_t total_delay_ms,
int32_t clockDrift,
uint32_t current_mic_level,
bool key_pressed,
uint32_t& new_mic_level) override {
int32_t RecordedDataIsAvailable(const void* /* audio_samples */,
size_t /* samples_per_channel */,
size_t /* bytes_per_sample */,
size_t /* number_of_channels */,
uint32_t /* samples_per_second */,
uint32_t /* total_delay_ms */,
int32_t /* clockDrift */,
uint32_t /* current_mic_level */,
bool /* key_pressed */,
uint32_t& /* new_mic_level */) override {
RTC_CHECK(false) << "This methods should be never executed";
}
void PullRenderData(int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) override {
void PullRenderData(int /* bits_per_sample */,
int /* sample_rate */,
size_t /* number_of_channels */,
size_t /* number_of_frames */,
void* /* audio_data */,
int64_t* /* elapsed_time_ms */,
int64_t* /* ntp_time_ms */) override {
RTC_CHECK(false) << "This methods should be never executed";
}

View File

@ -44,7 +44,7 @@ class TestAudioDevice : public AudioDeviceGeneric {
// Retrieve the currently utilized audio layer
int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const override {
AudioDeviceModule::AudioLayer& /* audioLayer */) const override {
return 0;
}
@ -56,26 +56,26 @@ class TestAudioDevice : public AudioDeviceGeneric {
// Device enumeration
int16_t PlayoutDevices() override { return 0; }
int16_t RecordingDevices() override { return 0; }
int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
int32_t PlayoutDeviceName(uint16_t /* index */,
char /* name */[kAdmMaxDeviceNameSize],
char /* guid */[kAdmMaxGuidSize]) override {
return 0;
}
int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
int32_t RecordingDeviceName(uint16_t /* index */,
char /* name */[kAdmMaxDeviceNameSize],
char /* guid */[kAdmMaxGuidSize]) override {
return 0;
}
// Device selection
int32_t SetPlayoutDevice(uint16_t index) override { return 0; }
int32_t SetPlayoutDevice(uint16_t /* index */) override { return 0; }
int32_t SetPlayoutDevice(
AudioDeviceModule::WindowsDeviceType device) override {
AudioDeviceModule::WindowsDeviceType /* device */) override {
return 0;
}
int32_t SetRecordingDevice(uint16_t index) override { return 0; }
int32_t SetRecordingDevice(uint16_t /* index */) override { return 0; }
int32_t SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType device) override {
AudioDeviceModule::WindowsDeviceType /* device */) override {
return 0;
}
@ -102,42 +102,54 @@ class TestAudioDevice : public AudioDeviceGeneric {
bool MicrophoneIsInitialized() const override { return true; }
// Speaker volume controls
int32_t SpeakerVolumeIsAvailable(bool& available) override { return 0; }
int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
int32_t SpeakerVolume(uint32_t& volume) const override { return 0; }
int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override { return 0; }
int32_t MinSpeakerVolume(uint32_t& minVolume) const override { return 0; }
int32_t SpeakerVolumeIsAvailable(bool& /* available */) override { return 0; }
int32_t SetSpeakerVolume(uint32_t /* volume */) override { return 0; }
int32_t SpeakerVolume(uint32_t& /* volume */) const override { return 0; }
int32_t MaxSpeakerVolume(uint32_t& /* maxVolume */) const override {
return 0;
}
int32_t MinSpeakerVolume(uint32_t& /* minVolume */) const override {
return 0;
}
// Microphone volume controls
int32_t MicrophoneVolumeIsAvailable(bool& available) override { return 0; }
int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
int32_t MicrophoneVolume(uint32_t& volume) const override { return 0; }
int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override { return 0; }
int32_t MinMicrophoneVolume(uint32_t& minVolume) const override { return 0; }
int32_t MicrophoneVolumeIsAvailable(bool& /* available */) override {
return 0;
}
int32_t SetMicrophoneVolume(uint32_t /* volume */) override { return 0; }
int32_t MicrophoneVolume(uint32_t& /* volume */) const override { return 0; }
int32_t MaxMicrophoneVolume(uint32_t& /* maxVolume */) const override {
return 0;
}
int32_t MinMicrophoneVolume(uint32_t& /* minVolume */) const override {
return 0;
}
// Speaker mute control
int32_t SpeakerMuteIsAvailable(bool& available) override { return 0; }
int32_t SetSpeakerMute(bool enable) override { return 0; }
int32_t SpeakerMute(bool& enabled) const override { return 0; }
int32_t SpeakerMuteIsAvailable(bool& /* available */) override { return 0; }
int32_t SetSpeakerMute(bool /* enable */) override { return 0; }
int32_t SpeakerMute(bool& /* enabled */) const override { return 0; }
// Microphone mute control
int32_t MicrophoneMuteIsAvailable(bool& available) override { return 0; }
int32_t SetMicrophoneMute(bool enable) override { return 0; }
int32_t MicrophoneMute(bool& enabled) const override { return 0; }
int32_t MicrophoneMuteIsAvailable(bool& /* available */) override {
return 0;
}
int32_t SetMicrophoneMute(bool /* enable */) override { return 0; }
int32_t MicrophoneMute(bool& /* enabled */) const override { return 0; }
// Stereo support
int32_t StereoPlayoutIsAvailable(bool& available) override {
available = false;
return 0;
}
int32_t SetStereoPlayout(bool enable) override { return 0; }
int32_t StereoPlayout(bool& enabled) const override { return 0; }
int32_t SetStereoPlayout(bool /* enable */) override { return 0; }
int32_t StereoPlayout(bool& /* enabled */) const override { return 0; }
int32_t StereoRecordingIsAvailable(bool& available) override {
available = false;
return 0;
}
int32_t SetStereoRecording(bool enable) override { return 0; }
int32_t StereoRecording(bool& enabled) const override { return 0; }
int32_t SetStereoRecording(bool /* enable */) override { return 0; }
int32_t StereoRecording(bool& /* enabled */) const override { return 0; }
// Delay information and control
int32_t PlayoutDelay(uint16_t& delayMS) const override {
@ -151,9 +163,9 @@ class TestAudioDevice : public AudioDeviceGeneric {
bool BuiltInNSIsAvailable() const override { return false; }
// Windows Core Audio and Android only.
int32_t EnableBuiltInAEC(bool enable) override { return -1; }
int32_t EnableBuiltInAGC(bool enable) override { return -1; }
int32_t EnableBuiltInNS(bool enable) override { return -1; }
int32_t EnableBuiltInAEC(bool /* enable */) override { return -1; }
int32_t EnableBuiltInAGC(bool /* enable */) override { return -1; }
int32_t EnableBuiltInNS(bool /* enable */) override { return -1; }
// Play underrun count.
int32_t GetPlayoutUnderrunCount() const override { return -1; }

View File

@ -121,7 +121,7 @@ class CustomRateCalculator : public OutputRateCalculator {
public:
explicit CustomRateCalculator(int rate) : rate_(rate) {}
int CalculateOutputRateFromRange(
rtc::ArrayView<const int> preferred_rates) override {
rtc::ArrayView<const int> /* preferred_rates */) override {
return rate_;
}
@ -483,7 +483,7 @@ class HighOutputRateCalculator : public OutputRateCalculator {
public:
static const int kDefaultFrequency = 76000;
int CalculateOutputRateFromRange(
rtc::ArrayView<const int> preferred_sample_rates) override {
rtc::ArrayView<const int> /* preferred_sample_rates */) override {
return kDefaultFrequency;
}
~HighOutputRateCalculator() override {}

View File

@ -39,7 +39,7 @@ namespace {
void SetAudioFrameFields(rtc::ArrayView<const AudioFrame* const> mix_list,
size_t number_of_channels,
int sample_rate,
size_t number_of_streams,
size_t /* number_of_streams */,
AudioFrame* audio_frame_for_mixing) {
const size_t samples_per_channel =
SampleRateToDefaultChannelSize(sample_rate);

View File

@ -84,8 +84,8 @@ std::atomic<int> BlockProcessorImpl::instance_count_(0);
BlockProcessorImpl::BlockProcessorImpl(
const EchoCanceller3Config& config,
int sample_rate_hz,
size_t num_render_channels,
size_t num_capture_channels,
size_t /* num_render_channels */,
size_t /* num_capture_channels */,
std::unique_ptr<RenderDelayBuffer> render_buffer,
std::unique_ptr<RenderDelayController> delay_controller,
std::unique_ptr<EchoRemover> echo_remover)

View File

@ -107,7 +107,7 @@ bool VerifyOutputFrameBitexactness(rtc::ArrayView<const float> reference,
// EchoCanceller3 output.
class CaptureTransportVerificationProcessor : public BlockProcessor {
public:
explicit CaptureTransportVerificationProcessor(size_t num_bands) {}
explicit CaptureTransportVerificationProcessor(size_t /* num_bands */) {}
CaptureTransportVerificationProcessor() = delete;
CaptureTransportVerificationProcessor(
@ -117,27 +117,27 @@ class CaptureTransportVerificationProcessor : public BlockProcessor {
~CaptureTransportVerificationProcessor() override = default;
void ProcessCapture(bool level_change,
bool saturated_microphone_signal,
Block* linear_output,
Block* capture_block) override {}
void ProcessCapture(bool /* level_change */,
bool /* saturated_microphone_signal */,
Block* /* linear_output */,
Block* /* capture_block */) override {}
void BufferRender(const Block& block) override {}
void BufferRender(const Block& /* block */) override {}
void UpdateEchoLeakageStatus(bool leakage_detected) override {}
void UpdateEchoLeakageStatus(bool /* leakage_detected */) override {}
void GetMetrics(EchoControl::Metrics* metrics) const override {}
void GetMetrics(EchoControl::Metrics* /* metrics */) const override {}
void SetAudioBufferDelay(int delay_ms) override {}
void SetAudioBufferDelay(int /* delay_ms */) override {}
void SetCaptureOutputUsage(bool capture_output_used) {}
void SetCaptureOutputUsage(bool /* capture_output_used */) {}
};
// Class for testing that the render data is properly received by the block
// processor.
class RenderTransportVerificationProcessor : public BlockProcessor {
public:
explicit RenderTransportVerificationProcessor(size_t num_bands) {}
explicit RenderTransportVerificationProcessor(size_t /* num_bands */) {}
RenderTransportVerificationProcessor() = delete;
RenderTransportVerificationProcessor(
@ -147,9 +147,9 @@ class RenderTransportVerificationProcessor : public BlockProcessor {
~RenderTransportVerificationProcessor() override = default;
void ProcessCapture(bool level_change,
bool saturated_microphone_signal,
Block* linear_output,
void ProcessCapture(bool /* level_change */,
bool /* saturated_microphone_signal */,
Block* /* linear_output */,
Block* capture_block) override {
Block render_block = received_render_blocks_.front();
received_render_blocks_.pop_front();
@ -160,9 +160,9 @@ class RenderTransportVerificationProcessor : public BlockProcessor {
received_render_blocks_.push_back(block);
}
void UpdateEchoLeakageStatus(bool leakage_detected) override {}
void UpdateEchoLeakageStatus(bool /* leakage_detected */) override {}
void GetMetrics(EchoControl::Metrics* metrics) const override {}
void GetMetrics(EchoControl::Metrics* /* metrics */) const override {}
void SetAudioBufferDelay(int delay_ms) override {}

View File

@ -53,8 +53,8 @@ void EchoRemoverMetrics::ResetMetrics() {
void EchoRemoverMetrics::Update(
const AecState& aec_state,
const std::array<float, kFftLengthBy2Plus1>& comfort_noise_spectrum,
const std::array<float, kFftLengthBy2Plus1>& suppressor_gain) {
const std::array<float, kFftLengthBy2Plus1>& /* comfort_noise_spectrum */,
const std::array<float, kFftLengthBy2Plus1>& /* suppressor_gain */) {
metrics_reported_ = false;
if (++block_counter_ <= kMetricsCollectionBlocks) {
erl_time_domain_.UpdateInstant(aec_state.ErlTimeDomain());

View File

@ -780,7 +780,7 @@ void MatchedFilter::Update(const DownsampledRenderBuffer& render_buffer,
}
}
void MatchedFilter::LogFilterProperties(int sample_rate_hz,
void MatchedFilter::LogFilterProperties(int /* sample_rate_hz */,
size_t shift,
size_t downsampling_factor) const {
size_t alignment_shift = 0;

View File

@ -113,7 +113,7 @@ void RenderDelayControllerImpl::LogRenderCall() {}
std::optional<DelayEstimate> RenderDelayControllerImpl::GetDelay(
const DownsampledRenderBuffer& render_buffer,
size_t render_delay_buffer_delay,
size_t /* render_delay_buffer_delay */,
const Block& capture) {
++capture_call_counter_;

View File

@ -30,10 +30,10 @@ void SubbandNearendDetector::Update(
rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
nearend_spectrum,
rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
residual_echo_spectrum,
/* residual_echo_spectrum */,
rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
comfort_noise_spectrum,
bool initial_state) {
bool /* initial_state */) {
nearend_state_ = false;
for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
const std::array<float, kFftLengthBy2Plus1>& noise =

View File

@ -323,7 +323,7 @@ void SuppressionGain::LowerBandGain(
SuppressionGain::SuppressionGain(const EchoCanceller3Config& config,
Aec3Optimization optimization,
int sample_rate_hz,
int /* sample_rate_hz */,
size_t num_capture_channels)
: data_dumper_(new ApmDataDumper(instance_count_.fetch_add(1) + 1)),
optimization_(optimization),

View File

@ -45,13 +45,13 @@ class TransparentModeImpl : public TransparentMode {
prob_transparent_state_ = kInitialTransparentStateProbability;
}
void Update(int filter_delay_blocks,
bool any_filter_consistent,
bool any_filter_converged,
void Update(int /* filter_delay_blocks */,
bool /* any_filter_consistent */,
bool /* any_filter_converged */,
bool any_coarse_filter_converged,
bool all_filters_diverged,
bool /* all_filters_diverged */,
bool active_render,
bool saturated_capture) override {
bool /* saturated_capture */) override {
// The classifier is implemented as a Hidden Markov Model (HMM) with two
// hidden states: "normal" and "transparent". The estimated probabilities of
// the two states are updated by observing filter convergence during active
@ -145,7 +145,7 @@ class LegacyTransparentModeImpl : public TransparentMode {
void Update(int filter_delay_blocks,
bool any_filter_consistent,
bool any_filter_converged,
bool any_coarse_filter_converged,
bool /* any_coarse_filter_converged */,
bool all_filters_diverged,
bool active_render,
bool saturated_capture) override {

View File

@ -160,7 +160,7 @@ int GetSpeechLevelErrorDb(float speech_level_dbfs, float speech_probability) {
} // namespace
MonoAgc::MonoAgc(ApmDataDumper* data_dumper,
MonoAgc::MonoAgc(ApmDataDumper* /* data_dumper */,
int clipped_level_min,
bool disable_digital_adaptive,
int min_mic_level)

View File

@ -271,7 +271,7 @@ int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* stt,
// Gains is an 11 element long array (one value per ms, incl start & end).
int32_t WebRtcAgc_ComputeDigitalGains(DigitalAgc* stt,
const int16_t* const* in_near,
size_t num_bands,
size_t /* num_bands */,
uint32_t FS,
int16_t lowlevelSignal,
int32_t gains[11]) {

View File

@ -182,11 +182,11 @@ void ComputeAutoCorrelation(
// Searches the strongest pitch period at 24 kHz and returns its inverted lag at
// 48 kHz.
int ComputePitchPeriod48kHz(
rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
rtc::ArrayView<const float, kBufSize24kHz> /* pitch_buffer */,
rtc::ArrayView<const int> inverted_lags,
rtc::ArrayView<const float, kInitialNumLags24kHz> auto_correlation,
rtc::ArrayView<const float, kRefineNumLags24kHz> y_energy,
const VectorMath& vector_math) {
const VectorMath& /* vector_math */) {
static_assert(kMaxPitch24kHz > kInitialNumLags24kHz, "");
static_assert(kMaxPitch24kHz < kBufSize24kHz, "");
int best_inverted_lag = 0; // Pitch period.

View File

@ -42,7 +42,7 @@ AudioBuffer::AudioBuffer(size_t input_rate,
size_t buffer_rate,
size_t buffer_num_channels,
size_t output_rate,
size_t output_num_channels)
size_t /* output_num_channels */)
: input_num_frames_(static_cast<int>(input_rate) / 100),
input_num_channels_(input_num_channels),
buffer_num_frames_(static_cast<int>(buffer_rate) / 100),

View File

@ -1579,8 +1579,8 @@ int AudioProcessingImpl::ProcessReverseStream(const float* const* src,
int AudioProcessingImpl::AnalyzeReverseStreamLocked(
const float* const* src,
const StreamConfig& input_config,
const StreamConfig& output_config) {
const StreamConfig& /* input_config */,
const StreamConfig& /* output_config */) {
if (aec_dump_) {
const size_t channel_size =
formats_.api_format.reverse_input_stream().num_frames();

View File

@ -130,7 +130,7 @@ class AudioProcessingImpl : public AudioProcessing {
size_t num_reverse_channels() const override;
int stream_delay_ms() const override;
AudioProcessingStats GetStatistics(bool has_remote_tracks) override {
AudioProcessingStats GetStatistics(bool /* has_remote_tracks */) override {
return GetStatistics();
}
AudioProcessingStats GetStatistics() override {

View File

@ -63,9 +63,9 @@ class MockEchoControlFactory : public EchoControlFactory {
MockEchoControlFactory() : next_mock_(std::make_unique<MockEchoControl>()) {}
// Returns a pointer to the next MockEchoControl that this factory creates.
MockEchoControl* GetNext() const { return next_mock_.get(); }
std::unique_ptr<EchoControl> Create(int sample_rate_hz,
int num_render_channels,
int num_capture_channels) override {
std::unique_ptr<EchoControl> Create(int /* sample_rate_hz */,
int /* num_render_channels */,
int /* num_capture_channels */) override {
std::unique_ptr<EchoControl> mock = std::move(next_mock_);
next_mock_ = std::make_unique<MockEchoControl>();
return mock;
@ -88,12 +88,12 @@ class TestEchoDetector : public EchoDetector {
last_render_audio_first_sample_ = render_audio[0];
analyze_render_audio_called_ = true;
}
void AnalyzeCaptureAudio(rtc::ArrayView<const float> capture_audio) override {
}
void Initialize(int capture_sample_rate_hz,
int num_capture_channels,
int render_sample_rate_hz,
int num_render_channels) override {}
void AnalyzeCaptureAudio(
rtc::ArrayView<const float> /* capture_audio */) override {}
void Initialize(int /* capture_sample_rate_hz */,
int /* num_capture_channels */,
int /* render_sample_rate_hz */,
int /* num_render_channels */) override {}
EchoDetector::Metrics GetMetrics() const override { return {}; }
// Returns true if AnalyzeRenderAudio() has been called at least once.
bool analyze_render_audio_called() const {
@ -116,7 +116,7 @@ class TestRenderPreProcessor : public CustomProcessing {
public:
TestRenderPreProcessor() = default;
~TestRenderPreProcessor() = default;
void Initialize(int sample_rate_hz, int num_channels) override {}
void Initialize(int /* sample_rate_hz */, int /* num_channels */) override {}
void Process(AudioBuffer* audio) override {
for (size_t k = 0; k < audio->num_channels(); ++k) {
rtc::ArrayView<float> channel_view(audio->channels()[k],
@ -126,7 +126,8 @@ class TestRenderPreProcessor : public CustomProcessing {
}
}
std::string ToString() const override { return "TestRenderPreProcessor"; }
void SetRuntimeSetting(AudioProcessing::RuntimeSetting setting) override {}
void SetRuntimeSetting(
AudioProcessing::RuntimeSetting /* setting */) override {}
// Modifies a sample. This member is used in Process() to modify a frame and
// it is publicly visible to enable tests.
static constexpr float ProcessSample(float x) { return 2.f * x; }

View File

@ -592,8 +592,8 @@ bool ApmTest::ReadFrame(FILE* file, Int16FrameData* frame) {
// If the end of the file has been reached, rewind it and attempt to read the
// frame again.
void ApmTest::ReadFrameWithRewind(FILE* file,
Int16FrameData* frame,
void ApmTest::ReadFrameWithRewind(FILE* /* file */,
Int16FrameData* /* frame */,
ChannelBuffer<float>* cb) {
if (!ReadFrame(near_file_, &frame_, cb)) {
rewind(near_file_);
@ -2619,7 +2619,7 @@ TEST(ApmConfiguration, PreProcessingReceivesRuntimeSettings) {
class MyEchoControlFactory : public EchoControlFactory {
public:
std::unique_ptr<EchoControl> Create(int sample_rate_hz) {
std::unique_ptr<EchoControl> Create(int /* sample_rate_hz */) {
auto ec = new test::MockEchoControl();
EXPECT_CALL(*ec, AnalyzeRender(::testing::_)).Times(1);
EXPECT_CALL(*ec, AnalyzeCapture(::testing::_)).Times(2);
@ -2629,8 +2629,8 @@ class MyEchoControlFactory : public EchoControlFactory {
}
std::unique_ptr<EchoControl> Create(int sample_rate_hz,
int num_render_channels,
int num_capture_channels) {
int /* num_render_channels */,
int /* num_capture_channels */) {
return Create(sample_rate_hz);
}
};

View File

@ -54,7 +54,7 @@ std::string FormFileName(absl::string_view output_dir,
ApmDataDumper::ApmDataDumper(int instance_index)
: instance_index_(instance_index) {}
#else
ApmDataDumper::ApmDataDumper(int instance_index) {}
ApmDataDumper::ApmDataDumper(int /* instance_index */) {}
#endif
ApmDataDumper::~ApmDataDumper() = default;

View File

@ -61,7 +61,7 @@ class ApmDataDumper {
~ApmDataDumper();
// Activates or deactivate the dumping functionality.
static void SetActivated(bool activated) {
static void SetActivated([[maybe_unused]] bool activated) {
#if WEBRTC_APM_DEBUG_DUMP == 1
recording_activated_ = activated;
#endif
@ -82,14 +82,15 @@ class ApmDataDumper {
// Specifies what dump set to use. All dump commands with a different dump set
// than the one specified will be discarded. If not specificed, all dump sets
// will be used.
static void SetDumpSetToUse(int dump_set_to_use) {
static void SetDumpSetToUse([[maybe_unused]] int dump_set_to_use) {
#if WEBRTC_APM_DEBUG_DUMP == 1
dump_set_to_use_ = dump_set_to_use;
#endif
}
// Set an optional output directory.
static void SetOutputDirectory(absl::string_view output_dir) {
static void SetOutputDirectory(
[[maybe_unused]] absl::string_view output_dir) {
#if WEBRTC_APM_DEBUG_DUMP == 1
RTC_CHECK_LT(output_dir.size(), kOutputDirMaxLength);
rtc::strcpyn(output_dir_, kOutputDirMaxLength, output_dir);
@ -106,9 +107,9 @@ class ApmDataDumper {
// Methods for performing dumping of data of various types into
// various formats.
void DumpRaw(absl::string_view name,
double v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] double v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -120,10 +121,10 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
size_t v_length,
const double* v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] size_t v_length,
[[maybe_unused]] const double* v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -135,9 +136,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
rtc::ArrayView<const double> v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] rtc::ArrayView<const double> v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -148,9 +149,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
float v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] float v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -162,10 +163,10 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
size_t v_length,
const float* v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] size_t v_length,
[[maybe_unused]] const float* v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -177,9 +178,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
rtc::ArrayView<const float> v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] rtc::ArrayView<const float> v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -190,7 +191,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name, bool v, int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] bool v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -201,10 +204,10 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
size_t v_length,
const bool* v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] size_t v_length,
[[maybe_unused]] const bool* v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -219,9 +222,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
rtc::ArrayView<const bool> v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] rtc::ArrayView<const bool> v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -232,9 +235,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
int16_t v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] int16_t v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -246,10 +249,10 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
size_t v_length,
const int16_t* v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] size_t v_length,
[[maybe_unused]] const int16_t* v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -261,9 +264,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
rtc::ArrayView<const int16_t> v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] rtc::ArrayView<const int16_t> v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -274,9 +277,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
int32_t v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] int32_t v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -288,10 +291,10 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
size_t v_length,
const int32_t* v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] size_t v_length,
[[maybe_unused]] const int32_t* v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -303,9 +306,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
size_t v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] size_t v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -317,10 +320,10 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
size_t v_length,
const size_t* v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] size_t v_length,
[[maybe_unused]] const size_t* v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -332,9 +335,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
rtc::ArrayView<const int32_t> v,
int dump_set = kDefaultDumpSet) {
void DumpRaw([[maybe_unused]] absl::string_view name,
[[maybe_unused]] rtc::ArrayView<const int32_t> v,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -345,9 +348,9 @@ class ApmDataDumper {
#endif
}
void DumpRaw(absl::string_view name,
rtc::ArrayView<const size_t> v,
int dump_set = kDefaultDumpSet) {
void DumpRaw(absl::string_view /* name */,
rtc::ArrayView<const size_t> /* v */,
int /* dump_set */ = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -356,12 +359,12 @@ class ApmDataDumper {
#endif
}
void DumpWav(absl::string_view name,
size_t v_length,
const float* v,
int sample_rate_hz,
int num_channels,
int dump_set = kDefaultDumpSet) {
void DumpWav([[maybe_unused]] absl::string_view name,
[[maybe_unused]] size_t v_length,
[[maybe_unused]] const float* v,
[[maybe_unused]] int sample_rate_hz,
[[maybe_unused]] int num_channels,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;
@ -374,11 +377,11 @@ class ApmDataDumper {
#endif
}
void DumpWav(absl::string_view name,
rtc::ArrayView<const float> v,
int sample_rate_hz,
int num_channels,
int dump_set = kDefaultDumpSet) {
void DumpWav([[maybe_unused]] absl::string_view name,
[[maybe_unused]] rtc::ArrayView<const float> v,
[[maybe_unused]] int sample_rate_hz,
[[maybe_unused]] int num_channels,
[[maybe_unused]] int dump_set = kDefaultDumpSet) {
#if WEBRTC_APM_DEBUG_DUMP == 1
if (dump_set_to_use_ && *dump_set_to_use_ != dump_set)
return;

View File

@ -48,7 +48,7 @@ void PopulateInputFrameWithIdenticalChannels(size_t num_channels,
void VerifyIdenticalChannels(size_t num_channels,
size_t num_bands,
size_t frame_index,
size_t /* frame_index */,
const AudioBuffer& audio) {
EXPECT_GT(num_channels, 1u);
for (size_t ch = 1; ch < num_channels; ++ch) {

View File

@ -27,7 +27,7 @@ constexpr size_t kTwoBandFilterSamplesPerFrame = 320;
SplittingFilter::SplittingFilter(size_t num_channels,
size_t num_bands,
size_t num_frames)
size_t /* num_frames */)
: num_bands_(num_bands),
two_bands_states_(num_bands_ == 2 ? num_channels : 0),
three_band_filter_banks_(num_bands_ == 3 ? num_channels : 0) {

View File

@ -57,8 +57,8 @@ class FakeRecordingDeviceIdentity final : public FakeRecordingDeviceWorker {
explicit FakeRecordingDeviceIdentity(const int initial_mic_level)
: FakeRecordingDeviceWorker(initial_mic_level) {}
~FakeRecordingDeviceIdentity() override = default;
void ModifyBufferInt16(rtc::ArrayView<int16_t> buffer) override {}
void ModifyBufferFloat(ChannelBuffer<float>* buffer) override {}
void ModifyBufferInt16(rtc::ArrayView<int16_t> /* buffer */) override {}
void ModifyBufferFloat(ChannelBuffer<float>* /* buffer */) override {}
};
// Linear fake recording device. The gain curve is a linear function mapping the

View File

@ -35,7 +35,7 @@ AcknowledgedBitrateEstimator::AcknowledgedBitrateEstimator(
AcknowledgedBitrateEstimator::~AcknowledgedBitrateEstimator() {}
AcknowledgedBitrateEstimator::AcknowledgedBitrateEstimator(
const FieldTrialsView* key_value_config,
const FieldTrialsView* /* key_value_config */,
std::unique_ptr<BitrateEstimator> bitrate_estimator)
: in_alr_(false), bitrate_estimator_(std::move(bitrate_estimator)) {}

View File

@ -212,9 +212,9 @@ DataRate DelayBasedBwe::TriggerOveruse(Timestamp at_time,
DelayBasedBwe::Result DelayBasedBwe::MaybeUpdateEstimate(
std::optional<DataRate> acked_bitrate,
std::optional<DataRate> probe_bitrate,
std::optional<NetworkStateEstimate> state_estimate,
std::optional<NetworkStateEstimate> /* state_estimate */,
bool recovered_from_overuse,
bool in_alr,
bool /* in_alr */,
Timestamp at_time) {
Result result;

View File

@ -232,7 +232,7 @@ void DelayBasedBweTest::IncomingFeedback(Timestamp receive_time,
// Returns true if an over-use was seen, false otherwise.
// The StreamGenerator::updated() should be used to check for any changes in
// target bitrate after the call to this function.
bool DelayBasedBweTest::GenerateAndProcessFrame(uint32_t ssrc,
bool DelayBasedBweTest::GenerateAndProcessFrame(uint32_t /* ssrc */,
uint32_t bitrate_bps) {
stream_generator_->SetBitrateBps(bitrate_bps);
std::vector<PacketResult> packets;
@ -419,7 +419,7 @@ void DelayBasedBweTest::RateIncreaseRtpTimestampsTestHelper(
void DelayBasedBweTest::CapacityDropTestHelper(
int number_of_streams,
bool wrap_time_stamp,
bool /* wrap_time_stamp */,
uint32_t expected_bitrate_drop_delta,
int64_t receiver_clock_offset_change_ms) {
const int kFramerate = 30;

View File

@ -300,7 +300,7 @@ NetworkControlUpdate GoogCcNetworkController::OnSentPacket(
}
NetworkControlUpdate GoogCcNetworkController::OnReceivedPacket(
ReceivedPacket received_packet) {
ReceivedPacket /* received_packet */) {
return NetworkControlUpdate();
}

View File

@ -372,8 +372,8 @@ void SendSideBandwidthEstimation::SetAcknowledgedRate(
void SendSideBandwidthEstimation::UpdateLossBasedEstimator(
const TransportPacketsFeedback& report,
BandwidthUsage delay_detector_state,
std::optional<DataRate> probe_bitrate,
BandwidthUsage /* delay_detector_state */,
std::optional<DataRate> /* probe_bitrate */,
bool in_alr) {
if (LossBasedBandwidthEstimatorV1Enabled()) {
loss_based_bandwidth_estimator_v1_.UpdateLossStatistics(

View File

@ -199,9 +199,9 @@ TrendlineEstimator::~TrendlineEstimator() {}
void TrendlineEstimator::UpdateTrendline(double recv_delta_ms,
double send_delta_ms,
int64_t send_time_ms,
int64_t /* send_time_ms */,
int64_t arrival_time_ms,
size_t packet_size) {
size_t /* packet_size */) {
const double delta_ms = recv_delta_ms - send_delta_ms;
++num_of_deltas_;
num_of_deltas_ = std::min(num_of_deltas_, kDeltaCounterMax);

View File

@ -339,12 +339,12 @@ void PccNetworkController::UpdateSendingRateAndMode() {
}
NetworkControlUpdate PccNetworkController::OnNetworkAvailability(
NetworkAvailability msg) {
NetworkAvailability /* msg */) {
return NetworkControlUpdate();
}
NetworkControlUpdate PccNetworkController::OnNetworkRouteChange(
NetworkRouteChange msg) {
NetworkRouteChange /* msg */) {
return NetworkControlUpdate();
}
@ -354,7 +354,7 @@ NetworkControlUpdate PccNetworkController::OnProcessInterval(
}
NetworkControlUpdate PccNetworkController::OnTargetRateConstraints(
TargetRateConstraints msg) {
TargetRateConstraints /* msg */) {
return NetworkControlUpdate();
}
@ -373,17 +373,18 @@ NetworkControlUpdate PccNetworkController::OnTransportLossReport(
return NetworkControlUpdate();
}
NetworkControlUpdate PccNetworkController::OnStreamsConfig(StreamsConfig msg) {
NetworkControlUpdate PccNetworkController::OnStreamsConfig(
StreamsConfig /* msg */) {
return NetworkControlUpdate();
}
NetworkControlUpdate PccNetworkController::OnReceivedPacket(
ReceivedPacket msg) {
ReceivedPacket /* msg */) {
return NetworkControlUpdate();
}
NetworkControlUpdate PccNetworkController::OnNetworkStateEstimate(
NetworkStateEstimate msg) {
NetworkStateEstimate /* msg */) {
return NetworkControlUpdate();
}

View File

@ -121,7 +121,9 @@ class FakeScreenCapturer : public DesktopCapturer {
next_frame_ = std::move(next_frame);
}
bool IsOccluded(const DesktopVector& pos) override { return is_occluded_; }
bool IsOccluded(const DesktopVector& /* pos */) override {
return is_occluded_;
}
void set_is_occluded(bool value) { is_occluded_ = value; }
@ -147,7 +149,9 @@ class FakeMouseMonitor : public MouseCursorMonitor {
hotspot_ = hotspot;
}
void Init(Callback* callback, Mode mode) override { callback_ = callback; }
void Init(Callback* callback, Mode /* mode */) override {
callback_ = callback;
}
void Capture() override {
if (changed_) {
@ -209,7 +213,7 @@ class DesktopAndCursorComposerTest : public ::testing::Test,
}
// DesktopCapturer::Callback interface
void OnCaptureResult(DesktopCapturer::Result result,
void OnCaptureResult(DesktopCapturer::Result /* result */,
std::unique_ptr<DesktopFrame> frame) override {
frame_ = std::move(frame);
}

View File

@ -45,15 +45,15 @@ DesktopCapturer::GetDelegatedSourceListController() {
}
void DesktopCapturer::SetSharedMemoryFactory(
std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {}
std::unique_ptr<SharedMemoryFactory> /* shared_memory_factory */) {}
void DesktopCapturer::SetExcludedWindow(WindowId window) {}
void DesktopCapturer::SetExcludedWindow(WindowId /* window */) {}
bool DesktopCapturer::GetSourceList(SourceList* sources) {
bool DesktopCapturer::GetSourceList(SourceList* /* sources */) {
return true;
}
bool DesktopCapturer::SelectSource(SourceId id) {
bool DesktopCapturer::SelectSource(SourceId /* id */) {
return false;
}
@ -61,7 +61,7 @@ bool DesktopCapturer::FocusOnSelectedSource() {
return false;
}
bool DesktopCapturer::IsOccluded(const DesktopVector& pos) {
bool DesktopCapturer::IsOccluded(const DesktopVector& /* pos */) {
return false;
}
@ -109,7 +109,7 @@ std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateScreenCapturer(
// static
std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateGenericCapturer(
const DesktopCaptureOptions& options) {
[[maybe_unused]] const DesktopCaptureOptions& options) {
std::unique_ptr<DesktopCapturer> capturer;
#if defined(WEBRTC_USE_PIPEWIRE)
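This hunk is the one place in the change that marks the name [[maybe_unused]] instead of commenting it out, presumably because `options` is still referenced in the WEBRTC_USE_PIPEWIRE branch that follows, so the identifier has to stay visible to the compiler. A minimal standalone sketch of the two ways to silence -Wunused-parameter; the function and macro names below are invented for illustration, not taken from the WebRTC tree:

#include <cstdint>

// A parameter that is never read in any configuration: comment out the name.
void OnProbeTimeout(int64_t /* now_ms */) {}

// A parameter that is read only under some build flags: keep the name and
// mark it [[maybe_unused]] so the other configurations stay warning-free.
void CreateCapturer([[maybe_unused]] bool allow_pipewire) {
#if defined(USE_PIPEWIRE)
  if (allow_pipewire) {
    // Platform-specific construction would go here.
  }
#endif
}

int main() {
  OnProbeTimeout(0);
  CreateCapturer(false);
  return 0;
}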

View File

@ -109,7 +109,7 @@ class RTC_EXPORT DesktopCapturer {
// supported by all capturers. This will only affect the frequency at which
// new frames are available, not the frequency at which you are allowed to
// capture the frames.
virtual void SetMaxFrameRate(uint32_t max_frame_rate) {}
virtual void SetMaxFrameRate(uint32_t /* max_frame_rate */) {}
// Returns a valid pointer if the capturer requires the user to make a
// selection from a source list provided by the capturer.

View File

@ -455,7 +455,7 @@ bool ScreenCapturerMac::RegisterRefreshAndMoveHandlers() {
DesktopVector display_origin = config.pixel_bounds.top_left();
CGDisplayStreamFrameAvailableHandler handler = ^(CGDisplayStreamFrameStatus status,
uint64_t display_time,
uint64_t /* display_time */,
IOSurfaceRef frame_surface,
CGDisplayStreamUpdateRef updateRef) {
RTC_DCHECK(thread_checker_.IsCurrent());

View File

@ -138,7 +138,7 @@ void ScreenCapturerSck::Start(DesktopCapturer::Callback* callback) {
StartOrReconfigureCapturer();
}
void ScreenCapturerSck::SetMaxFrameRate(uint32_t max_frame_rate) {
void ScreenCapturerSck::SetMaxFrameRate(uint32_t /* max_frame_rate */) {
// TODO: crbug.com/327458809 - Implement this.
}
@ -345,7 +345,7 @@ void ScreenCapturerSck::StartOrReconfigureCapturer() {
// inside the block is equivalent to `this->helper_` and would crash (UAF) if `this` is
// deleted before the block is executed.
SckHelper* local_helper = helper_;
auto handler = ^(SCShareableContent* content, NSError* error) {
auto handler = ^(SCShareableContent* content, NSError* /* error */) {
[local_helper onShareableContentCreated:content];
};
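The comment preserved in the hunk above explains the local copy: naming `helper_` directly inside the block would implicitly capture `this`, so the block could dereference a destroyed capturer, while copying the pointer into `local_helper` first decouples the block's lifetime from the object's. A rough C++ lambda analogue of that pattern, with hypothetical names and a shared_ptr standing in for the retained Objective-C object:

#include <functional>
#include <memory>

struct Helper {
  void OnShareableContentCreated() {}
};

class Capturer {
 public:
  std::function<void()> MakeHandler() {
    // Referring to helper_ inside the lambda would capture `this`, and the
    // handler would use a dangling pointer if the Capturer dies first.
    // Copying into a local keeps the handler self-contained.
    std::shared_ptr<Helper> local_helper = helper_;
    return [local_helper] { local_helper->OnShareableContentCreated(); };
  }

 private:
  std::shared_ptr<Helper> helper_ = std::make_shared<Helper>();
};

int main() {
  std::function<void()> handler;
  {
    Capturer capturer;
    handler = capturer.MakeHandler();
  }  // The Capturer is gone, but the handler still owns a Helper reference.
  handler();
  return 0;
}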

View File

@ -53,8 +53,8 @@ class MouseCursorMonitor {
// Called in response to Capture(). `position` indicates cursor position
// relative to the `window` specified in the constructor.
// Deprecated: use the following overload instead.
virtual void OnMouseCursorPosition(CursorState state,
const DesktopVector& position) {}
virtual void OnMouseCursorPosition(CursorState /* state */,
const DesktopVector& /* position */) {}
// Called in response to Capture(). `position` indicates cursor absolute
// position on the system in fullscreen coordinate, i.e. the top-left
@ -64,7 +64,7 @@ class MouseCursorMonitor {
// TODO(zijiehe): Ensure all implementations return the absolute position.
// TODO(zijiehe): Currently this overload works correctly only when capturing
// mouse cursor against fullscreen.
virtual void OnMouseCursorPosition(const DesktopVector& position) {}
virtual void OnMouseCursorPosition(const DesktopVector& /* position */) {}
protected:
virtual ~Callback() {}

View File

@ -33,7 +33,7 @@ class WindowCapturerTest : public ::testing::Test,
void TearDown() override {}
// DesktopCapturer::Callback interface
void OnCaptureResult(DesktopCapturer::Result result,
void OnCaptureResult(DesktopCapturer::Result /* result */,
std::unique_ptr<DesktopFrame> frame) override {
frame_ = std::move(frame);
}

View File

@ -136,7 +136,7 @@ void BitrateProber::CreateProbeCluster(
<< ")";
}
Timestamp BitrateProber::NextProbeTime(Timestamp now) const {
Timestamp BitrateProber::NextProbeTime(Timestamp /* now */) const {
// Probing is not active or probing is already complete.
if (probing_state_ != ProbingState::kActive || clusters_.empty()) {
return Timestamp::PlusInfinity();

View File

@ -57,9 +57,10 @@ class PacingController {
// TODO(bugs.webrtc.org/11340): Make pure virtual once downstream projects
// have been updated.
virtual void OnAbortedRetransmissions(
uint32_t ssrc,
rtc::ArrayView<const uint16_t> sequence_numbers) {}
virtual std::optional<uint32_t> GetRtxSsrcForMedia(uint32_t ssrc) const {
uint32_t /* ssrc */,
rtc::ArrayView<const uint16_t> /* sequence_numbers */) {}
virtual std::optional<uint32_t> GetRtxSsrcForMedia(
uint32_t /* ssrc */) const {
return std::nullopt;
}
};
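The TODO kept in this hunk shows how such unused parameters arise in the first place: new interface methods are introduced with no-op default bodies so downstream implementers keep compiling, and a default body by definition ignores its arguments. A hedged sketch of that evolution pattern, using invented names rather than the real PacingController API:

#include <cstdint>
#include <optional>

class PacketSender {
 public:
  virtual ~PacketSender() = default;

  // Long-standing method: implementers must provide it.
  virtual void SendPacket(uint32_t ssrc) = 0;

  // Newly added method: shipped with a no-op default so existing subclasses
  // keep compiling; the ignored parameter's name is commented out. It can be
  // made pure virtual once every implementer overrides it.
  virtual std::optional<uint32_t> GetRtxSsrc(uint32_t /* ssrc */) const {
    return std::nullopt;
  }
};

// An older subclass that predates GetRtxSsrc still compiles unchanged.
class LegacySender : public PacketSender {
 public:
  void SendPacket(uint32_t /* ssrc */) override {}
};

int main() {
  LegacySender sender;
  sender.SendPacket(1);
  return sender.GetRtxSsrc(1).has_value() ? 1 : 0;
}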

View File

@ -106,7 +106,7 @@ class MediaStream {
class MockPacingControllerCallback : public PacingController::PacketSender {
public:
void SendPacket(std::unique_ptr<RtpPacketToSend> packet,
const PacedPacketInfo& cluster_info) override {
const PacedPacketInfo& /* cluster_info */) override {
SendPacket(packet->Ssrc(), packet->SequenceNumber(),
packet->capture_time().ms(),
packet->packet_type() == RtpPacketMediaType::kRetransmission,
@ -184,7 +184,7 @@ class PacingControllerPadding : public PacingController::PacketSender {
PacingControllerPadding() : padding_sent_(0), total_bytes_sent_(0) {}
void SendPacket(std::unique_ptr<RtpPacketToSend> packet,
const PacedPacketInfo& pacing_info) override {
const PacedPacketInfo& /* pacing_info */) override {
total_bytes_sent_ += packet->payload_size();
}
@ -1565,7 +1565,7 @@ TEST_F(PacingControllerTest, ProbeClusterId) {
});
bool non_probe_packet_seen = false;
EXPECT_CALL(callback, SendPacket)
.WillOnce([&](std::unique_ptr<RtpPacketToSend> packet,
.WillOnce([&](std::unique_ptr<RtpPacketToSend> /* packet */,
const PacedPacketInfo& cluster_info) {
EXPECT_EQ(cluster_info.probe_cluster_id, kNotAProbe);
non_probe_packet_seen = true;
@ -1647,7 +1647,7 @@ TEST_F(PacingControllerTest, SmallFirstProbePacket) {
// Expect small padding packet to be requested.
EXPECT_CALL(callback, GeneratePadding(DataSize::Bytes(1)))
.WillOnce([&](DataSize padding_size) {
.WillOnce([&](DataSize /* padding_size */) {
std::vector<std::unique_ptr<RtpPacketToSend>> padding_packets;
padding_packets.emplace_back(
BuildPacket(RtpPacketMediaType::kPadding, kAudioSsrc, 1,
@ -1660,7 +1660,7 @@ TEST_F(PacingControllerTest, SmallFirstProbePacket) {
EXPECT_CALL(callback, SendPacket)
.Times(AnyNumber())
.WillRepeatedly([&](std::unique_ptr<RtpPacketToSend> packet,
const PacedPacketInfo& cluster_info) {
const PacedPacketInfo& /* cluster_info */) {
if (packets_sent == 0) {
EXPECT_EQ(packet->packet_type(), RtpPacketMediaType::kPadding);
} else {
@ -2205,7 +2205,7 @@ TEST_F(PacingControllerTest,
size_t sent_size_in_burst = 0;
EXPECT_CALL(callback, SendPacket)
.WillRepeatedly([&](std::unique_ptr<RtpPacketToSend> packet,
const PacedPacketInfo& cluster_info) {
const PacedPacketInfo& /* cluster_info */) {
sent_size_in_burst += packet->size();
});
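Most of the edits in this test file land inside GoogleMock action lambdas: the lambda passed to WillOnce or WillRepeatedly still has to accept every argument of the mocked call, so arguments the test does not inspect get their names commented out rather than dropped. A small self-contained GoogleTest/GoogleMock sketch of the same idea; the Sender type below is made up:

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <cstdint>

class Sender {
 public:
  virtual ~Sender() = default;
  virtual void SendPacket(uint32_t ssrc, uint16_t sequence_number) = 0;
};

class MockSender : public Sender {
 public:
  MOCK_METHOD(void, SendPacket, (uint32_t ssrc, uint16_t sequence_number),
              (override));
};

TEST(UnusedLambdaParameters, NamesCanBeCommentedOut) {
  MockSender sender;
  int calls = 0;
  // The action must match the mocked signature, but the names of arguments
  // the test ignores are commented out to keep -Wunused-parameter quiet.
  EXPECT_CALL(sender, SendPacket)
      .WillOnce([&](uint32_t /* ssrc */, uint16_t /* sequence_number */) {
        ++calls;
      });
  sender.SendPacket(123, 7);
  EXPECT_EQ(calls, 1);
}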

View File

@ -118,7 +118,7 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesRtx) {
const size_t kExpectedPaddingPackets = 1;
EXPECT_CALL(rtp_1, GeneratePadding(_)).Times(0);
EXPECT_CALL(rtp_2, GeneratePadding(kPaddingSize))
.WillOnce([&](size_t padding_size) {
.WillOnce([&](size_t /* padding_size */) {
return std::vector<std::unique_ptr<RtpPacketToSend>>(
kExpectedPaddingPackets);
});
@ -163,7 +163,7 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesVideo) {
const size_t kPaddingSize = 123;
const size_t kExpectedPaddingPackets = 1;
auto generate_padding = [&](size_t padding_size) {
auto generate_padding = [&](size_t /* padding_size */) {
return std::vector<std::unique_ptr<RtpPacketToSend>>(
kExpectedPaddingPackets);
};
@ -267,7 +267,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) {
// and supports rtx.
EXPECT_CALL(rtp_2, GeneratePadding(kPaddingBytes))
.Times(1)
.WillOnce([&](size_t target_size_bytes) {
.WillOnce([&](size_t /* target_size_bytes */) {
std::vector<std::unique_ptr<RtpPacketToSend>> packets;
packets.push_back(BuildRtpPacket(kSsrc2));
return packets;
@ -279,7 +279,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) {
EXPECT_CALL(rtp_1, GeneratePadding(kPaddingBytes))
.Times(1)
.WillOnce([&](size_t target_size_bytes) {
.WillOnce([&](size_t /* target_size_bytes */) {
std::vector<std::unique_ptr<RtpPacketToSend>> packets;
packets.push_back(BuildRtpPacket(kSsrc1));
return packets;