Comment unused variables in implemented functions 11

Bug: webrtc:370878648
Change-Id: Ic31d7744cc8516e4c014bc044fbe2dba9e4d835b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/366525
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Dor Hen <dorhen@meta.com>
Cr-Commit-Position: refs/heads/main@{#43328}
Author: Dor Hen, 2024-10-29 14:48:14 +02:00 (committed by WebRTC LUCI CQ)
parent f5e0f03844
commit a154b73097
22 changed files with 96 additions and 86 deletions
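
The change below applies one mechanical pattern across all 22 files: in functions whose bodies are stubs or otherwise do not use a parameter, the parameter name is commented out while its type is kept, so that builds with -Wunused-parameter (enabled by -Wextra in GCC/Clang) stay warning-free and the name still documents the signature. A minimal sketch of the idiom, using hypothetical names (Observer, OnEvent, and severity are illustrative only, not taken from this change):

// observer_sketch.cc - hypothetical example of the pattern in this commit.
// Compile with: g++ -Wall -Wextra -c observer_sketch.cc
class Observer {
 public:
  virtual ~Observer() = default;

  // Before: virtual void OnEvent(int severity) {}
  //   -> warns: unused parameter 'severity' [-Wunused-parameter]
  // After: the type stays, the name survives only as a comment,
  //   so the default no-op body compiles cleanly.
  virtual void OnEvent(int /* severity */) {}
};

Commenting out the name (rather than deleting it or annotating it with [[maybe_unused]]) keeps the signature self-documenting without changing behavior, which is why the hunks below only rewrite function signatures.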


@@ -104,13 +104,13 @@ PeerConnectionFactoryInterface::CreatePeerConnection(
 RTCErrorOr<rtc::scoped_refptr<PeerConnectionInterface>>
 PeerConnectionFactoryInterface::CreatePeerConnectionOrError(
-const PeerConnectionInterface::RTCConfiguration& configuration,
-PeerConnectionDependencies dependencies) {
+const PeerConnectionInterface::RTCConfiguration& /* configuration */,
+PeerConnectionDependencies /* dependencies */) {
 return RTCError(RTCErrorType::INTERNAL_ERROR);
 }
 RtpCapabilities PeerConnectionFactoryInterface::GetRtpSenderCapabilities(
-cricket::MediaType kind) const {
+cricket::MediaType /* kind */) const {
 return {};
 }


@@ -1352,7 +1352,7 @@ class PeerConnectionObserver {
 // RTCSessionDescription" algorithm:
 // https://w3c.github.io/webrtc-pc/#set-description
 virtual void OnTrack(
-rtc::scoped_refptr<RtpTransceiverInterface> transceiver) {}
+rtc::scoped_refptr<RtpTransceiverInterface> /* transceiver */) {}
 // Called when signaling indicates that media will no longer be received on a
 // track.
@@ -1363,7 +1363,7 @@ class PeerConnectionObserver {
 // https://w3c.github.io/webrtc-pc/#process-remote-track-removal
 // TODO(hbos,deadbeef): Make pure virtual when all subclasses implement it.
 virtual void OnRemoveTrack(
-rtc::scoped_refptr<RtpReceiverInterface> receiver) {}
+rtc::scoped_refptr<RtpReceiverInterface> /* receiver */) {}
 // Called when an interesting usage is detected by WebRTC.
 // An appropriate action is to add information about the context of the
@@ -1371,7 +1371,7 @@ class PeerConnectionObserver {
 // log function.
 // The heuristics for defining what constitutes "interesting" are
 // implementation-defined.
-virtual void OnInterestingUsage(int usage_pattern) {}
+virtual void OnInterestingUsage(int /* usage_pattern */) {}
 };
 // PeerConnectionDependencies holds all of PeerConnections dependencies.
@@ -1615,7 +1615,7 @@ class RTC_EXPORT PeerConnectionFactoryInterface
 // StopAecDump function is called.
 // TODO(webrtc:6463): Delete default implementation when downstream mocks
 // classes are updated.
-virtual bool StartAecDump(FILE* file, int64_t max_size_bytes) {
+virtual bool StartAecDump(FILE* /* file */, int64_t /* max_size_bytes */) {
 return false;
 }


@@ -298,7 +298,7 @@ TEST_F(ChannelSendTest, AudioLevelsAttachedToInsertedTransformedFrame) {
 std::optional<uint8_t> sent_audio_level;
 auto send_rtp = [&](rtc::ArrayView<const uint8_t> data,
-const PacketOptions& options) {
+const PacketOptions& /* options */) {
 RtpPacketReceived packet(&extension_manager);
 packet.Parse(data);
 RTPHeader header;


@@ -227,8 +227,9 @@ ConvertIceCandidatePairEventType(IceCandidatePairEventType type) {
 } // namespace
-std::string RtcEventLogEncoderLegacy::EncodeLogStart(int64_t timestamp_us,
-int64_t utc_time_us) {
+std::string RtcEventLogEncoderLegacy::EncodeLogStart(
+int64_t timestamp_us,
+int64_t /* utc_time_us */) {
 rtclog::Event rtclog_event;
 rtclog_event.set_timestamp_us(timestamp_us);
 rtclog_event.set_type(rtclog::Event::LOG_START);


@@ -248,8 +248,8 @@ class EventVerifier {
 const LoggedGenericAckReceived& logged_event) const;
 template <typename EventType, typename ParsedType>
-void VerifyLoggedRtpPacket(const EventType& original_event,
-const ParsedType& logged_event) {
+void VerifyLoggedRtpPacket(const EventType& /* original_event */,
+const ParsedType& /* logged_event */) {
 static_assert(sizeof(ParsedType) == 0,
 "You have to use one of the two defined template "
 "specializations of VerifyLoggedRtpPacket");


@@ -97,9 +97,10 @@ class RtpReceiveChannelHelper : public Base, public MediaChannelUtil {
 std::optional<uint32_t> GetUnsignaledSsrc() const override {
 return std::nullopt;
 }
-void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override {}
+void ChooseReceiverReportSsrc(
+const std::set<uint32_t>& /* choices */) override {}
-virtual bool SetLocalSsrc(const StreamParams& sp) { return true; }
+virtual bool SetLocalSsrc(const StreamParams& /* sp */) { return true; }
 void OnDemuxerCriteriaUpdatePending() override {}
 void OnDemuxerCriteriaUpdateComplete() override {}
@@ -151,18 +152,19 @@ class RtpReceiveChannelHelper : public Base, public MediaChannelUtil {
 }
 void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet,
-int64_t packet_time_us) {
+int64_t /* packet_time_us */) {
 rtcp_packets_.push_back(std::string(packet->cdata<char>(), packet->size()));
 }
-void SetFrameDecryptor(uint32_t ssrc,
+void SetFrameDecryptor(uint32_t /* ssrc */,
 rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
-frame_decryptor) override {}
+/* frame_decryptor */) override {}
 void SetDepacketizerToDecoderFrameTransformer(
-uint32_t ssrc,
-rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
-override {}
+uint32_t /* ssrc */,
+rtc::scoped_refptr<
+webrtc::FrameTransformerInterface> /* frame_transformer */) override {
+}
 void SetInterface(MediaChannelNetworkInterface* iface) override {
 network_interface_ = iface;
@@ -363,18 +365,19 @@ class RtpSendChannelHelper : public Base, public MediaChannelUtil {
 }
 void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet,
-int64_t packet_time_us) {
+int64_t /* packet_time_us */) {
 rtcp_packets_.push_back(std::string(packet->cdata<char>(), packet->size()));
 }
 // Stuff that deals with encryptors, transformers and the like
-void SetFrameEncryptor(uint32_t ssrc,
+void SetFrameEncryptor(uint32_t /* ssrc */,
 rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
-frame_encryptor) override {}
+/* frame_encryptor */) override {}
 void SetEncoderToPacketizerFrameTransformer(
-uint32_t ssrc,
-rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
-override {}
+uint32_t /* ssrc */,
+rtc::scoped_refptr<
+webrtc::FrameTransformerInterface> /* frame_transformer */) override {
+}
 void SetInterface(MediaChannelNetworkInterface* iface) override {
 network_interface_ = iface;
@@ -407,9 +410,9 @@ class RtpSendChannelHelper : public Base, public MediaChannelUtil {
 void set_send_rtcp_parameters(const RtcpParameters& params) {
 send_rtcp_parameters_ = params;
 }
-void OnPacketSent(const rtc::SentPacket& sent_packet) override {}
+void OnPacketSent(const rtc::SentPacket& /* sent_packet */) override {}
 void OnReadyToSend(bool ready) override { ready_to_send_ = ready; }
-void OnNetworkRouteChanged(absl::string_view transport_name,
+void OnNetworkRouteChanged(absl::string_view /* transport_name */,
 const rtc::NetworkRoute& network_route) override {
 last_network_route_ = network_route;
 ++num_network_route_changes_;
@@ -496,9 +499,9 @@ class FakeVoiceMediaReceiveChannel
 std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
 std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
-void SetReceiveNackEnabled(bool enabled) override {}
-void SetRtcpMode(webrtc::RtcpMode mode) override {}
-void SetReceiveNonSenderRttEnabled(bool enabled) override {}
+void SetReceiveNackEnabled(bool /* enabled */) override {}
+void SetRtcpMode(webrtc::RtcpMode /* mode */) override {}
+void SetReceiveNonSenderRttEnabled(bool /* enabled */) override {}
 private:
 class VoiceChannelAudioSink : public AudioSource::Sink {
@@ -574,8 +577,8 @@ class FakeVoiceMediaSendChannel
 bool SenderNackEnabled() const override { return false; }
 bool SenderNonSenderRttEnabled() const override { return false; }
-void SetReceiveNackEnabled(bool enabled) {}
-void SetReceiveNonSenderRttEnabled(bool enabled) {}
+void SetReceiveNackEnabled(bool /* enabled */) {}
+void SetReceiveNonSenderRttEnabled(bool /* enabled */) {}
 bool SendCodecHasNack() const override { return false; }
 void SetSendCodecChangedCallback(
 absl::AnyInvocable<void()> callback) override {}


@@ -154,7 +154,9 @@ class FakeNetworkInterface : public MediaChannelNetworkInterface {
 return true;
 }
-virtual int SetOption(SocketType type, rtc::Socket::Option opt, int option) {
+virtual int SetOption(SocketType /* type */,
+rtc::Socket::Option opt,
+int option) {
 if (opt == rtc::Socket::OPT_SNDBUF) {
 sendbuf_size_ = option;
 } else if (opt == rtc::Socket::OPT_RCVBUF) {


@@ -39,7 +39,7 @@ std::vector<SdpVideoFormat> FakeVideoEncoderFactory::GetSupportedFormats()
 std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::Create(
 const Environment& env,
-const SdpVideoFormat& format) {
+const SdpVideoFormat& /* format */) {
 return std::make_unique<test::FakeEncoder>(env);
 }
@@ -57,8 +57,8 @@ std::vector<SdpVideoFormat> FakeVideoDecoderFactory::GetSupportedFormats()
 }
 std::unique_ptr<VideoDecoder> FakeVideoDecoderFactory::Create(
-const Environment& env,
-const SdpVideoFormat& format) {
+const Environment& /* env */,
+const SdpVideoFormat& /* format */) {
 return std::make_unique<test::FakeDecoder>();
 }


@@ -97,8 +97,8 @@ class FakeAudioSendStream final : public webrtc::AudioSendStream {
 webrtc::SetParametersCallback callback) override;
 void Start() override { sending_ = true; }
 void Stop() override { sending_ = false; }
-void SendAudioData(std::unique_ptr<webrtc::AudioFrame> audio_frame) override {
-}
+void SendAudioData(
+std::unique_ptr<webrtc::AudioFrame> /* audio_frame */) override {}
 bool SendTelephoneEvent(int payload_type,
 int payload_frequency,
 int event,
@@ -301,14 +301,16 @@ class FakeVideoReceiveStream final
 void UpdateRtxSsrc(uint32_t ssrc) { config_.rtp.rtx_ssrc = ssrc; }
 void SetFrameDecryptor(rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
-frame_decryptor) override {}
+/* frame_decryptor */) override {}
 void SetDepacketizerToDecoderFrameTransformer(
-rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
-override {}
+rtc::scoped_refptr<
+webrtc::FrameTransformerInterface> /* frame_transformer */) override {
+}
-RecordingState SetAndGetRecordingState(RecordingState state,
-bool generate_key_frame) override {
+RecordingState SetAndGetRecordingState(
+RecordingState /* state */,
+bool /* generate_key_frame */) override {
 return RecordingState();
 }
 void GenerateKeyFrame() override {}
@@ -400,14 +402,14 @@ class FakeFlexfecReceiveStream final : public webrtc::FlexfecReceiveStream {
 class FakePayloadTypeSuggester : public webrtc::PayloadTypeSuggester {
 public:
 webrtc::RTCErrorOr<webrtc::PayloadType> SuggestPayloadType(
-const std::string& mid,
+const std::string& /* mid */,
 cricket::Codec codec) override {
 // Ignores mid argument.
 return pt_picker_.SuggestMapping(codec, nullptr);
 }
-webrtc::RTCError AddLocalMapping(const std::string& mid,
-webrtc::PayloadType payload_type,
-const cricket::Codec& codec) override {
+webrtc::RTCError AddLocalMapping(const std::string& /* mid */,
+webrtc::PayloadType /* payload_type */,
+const cricket::Codec& /* codec */) override {
 return webrtc::RTCError::OK();
 }


@@ -58,7 +58,7 @@ FakeWebRtcVideoDecoder::~FakeWebRtcVideoDecoder() {
 }
 }
-bool FakeWebRtcVideoDecoder::Configure(const Settings& settings) {
+bool FakeWebRtcVideoDecoder::Configure(const Settings& /* settings */) {
 return true;
 }
@@ -99,7 +99,7 @@ FakeWebRtcVideoDecoderFactory::GetSupportedFormats() const {
 }
 std::unique_ptr<webrtc::VideoDecoder> FakeWebRtcVideoDecoderFactory::Create(
-const webrtc::Environment& env,
+const webrtc::Environment& /* env */,
 const webrtc::SdpVideoFormat& format) {
 if (format.IsCodecInList(supported_codec_formats_)) {
 num_created_decoders_++;
@@ -147,13 +147,13 @@ FakeWebRtcVideoEncoder::~FakeWebRtcVideoEncoder() {
 }
 void FakeWebRtcVideoEncoder::SetFecControllerOverride(
-webrtc::FecControllerOverride* fec_controller_override) {
+webrtc::FecControllerOverride* /* fec_controller_override */) {
 // Ignored.
 }
 int32_t FakeWebRtcVideoEncoder::InitEncode(
 const webrtc::VideoCodec* codecSettings,
-const VideoEncoder::Settings& settings) {
+const VideoEncoder::Settings& /* settings */) {
 webrtc::MutexLock lock(&mutex_);
 codec_settings_ = *codecSettings;
 init_encode_event_.Set();
@@ -161,8 +161,8 @@ int32_t FakeWebRtcVideoEncoder::InitEncode(
 }
 int32_t FakeWebRtcVideoEncoder::Encode(
-const webrtc::VideoFrame& inputImage,
-const std::vector<webrtc::VideoFrameType>* frame_types) {
+const webrtc::VideoFrame& /* inputImage */,
+const std::vector<webrtc::VideoFrameType>* /* frame_types */) {
 webrtc::MutexLock lock(&mutex_);
 ++num_frames_encoded_;
 init_encode_event_.Set();
@@ -170,7 +170,7 @@ int32_t FakeWebRtcVideoEncoder::Encode(
 }
 int32_t FakeWebRtcVideoEncoder::RegisterEncodeCompleteCallback(
-webrtc::EncodedImageCallback* callback) {
+webrtc::EncodedImageCallback* /* callback */) {
 return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -178,8 +178,8 @@ int32_t FakeWebRtcVideoEncoder::Release() {
 return WEBRTC_VIDEO_CODEC_OK;
 }
-void FakeWebRtcVideoEncoder::SetRates(const RateControlParameters& parameters) {
-}
+void FakeWebRtcVideoEncoder::SetRates(
+const RateControlParameters& /* parameters */) {}
 webrtc::VideoEncoder::EncoderInfo FakeWebRtcVideoEncoder::GetEncoderInfo()
 const {


@@ -707,7 +707,7 @@ EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
 &stream_codec_specific);
 }
-void SimulcastEncoderAdapter::OnDroppedFrame(size_t stream_idx) {
+void SimulcastEncoderAdapter::OnDroppedFrame(size_t /* stream_idx */) {
 // Not yet implemented.
 }


@@ -58,13 +58,13 @@ std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture(
 std::unique_ptr<VideoEncoderFactory> encoder_factory =
 std::make_unique<FunctionVideoEncoderFactory>(
 [internal_encoder_factory](const Environment& env,
-const SdpVideoFormat& format) {
+const SdpVideoFormat& /* format */) {
 return std::make_unique<SimulcastEncoderAdapter>(
 env, internal_encoder_factory, nullptr, SdpVideoFormat::VP8());
 });
 std::unique_ptr<VideoDecoderFactory> decoder_factory =
 std::make_unique<FunctionVideoDecoderFactory>(
-[](const Environment& env, const SdpVideoFormat& format) {
+[](const Environment& env, const SdpVideoFormat& /* format */) {
 return CreateVp8Decoder(env);
 });
 return CreateSimulcastTestFixture(std::move(encoder_factory),
@@ -223,7 +223,7 @@ class MockVideoEncoder : public VideoEncoder {
 (override));
 int32_t InitEncode(const VideoCodec* codecSettings,
-const VideoEncoder::Settings& settings) override {
+const VideoEncoder::Settings& /* settings */) override {
 codec_ = *codecSettings;
 if (codec_.numberOfSimulcastStreams > 1 && fallback_from_simulcast_) {
 return *fallback_from_simulcast_;
@@ -374,7 +374,7 @@ std::vector<SdpVideoFormat> MockVideoEncoderFactory::GetSupportedFormats()
 }
 std::unique_ptr<VideoEncoder> MockVideoEncoderFactory::Create(
-const Environment& env,
+const Environment& /* env */,
 const SdpVideoFormat& format) {
 if (create_video_encoder_return_nullptr_) {
 return nullptr;
@@ -480,8 +480,9 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
 SetUp();
 }
-Result OnEncodedImage(const EncodedImage& encoded_image,
-const CodecSpecificInfo* codec_specific_info) override {
+Result OnEncodedImage(
+const EncodedImage& encoded_image,
+const CodecSpecificInfo* /* codec_specific_info */) override {
 last_encoded_image_width_ = encoded_image._encodedWidth;
 last_encoded_image_height_ = encoded_image._encodedHeight;
 last_encoded_image_simulcast_index_ = encoded_image.SimulcastIndex();
@@ -1146,7 +1147,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, NativeHandleForwardingOnlyIfSupported) {
 // ...the lowest one gets a software buffer.
 EXPECT_CALL(*encoders[0], Encode)
 .WillOnce([&](const VideoFrame& frame,
-const std::vector<VideoFrameType>* frame_types) {
+const std::vector<VideoFrameType>* /* frame_types */) {
 EXPECT_EQ(frame.video_frame_buffer()->type(),
 VideoFrameBuffer::Type::kI420);
 return 0;


@@ -288,7 +288,7 @@ webrtc::AudioReceiveStreamInterface::Config BuildReceiveStreamConfig(
 bool use_nack,
 bool enable_non_sender_rtt,
 const std::vector<std::string>& stream_ids,
-const std::vector<webrtc::RtpExtension>& extensions,
+const std::vector<webrtc::RtpExtension>& /* extensions */,
 webrtc::Transport* rtcp_send_transport,
 const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
 const std::map<int, webrtc::SdpAudioFormat>& decoder_map,


@@ -685,7 +685,7 @@ void DcSctpTransport::OnTransportWritableState(
 }
 void DcSctpTransport::OnTransportReadPacket(
-rtc::PacketTransportInternal* transport,
+rtc::PacketTransportInternal* /* transport */,
 const rtc::ReceivedPacket& packet) {
 RTC_DCHECK_RUN_ON(network_thread_);
 if (packet.decryption_info() != rtc::ReceivedPacket::kDtlsDecrypted) {


@@ -119,12 +119,13 @@ std::unique_ptr<Packet> AcmSendTestOldApi::NextPacket() {
 }
 // This method receives the callback from ACM when a new packet is produced.
-int32_t AcmSendTestOldApi::SendData(AudioFrameType frame_type,
-uint8_t payload_type,
-uint32_t timestamp,
-const uint8_t* payload_data,
-size_t payload_len_bytes,
-int64_t absolute_capture_timestamp_ms) {
+int32_t AcmSendTestOldApi::SendData(
+AudioFrameType frame_type,
+uint8_t payload_type,
+uint32_t timestamp,
+const uint8_t* payload_data,
+size_t payload_len_bytes,
+int64_t /* absolute_capture_timestamp_ms */) {
 // Store the packet locally.
 frame_type_ = frame_type;
 payload_type_ = payload_type;


@@ -114,7 +114,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
 uint32_t timestamp,
 const uint8_t* payload_data,
 size_t payload_len_bytes,
-int64_t absolute_capture_timestamp_ms) override {
+int64_t /* absolute_capture_timestamp_ms */) override {
 MutexLock lock(&mutex_);
 ++num_calls_;
 last_frame_type_ = frame_type;
@@ -1054,14 +1054,14 @@ class AcmSetBitRateTest : public ::testing::Test {
 int channels,
 int payload_type,
 int frame_size_samples,
-int frame_size_rtp_timestamps) {
+int /* frame_size_rtp_timestamps */) {
 return send_test_->RegisterCodec(payload_name, sampling_freq_hz, channels,
 payload_type, frame_size_samples);
 }
 void RegisterExternalSendCodec(
 std::unique_ptr<AudioEncoder> external_speech_encoder,
-int payload_type) {
+int /* payload_type */) {
 send_test_->RegisterExternalCodec(std::move(external_speech_encoder));
 }


@@ -97,7 +97,7 @@ bool FrameLengthController::Config::FrameLengthChange::operator<(
 }
 bool FrameLengthController::FrameLengthIncreasingDecision(
-const AudioEncoderRuntimeConfig& config) {
+const AudioEncoderRuntimeConfig& /* config */) {
 // Increase frame length if
 // 1. `uplink_bandwidth_bps` is known to be smaller or equal than
 // `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the
@@ -153,7 +153,7 @@ bool FrameLengthController::FrameLengthIncreasingDecision(
 }
 bool FrameLengthController::FrameLengthDecreasingDecision(
-const AudioEncoderRuntimeConfig& config) {
+const AudioEncoderRuntimeConfig& /* config */) {
 // Decrease frame length if
 // 1. shorter frame length is available AND
 // 2. `uplink_bandwidth_bps` is known to be bigger than


@@ -52,7 +52,7 @@ int AudioDecoderPcmU::DecodeInternal(const uint8_t* encoded,
 return static_cast<int>(ret);
 }
-int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
+int AudioDecoderPcmU::PacketDuration(const uint8_t* /* encoded */,
 size_t encoded_len) const {
 // One encoded byte per sample per channel.
 return static_cast<int>(encoded_len / Channels());
@@ -98,7 +98,7 @@ int AudioDecoderPcmA::DecodeInternal(const uint8_t* encoded,
 return static_cast<int>(ret);
 }
-int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
+int AudioDecoderPcmA::PacketDuration(const uint8_t* /* encoded */,
 size_t encoded_len) const {
 // One encoded byte per sample per channel.
 return static_cast<int>(encoded_len / Channels());


@@ -57,7 +57,7 @@ std::vector<AudioDecoder::ParseResult> AudioDecoderG722Impl::ParsePayload(
 timestamp, 8, 16);
 }
-int AudioDecoderG722Impl::PacketDuration(const uint8_t* encoded,
+int AudioDecoderG722Impl::PacketDuration(const uint8_t* /* encoded */,
 size_t encoded_len) const {
 // 1/2 encoded byte per sample per channel.
 return static_cast<int>(2 * encoded_len / Channels());
@@ -125,7 +125,7 @@ int AudioDecoderG722StereoImpl::DecodeInternal(const uint8_t* encoded,
 return static_cast<int>(ret);
 }
-int AudioDecoderG722StereoImpl::PacketDuration(const uint8_t* encoded,
+int AudioDecoderG722StereoImpl::PacketDuration(const uint8_t* /* encoded */,
 size_t encoded_len) const {
 // 1/2 encoded byte per sample per channel. Make sure the length represents
 // an equal number of bytes per channel. Otherwise, we cannot de-interleave


@@ -130,7 +130,7 @@ size_t AudioDecoderOpusImpl::Channels() const {
 }
 void AudioDecoderOpusImpl::GeneratePlc(
-size_t requested_samples_per_channel,
+size_t /* requested_samples_per_channel */,
 rtc::BufferT<int16_t>* concealment_audio) {
 if (!generate_plc_) {
 return;


@@ -65,7 +65,7 @@ std::unique_ptr<AudioEncoderOpusStates> CreateCodec(
 MockAudioNetworkAdaptor** mock_ptr = &states->mock_audio_network_adaptor;
 AudioEncoderOpusImpl::AudioNetworkAdaptorCreator creator =
-[mock_ptr](absl::string_view, RtcEventLog* event_log) {
+[mock_ptr](absl::string_view, RtcEventLog* /* event_log */) {
 std::unique_ptr<MockAudioNetworkAdaptor> adaptor(
 new NiceMock<MockAudioNetworkAdaptor>());
 EXPECT_CALL(*adaptor, Die());


@@ -48,8 +48,8 @@ class PowerRatioEstimator : public LappedTransform::Callback {
 protected:
 void ProcessAudioBlock(const std::complex<float>* const* input,
 size_t num_input_channels,
-size_t num_freq_bins,
-size_t num_output_channels,
+size_t /* num_freq_bins */,
+size_t /* num_output_channels */,
 std::complex<float>* const* output) override {
 float low_pow = 0.f;
 float high_pow = 0.f;