From d3edd770ade4a9323e136a39737957963cc676b7 Mon Sep 17 00:00:00 2001
From: kwiberg
Date: Wed, 1 Mar 2017 18:52:48 -0800
Subject: [PATCH] Introduce dchecked_cast, and start using it

It's the faster, less strict cousin of checked_cast.

BUG=none

Review-Url: https://codereview.webrtc.org/2714063002
Cr-Commit-Position: refs/heads/master@{#16958}
---
 webrtc/base/safe_conversions.h                 | 12 ++++++---
 .../modules/audio_coding/acm2/acm_receiver.cc  |  2 +-
 .../audio_coding/acm2/audio_coding_module.cc   |  2 +-
 .../codecs/audio_format_conversion.cc          |  2 +-
 webrtc/modules/audio_coding/neteq/expand.cc    |  8 +++---
 .../modules/audio_coding/neteq/neteq_impl.cc   | 25 +++++++++----------
 .../neteq/red_payload_splitter.cc              |  2 +-
 .../neteq/statistics_calculator.cc             |  2 +-
 .../audio_coding/neteq/time_stretch.cc         |  2 +-
 .../neteq/tools/encode_neteq_input.cc          |  2 +-
 .../congestion_controller/probe_controller.cc  |  2 +-
 11 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/webrtc/base/safe_conversions.h b/webrtc/base/safe_conversions.h
index 51239bc65d..ff9cc44bc2 100644
--- a/webrtc/base/safe_conversions.h
+++ b/webrtc/base/safe_conversions.h
@@ -27,14 +27,20 @@ inline bool IsValueInRangeForNumericType(Src value) {
   return internal::RangeCheck<Dst>(value) == internal::TYPE_VALID;
 }
 
-// checked_cast<> is analogous to static_cast<> for numeric types,
-// except that it CHECKs that the specified numeric conversion will not
-// overflow or underflow. NaN source will always trigger a CHECK.
+// checked_cast<> and dchecked_cast<> are analogous to static_cast<> for
+// numeric types, except that they [D]CHECK that the specified numeric
+// conversion will not overflow or underflow. NaN source will always trigger
+// the [D]CHECK.
 template <typename Dst, typename Src>
 inline Dst checked_cast(Src value) {
   RTC_CHECK(IsValueInRangeForNumericType<Dst>(value));
   return static_cast<Dst>(value);
 }
+template <typename Dst, typename Src>
+inline Dst dchecked_cast(Src value) {
+  RTC_DCHECK(IsValueInRangeForNumericType<Dst>(value));
+  return static_cast<Dst>(value);
+}
 
 // saturated_cast<> is analogous to static_cast<> for numeric types, except
 // that the specified numeric conversion will saturate rather than overflow or
diff --git a/webrtc/modules/audio_coding/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
index fd4e91a617..1577d2ded6 100644
--- a/webrtc/modules/audio_coding/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -162,7 +162,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz,
     audio_frame->sample_rate_hz_ = desired_freq_hz;
     RTC_DCHECK_EQ(
         audio_frame->sample_rate_hz_,
-        rtc::checked_cast<int>(audio_frame->samples_per_channel_ * 100));
+        rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
     resampled_last_output_frame_ = true;
   } else {
     resampled_last_output_frame_ = false;
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
index 32e6940088..daeea3577e 100644
--- a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -370,7 +370,7 @@ void ConvertEncodedInfoToFragmentationHeader(
     frag->fragmentationOffset[i] = offset;
     offset += info.redundant[i].encoded_bytes;
     frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
-    frag->fragmentationTimeDiff[i] = rtc::checked_cast<uint16_t>(
+    frag->fragmentationTimeDiff[i] = rtc::dchecked_cast<uint16_t>(
        info.encoded_timestamp - info.redundant[i].encoded_timestamp);
     frag->fragmentationPlType[i] = info.redundant[i].payload_type;
   }
diff --git a/webrtc/modules/audio_coding/codecs/audio_format_conversion.cc b/webrtc/modules/audio_coding/codecs/audio_format_conversion.cc
index 5d42409ce0..5a69ae431d 100644
--- a/webrtc/modules/audio_coding/codecs/audio_format_conversion.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_format_conversion.cc
@@ -34,7 +34,7 @@ CodecInst MakeCodecInst(int payload_type,
   strncpy(ci.plname, name, sizeof(ci.plname));
   ci.plname[sizeof(ci.plname) - 1] = '\0';
   ci.plfreq = sample_rate;
-  ci.channels = rtc::checked_cast<size_t>(num_channels);
+  ci.channels = rtc::dchecked_cast<size_t>(num_channels);
   return ci;
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/expand.cc b/webrtc/modules/audio_coding/neteq/expand.cc
index 2154bfde63..0c527fe041 100644
--- a/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/webrtc/modules/audio_coding/neteq/expand.cc
@@ -222,7 +222,7 @@ int Expand::Process(AudioMultiVector* output) {
       // >= 64 * fs_mult => go from 1 to 0 in about 32 ms.
       // temp_shift = getbits(max_lag_) - 5.
       int temp_shift =
-          (31 - WebRtcSpl_NormW32(rtc::checked_cast<int32_t>(max_lag_))) - 5;
+          (31 - WebRtcSpl_NormW32(rtc::dchecked_cast<int32_t>(max_lag_))) - 5;
       int16_t mix_factor_increment = 256 >> temp_shift;
       if (stop_muting_) {
         mix_factor_increment = 0;
@@ -315,8 +315,8 @@ int Expand::Process(AudioMultiVector* output) {
                              kMaxConsecutiveExpands : consecutive_expands_ + 1;
   expand_duration_samples_ += output->Size();
   // Clamp the duration counter at 2 seconds.
-  expand_duration_samples_ =
-      std::min(expand_duration_samples_, rtc::checked_cast<size_t>(fs_hz_ * 2));
+  expand_duration_samples_ = std::min(expand_duration_samples_,
+                                      rtc::dchecked_cast<size_t>(fs_hz_ * 2));
   return 0;
 }
 
@@ -325,7 +325,7 @@ void Expand::SetParametersForNormalAfterExpand() {
   lag_index_direction_ = 0;
   stop_muting_ = true;  // Do not mute signal any more.
   statistics_->LogDelayedPacketOutageEvent(
-      rtc::checked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
+      rtc::dchecked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
 }
 
 void Expand::SetParametersForMergeAfterExpand() {
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index 09a32965ed..786cb84aa3 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -199,7 +199,7 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame, bool* muted) {
   }
   RTC_DCHECK_EQ(
       audio_frame->sample_rate_hz_,
-      rtc::checked_cast<int>(audio_frame->samples_per_channel_ * 100));
+      rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
   SetAudioFrameActivityAndType(vad_->enabled(), LastOutputType(),
                                last_vad_activity_, audio_frame);
   last_vad_activity_ = audio_frame->vad_activity_;
@@ -826,7 +826,7 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
     if (packet_length_samples != decision_logic_->packet_length_samples()) {
       decision_logic_->set_packet_length_samples(packet_length_samples);
       delay_manager_->SetPacketAudioLength(
-          rtc::checked_cast<int>((1000 * packet_length_samples) / fs_hz_));
+          rtc::dchecked_cast<int>((1000 * packet_length_samples) / fs_hz_));
     }
   }
 
@@ -1131,7 +1131,7 @@ int NetEqImpl::GetDecision(Operations* operation,
       last_mode_ == kModePreemptiveExpandLowEnergy) {
     // Subtract (samples_left + output_size_samples_) from sampleMemory.
     decision_logic_->AddSampleMemory(
-        -(samples_left + rtc::checked_cast<int>(output_size_samples_)));
+        -(samples_left + rtc::dchecked_cast<int>(output_size_samples_)));
   }
 
   // Check if it is time to play a DTMF event.
@@ -1157,11 +1157,9 @@ int NetEqImpl::GetDecision(Operations* operation,
   // Check if we already have enough samples in the |sync_buffer_|. If so,
   // change decision to normal, unless the decision was merge, accelerate, or
   // preemptive expand.
-  if (samples_left >= rtc::checked_cast<int>(output_size_samples_) &&
-      *operation != kMerge &&
-      *operation != kAccelerate &&
-      *operation != kFastAccelerate &&
-      *operation != kPreemptiveExpand) {
+  if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
+      *operation != kMerge && *operation != kAccelerate &&
+      *operation != kFastAccelerate && *operation != kPreemptiveExpand) {
     *operation = kNormal;
     return 0;
   }
@@ -1454,7 +1452,7 @@ int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
     return 0;
   }
 
-  while (*decoded_length < rtc::checked_cast<int>(output_size_samples_)) {
+  while (*decoded_length < rtc::dchecked_cast<int>(output_size_samples_)) {
    const int length = decoder->Decode(
        nullptr, 0, fs_hz_,
        (decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
@@ -1500,7 +1498,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
       const auto& result = *opt_result;
       *speech_type = result.speech_type;
       if (result.num_decoded_samples > 0) {
-        *decoded_length += rtc::checked_cast<int>(result.num_decoded_samples);
+        *decoded_length += rtc::dchecked_cast<int>(result.num_decoded_samples);
         // Update |decoder_frame_length_| with number of samples per channel.
         decoder_frame_length_ =
             result.num_decoded_samples / decoder->Channels();
@@ -1513,7 +1511,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
       packet_list->clear();
       break;
     }
-    if (*decoded_length > rtc::checked_cast<int>(decoded_buffer_length_)) {
+    if (*decoded_length > rtc::dchecked_cast<int>(decoded_buffer_length_)) {
      // Guard against overflow.
      LOG(LS_WARNING) << "Decoded too much.";
      packet_list->clear();
@@ -1986,7 +1984,8 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
        packet_duration = packet->frame->Duration();
        // TODO(ossu): Is this the correct way to track Opus FEC packets?
        if (packet->priority.codec_level > 0) {
-          stats_.SecondaryDecodedSamples(rtc::checked_cast<int>(packet_duration));
+          stats_.SecondaryDecodedSamples(
+              rtc::dchecked_cast<int>(packet_duration));
        }
      } else if (!has_cng_packet) {
        LOG(LS_WARNING) << "Unknown payload type "
@@ -2029,7 +2028,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
     packet_buffer_->DiscardAllOldPackets(timestamp_);
   }
 
-  return rtc::checked_cast<int>(extracted_samples);
+  return rtc::dchecked_cast<int>(extracted_samples);
 }
 
 void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) {
diff --git a/webrtc/modules/audio_coding/neteq/red_payload_splitter.cc b/webrtc/modules/audio_coding/neteq/red_payload_splitter.cc
index 774832c416..c14986dc39 100644
--- a/webrtc/modules/audio_coding/neteq/red_payload_splitter.cc
+++ b/webrtc/modules/audio_coding/neteq/red_payload_splitter.cc
@@ -110,7 +110,7 @@ bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
       new_packet.payload_type = new_header.payload_type;
       new_packet.sequence_number = red_packet.sequence_number;
       new_packet.priority.red_level =
-          rtc::checked_cast<int>((new_headers.size() - 1) - i);
+          rtc::dchecked_cast<int>((new_headers.size() - 1) - i);
       new_packet.payload.SetData(payload_ptr, payload_length);
       new_packets.push_front(std::move(new_packet));
       payload_ptr += payload_length;
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
index e9bceb72d9..b47ca127db 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -218,7 +218,7 @@ void StatisticsCalculator::GetNetworkStatistics(
   stats->added_zero_samples = added_zero_samples_;
   stats->current_buffer_size_ms =
       static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
-  const int ms_per_packet = rtc::checked_cast<int>(
+  const int ms_per_packet = rtc::dchecked_cast<int>(
      decision_logic.packet_length_samples() / (fs_hz / 1000));
   stats->preferred_buffer_size_ms =
      (delay_manager.TargetLevel() >> 8) * ms_per_packet;
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch.cc b/webrtc/modules/audio_coding/neteq/time_stretch.cc
index c96d165e74..0b3bad9e91 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch.cc
+++ b/webrtc/modules/audio_coding/neteq/time_stretch.cc
@@ -195,7 +195,7 @@ bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
   right_scale = std::max(0, right_scale);
   left_side = left_side >> right_scale;
   right_side =
-      rtc::checked_cast<int32_t>(peak_index) * (right_side >> right_scale);
+      rtc::dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale);
 
   // Scale |left_side| properly before comparing with |right_side|.
   // (|scaling| is the scale factor before energy calculation, thus the scale
diff --git a/webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc b/webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc
index 263f7b4223..f837ad6362 100644
--- a/webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc
@@ -72,7 +72,7 @@ void EncodeNetEqInput::CreatePacket() {
     info = encoder_->Encode(rtp_timestamp_,
                             generator_->Generate(num_samples),
                             &packet_data_->payload);
-    rtp_timestamp_ += rtc::checked_cast<uint32_t>(
+    rtp_timestamp_ += rtc::dchecked_cast<uint32_t>(
        num_samples * encoder_->RtpTimestampRateHz() /
        encoder_->SampleRateHz());
     ++num_blocks;
diff --git a/webrtc/modules/congestion_controller/probe_controller.cc b/webrtc/modules/congestion_controller/probe_controller.cc
index 72d0e9ed0d..e8b3a2a1cb 100644
--- a/webrtc/modules/congestion_controller/probe_controller.cc
+++ b/webrtc/modules/congestion_controller/probe_controller.cc
@@ -229,7 +229,7 @@ void ProbeController::InitiateProbing(
       bitrate = max_probe_bitrate_bps;
       probe_further = false;
     }
-    pacer_->CreateProbeCluster(rtc::checked_cast<int>(bitrate));
+    pacer_->CreateProbeCluster(rtc::dchecked_cast<int>(bitrate));
   }
   time_last_probing_initiated_ms_ = now_ms;
   if (probe_further) {
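
Usage note (illustrative sketch, not part of the patch): the standalone program below contrasts the two casts added in webrtc/base/safe_conversions.h. assert() and std::abort() stand in for RTC_DCHECK and RTC_CHECK, C++20 std::in_range stands in for internal::RangeCheck, and the *_demo names are hypothetical.

// sketch_dchecked_cast.cc -- illustrative stand-ins only; build with -std=c++20.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <utility>  // std::in_range

// Analog of checked_cast<>: the range check runs in every build type.
template <typename Dst, typename Src>
Dst checked_cast_demo(Src value) {
  if (!std::in_range<Dst>(value))  // stand-in for RTC_CHECK
    std::abort();
  return static_cast<Dst>(value);
}

// Analog of dchecked_cast<>: the range check runs only when NDEBUG is not
// defined, so an optimized build pays nothing beyond the static_cast. This is
// what makes it the "faster, less strict cousin" for hot paths where the
// value is already known to fit.
template <typename Dst, typename Src>
Dst dchecked_cast_demo(Src value) {
  assert(std::in_range<Dst>(value));  // stand-in for RTC_DCHECK
  return static_cast<Dst>(value);
}

int main() {
  // Mirrors the call sites in the patch: a size_t quantity that is known to
  // fit in an int is narrowed for an int-typed API.
  size_t samples_per_channel = 480;  // 10 ms of 48 kHz audio
  int samples = dchecked_cast_demo<int>(samples_per_channel);
  std::printf("%d samples\n", samples);

  // An out-of-range value would abort here even in an optimized build:
  // uint16_t small = checked_cast_demo<uint16_t>(1 << 20);
  return 0;
}

With -DNDEBUG the dchecked_cast_demo check compiles away and only the static_cast remains, while checked_cast_demo keeps its check, which matches the debug/release trade-off the call-site changes above rely on.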