Report stats from ChannelReceive::GetAudioFrameWithInfo at 1Hz.

This is a change from the previous 100Hz frequency.
Also changes the locking slightly in AcmReceiver so that grabbing the
NetEq lock right after we've released it is no longer necessary inside
AcmReceiver::GetAudio, and so that the NetEq lock is not grabbed while
holding the AcmReceiver lock.

Bug: webrtc:12868
Change-Id: If6ee35f3dca20eb5bdbc615123aa099ccecf57c5
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/221371
Commit-Queue: Tommi <tommi@webrtc.org>
Reviewed-by: Minyue Li <minyue@webrtc.org>
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Reviewed-by: Markus Handell <handellm@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34258}
Author: Tommi, 2021-06-09 19:30:41 +02:00, committed by WebRTC LUCI CQ
commit 3cc68ec32e (parent e2e046452a)
7 changed files with 47 additions and 23 deletions
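
The reporting throttle in this change is simple counting: each GetAudioFrameWithInfo callback covers 10 ms of audio, so letting 100 callbacks pass between reports gives 100 x 10 ms = 1 s, i.e. 1 Hz. Below is a minimal, stand-alone sketch of that pattern; the class and function names are illustrative and not part of the patch.

#include <cstdio>

// Illustrative throttle: fire once every `interval` invocations. With one
// invocation per 10 ms audio frame and interval = 100, this fires at 1 Hz.
class CallbackThrottle {
 public:
  explicit CallbackThrottle(int interval) : interval_(interval) {}

  // Call once per audio callback; returns true when a report is due.
  bool OnCallback() {
    if (++count_ >= interval_) {
      count_ = 0;
      return true;
    }
    return false;
  }

 private:
  const int interval_;
  int count_ = 0;
};

int main() {
  CallbackThrottle throttle(/*interval=*/100);
  int reports = 0;
  for (int i = 0; i < 300; ++i) {  // Simulate 3 seconds of 10 ms callbacks.
    if (throttle.OnCallback())
      ++reports;
  }
  std::printf("reports: %d\n", reports);  // Prints "reports: 3".
}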


@@ -214,11 +214,15 @@ class NetEq {
// |data_| in |audio_frame| is not written, but should be interpreted as being
// all zeros. For testing purposes, an override can be supplied in the
// |action_override| argument, which will cause NetEq to take this action
// next, instead of the action it would normally choose.
// next, instead of the action it would normally choose. An optional output
// argument for fetching the current sample rate can be provided, which
// will return the same value as last_output_sample_rate_hz() but will avoid
// additional synchronization.
// Returns kOK on success, or kFail in case of an error.
virtual int GetAudio(
AudioFrame* audio_frame,
bool* muted,
int* current_sample_rate_hz = nullptr,
absl::optional<Operation> action_override = absl::nullopt) = 0;
// Replaces the current set of decoders with the given one.
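
For context, a caller is now expected to fetch the output rate in the same GetAudio call instead of calling last_output_sample_rate_hz() afterwards. The helper below is a hedged sketch (not part of the patch), and the include paths are assumed to match the public NetEq API headers at the time of this change.

#include "api/audio/audio_frame.h"
#include "api/neteq/neteq.h"

// Hypothetical helper: pull one 10 ms frame and return the output sample
// rate via the new out-parameter, avoiding a second, separately synchronized
// call into NetEq. Returns -1 on failure, mirroring AcmReceiver::GetAudio.
int PullFrame(webrtc::NetEq* neteq, webrtc::AudioFrame* frame, bool* muted) {
  int sample_rate_hz = 0;
  if (neteq->GetAudio(frame, muted, &sample_rate_hz) != webrtc::NetEq::kOK) {
    return -1;
  }
  return sample_rate_hz;  // Same value last_output_sample_rate_hz() reports.
}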


@@ -284,6 +284,15 @@ class ChannelReceive : public ChannelReceiveInterface {
rtc::scoped_refptr<ChannelReceiveFrameTransformerDelegate>
frame_transformer_delegate_;
// Counter that's used to control the frequency of reporting histograms
// from the `GetAudioFrameWithInfo` callback.
int audio_frame_interval_count_ RTC_GUARDED_BY(audio_thread_race_checker_) =
0;
// Controls how many callbacks we let pass by before reporting callback stats.
// A value of 100 means 100 callbacks, each one of which represents 10ms worth
// of data, so the stats reporting frequency will be 1Hz (modulo failures).
constexpr static int kHistogramReportingInterval = 100;
};
void ChannelReceive::OnReceivedPayloadData(
@@ -332,7 +341,6 @@ void ChannelReceive::InitFrameTransformerDelegate(
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
RTC_DCHECK(frame_transformer);
RTC_DCHECK(!frame_transformer_delegate_);
RTC_DCHECK(worker_thread_);
RTC_DCHECK(worker_thread_->IsCurrent());
// Pass a callback to ChannelReceive::OnReceivedPayloadData, to be called by
@@ -457,19 +465,22 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
}
audio_frame->packet_infos_ = RtpPacketInfos(packet_infos);
RTC_DCHECK(worker_thread_);
worker_thread_->PostTask(ToQueuedTask(worker_safety_, [this]() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.TargetJitterBufferDelayMs",
acm_receiver_.TargetDelayMs());
const int jitter_buffer_delay = acm_receiver_.FilteredCurrentDelayMs();
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDelayEstimateMs",
jitter_buffer_delay + playout_delay_ms_);
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverJitterBufferDelayMs",
jitter_buffer_delay);
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDeviceDelayMs",
playout_delay_ms_);
}));
++audio_frame_interval_count_;
if (audio_frame_interval_count_ >= kHistogramReportingInterval) {
audio_frame_interval_count_ = 0;
worker_thread_->PostTask(ToQueuedTask(worker_safety_, [this]() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.TargetJitterBufferDelayMs",
acm_receiver_.TargetDelayMs());
const int jitter_buffer_delay = acm_receiver_.FilteredCurrentDelayMs();
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDelayEstimateMs",
jitter_buffer_delay + playout_delay_ms_);
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverJitterBufferDelayMs",
jitter_buffer_delay);
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDeviceDelayMs",
playout_delay_ms_);
}));
}
return muted ? AudioMixer::Source::AudioFrameInfo::kMuted
: AudioMixer::Source::AudioFrameInfo::kNormal;


@@ -146,20 +146,22 @@ int AcmReceiver::GetAudio(int desired_freq_hz,
AudioFrame* audio_frame,
bool* muted) {
RTC_DCHECK(muted);
// Accessing members, take the lock.
MutexLock lock(&mutex_);
if (neteq_->GetAudio(audio_frame, muted) != NetEq::kOK) {
int current_sample_rate_hz = 0;
if (neteq_->GetAudio(audio_frame, muted, &current_sample_rate_hz) !=
NetEq::kOK) {
RTC_LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
return -1;
}
const int current_sample_rate_hz = neteq_->last_output_sample_rate_hz();
RTC_DCHECK_NE(current_sample_rate_hz, 0);
// Update if resampling is required.
const bool need_resampling =
(desired_freq_hz != -1) && (current_sample_rate_hz != desired_freq_hz);
// Accessing members, take the lock.
MutexLock lock(&mutex_);
if (need_resampling && !resampled_last_output_frame_) {
// Prime the resampler with the last frame.
int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
@@ -174,8 +176,8 @@ int AcmReceiver::GetAudio(int desired_freq_hz,
}
}
// TODO(henrik.lundin) Glitches in the output may appear if the output rate
// from NetEq changes. See WebRTC issue 3923.
// TODO(bugs.webrtc.org/3923) Glitches in the output may appear if the output
// rate from NetEq changes.
if (need_resampling) {
// TODO(yujo): handle this more efficiently for muted frames.
int samples_per_channel_int = resampler_.Resample10Msec(
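
The lock reordering in this hunk moves the AcmReceiver mutex acquisition to after the NetEq call, so the NetEq lock is neither taken while the AcmReceiver lock is held nor re-taken via last_output_sample_rate_hz() right after GetAudio returns. A generic sketch of that reordering follows, with std::mutex standing in for the real locks; all names here are illustrative.

#include <mutex>

std::mutex outer;        // Stands in for AcmReceiver's mutex_.
std::mutex inner;        // Stands in for NetEq's internal lock.
int cached_rate_hz = 0;  // Stands in for AcmReceiver state guarded by outer.

// Stand-in for NetEq::GetAudio: does its work under its own lock and hands
// back the sample rate through an output argument.
int ProduceAudio(int* rate_hz) {
  std::lock_guard<std::mutex> lock(inner);
  *rate_hz = 48000;
  return 0;
}

void GetAudioReordered() {
  // 1) Call into the inner component first; its lock is taken and released
  //    without `outer` being held, and the rate comes back in the same call.
  int rate_hz = 0;
  if (ProduceAudio(&rate_hz) != 0)
    return;

  // 2) Only now take the outer lock to update our own members; `inner` is
  //    never acquired while `outer` is held, and never re-acquired just to
  //    read the rate.
  std::lock_guard<std::mutex> lock(outer);
  cached_rate_hz = rate_hz;
}

int main() {
  GetAudioReordered();
}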


@@ -258,6 +258,7 @@ void SetAudioFrameActivityAndType(bool vad_enabled,
int NetEqImpl::GetAudio(AudioFrame* audio_frame,
bool* muted,
int* current_sample_rate_hz,
absl::optional<Operation> action_override) {
TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
MutexLock lock(&mutex_);
@@ -296,6 +297,11 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame,
}
}
if (current_sample_rate_hz) {
*current_sample_rate_hz = delayed_last_output_sample_rate_hz_.value_or(
last_output_sample_rate_hz_);
}
return kOK;
}


@@ -133,6 +133,7 @@ class NetEqImpl : public webrtc::NetEq {
int GetAudio(
AudioFrame* audio_frame,
bool* muted,
int* current_sample_rate_hz = nullptr,
absl::optional<Operation> action_override = absl::nullopt) override;
void SetCodecs(const std::map<int, SdpAudioFormat>& codecs) override;


@@ -1066,7 +1066,7 @@ TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithAcceleration) {
expected_target_delay += neteq_->TargetDelayMs() * 2 * kSamples;
// We have two packets in the buffer and kAccelerate operation will
// extract 20 ms of data.
neteq_->GetAudio(&out_frame_, &muted, NetEq::Operation::kAccelerate);
neteq_->GetAudio(&out_frame_, &muted, nullptr, NetEq::Operation::kAccelerate);
// Check jitter buffer delay.
NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();


@@ -172,7 +172,7 @@ NetEqTest::SimulationStepResult NetEqTest::RunToNextGetAudio() {
}
AudioFrame out_frame;
bool muted;
int error = neteq_->GetAudio(&out_frame, &muted,
int error = neteq_->GetAudio(&out_frame, &muted, nullptr,
ActionToOperations(next_action_));
next_action_ = absl::nullopt;
RTC_CHECK(!muted) << "The code does not handle enable_muted_state";