AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is activated by AudioProcessing::StartRecording. The data is stored in binary protobuf format in a specified file. The file IO is, as of this CL, done from the real-time audio thread. This CL contains an interface for AecDump, a new APM submodule that will handle the recordings. Calls to the new interface from the AudioProcessingModule are added. These calls have no effect, and for a short while, audio_processing_impl.cc will contain two copies of recording calls. The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP preprocessor define. They still have an effect, while the new ones do not. In the following CLs, the old recording calls will be removed, and an implementation of AecDump added. The reasons for the refactoring are to move file IO operations from the real-time audio thread, to add a top-level low-priority task queue for logging tasks like this, to simplify and modularize audio_processing_impl.cc and remove some of the preprocessor directives. These goals will be achieved by the upcoming CLs. The implementation is in https://codereview.webrtc.org/2865113002. BUG=webrtc:7404 Review-Url: https://codereview.webrtc.org/2778783002 Cr-Commit-Position: refs/heads/master@{#18233}
This commit is contained in:
parent
dceb42da3e
commit
868f32f423
@ -234,6 +234,7 @@ rtc_static_library("audio_processing") {
|
||||
|
||||
defines = []
|
||||
deps = [
|
||||
":aec_dump_interface",
|
||||
"..:module_api",
|
||||
"../..:webrtc_common",
|
||||
"../../audio/utility:audio_frame_operations",
|
||||
@ -308,6 +309,17 @@ rtc_static_library("audio_processing") {
|
||||
]
|
||||
}
|
||||
|
||||
# Source set for the AecDump interface (include/aec_dump.h) and its
# small helper types. The ":audio_processing" target depends on this.
rtc_source_set("aec_dump_interface") {
  sources = [
    "include/aec_dump.cc",
    "include/aec_dump.h",
  ]

  deps = [
    "../../base:rtc_base_approved",
  ]
}
|
||||
|
||||
rtc_source_set("audio_processing_c") {
|
||||
visibility = [ ":*" ] # Only targets in this file can depend on this.
|
||||
sources = [
|
||||
|
||||
@ -157,6 +157,23 @@ class HighPassFilterImpl : public HighPassFilter {
|
||||
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl);
|
||||
};
|
||||
|
||||
// Translates the APM processing configuration into the stream-layout
// summary (sample rates and channel counts) recorded at the start of
// an aec-dump.
webrtc::InternalAPMStreamsConfig ToStreamsConfig(
    const ProcessingConfig& api_format) {
  webrtc::InternalAPMStreamsConfig streams_config;

  const auto& input = api_format.input_stream();
  const auto& output = api_format.output_stream();
  const auto& render_input = api_format.reverse_input_stream();
  const auto& render_output = api_format.reverse_output_stream();

  streams_config.input_sample_rate = input.sample_rate_hz();
  streams_config.input_num_channels = input.num_channels();
  streams_config.output_sample_rate = output.sample_rate_hz();
  streams_config.output_num_channels = output.num_channels();
  streams_config.render_input_sample_rate = render_input.sample_rate_hz();
  streams_config.render_input_num_channels = render_input.num_channels();
  streams_config.render_output_sample_rate = render_output.sample_rate_hz();
  streams_config.render_output_num_channels = render_output.num_channels();

  return streams_config;
}
|
||||
} // namespace
|
||||
|
||||
// Throughout webrtc, it's assumed that success is represented by zero.
|
||||
@ -541,7 +558,9 @@ int AudioProcessingImpl::InitializeLocked() {
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
if (aec_dump_) {
|
||||
aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
|
||||
}
|
||||
return kNoError;
|
||||
}
|
||||
|
||||
@ -867,6 +886,10 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
|
||||
}
|
||||
#endif
|
||||
|
||||
if (aec_dump_) {
|
||||
RecordUnprocessedCaptureStream(src);
|
||||
}
|
||||
|
||||
capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
|
||||
RETURN_ON_ERR(ProcessCaptureStreamLocked());
|
||||
capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
|
||||
@ -884,7 +907,9 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
|
||||
&crit_debug_, &debug_dump_.capture));
|
||||
}
|
||||
#endif
|
||||
|
||||
if (aec_dump_) {
|
||||
RecordProcessedCaptureStream(dest);
|
||||
}
|
||||
return kNoError;
|
||||
}
|
||||
|
||||
@ -1123,6 +1148,10 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
|
||||
return kBadDataLengthError;
|
||||
}
|
||||
|
||||
if (aec_dump_) {
|
||||
RecordUnprocessedCaptureStream(*frame);
|
||||
}
|
||||
|
||||
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
|
||||
if (debug_dump_.debug_file->is_open()) {
|
||||
RETURN_ON_ERR(WriteConfigMessage(false));
|
||||
@ -1141,6 +1170,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
|
||||
frame, submodule_states_.CaptureMultiBandProcessingActive() ||
|
||||
submodule_states_.CaptureFullBandProcessingActive());
|
||||
|
||||
if (aec_dump_) {
|
||||
RecordProcessedCaptureStream(*frame);
|
||||
}
|
||||
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
|
||||
if (debug_dump_.debug_file->is_open()) {
|
||||
audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
|
||||
@ -1426,7 +1458,14 @@ int AudioProcessingImpl::AnalyzeReverseStreamLocked(
|
||||
&crit_debug_, &debug_dump_.render));
|
||||
}
|
||||
#endif
|
||||
|
||||
if (aec_dump_) {
|
||||
const size_t channel_size =
|
||||
formats_.api_format.reverse_input_stream().num_frames();
|
||||
const size_t num_channels =
|
||||
formats_.api_format.reverse_input_stream().num_channels();
|
||||
aec_dump_->WriteRenderStreamMessage(
|
||||
FloatAudioFrame(src, num_channels, channel_size));
|
||||
}
|
||||
render_.render_audio->CopyFrom(src,
|
||||
formats_.api_format.reverse_input_stream());
|
||||
return ProcessRenderStreamLocked();
|
||||
@ -1479,6 +1518,10 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
|
||||
&crit_debug_, &debug_dump_.render));
|
||||
}
|
||||
#endif
|
||||
if (aec_dump_) {
|
||||
aec_dump_->WriteRenderStreamMessage(*frame);
|
||||
}
|
||||
|
||||
render_.render_audio->DeinterleaveFrom(frame);
|
||||
RETURN_ON_ERR(ProcessRenderStreamLocked());
|
||||
render_.render_audio->InterleaveTo(
|
||||
@ -1568,6 +1611,30 @@ int AudioProcessingImpl::delay_offset_ms() const {
|
||||
return capture_.delay_offset_ms;
|
||||
}
|
||||
|
||||
// Attaches |aec_dump| as the active recorder, replacing any previously
// attached one. Takes both the render and the capture lock, then
// immediately records the current configuration and stream formats so
// the new dump is self-contained.
void AudioProcessingImpl::AttachAecDump(std::unique_ptr<AecDump> aec_dump) {
  RTC_DCHECK(aec_dump);
  rtc::CritScope cs_render(&crit_render_);
  rtc::CritScope cs_capture(&crit_capture_);

  // The previously attached AecDump will be destroyed with the
  // 'aec_dump' parameter, which is after locks are released.
  aec_dump_.swap(aec_dump);
  // Force a CONFIG message so the dump starts with the full current
  // configuration, followed by an INIT message with the stream formats.
  WriteAecDumpConfigMessage(true);
  aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
}
|
||||
|
||||
void AudioProcessingImpl::DetachAecDump() {
|
||||
// The d-tor of a task-queue based AecDump blocks until all pending
|
||||
// tasks are done. This construction avoids blocking while holding
|
||||
// the render and capture locks.
|
||||
std::unique_ptr<AecDump> aec_dump = nullptr;
|
||||
{
|
||||
rtc::CritScope cs_render(&crit_render_);
|
||||
rtc::CritScope cs_capture(&crit_capture_);
|
||||
aec_dump = std::move(aec_dump_);
|
||||
}
|
||||
}
|
||||
|
||||
int AudioProcessingImpl::StartDebugRecording(
|
||||
const char filename[AudioProcessing::kMaxFilenameSize],
|
||||
int64_t max_log_size_bytes) {
|
||||
@ -1639,11 +1706,12 @@ int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
|
||||
}
|
||||
|
||||
int AudioProcessingImpl::StopDebugRecording() {
|
||||
DetachAecDump();
|
||||
|
||||
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
|
||||
// Run in a single-threaded manner.
|
||||
rtc::CritScope cs_render(&crit_render_);
|
||||
rtc::CritScope cs_capture(&crit_capture_);
|
||||
|
||||
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
|
||||
// We just return if recording hasn't started.
|
||||
debug_dump_.debug_file->CloseFile();
|
||||
return kNoError;
|
||||
@ -1904,6 +1972,121 @@ void AudioProcessingImpl::UpdateHistogramsOnCallEnd() {
|
||||
capture_.last_aec_system_delay_ms = 0;
|
||||
}
|
||||
|
||||
// Collects the current configuration of the public and private
// submodules into an InternalAPMConfig and forwards it to the attached
// AecDump as a CONFIG message. Writing is skipped when the config is
// unchanged since the last write, unless |forced| is set. No-op when
// no AecDump is attached.
void AudioProcessingImpl::WriteAecDumpConfigMessage(bool forced) {
  if (!aec_dump_) {
    return;
  }
  // Semicolon-separated list of active experiments, seeded with the
  // echo canceller's own description.
  std::string experiments_description =
      public_submodules_->echo_cancellation->GetExperimentsDescription();
  // TODO(peah): Add semicolon-separated concatenations of experiment
  // descriptions for other submodules.
  if (capture_nonlocked_.level_controller_enabled) {
    experiments_description += "LevelController;";
  }
  if (constants_.agc_clipped_level_min != kClippedLevelMin) {
    experiments_description += "AgcClippingLevelExperiment;";
  }
  if (capture_nonlocked_.echo_canceller3_enabled) {
    experiments_description += "EchoCanceller3;";
  }

  InternalAPMConfig apm_config;

  // Full-band echo cancellation (AEC) settings.
  apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled();
  apm_config.aec_delay_agnostic_enabled =
      public_submodules_->echo_cancellation->is_delay_agnostic_enabled();
  apm_config.aec_drift_compensation_enabled =
      public_submodules_->echo_cancellation->is_drift_compensation_enabled();
  apm_config.aec_extended_filter_enabled =
      public_submodules_->echo_cancellation->is_extended_filter_enabled();
  apm_config.aec_suppression_level = static_cast<int>(
      public_submodules_->echo_cancellation->suppression_level());

  // Mobile echo control (AECM) settings.
  apm_config.aecm_enabled =
      public_submodules_->echo_control_mobile->is_enabled();
  apm_config.aecm_comfort_noise_enabled =
      public_submodules_->echo_control_mobile->is_comfort_noise_enabled();
  apm_config.aecm_routing_mode =
      static_cast<int>(public_submodules_->echo_control_mobile->routing_mode());

  // Automatic gain control (AGC) settings.
  apm_config.agc_enabled = public_submodules_->gain_control->is_enabled();
  apm_config.agc_mode =
      static_cast<int>(public_submodules_->gain_control->mode());
  apm_config.agc_limiter_enabled =
      public_submodules_->gain_control->is_limiter_enabled();
  apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc;

  apm_config.hpf_enabled = config_.high_pass_filter.enabled;

  // Noise suppression (NS) settings.
  apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled();
  apm_config.ns_level =
      static_cast<int>(public_submodules_->noise_suppression->level());

  apm_config.transient_suppression_enabled =
      capture_.transient_suppressor_enabled;
  apm_config.intelligibility_enhancer_enabled =
      capture_nonlocked_.intelligibility_enabled;
  apm_config.experiments_description = experiments_description;

  // Avoid writing duplicate CONFIG messages unless explicitly forced.
  if (!forced && apm_config == apm_config_for_aec_dump_) {
    return;
  }
  aec_dump_->WriteConfig(apm_config);
  apm_config_for_aec_dump_ = apm_config;
}
|
||||
|
||||
void AudioProcessingImpl::RecordUnprocessedCaptureStream(
|
||||
const float* const* src) {
|
||||
RTC_DCHECK(aec_dump_);
|
||||
WriteAecDumpConfigMessage(false);
|
||||
|
||||
const size_t channel_size = formats_.api_format.input_stream().num_frames();
|
||||
const size_t num_channels = formats_.api_format.input_stream().num_channels();
|
||||
aec_dump_->AddCaptureStreamInput(
|
||||
FloatAudioFrame(src, num_channels, channel_size));
|
||||
RecordAudioProcessingState();
|
||||
}
|
||||
|
||||
// Forwards an unprocessed capture frame (interleaved AudioFrame
// format) to the attached AecDump, preceded by a CONFIG message when
// the configuration changed, and followed by the current runtime
// state (delay, drift, level, keypress).
void AudioProcessingImpl::RecordUnprocessedCaptureStream(
    const AudioFrame& capture_frame) {
  RTC_DCHECK(aec_dump_);
  WriteAecDumpConfigMessage(false);

  aec_dump_->AddCaptureStreamInput(capture_frame);
  RecordAudioProcessingState();
}
|
||||
|
||||
void AudioProcessingImpl::RecordProcessedCaptureStream(
|
||||
const float* const* processed_capture_stream) {
|
||||
RTC_DCHECK(aec_dump_);
|
||||
|
||||
const size_t channel_size = formats_.api_format.output_stream().num_frames();
|
||||
const size_t num_channels =
|
||||
formats_.api_format.output_stream().num_channels();
|
||||
aec_dump_->AddCaptureStreamOutput(
|
||||
FloatAudioFrame(processed_capture_stream, num_channels, channel_size));
|
||||
aec_dump_->WriteCaptureStreamMessage();
|
||||
}
|
||||
|
||||
// Forwards a processed capture frame (interleaved AudioFrame format)
// to the attached AecDump and issues the write of the complete
// capture stream message (input, output and state added earlier).
void AudioProcessingImpl::RecordProcessedCaptureStream(
    const AudioFrame& processed_capture_frame) {
  RTC_DCHECK(aec_dump_);

  aec_dump_->AddCaptureStreamOutput(processed_capture_frame);
  aec_dump_->WriteCaptureStreamMessage();
}
|
||||
|
||||
void AudioProcessingImpl::RecordAudioProcessingState() {
|
||||
RTC_DCHECK(aec_dump_);
|
||||
AecDump::AudioProcessingState audio_proc_state;
|
||||
audio_proc_state.delay = capture_nonlocked_.stream_delay_ms;
|
||||
audio_proc_state.drift =
|
||||
public_submodules_->echo_cancellation->stream_drift_samples();
|
||||
audio_proc_state.level = gain_control()->stream_analog_level();
|
||||
audio_proc_state.keypress = capture_.key_pressed;
|
||||
aec_dump_->AddAudioProcessingState(audio_proc_state);
|
||||
}
|
||||
|
||||
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
|
||||
int AudioProcessingImpl::WriteMessageToDebugFile(
|
||||
FileWrapper* debug_file,
|
||||
|
||||
@ -23,6 +23,7 @@
|
||||
#include "webrtc/base/swap_queue.h"
|
||||
#include "webrtc/base/thread_annotations.h"
|
||||
#include "webrtc/modules/audio_processing/audio_buffer.h"
|
||||
#include "webrtc/modules/audio_processing/include/aec_dump.h"
|
||||
#include "webrtc/modules/audio_processing/include/audio_processing.h"
|
||||
#include "webrtc/modules/audio_processing/render_queue_item_verifier.h"
|
||||
#include "webrtc/modules/audio_processing/rms_level.h"
|
||||
@ -64,6 +65,8 @@ class AudioProcessingImpl : public AudioProcessing {
|
||||
void ApplyConfig(const AudioProcessing::Config& config) override;
|
||||
void SetExtraOptions(const webrtc::Config& config) override;
|
||||
void UpdateHistogramsOnCallEnd() override;
|
||||
void AttachAecDump(std::unique_ptr<AecDump> aec_dump) override;
|
||||
void DetachAecDump() override;
|
||||
int StartDebugRecording(const char filename[kMaxFilenameSize],
|
||||
int64_t max_log_size_bytes) override;
|
||||
int StartDebugRecording(FILE* handle, int64_t max_log_size_bytes) override;
|
||||
@ -279,6 +282,34 @@ class AudioProcessingImpl : public AudioProcessing {
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
|
||||
int ProcessRenderStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
|
||||
|
||||
// Collects configuration settings from public and private
|
||||
// submodules to be saved as an audioproc::Config message on the
|
||||
// AecDump if it is attached. If not |forced|, only writes the current
|
||||
// config if it is different from the last saved one; if |forced|,
|
||||
// writes the config regardless of the last saved.
|
||||
void WriteAecDumpConfigMessage(bool forced)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
|
||||
|
||||
// Notifies attached AecDump of current configuration and capture data.
|
||||
void RecordUnprocessedCaptureStream(const float* const* capture_stream)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
|
||||
|
||||
void RecordUnprocessedCaptureStream(const AudioFrame& capture_frame)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
|
||||
|
||||
// Notifies attached AecDump of current configuration and
|
||||
// processed capture data and issues a capture stream recording
|
||||
// request.
|
||||
void RecordProcessedCaptureStream(
|
||||
const float* const* processed_capture_stream)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
|
||||
|
||||
void RecordProcessedCaptureStream(const AudioFrame& processed_capture_frame)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
|
||||
|
||||
// Notifies attached AecDump about current state (delay, drift, etc).
|
||||
void RecordAudioProcessingState() EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
|
||||
|
||||
// Debug dump methods that are internal and called without locks.
|
||||
// TODO(peah): Make thread safe.
|
||||
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
|
||||
@ -303,6 +334,14 @@ class AudioProcessingImpl : public AudioProcessing {
|
||||
ApmDebugDumpState debug_dump_;
|
||||
#endif
|
||||
|
||||
// AecDump instance used for optionally logging APM config, input
|
||||
// and output to file in the AEC-dump format defined in debug.proto.
|
||||
std::unique_ptr<AecDump> aec_dump_;
|
||||
|
||||
// Hold the last config written with AecDump for avoiding writing
|
||||
// the same config twice.
|
||||
InternalAPMConfig apm_config_for_aec_dump_ GUARDED_BY(crit_capture_);
|
||||
|
||||
// Critical sections.
|
||||
rtc::CriticalSection crit_render_ ACQUIRED_BEFORE(crit_capture_);
|
||||
rtc::CriticalSection crit_capture_;
|
||||
|
||||
40
webrtc/modules/audio_processing/include/aec_dump.cc
Normal file
40
webrtc/modules/audio_processing/include/aec_dump.cc
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_processing/include/aec_dump.h"
|
||||
|
||||
namespace webrtc {
|
||||
InternalAPMConfig::InternalAPMConfig() = default;
InternalAPMConfig::InternalAPMConfig(const InternalAPMConfig&) = default;
InternalAPMConfig::InternalAPMConfig(InternalAPMConfig&&) = default;
InternalAPMConfig& InternalAPMConfig::operator=(const InternalAPMConfig&) =
    default;

// Member-wise equality over every field of InternalAPMConfig. Used to
// detect whether the APM configuration changed since the last CONFIG
// message was written to the aec-dump.
// NOTE(review): not const-qualified; making it const would require a
// matching change to the declaration in aec_dump.h.
bool InternalAPMConfig::operator==(const InternalAPMConfig& other) {
  return aec_enabled == other.aec_enabled &&
         aec_delay_agnostic_enabled == other.aec_delay_agnostic_enabled &&
         aec_drift_compensation_enabled ==
             other.aec_drift_compensation_enabled &&
         aec_extended_filter_enabled == other.aec_extended_filter_enabled &&
         aec_suppression_level == other.aec_suppression_level &&
         aecm_enabled == other.aecm_enabled &&
         aecm_comfort_noise_enabled == other.aecm_comfort_noise_enabled &&
         aecm_routing_mode == other.aecm_routing_mode &&
         agc_enabled == other.agc_enabled && agc_mode == other.agc_mode &&
         agc_limiter_enabled == other.agc_limiter_enabled &&
         hpf_enabled == other.hpf_enabled && ns_enabled == other.ns_enabled &&
         ns_level == other.ns_level &&
         transient_suppression_enabled == other.transient_suppression_enabled &&
         intelligibility_enhancer_enabled ==
             other.intelligibility_enhancer_enabled &&
         noise_robust_agc_enabled == other.noise_robust_agc_enabled &&
         experiments_description == other.experiments_description;
}
|
||||
} // namespace webrtc
|
||||
141
webrtc/modules/audio_processing/include/aec_dump.h
Normal file
141
webrtc/modules/audio_processing/include/aec_dump.h
Normal file
@ -0,0 +1,141 @@
|
||||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
|
||||
#define WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "webrtc/base/array_view.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class AudioFrame;
|
||||
|
||||
// Struct for passing current config from APM without having to
// include protobuf headers.
struct InternalAPMConfig {
  InternalAPMConfig();
  InternalAPMConfig(const InternalAPMConfig&);
  InternalAPMConfig(InternalAPMConfig&&);

  InternalAPMConfig& operator=(const InternalAPMConfig&);
  InternalAPMConfig& operator=(InternalAPMConfig&&) = delete;

  // Member-wise equality; used to avoid writing duplicate CONFIG
  // messages to the aec-dump.
  bool operator==(const InternalAPMConfig& other);

  // Full-band echo cancellation (AEC) settings.
  bool aec_enabled = false;
  bool aec_delay_agnostic_enabled = false;
  bool aec_drift_compensation_enabled = false;
  bool aec_extended_filter_enabled = false;
  int aec_suppression_level = 0;
  // Mobile echo control (AECM) settings.
  bool aecm_enabled = false;
  bool aecm_comfort_noise_enabled = false;
  int aecm_routing_mode = 0;
  // Automatic gain control (AGC) settings.
  bool agc_enabled = false;
  int agc_mode = 0;
  bool agc_limiter_enabled = false;
  // Other submodule toggles.
  bool hpf_enabled = false;
  bool ns_enabled = false;
  int ns_level = 0;
  bool transient_suppression_enabled = false;
  bool intelligibility_enhancer_enabled = false;
  bool noise_robust_agc_enabled = false;
  // Semicolon-separated list of active experiments.
  std::string experiments_description = "";
};
|
||||
|
||||
// Describes the sample rates and channel counts of the capture and
// render streams the APM is configured with; recorded in the
// aec-dump INIT message.
struct InternalAPMStreamsConfig {
  // Sample rates in Hz.
  int input_sample_rate = 0;
  int output_sample_rate = 0;
  int render_input_sample_rate = 0;
  int render_output_sample_rate = 0;

  // Channel counts.
  size_t input_num_channels = 0;
  size_t output_num_channels = 0;
  size_t render_input_num_channels = 0;
  size_t render_output_num_channels = 0;
};
|
||||
|
||||
// Class to pass audio data in float** format. This is to avoid
|
||||
// dependence on AudioBuffer, and avoid problems associated with
|
||||
// rtc::ArrayView<rtc::ArrayView>.
|
||||
class FloatAudioFrame {
|
||||
public:
|
||||
// |num_channels| and |channel_size| describe the float**
|
||||
// |audio_samples|. |audio_samples| is assumed to point to a
|
||||
// two-dimensional |num_channels * channel_size| array of floats.
|
||||
FloatAudioFrame(const float* const* audio_samples,
|
||||
size_t num_channels,
|
||||
size_t channel_size)
|
||||
: audio_samples_(audio_samples),
|
||||
num_channels_(num_channels),
|
||||
channel_size_(channel_size) {}
|
||||
|
||||
FloatAudioFrame() = delete;
|
||||
|
||||
size_t num_channels() const { return num_channels_; }
|
||||
|
||||
rtc::ArrayView<const float> channel(size_t idx) const {
|
||||
RTC_DCHECK_LE(0, idx);
|
||||
RTC_DCHECK_LE(idx, num_channels_);
|
||||
return rtc::ArrayView<const float>(audio_samples_[idx], channel_size_);
|
||||
}
|
||||
|
||||
private:
|
||||
const float* const* audio_samples_;
|
||||
size_t num_channels_;
|
||||
size_t channel_size_;
|
||||
};
|
||||
|
||||
// An interface for recording configuration and input/output streams
// of the Audio Processing Module. The recordings are called
// 'aec-dumps' and are stored in a protobuf format defined in
// debug.proto.
// The Write* methods are always safe to call concurrently or
// otherwise for all implementing subclasses. The intended mode of
// operation is to create a protobuf object from the input, and send
// it away to be written to file asynchronously.
class AecDump {
 public:
  // Per-frame runtime state recorded alongside each capture stream
  // message.
  struct AudioProcessingState {
    int delay;      // Stream delay in ms.
    int drift;      // Echo canceller drift, in samples.
    int level;      // Analog mic level.
    bool keypress;  // Whether a key press occurred for this frame.
  };

  virtual ~AecDump() = default;

  // Logs Event::Type INIT message.
  virtual void WriteInitMessage(
      const InternalAPMStreamsConfig& streams_config) = 0;

  // Logs Event::Type STREAM message. To log an input/output pair,
  // call the AddCapture* and AddAudioProcessingState methods followed
  // by a WriteCaptureStreamMessage call.
  virtual void AddCaptureStreamInput(const FloatAudioFrame& src) = 0;
  virtual void AddCaptureStreamOutput(const FloatAudioFrame& src) = 0;
  virtual void AddCaptureStreamInput(const AudioFrame& frame) = 0;
  virtual void AddCaptureStreamOutput(const AudioFrame& frame) = 0;
  virtual void AddAudioProcessingState(const AudioProcessingState& state) = 0;
  virtual void WriteCaptureStreamMessage() = 0;

  // Logs Event::Type REVERSE_STREAM message.
  virtual void WriteRenderStreamMessage(const AudioFrame& frame) = 0;
  virtual void WriteRenderStreamMessage(const FloatAudioFrame& src) = 0;

  // Logs Event::Type CONFIG message.
  virtual void WriteConfig(const InternalAPMConfig& config) = 0;
};
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
|
||||
@ -10,6 +10,11 @@
|
||||
|
||||
#include "webrtc/modules/audio_processing/include/audio_processing.h"
|
||||
|
||||
#include "webrtc/base/checks.h"
|
||||
#include "webrtc/modules/audio_processing/include/aec_dump.h"
|
||||
// TODO(aleloi): remove AecDump header usage when internal projects
|
||||
// have updated. See https://bugs.webrtc.org/7404.
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
Beamforming::Beamforming()
|
||||
@ -31,4 +36,20 @@ Beamforming::Beamforming(bool enabled,
|
||||
|
||||
Beamforming::~Beamforming() {}
|
||||
|
||||
// TODO(aleloi): make pure virtual when internal projects have
// updated. See https://bugs.webrtc.org/7404
//
// Transitional default implementation; subclasses are expected to
// override, so reaching this is a programming error.
void AudioProcessing::AttachAecDump(std::unique_ptr<AecDump> aec_dump) {
  RTC_NOTREACHED();
}
|
||||
|
||||
// If no AecDump is attached, this has no effect. If an AecDump is
// attached, its destructor is called. The d-tor may block until
// all pending logging tasks are completed.
//
// TODO(aleloi): make pure virtual when internal projects have
// updated. See https://bugs.webrtc.org/7404
//
// Transitional default implementation; subclasses are expected to
// override, so reaching this is a programming error.
void AudioProcessing::DetachAecDump() {
  RTC_NOTREACHED();
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
@ -29,6 +29,7 @@ namespace webrtc {
|
||||
|
||||
struct AecCore;
|
||||
|
||||
class AecDump;
|
||||
class AudioFrame;
|
||||
|
||||
class NonlinearBeamformer;
|
||||
@ -455,6 +456,26 @@ class AudioProcessing {
|
||||
virtual void set_delay_offset_ms(int offset) = 0;
|
||||
virtual int delay_offset_ms() const = 0;
|
||||
|
||||
// Attaches provided webrtc::AecDump for recording debugging
|
||||
// information. Log file and maximum file size logic is supposed to
|
||||
// be handled by implementing instance of AecDump. Calling this
|
||||
// method when another AecDump is attached resets the active AecDump
|
||||
// with a new one. This causes the d-tor of the earlier AecDump to
|
||||
// be called. The d-tor call may block until all pending logging
|
||||
// tasks are completed.
|
||||
//
|
||||
// TODO(aleloi): make pure virtual when internal projects have
|
||||
// updated. See https://bugs.webrtc.org/7404
|
||||
virtual void AttachAecDump(std::unique_ptr<AecDump> aec_dump);
|
||||
|
||||
// If no AecDump is attached, this has no effect. If an AecDump is
|
||||
// attached, its destructor is called. The d-tor may block until
|
||||
// all pending logging tasks are completed.
|
||||
//
|
||||
// TODO(aleloi): make pure virtual when internal projects have
|
||||
// updated. See https://bugs.webrtc.org/7404
|
||||
virtual void DetachAecDump();
|
||||
|
||||
// Starts recording debugging information to a file specified by |filename|,
|
||||
// a NULL-terminated string. If there is an ongoing recording, the old file
|
||||
// will be closed, and recording will continue in the newly specified file.
|
||||
|
||||
@ -13,6 +13,7 @@
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "webrtc/modules/audio_processing/include/aec_dump.h"
|
||||
#include "webrtc/modules/audio_processing/include/audio_processing.h"
|
||||
#include "webrtc/test/gmock.h"
|
||||
|
||||
@ -174,6 +175,10 @@ class MockAudioProcessing : public AudioProcessing {
|
||||
MOCK_METHOD1(set_stream_key_pressed, void(bool key_pressed));
|
||||
MOCK_METHOD1(set_delay_offset_ms, void(int offset));
|
||||
MOCK_CONST_METHOD0(delay_offset_ms, int());
|
||||
|
||||
virtual void AttachAecDump(std::unique_ptr<AecDump> aec_dump) {}
|
||||
MOCK_METHOD0(DetachAecDump, void());
|
||||
|
||||
MOCK_METHOD2(StartDebugRecording, int(const char filename[kMaxFilenameSize],
|
||||
int64_t max_log_size_bytes));
|
||||
MOCK_METHOD2(StartDebugRecording, int(FILE* handle,
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user