Deprecate AudioProcessing::AnalyzeReverseStream(AudioFrame) API

Review URL: https://codereview.webrtc.org/1783693005

Cr-Commit-Position: refs/heads/master@{#12045}
This commit is contained in:
aluebs 2016-03-17 20:39:53 -07:00 committed by Commit bot
parent da116c4c37
commit b031955770
9 changed files with 37 additions and 62 deletions

View File

@@ -78,7 +78,6 @@ class FakeAudioProcessing : public webrtc::AudioProcessing {
const webrtc::StreamConfig& input_config,
const webrtc::StreamConfig& output_config,
float* const* dest));
WEBRTC_STUB(AnalyzeReverseStream, (webrtc::AudioFrame* frame));
WEBRTC_STUB(ProcessReverseStream, (webrtc::AudioFrame * frame));
WEBRTC_STUB(AnalyzeReverseStream, (
const float* const* data,

View File

@@ -837,17 +837,6 @@ int AudioProcessingImpl::AnalyzeReverseStreamLocked(
int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame");
rtc::CritScope cs(&crit_render_);
RETURN_ON_ERR(AnalyzeReverseStream(frame));
if (is_rev_processed()) {
render_.render_audio->InterleaveTo(frame, true);
}
return kNoError;
}
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
TRACE_EVENT0("webrtc", "AudioProcessing::AnalyzeReverseStream_AudioFrame");
rtc::CritScope cs(&crit_render_);
if (frame == nullptr) {
return kNullPointerError;
}
@@ -893,7 +882,11 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
}
#endif
render_.render_audio->DeinterleaveFrom(frame);
return ProcessReverseStreamLocked();
RETURN_ON_ERR(ProcessReverseStreamLocked());
if (is_rev_processed()) {
render_.render_audio->InterleaveTo(frame, true);
}
return kNoError;
}
int AudioProcessingImpl::ProcessReverseStreamLocked() {

View File

@@ -86,7 +86,6 @@ class AudioProcessingImpl : public AudioProcessing {
// Render-side exclusive methods possibly running APM in a
// multi-threaded manner. Acquire the render lock.
int AnalyzeReverseStream(AudioFrame* frame) override;
int ProcessReverseStream(AudioFrame* frame) override;
int AnalyzeReverseStream(const float* const* data,
size_t samples_per_channel,

View File

@@ -35,8 +35,7 @@ class AudioProcessingImplLockTest;
enum class RenderApiImpl {
ProcessReverseStreamImpl1,
ProcessReverseStreamImpl2,
AnalyzeReverseStreamImpl1,
AnalyzeReverseStreamImpl2
AnalyzeReverseStreamImpl
};
// Type of the capture thread APM API call to use in the test.
@@ -150,7 +149,7 @@ struct TestConfig {
// Create test config for the first processing API function set.
test_configs.push_back(test_config);
test_config.render_api_function =
RenderApiImpl::AnalyzeReverseStreamImpl2;
RenderApiImpl::AnalyzeReverseStreamImpl;
test_config.capture_api_function = CaptureApiImpl::ProcessStreamImpl3;
test_configs.push_back(test_config);
}
@@ -172,15 +171,13 @@ struct TestConfig {
const AllowedApiCallCombinations api_calls[] = {
{RenderApiImpl::ProcessReverseStreamImpl1,
CaptureApiImpl::ProcessStreamImpl1},
{RenderApiImpl::AnalyzeReverseStreamImpl1,
CaptureApiImpl::ProcessStreamImpl1},
{RenderApiImpl::ProcessReverseStreamImpl2,
CaptureApiImpl::ProcessStreamImpl2},
{RenderApiImpl::ProcessReverseStreamImpl2,
CaptureApiImpl::ProcessStreamImpl3},
{RenderApiImpl::AnalyzeReverseStreamImpl2,
{RenderApiImpl::AnalyzeReverseStreamImpl,
CaptureApiImpl::ProcessStreamImpl2},
{RenderApiImpl::AnalyzeReverseStreamImpl2,
{RenderApiImpl::AnalyzeReverseStreamImpl,
CaptureApiImpl::ProcessStreamImpl3}};
std::vector<TestConfig> out;
for (auto api_call : api_calls) {
@@ -941,8 +938,6 @@ void RenderProcessor::PrepareFrame() {
// Restrict to a common fixed sample rate if the AudioFrame interface is
// used.
if ((test_config_->render_api_function ==
RenderApiImpl::AnalyzeReverseStreamImpl1) ||
(test_config_->render_api_function ==
RenderApiImpl::ProcessReverseStreamImpl1) ||
(test_config_->aec_type !=
AecType::BasicWebRtcAecSettingsWithAecMobile)) {
@@ -1003,10 +998,7 @@ void RenderProcessor::CallApmRenderSide() {
&frame_data_.input_frame[0], frame_data_.input_stream_config,
frame_data_.output_stream_config, &frame_data_.output_frame[0]);
break;
case RenderApiImpl::AnalyzeReverseStreamImpl1:
result = apm_->AnalyzeReverseStream(&frame_data_.frame);
break;
case RenderApiImpl::AnalyzeReverseStreamImpl2:
case RenderApiImpl::AnalyzeReverseStreamImpl:
result = apm_->AnalyzeReverseStream(
&frame_data_.input_frame[0], frame_data_.input_samples_per_channel,
frame_data_.input_sample_rate_hz, frame_data_.input_channel_layout);

View File

@@ -48,7 +48,7 @@ TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
EXPECT_CALL(mock, InitializeLocked())
.Times(0);
EXPECT_NOERR(mock.ProcessStream(&frame));
EXPECT_NOERR(mock.AnalyzeReverseStream(&frame));
EXPECT_NOERR(mock.ProcessReverseStream(&frame));
// New sample rate. (Only impacts ProcessStream).
SetFrameSampleRate(&frame, 32000);
@@ -63,12 +63,12 @@ TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
EXPECT_NOERR(mock.ProcessStream(&frame));
// ProcessStream sets num_channels_ == num_output_channels.
frame.num_channels_ = 2;
EXPECT_NOERR(mock.AnalyzeReverseStream(&frame));
EXPECT_NOERR(mock.ProcessReverseStream(&frame));
// A new sample rate passed to AnalyzeReverseStream should cause an init.
// A new sample rate passed to ProcessReverseStream should cause an init.
SetFrameSampleRate(&frame, 16000);
EXPECT_CALL(mock, InitializeLocked()).Times(1);
EXPECT_NOERR(mock.AnalyzeReverseStream(&frame));
EXPECT_NOERR(mock.ProcessReverseStream(&frame));
}
} // namespace webrtc

View File

@@ -166,11 +166,11 @@ struct Intelligibility {
//
// APM operates on two audio streams on a frame-by-frame basis. Frames of the
// primary stream, on which all processing is applied, are passed to
// |ProcessStream()|. Frames of the reverse direction stream, which are used for
// analysis by some components, are passed to |AnalyzeReverseStream()|. On the
// client-side, this will typically be the near-end (capture) and far-end
// (render) streams, respectively. APM should be placed in the signal chain as
// close to the audio hardware abstraction layer (HAL) as possible.
// |ProcessStream()|. Frames of the reverse direction stream are passed to
// |ProcessReverseStream()|. On the client-side, this will typically be the
// near-end (capture) and far-end (render) streams, respectively. APM should be
// placed in the signal chain as close to the audio hardware abstraction layer
// (HAL) as possible.
//
// On the server-side, the reverse stream will normally not be used, with
// processing occurring on each incoming stream.
@@ -214,7 +214,7 @@ struct Intelligibility {
// // Start a voice call...
//
// // ... Render frame arrives bound for the audio HAL ...
// apm->AnalyzeReverseStream(render_frame);
// apm->ProcessReverseStream(render_frame);
//
// // ... Capture frame arrives from the audio HAL ...
// // Call required set_stream_ functions.
@@ -267,7 +267,7 @@ class AudioProcessing {
//
// It is also not necessary to call if the audio parameters (sample
// rate and number of channels) have changed. Passing updated parameters
// directly to |ProcessStream()| and |AnalyzeReverseStream()| is permissible.
// directly to |ProcessStream()| and |ProcessReverseStream()| is permissible.
// If the parameters are known at init-time though, they may be provided.
virtual int Initialize() = 0;
@@ -352,11 +352,11 @@ class AudioProcessing {
const StreamConfig& output_config,
float* const* dest) = 0;
// Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
// will not be modified. On the client-side, this is the far-end (or to be
// Processes a 10 ms |frame| of the reverse direction audio stream. The frame
// may be modified. On the client-side, this is the far-end (or to be
// rendered) audio.
//
// It is only necessary to provide this if echo processing is enabled, as the
// It is necessary to provide this if echo processing is enabled, as the
// reverse stream forms the echo reference signal. It is recommended, but not
// necessary, to provide if gain control is enabled. On the server-side this
// typically will not be used. If you're not sure what to pass in here,
@@ -364,14 +364,6 @@ class AudioProcessing {
//
// The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
// members of |frame| must be valid.
//
// TODO(ajm): add const to input; requires an implementation fix.
// DEPRECATED: Use |ProcessReverseStream| instead.
// TODO(ekm): Remove once all users have updated to |ProcessReverseStream|.
virtual int AnalyzeReverseStream(AudioFrame* frame) = 0;
// Same as |AnalyzeReverseStream|, but may modify |frame| if intelligibility
// is enabled.
virtual int ProcessReverseStream(AudioFrame* frame) = 0;
// Accepts deinterleaved float audio with the range [-1, 1]. Each element
@@ -391,12 +383,12 @@ class AudioProcessing {
// This must be called if and only if echo processing is enabled.
//
// Sets the |delay| in ms between AnalyzeReverseStream() receiving a far-end
// Sets the |delay| in ms between ProcessReverseStream() receiving a far-end
// frame and ProcessStream() receiving a near-end frame containing the
// corresponding echo. On the client-side this can be expressed as
// delay = (t_render - t_analyze) + (t_process - t_capture)
// where,
// - t_analyze is the time a frame is passed to AnalyzeReverseStream() and
// - t_analyze is the time a frame is passed to ProcessReverseStream() and
// t_render is the time the first sample of the same frame is rendered by
// the audio hardware.
// - t_capture is the time the first sample of a frame is captured by the

View File

@@ -589,7 +589,7 @@ int ApmTest::ProcessStreamChooser(Format format) {
int ApmTest::AnalyzeReverseStreamChooser(Format format) {
if (format == kIntFormat) {
return apm_->AnalyzeReverseStream(revframe_);
return apm_->ProcessReverseStream(revframe_);
}
return apm_->AnalyzeReverseStream(
revfloat_cb_->channels(),
@@ -645,7 +645,7 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
process_frame = &tmp_frame;
process_frame->CopyFrom(*frame);
}
EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(reverse_frame));
EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(reverse_frame));
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(system_delay_ms));
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(process_frame));
frame = frame_queue.front();
@@ -815,7 +815,7 @@ void ApmTest::TestChangingChannelsInt16Interface(
AudioProcessing::Error expected_return) {
frame_->num_channels_ = num_channels;
EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_));
EXPECT_EQ(expected_return, apm_->ProcessReverseStream(frame_));
}
void ApmTest::TestChangingForwardChannels(
@@ -1594,7 +1594,7 @@ TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
CopyLeftToRightChannel(revframe_->data_, revframe_->samples_per_channel_);
ASSERT_EQ(kNoErr, apm_->AnalyzeReverseStream(revframe_));
ASSERT_EQ(kNoErr, apm_->ProcessReverseStream(revframe_));
CopyLeftToRightChannel(frame_->data_, frame_->samples_per_channel_);
frame_->vad_activity_ = AudioFrame::kVadUnknown;
@@ -1869,7 +1869,7 @@ TEST_F(ApmTest, DebugDump) {
EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str(), -1));
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_));
EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
// Verify the file has been written.
@@ -1903,7 +1903,7 @@ TEST_F(ApmTest, DebugDumpFromFileHandle) {
EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(fid, -1));
EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_));
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
@@ -1963,7 +1963,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
ReadFrame(near_file_, frame_, float_cb_.get())) {
frame_->vad_activity_ = AudioFrame::kVadUnknown;
EXPECT_NOERR(apm_->AnalyzeReverseStream(revframe_));
EXPECT_NOERR(apm_->ProcessReverseStream(revframe_));
EXPECT_NOERR(fapm->AnalyzeReverseStream(
revfloat_cb_->channels(),
samples_per_channel,
@@ -2103,7 +2103,7 @@ TEST_F(ApmTest, Process) {
float ns_speech_prob_average = 0.0f;
while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_));
frame_->vad_activity_ = AudioFrame::kVadUnknown;

View File

@@ -677,7 +677,7 @@ void void_main(int argc, char* argv[]) {
if (msg.has_data()) {
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
apm->ProcessReverseStream(&far_frame));
} else {
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(
@@ -925,7 +925,7 @@ void void_main(int argc, char* argv[]) {
}
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
apm->ProcessReverseStream(&far_frame));
if (perf_testing) {
t1 = TickTime::Now();

View File

@@ -494,7 +494,7 @@ OutputMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm)
if (feed_data_to_apm) {
if (_audioProcessingModulePtr->ProcessReverseStream(&_audioFrame) != 0) {
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
"AudioProcessingModule::AnalyzeReverseStream() => error");
"AudioProcessingModule::ProcessReverseStream() => error");
RTC_DCHECK(false);
}
}