Revert "[ACM] iSAC audio codec removed"

This reverts commit b46c4bf27ba5c417fcba7f200d80fa4634e7e1a1.

Reason for revert: breaks a downstream project

Original change's description:
> [ACM] iSAC audio codec removed
>
> Note: this CL has to leave behind one part of iSAC, which is its VAD
> currently used by AGC1 in APM. The target visibility has been
> restricted and the VAD will be removed together with AGC1 when the
> time comes.
>
> Tested: see https://chromium-review.googlesource.com/c/chromium/src/+/4013319
>
> Bug: webrtc:14450
> Change-Id: I69cc518b16280eae62a1f1977cdbfa24c08cf5f9
> Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/282421
> Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
> Reviewed-by: Sam Zackrisson <saza@webrtc.org>
> Reviewed-by: Henrik Boström <hbos@webrtc.org>
> Commit-Queue: Alessio Bazzica <alessiob@webrtc.org>
> Cr-Commit-Position: refs/heads/main@{#38652}

Bug: webrtc:14450
Change-Id: Ice138004e84e8c5f896684e8d01133d4b2a77bb7
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/283800
Reviewed-by: Alessio Bazzica <alessiob@webrtc.org>
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Auto-Submit: Alessio Bazzica <alessiob@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Owners-Override: Mirko Bonadei <mbonadei@webrtc.org>
Bot-Commit: rubber-stamper@appspot.gserviceaccount.com <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/main@{#38655}
Author: Alessio Bazzica
Date: 2022-11-16 19:13:25 +00:00 (committed by WebRTC LUCI CQ)
Parent: cb2b133bf0
Commit: fbeb76ab51
164 changed files with 39429 additions and 117 deletions
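
For readers picking up this revert, here is a minimal usage sketch (not part of the change itself; the helper name is mine, and only factory and header names visible in the diff below are assumed): once iSAC is back in the builtin decoder factory, an "ISAC" SdpAudioFormat resolves to a decoder again.

// Sketch only: mirrors the builtin-factory wiring this revert restores.
#include <memory>

#include "absl/types/optional.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"

std::unique_ptr<webrtc::AudioDecoder> MakeIsacDecoderSketch() {
  auto factory = webrtc::CreateBuiltinAudioDecoderFactory();
  const webrtc::SdpAudioFormat isac_wb("ISAC", 16000, 1);
  if (!factory->IsSupportedDecoder(isac_wb)) {
    return nullptr;  // What happens at HEAD, before this revert lands.
  }
  return factory->MakeAudioDecoder(isac_wb, absl::nullopt);
}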

@@ -62,6 +62,7 @@ rtc_library("builtin_audio_decoder_factory") {
     "L16:audio_decoder_L16",
     "g711:audio_decoder_g711",
     "g722:audio_decoder_g722",
+    "isac:audio_decoder_isac",
   ]
   defines = []
   if (rtc_include_ilbc) {
@@ -94,6 +95,7 @@ rtc_library("builtin_audio_encoder_factory") {
     "L16:audio_encoder_L16",
     "g711:audio_encoder_g711",
     "g722:audio_encoder_g722",
+    "isac:audio_encoder_isac",
   ]
   defines = []
   if (rtc_include_ilbc) {

@@ -20,6 +20,7 @@
 #if WEBRTC_USE_BUILTIN_ILBC
 #include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"  // nogncheck
 #endif
+#include "api/audio_codecs/isac/audio_decoder_isac.h"
 #if WEBRTC_USE_BUILTIN_OPUS
 #include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
 #include "api/audio_codecs/opus/audio_decoder_opus.h"  // nogncheck
@@ -56,7 +57,7 @@ rtc::scoped_refptr<AudioDecoderFactory> CreateBuiltinAudioDecoderFactory() {
       AudioDecoderOpus, NotAdvertised<AudioDecoderMultiChannelOpus>,
 #endif
-      AudioDecoderG722,
+      AudioDecoderIsac, AudioDecoderG722,
 #if WEBRTC_USE_BUILTIN_ILBC
       AudioDecoderIlbc,

@@ -20,6 +20,7 @@
 #if WEBRTC_USE_BUILTIN_ILBC
 #include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"  // nogncheck
 #endif
+#include "api/audio_codecs/isac/audio_encoder_isac.h"
 #if WEBRTC_USE_BUILTIN_OPUS
 #include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
 #include "api/audio_codecs/opus/audio_encoder_opus.h"  // nogncheck
@@ -62,7 +63,7 @@ rtc::scoped_refptr<AudioEncoderFactory> CreateBuiltinAudioEncoderFactory() {
       AudioEncoderOpus, NotAdvertised<AudioEncoderMultiChannelOpus>,
 #endif
-      AudioEncoderG722,
+      AudioEncoderIsac, AudioEncoderG722,
 #if WEBRTC_USE_BUILTIN_ILBC
       AudioEncoderIlbc,

@@ -0,0 +1,135 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
# The targets with _fix and _float suffixes unconditionally use the
# fixed-point and floating-point iSAC implementations, respectively.
# The targets without suffixes pick one of the implementations based
# on cleverly chosen criteria.
rtc_source_set("audio_encoder_isac") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_encoder_isac.h" ]
public_configs = [ ":isac_config" ]
if (current_cpu == "arm") {
deps = [ ":audio_encoder_isac_fix" ]
} else {
deps = [ ":audio_encoder_isac_float" ]
}
}
rtc_source_set("audio_decoder_isac") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_decoder_isac.h" ]
public_configs = [ ":isac_config" ]
if (current_cpu == "arm") {
deps = [ ":audio_decoder_isac_fix" ]
} else {
deps = [ ":audio_decoder_isac_float" ]
}
}
config("isac_config") {
visibility = [ ":*" ]
if (current_cpu == "arm") {
defines = [
"WEBRTC_USE_BUILTIN_ISAC_FIX=1",
"WEBRTC_USE_BUILTIN_ISAC_FLOAT=0",
]
} else {
defines = [
"WEBRTC_USE_BUILTIN_ISAC_FIX=0",
"WEBRTC_USE_BUILTIN_ISAC_FLOAT=1",
]
}
}
rtc_library("audio_encoder_isac_fix") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_isac_fix.cc",
"audio_encoder_isac_fix.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:isac_fix",
"../../../rtc_base:stringutils",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_isac_fix") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_isac_fix.cc",
"audio_decoder_isac_fix.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:isac_fix",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_encoder_isac_float") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_isac_float.cc",
"audio_encoder_isac_float.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:isac",
"../../../rtc_base:stringutils",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_isac_float") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_isac_float.cc",
"audio_decoder_isac_float.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:isac",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_H_
#if WEBRTC_USE_BUILTIN_ISAC_FIX && !WEBRTC_USE_BUILTIN_ISAC_FLOAT
#include "api/audio_codecs/isac/audio_decoder_isac_fix.h" // nogncheck
#elif WEBRTC_USE_BUILTIN_ISAC_FLOAT && !WEBRTC_USE_BUILTIN_ISAC_FIX
#include "api/audio_codecs/isac/audio_decoder_isac_float.h" // nogncheck
#else
#error "Must choose either fix or float"
#endif
namespace webrtc {
#if WEBRTC_USE_BUILTIN_ISAC_FIX
using AudioDecoderIsac = AudioDecoderIsacFix;
#elif WEBRTC_USE_BUILTIN_ISAC_FLOAT
using AudioDecoderIsac = AudioDecoderIsacFloat;
#endif
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_H_

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_decoder_isac_fix.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
namespace webrtc {
absl::optional<AudioDecoderIsacFix::Config> AudioDecoderIsacFix::SdpToConfig(
const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
format.clockrate_hz == 16000 && format.num_channels == 1) {
return Config();
}
return absl::nullopt;
}
void AudioDecoderIsacFix::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}});
}
std::unique_ptr<AudioDecoder> AudioDecoderIsacFix::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
AudioDecoderIsacFixImpl::Config c;
c.sample_rate_hz = 16000;
return std::make_unique<AudioDecoderIsacFixImpl>(c);
}
} // namespace webrtc

@@ -0,0 +1,40 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC decoder API (fixed-point implementation) for use as a template
// parameter to CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderIsacFix {
struct Config {}; // Empty---no config values needed!
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_
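
As the comment in this header says, the struct is meant to be used as a template parameter to CreateAudioDecoderFactory<...>(). A minimal sketch of that usage, assuming "api/audio_codecs/audio_decoder_factory_template.h" and mirroring the IsacFix unit test further down in this diff (the helper name is hypothetical):

#include <memory>

#include "absl/types/optional.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/isac/audio_decoder_isac_fix.h"

std::unique_ptr<webrtc::AudioDecoder> MakeFixedPointIsacDecoderSketch() {
  // Build a factory that only knows about the fixed-point iSAC decoder.
  auto factory =
      webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderIsacFix>();
  // The fixed-point implementation only advertises ISAC/16000/1.
  return factory->MakeAudioDecoder({"ISAC", 16000, 1}, absl::nullopt);
}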

@@ -0,0 +1,56 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_decoder_isac_float.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
namespace webrtc {
absl::optional<AudioDecoderIsacFloat::Config>
AudioDecoderIsacFloat::SdpToConfig(const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
(format.clockrate_hz == 16000 || format.clockrate_hz == 32000) &&
format.num_channels == 1) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
}
}
void AudioDecoderIsacFloat::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}});
specs->push_back({{"ISAC", 32000, 1}, {32000, 1, 56000, 10000, 56000}});
}
std::unique_ptr<AudioDecoder> AudioDecoderIsacFloat::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
AudioDecoderIsacFloatImpl::Config c;
c.sample_rate_hz = config.sample_rate_hz;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioDecoderIsacFloatImpl>(c);
}
} // namespace webrtc

@@ -0,0 +1,45 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC decoder API (floating-point implementation) for use as a template
// parameter to CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderIsacFloat {
struct Config {
bool IsOk() const {
return sample_rate_hz == 16000 || sample_rate_hz == 32000;
}
int sample_rate_hz = 16000;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_H_
#if WEBRTC_USE_BUILTIN_ISAC_FIX && !WEBRTC_USE_BUILTIN_ISAC_FLOAT
#include "api/audio_codecs/isac/audio_encoder_isac_fix.h" // nogncheck
#elif WEBRTC_USE_BUILTIN_ISAC_FLOAT && !WEBRTC_USE_BUILTIN_ISAC_FIX
#include "api/audio_codecs/isac/audio_encoder_isac_float.h" // nogncheck
#else
#error "Must choose either fix or float"
#endif
namespace webrtc {
#if WEBRTC_USE_BUILTIN_ISAC_FIX
using AudioEncoderIsac = AudioEncoderIsacFix;
#elif WEBRTC_USE_BUILTIN_ISAC_FLOAT
using AudioEncoderIsac = AudioEncoderIsacFloat;
#endif
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_H_

@@ -0,0 +1,72 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_encoder_isac_fix.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
absl::optional<AudioEncoderIsacFix::Config> AudioEncoderIsacFix::SdpToConfig(
const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
format.clockrate_hz == 16000 && format.num_channels == 1) {
Config config;
const auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
const auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime >= 60) {
config.frame_size_ms = 60;
}
}
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
}
}
void AudioEncoderIsacFix::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
const SdpAudioFormat fmt = {"ISAC", 16000, 1};
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
specs->push_back({fmt, info});
}
AudioCodecInfo AudioEncoderIsacFix::QueryAudioEncoder(
AudioEncoderIsacFix::Config config) {
RTC_DCHECK(config.IsOk());
return {16000, 1, 32000, 10000, 32000};
}
std::unique_ptr<AudioEncoder> AudioEncoderIsacFix::MakeAudioEncoder(
AudioEncoderIsacFix::Config config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
AudioEncoderIsacFixImpl::Config c;
c.frame_size_ms = config.frame_size_ms;
c.bit_rate = config.bit_rate;
c.payload_type = payload_type;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioEncoderIsacFixImpl>(c);
}
} // namespace webrtc

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC encoder API (fixed-point implementation) for use as a template
// parameter to CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderIsacFix {
struct Config {
bool IsOk() const {
if (frame_size_ms != 30 && frame_size_ms != 60) {
return false;
}
if (bit_rate < 10000 || bit_rate > 32000) {
return false;
}
return true;
}
int frame_size_ms = 30;
int bit_rate = 32000; // Limit on short-term average bit rate, in bits/s.
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(Config config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
Config config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
absl::optional<AudioEncoderIsacFloat::Config>
AudioEncoderIsacFloat::SdpToConfig(const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
(format.clockrate_hz == 16000 || format.clockrate_hz == 32000) &&
format.num_channels == 1) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.bit_rate = format.clockrate_hz == 16000 ? 32000 : 56000;
if (config.sample_rate_hz == 16000) {
// For sample rate 16 kHz, optionally use 60 ms frames, instead of the
// default 30 ms.
const auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
const auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime >= 60) {
config.frame_size_ms = 60;
}
}
}
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
}
}
void AudioEncoderIsacFloat::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
for (int sample_rate_hz : {16000, 32000}) {
const SdpAudioFormat fmt = {"ISAC", sample_rate_hz, 1};
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
specs->push_back({fmt, info});
}
}
AudioCodecInfo AudioEncoderIsacFloat::QueryAudioEncoder(
const AudioEncoderIsacFloat::Config& config) {
RTC_DCHECK(config.IsOk());
constexpr int min_bitrate = 10000;
const int max_bitrate = config.sample_rate_hz == 16000 ? 32000 : 56000;
const int default_bitrate = max_bitrate;
return {config.sample_rate_hz, 1, default_bitrate, min_bitrate, max_bitrate};
}
std::unique_ptr<AudioEncoder> AudioEncoderIsacFloat::MakeAudioEncoder(
const AudioEncoderIsacFloat::Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
AudioEncoderIsacFloatImpl::Config c;
c.payload_type = payload_type;
c.sample_rate_hz = config.sample_rate_hz;
c.frame_size_ms = config.frame_size_ms;
c.bit_rate = config.bit_rate;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioEncoderIsacFloatImpl>(c);
}
} // namespace webrtc

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC encoder API (floating-point implementation) for use as a template
// parameter to CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderIsacFloat {
struct Config {
bool IsOk() const {
switch (sample_rate_hz) {
case 16000:
if (frame_size_ms != 30 && frame_size_ms != 60) {
return false;
}
if (bit_rate < 10000 || bit_rate > 32000) {
return false;
}
return true;
case 32000:
if (frame_size_ms != 30) {
return false;
}
if (bit_rate < 10000 || bit_rate > 56000) {
return false;
}
return true;
default:
return false;
}
}
int sample_rate_hz = 16000;
int frame_size_ms = 30;
int bit_rate = 32000; // Limit on short-term average bit rate, in bits/s.
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_
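
A similar sketch for the encoder side, assuming "api/audio_codecs/audio_encoder_factory_template.h" (the helper name is mine): an SDP "ptime" of 60 or more at 16 kHz maps to Config::frame_size_ms == 60, as in SdpToConfig() above and in the IsacFloat encoder test further down in this diff.

#include <memory>

#include "absl/types/optional.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/isac/audio_encoder_isac_float.h"

std::unique_ptr<webrtc::AudioEncoder> MakeWidebandIsacEncoderSketch(
    int payload_type) {
  auto factory =
      webrtc::CreateAudioEncoderFactory<webrtc::AudioEncoderIsacFloat>();
  // 16 kHz iSAC; the "ptime" parameter requests 60 ms frames.
  const webrtc::SdpAudioFormat format("ISAC", 16000, 1, {{"ptime", "60"}});
  return factory->MakeAudioEncoder(payload_type, format, absl::nullopt);
}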

@@ -32,6 +32,10 @@ if (rtc_include_tests) {
     "../g722:audio_encoder_g722",
     "../ilbc:audio_decoder_ilbc",
     "../ilbc:audio_encoder_ilbc",
+    "../isac:audio_decoder_isac_fix",
+    "../isac:audio_decoder_isac_float",
+    "../isac:audio_encoder_isac_fix",
+    "../isac:audio_encoder_isac_float",
     "../opus:audio_decoder_opus",
     "../opus:audio_encoder_opus",
   ]

@@ -16,6 +16,8 @@
 #include "api/audio_codecs/g711/audio_decoder_g711.h"
 #include "api/audio_codecs/g722/audio_decoder_g722.h"
 #include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"
+#include "api/audio_codecs/isac/audio_decoder_isac_fix.h"
+#include "api/audio_codecs/isac/audio_decoder_isac_float.h"
 #include "api/audio_codecs/opus/audio_decoder_opus.h"
 #include "test/gmock.h"
 #include "test/gtest.h"
@@ -180,6 +182,41 @@ TEST(AudioDecoderFactoryTemplateTest, Ilbc) {
   EXPECT_EQ(8000, dec->SampleRateHz());
 }

+TEST(AudioDecoderFactoryTemplateTest, IsacFix) {
+  auto factory = CreateAudioDecoderFactory<AudioDecoderIsacFix>();
+  EXPECT_THAT(factory->GetSupportedDecoders(),
+              ::testing::ElementsAre(AudioCodecSpec{
+                  {"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}}));
+  EXPECT_FALSE(factory->IsSupportedDecoder({"isac", 16000, 2}));
+  EXPECT_TRUE(factory->IsSupportedDecoder({"isac", 16000, 1}));
+  EXPECT_FALSE(factory->IsSupportedDecoder({"isac", 32000, 1}));
+  EXPECT_EQ(nullptr,
+            factory->MakeAudioDecoder({"isac", 8000, 1}, absl::nullopt));
+  auto dec = factory->MakeAudioDecoder({"isac", 16000, 1}, absl::nullopt);
+  ASSERT_NE(nullptr, dec);
+  EXPECT_EQ(16000, dec->SampleRateHz());
+}
+
+TEST(AudioDecoderFactoryTemplateTest, IsacFloat) {
+  auto factory = CreateAudioDecoderFactory<AudioDecoderIsacFloat>();
+  EXPECT_THAT(
+      factory->GetSupportedDecoders(),
+      ::testing::ElementsAre(
+          AudioCodecSpec{{"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}},
+          AudioCodecSpec{{"ISAC", 32000, 1}, {32000, 1, 56000, 10000, 56000}}));
+  EXPECT_FALSE(factory->IsSupportedDecoder({"isac", 16000, 2}));
+  EXPECT_TRUE(factory->IsSupportedDecoder({"isac", 16000, 1}));
+  EXPECT_TRUE(factory->IsSupportedDecoder({"isac", 32000, 1}));
+  EXPECT_EQ(nullptr,
+            factory->MakeAudioDecoder({"isac", 8000, 1}, absl::nullopt));
+  auto dec1 = factory->MakeAudioDecoder({"isac", 16000, 1}, absl::nullopt);
+  ASSERT_NE(nullptr, dec1);
+  EXPECT_EQ(16000, dec1->SampleRateHz());
+  auto dec2 = factory->MakeAudioDecoder({"isac", 32000, 1}, absl::nullopt);
+  ASSERT_NE(nullptr, dec2);
+  EXPECT_EQ(32000, dec2->SampleRateHz());
+}
+
 TEST(AudioDecoderFactoryTemplateTest, L16) {
   auto factory = CreateAudioDecoderFactory<AudioDecoderL16>();
   EXPECT_THAT(

@@ -16,6 +16,8 @@
 #include "api/audio_codecs/g711/audio_encoder_g711.h"
 #include "api/audio_codecs/g722/audio_encoder_g722.h"
 #include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
+#include "api/audio_codecs/isac/audio_encoder_isac_fix.h"
+#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
 #include "api/audio_codecs/opus/audio_encoder_opus.h"
 #include "test/gmock.h"
 #include "test/gtest.h"
@@ -178,6 +180,49 @@ TEST(AudioEncoderFactoryTemplateTest, Ilbc) {
   EXPECT_EQ(8000, enc->SampleRateHz());
 }

+TEST(AudioEncoderFactoryTemplateTest, IsacFix) {
+  auto factory = CreateAudioEncoderFactory<AudioEncoderIsacFix>();
+  EXPECT_THAT(factory->GetSupportedEncoders(),
+              ::testing::ElementsAre(AudioCodecSpec{
+                  {"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}}));
+  EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"isac", 16000, 2}));
+  EXPECT_EQ(AudioCodecInfo(16000, 1, 32000, 10000, 32000),
+            factory->QueryAudioEncoder({"isac", 16000, 1}));
+  EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"isac", 32000, 1}));
+  EXPECT_EQ(nullptr,
+            factory->MakeAudioEncoder(17, {"isac", 8000, 1}, absl::nullopt));
+  auto enc1 = factory->MakeAudioEncoder(17, {"isac", 16000, 1}, absl::nullopt);
+  ASSERT_NE(nullptr, enc1);
+  EXPECT_EQ(16000, enc1->SampleRateHz());
+  EXPECT_EQ(3u, enc1->Num10MsFramesInNextPacket());
+  auto enc2 = factory->MakeAudioEncoder(
+      17, {"isac", 16000, 1, {{"ptime", "60"}}}, absl::nullopt);
+  ASSERT_NE(nullptr, enc2);
+  EXPECT_EQ(6u, enc2->Num10MsFramesInNextPacket());
+}
+
+TEST(AudioEncoderFactoryTemplateTest, IsacFloat) {
+  auto factory = CreateAudioEncoderFactory<AudioEncoderIsacFloat>();
+  EXPECT_THAT(
+      factory->GetSupportedEncoders(),
+      ::testing::ElementsAre(
+          AudioCodecSpec{{"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}},
+          AudioCodecSpec{{"ISAC", 32000, 1}, {32000, 1, 56000, 10000, 56000}}));
+  EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"isac", 16000, 2}));
+  EXPECT_EQ(AudioCodecInfo(16000, 1, 32000, 10000, 32000),
+            factory->QueryAudioEncoder({"isac", 16000, 1}));
+  EXPECT_EQ(AudioCodecInfo(32000, 1, 56000, 10000, 56000),
+            factory->QueryAudioEncoder({"isac", 32000, 1}));
+  EXPECT_EQ(nullptr,
+            factory->MakeAudioEncoder(17, {"isac", 8000, 1}, absl::nullopt));
+  auto enc1 = factory->MakeAudioEncoder(17, {"isac", 16000, 1}, absl::nullopt);
+  ASSERT_NE(nullptr, enc1);
+  EXPECT_EQ(16000, enc1->SampleRateHz());
+  auto enc2 = factory->MakeAudioEncoder(17, {"isac", 32000, 1}, absl::nullopt);
+  ASSERT_NE(nullptr, enc2);
+  EXPECT_EQ(32000, enc2->SampleRateHz());
+}
+
 TEST(AudioEncoderFactoryTemplateTest, L16) {
   auto factory = CreateAudioEncoderFactory<AudioEncoderL16>();
   EXPECT_THAT(

@ -53,6 +53,7 @@ using webrtc::BitrateConstraints;
constexpr uint32_t kMaxUnsignaledRecvStreams = 4; constexpr uint32_t kMaxUnsignaledRecvStreams = 4;
const cricket::AudioCodec kPcmuCodec(0, "PCMU", 8000, 64000, 1); const cricket::AudioCodec kPcmuCodec(0, "PCMU", 8000, 64000, 1);
const cricket::AudioCodec kIsacCodec(103, "ISAC", 16000, 32000, 1);
const cricket::AudioCodec kOpusCodec(111, "opus", 48000, 32000, 2); const cricket::AudioCodec kOpusCodec(111, "opus", 48000, 32000, 2);
const cricket::AudioCodec kG722CodecVoE(9, "G722", 16000, 64000, 1); const cricket::AudioCodec kG722CodecVoE(9, "G722", 16000, 64000, 1);
const cricket::AudioCodec kG722CodecSdp(9, "G722", 8000, 64000, 1); const cricket::AudioCodec kG722CodecSdp(9, "G722", 8000, 64000, 1);
@ -851,7 +852,7 @@ TEST_P(WebRtcVoiceEngineTestFake, OpusSupportsTransportCc) {
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecs) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
EXPECT_TRUE(SetupChannel()); EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs.push_back(kTelephoneEventCodec1); parameters.codecs.push_back(kTelephoneEventCodec1);
parameters.codecs.push_back(kTelephoneEventCodec2); parameters.codecs.push_back(kTelephoneEventCodec2);
@ -862,7 +863,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map, EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
(ContainerEq<std::map<int, webrtc::SdpAudioFormat>>( (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
{{0, {"PCMU", 8000, 1}}, {{0, {"PCMU", 8000, 1}},
{106, {"OPUS", 48000, 2}}, {106, {"ISAC", 16000, 1}},
{126, {"telephone-event", 8000, 1}}, {126, {"telephone-event", 8000, 1}},
{107, {"telephone-event", 32000, 1}}}))); {107, {"telephone-event", 32000, 1}}})));
} }
@ -871,7 +872,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsUnsupportedCodec) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsUnsupportedCodec) {
EXPECT_TRUE(SetupChannel()); EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(cricket::AudioCodec(127, "XYZ", 32000, 0, 1)); parameters.codecs.push_back(cricket::AudioCodec(127, "XYZ", 32000, 0, 1));
EXPECT_FALSE(channel_->SetRecvParameters(parameters)); EXPECT_FALSE(channel_->SetRecvParameters(parameters));
} }
@ -880,9 +881,9 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsUnsupportedCodec) {
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsDuplicatePayloadType) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsDuplicatePayloadType) {
EXPECT_TRUE(SetupChannel()); EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kCn16000Codec); parameters.codecs.push_back(kCn16000Codec);
parameters.codecs[1].id = kOpusCodec.id; parameters.codecs[1].id = kIsacCodec.id;
EXPECT_FALSE(channel_->SetRecvParameters(parameters)); EXPECT_FALSE(channel_->SetRecvParameters(parameters));
} }
@ -890,27 +891,32 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsDuplicatePayloadType) {
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpusNoStereo) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpusNoStereo) {
EXPECT_TRUE(SetupChannel()); EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kOpusCodec);
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
EXPECT_TRUE(AddRecvStream(kSsrcX)); EXPECT_TRUE(AddRecvStream(kSsrcX));
EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map, EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
(ContainerEq<std::map<int, webrtc::SdpAudioFormat>>( (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
{{0, {"PCMU", 8000, 1}}, {111, {"opus", 48000, 2}}}))); {{0, {"PCMU", 8000, 1}},
{103, {"ISAC", 16000, 1}},
{111, {"opus", 48000, 2}}})));
} }
// Test that we can decode OPUS with stereo = 0. // Test that we can decode OPUS with stereo = 0.
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) {
EXPECT_TRUE(SetupChannel()); EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kOpusCodec);
parameters.codecs[1].params["stereo"] = "0"; parameters.codecs[2].params["stereo"] = "0";
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
EXPECT_TRUE(AddRecvStream(kSsrcX)); EXPECT_TRUE(AddRecvStream(kSsrcX));
EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map, EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
(ContainerEq<std::map<int, webrtc::SdpAudioFormat>>( (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
{{0, {"PCMU", 8000, 1}}, {{0, {"PCMU", 8000, 1}},
{103, {"ISAC", 16000, 1}},
{111, {"opus", 48000, 2, {{"stereo", "0"}}}}}))); {111, {"opus", 48000, 2, {{"stereo", "0"}}}}})));
} }
@ -918,14 +924,16 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) {
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) {
EXPECT_TRUE(SetupChannel()); EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kOpusCodec);
parameters.codecs[1].params["stereo"] = "1"; parameters.codecs[2].params["stereo"] = "1";
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
EXPECT_TRUE(AddRecvStream(kSsrcX)); EXPECT_TRUE(AddRecvStream(kSsrcX));
EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map, EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
(ContainerEq<std::map<int, webrtc::SdpAudioFormat>>( (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
{{0, {"PCMU", 8000, 1}}, {{0, {"PCMU", 8000, 1}},
{103, {"ISAC", 16000, 1}},
{111, {"opus", 48000, 2, {{"stereo", "1"}}}}}))); {111, {"opus", 48000, 2, {{"stereo", "1"}}}}})));
} }
@ -933,7 +941,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) {
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
EXPECT_TRUE(SetupChannel()); EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs.push_back(kTelephoneEventCodec1); parameters.codecs.push_back(kTelephoneEventCodec1);
parameters.codecs.push_back(kTelephoneEventCodec2); parameters.codecs.push_back(kTelephoneEventCodec2);
@ -945,7 +953,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
EXPECT_THAT(GetRecvStreamConfig(ssrc).decoder_map, EXPECT_THAT(GetRecvStreamConfig(ssrc).decoder_map,
(ContainerEq<std::map<int, webrtc::SdpAudioFormat>>( (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
{{0, {"PCMU", 8000, 1}}, {{0, {"PCMU", 8000, 1}},
{106, {"OPUS", 48000, 2}}, {106, {"ISAC", 16000, 1}},
{126, {"telephone-event", 8000, 1}}, {126, {"telephone-event", 8000, 1}},
{107, {"telephone-event", 32000, 1}}}))); {107, {"telephone-event", 32000, 1}}})));
} }
@ -954,20 +962,20 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsAfterAddingStreams) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsAfterAddingStreams) {
EXPECT_TRUE(SetupRecvStream()); EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs[0].id = 106; // collide with existing CN 32k parameters.codecs[0].id = 106; // collide with existing CN 32k
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
const auto& dm = GetRecvStreamConfig(kSsrcX).decoder_map; const auto& dm = GetRecvStreamConfig(kSsrcX).decoder_map;
ASSERT_EQ(1u, dm.count(106)); ASSERT_EQ(1u, dm.count(106));
EXPECT_EQ(webrtc::SdpAudioFormat("opus", 48000, 2), dm.at(106)); EXPECT_EQ(webrtc::SdpAudioFormat("isac", 16000, 1), dm.at(106));
} }
// Test that we can apply the same set of codecs again while playing. // Test that we can apply the same set of codecs again while playing.
TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) { TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) {
EXPECT_TRUE(SetupRecvStream()); EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kCn16000Codec); parameters.codecs.push_back(kCn16000Codec);
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
channel_->SetPlayout(true); channel_->SetPlayout(true);
@ -975,7 +983,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) {
// Remapping a payload type to a different codec should fail. // Remapping a payload type to a different codec should fail.
parameters.codecs[0] = kOpusCodec; parameters.codecs[0] = kOpusCodec;
parameters.codecs[0].id = kPcmuCodec.id; parameters.codecs[0].id = kIsacCodec.id;
EXPECT_FALSE(channel_->SetRecvParameters(parameters)); EXPECT_FALSE(channel_->SetRecvParameters(parameters));
EXPECT_TRUE(GetRecvStream(kSsrcX).started()); EXPECT_TRUE(GetRecvStream(kSsrcX).started());
} }
@ -984,7 +992,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) {
TEST_P(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) { TEST_P(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) {
EXPECT_TRUE(SetupRecvStream()); EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kCn16000Codec); parameters.codecs.push_back(kCn16000Codec);
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
channel_->SetPlayout(true); channel_->SetPlayout(true);
@ -999,7 +1007,7 @@ TEST_P(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) {
TEST_P(WebRtcVoiceEngineTestFake, ChangeRecvCodecPayloadType) { TEST_P(WebRtcVoiceEngineTestFake, ChangeRecvCodecPayloadType) {
EXPECT_TRUE(SetupRecvStream()); EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
++parameters.codecs[0].id; ++parameters.codecs[0].id;
@ -1027,6 +1035,9 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendBandwidthAuto) {
// value. autobw is enabled for the following tests because the target // value. autobw is enabled for the following tests because the target
// bitrate is <= 0. // bitrate is <= 0.
// ISAC, default bitrate == 32000.
TestMaxSendBandwidth(kIsacCodec, 0, true, 32000);
// PCMU, default bitrate == 64000. // PCMU, default bitrate == 64000.
TestMaxSendBandwidth(kPcmuCodec, -1, true, 64000); TestMaxSendBandwidth(kPcmuCodec, -1, true, 64000);
@ -1037,6 +1048,11 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendBandwidthAuto) {
TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCaller) { TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCaller) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
// ISAC, default bitrate == 32000.
TestMaxSendBandwidth(kIsacCodec, 16000, true, 16000);
// Rates above the max (56000) should be capped.
TestMaxSendBandwidth(kIsacCodec, 100000, true, 32000);
// opus, default bitrate == 64000. // opus, default bitrate == 64000.
TestMaxSendBandwidth(kOpusCodec, 96000, true, 96000); TestMaxSendBandwidth(kOpusCodec, 96000, true, 96000);
TestMaxSendBandwidth(kOpusCodec, 48000, true, 48000); TestMaxSendBandwidth(kOpusCodec, 48000, true, 48000);
@ -1248,13 +1264,13 @@ TEST_P(WebRtcVoiceEngineTestFake, RtpParametersArePerStream) {
TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersCodecs) { TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersCodecs) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
SetSendParameters(parameters); SetSendParameters(parameters);
webrtc::RtpParameters rtp_parameters = channel_->GetRtpSendParameters(kSsrcX); webrtc::RtpParameters rtp_parameters = channel_->GetRtpSendParameters(kSsrcX);
ASSERT_EQ(2u, rtp_parameters.codecs.size()); ASSERT_EQ(2u, rtp_parameters.codecs.size());
EXPECT_EQ(kOpusCodec.ToCodecParameters(), rtp_parameters.codecs[0]); EXPECT_EQ(kIsacCodec.ToCodecParameters(), rtp_parameters.codecs[0]);
EXPECT_EQ(kPcmuCodec.ToCodecParameters(), rtp_parameters.codecs[1]); EXPECT_EQ(kPcmuCodec.ToCodecParameters(), rtp_parameters.codecs[1]);
} }
@ -1294,7 +1310,7 @@ TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersSsrc) {
TEST_P(WebRtcVoiceEngineTestFake, SetAndGetRtpSendParameters) { TEST_P(WebRtcVoiceEngineTestFake, SetAndGetRtpSendParameters) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
SetSendParameters(parameters); SetSendParameters(parameters);
@ -1368,14 +1384,14 @@ TEST_P(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesBitratePriority) {
TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersCodecs) { TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersCodecs) {
EXPECT_TRUE(SetupRecvStream()); EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
webrtc::RtpParameters rtp_parameters = webrtc::RtpParameters rtp_parameters =
channel_->GetRtpReceiveParameters(kSsrcX); channel_->GetRtpReceiveParameters(kSsrcX);
ASSERT_EQ(2u, rtp_parameters.codecs.size()); ASSERT_EQ(2u, rtp_parameters.codecs.size());
EXPECT_EQ(kOpusCodec.ToCodecParameters(), rtp_parameters.codecs[0]); EXPECT_EQ(kIsacCodec.ToCodecParameters(), rtp_parameters.codecs[0]);
EXPECT_EQ(kPcmuCodec.ToCodecParameters(), rtp_parameters.codecs[1]); EXPECT_EQ(kPcmuCodec.ToCodecParameters(), rtp_parameters.codecs[1]);
} }
@ -1392,7 +1408,7 @@ TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersSsrc) {
TEST_P(WebRtcVoiceEngineTestFake, SetAndGetRtpReceiveParameters) { TEST_P(WebRtcVoiceEngineTestFake, SetAndGetRtpReceiveParameters) {
EXPECT_TRUE(SetupRecvStream()); EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
@ -1414,7 +1430,7 @@ TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersWithUnsignaledSsrc) {
// Call necessary methods to configure receiving a default stream as // Call necessary methods to configure receiving a default stream as
// soon as it arrives. // soon as it arrives.
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
@ -1446,7 +1462,7 @@ TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersWithUnsignaledSsrc) {
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecs) { TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecs) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs.push_back(kCn8000Codec); parameters.codecs.push_back(kCn8000Codec);
parameters.codecs[0].id = 96; parameters.codecs[0].id = 96;
@ -1455,7 +1471,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecs) {
const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(96, send_codec_spec.payload_type); EXPECT_EQ(96, send_codec_spec.payload_type);
EXPECT_EQ(22000, send_codec_spec.target_bitrate_bps); EXPECT_EQ(22000, send_codec_spec.target_bitrate_bps);
EXPECT_STRCASEEQ("OPUS", send_codec_spec.format.name.c_str()); EXPECT_STRCASEEQ("ISAC", send_codec_spec.format.name.c_str());
EXPECT_NE(send_codec_spec.format.clockrate_hz, 8000); EXPECT_NE(send_codec_spec.format.clockrate_hz, 8000);
EXPECT_EQ(absl::nullopt, send_codec_spec.cng_payload_type); EXPECT_EQ(absl::nullopt, send_codec_spec.cng_payload_type);
EXPECT_FALSE(channel_->CanInsertDtmf()); EXPECT_FALSE(channel_->CanInsertDtmf());
@ -1551,7 +1567,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedFmtpAmountOfRedundancy) {
TEST_P(WebRtcVoiceEngineTestFake, DontRecreateSendStream) { TEST_P(WebRtcVoiceEngineTestFake, DontRecreateSendStream) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs.push_back(kCn8000Codec); parameters.codecs.push_back(kCn8000Codec);
parameters.codecs[0].id = 96; parameters.codecs[0].id = 96;
@ -1824,7 +1840,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecDisableNackRecvStreams) {
TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamEnableNack) { TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamEnableNack) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kCn16000Codec); parameters.codecs.push_back(kCn16000Codec);
parameters.codecs[0].AddFeedbackParam(cricket::FeedbackParam( parameters.codecs[0].AddFeedbackParam(cricket::FeedbackParam(
cricket::kRtcpFbParamNack, cricket::kParamValueEmpty)); cricket::kRtcpFbParamNack, cricket::kParamValueEmpty));
@ -1844,7 +1860,7 @@ TEST_P(WebRtcVoiceEngineTestFake, TransportCcCanBeEnabledAndDisabled) {
SetSendParameters(send_parameters); SetSendParameters(send_parameters);
cricket::AudioRecvParameters recv_parameters; cricket::AudioRecvParameters recv_parameters;
recv_parameters.codecs.push_back(kOpusCodec); recv_parameters.codecs.push_back(kIsacCodec);
EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters)); EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters));
EXPECT_TRUE(AddRecvStream(kSsrcX)); EXPECT_TRUE(AddRecvStream(kSsrcX));
ASSERT_TRUE(call_.GetAudioReceiveStream(kSsrcX) != nullptr); ASSERT_TRUE(call_.GetAudioReceiveStream(kSsrcX) != nullptr);
@ -1856,8 +1872,8 @@ TEST_P(WebRtcVoiceEngineTestFake, TransportCcCanBeEnabledAndDisabled) {
EXPECT_TRUE(call_.GetAudioReceiveStream(kSsrcX)->transport_cc()); EXPECT_TRUE(call_.GetAudioReceiveStream(kSsrcX)->transport_cc());
} }
// Test that we can switch back and forth between Opus and PCMU with CN. // Test that we can switch back and forth between Opus and ISAC with CN.
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsOpusPcmuSwitching) { TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsIsacOpusSwitching) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters opus_parameters; cricket::AudioSendParameters opus_parameters;
@ -1869,15 +1885,15 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsOpusPcmuSwitching) {
EXPECT_STRCASEEQ("opus", spec.format.name.c_str()); EXPECT_STRCASEEQ("opus", spec.format.name.c_str());
} }
cricket::AudioSendParameters pcmu_parameters; cricket::AudioSendParameters isac_parameters;
pcmu_parameters.codecs.push_back(kPcmuCodec); isac_parameters.codecs.push_back(kIsacCodec);
pcmu_parameters.codecs.push_back(kCn16000Codec); isac_parameters.codecs.push_back(kCn16000Codec);
pcmu_parameters.codecs.push_back(kOpusCodec); isac_parameters.codecs.push_back(kOpusCodec);
SetSendParameters(pcmu_parameters); SetSendParameters(isac_parameters);
{ {
const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(0, spec.payload_type); EXPECT_EQ(103, spec.payload_type);
EXPECT_STRCASEEQ("PCMU", spec.format.name.c_str()); EXPECT_STRCASEEQ("ISAC", spec.format.name.c_str());
} }
SetSendParameters(opus_parameters); SetSendParameters(opus_parameters);
@ -1892,7 +1908,33 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsOpusPcmuSwitching) {
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsBitrate) { TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsBitrate) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kIsacCodec); // bitrate == 32000
SetSendParameters(parameters);
{
const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(103, spec.payload_type);
EXPECT_STRCASEEQ("ISAC", spec.format.name.c_str());
EXPECT_EQ(32000, spec.target_bitrate_bps);
}
parameters.codecs[0].bitrate = 0; // bitrate == default
SetSendParameters(parameters);
{
const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(103, spec.payload_type);
EXPECT_STRCASEEQ("ISAC", spec.format.name.c_str());
EXPECT_EQ(32000, spec.target_bitrate_bps);
}
parameters.codecs[0].bitrate = 28000; // bitrate == 28000
SetSendParameters(parameters);
{
const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(103, spec.payload_type);
EXPECT_STRCASEEQ("ISAC", spec.format.name.c_str());
EXPECT_EQ(28000, spec.target_bitrate_bps);
}
parameters.codecs[0] = kPcmuCodec; // bitrate == 64000
SetSendParameters(parameters); SetSendParameters(parameters);
{ {
const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
@ -1934,14 +1976,14 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFOnTop) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kTelephoneEventCodec1); parameters.codecs.push_back(kTelephoneEventCodec1);
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs[0].id = 98; // DTMF parameters.codecs[0].id = 98; // DTMF
parameters.codecs[1].id = 96; parameters.codecs[1].id = 96;
SetSendParameters(parameters); SetSendParameters(parameters);
const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(96, spec.payload_type); EXPECT_EQ(96, spec.payload_type);
EXPECT_STRCASEEQ("OPUS", spec.format.name.c_str()); EXPECT_STRCASEEQ("ISAC", spec.format.name.c_str());
SetSend(true); SetSend(true);
EXPECT_TRUE(channel_->CanInsertDtmf()); EXPECT_TRUE(channel_->CanInsertDtmf());
} }
@ -1967,7 +2009,7 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFPayloadTypeOutOfRange) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kTelephoneEventCodec2); parameters.codecs.push_back(kTelephoneEventCodec2);
parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kIsacCodec);
parameters.codecs[0].id = 0; // DTMF parameters.codecs[0].id = 0; // DTMF
parameters.codecs[1].id = 96; parameters.codecs[1].id = 96;
SetSendParameters(parameters); SetSendParameters(parameters);
@ -1989,13 +2031,15 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFPayloadTypeOutOfRange) {
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) { TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
- parameters.codecs.push_back(kCn8000Codec);
+ parameters.codecs.push_back(kCn16000Codec);
parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
- parameters.codecs[0].id = 98;  // narrowband CN
+ parameters.codecs[0].id = 98;  // wideband CN
parameters.codecs[1].id = 96;
SetSendParameters(parameters); SetSendParameters(parameters);
const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
- EXPECT_EQ(0, send_codec_spec.payload_type);
- EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(96, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("ISAC", send_codec_spec.format.name.c_str());
EXPECT_EQ(98, send_codec_spec.cng_payload_type); EXPECT_EQ(98, send_codec_spec.cng_payload_type);
} }
@ -2003,17 +2047,19 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) {
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) { TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
// TODO(juberti): cn 32000
parameters.codecs.push_back(kCn16000Codec); parameters.codecs.push_back(kCn16000Codec);
parameters.codecs.push_back(kCn8000Codec); parameters.codecs.push_back(kCn8000Codec);
parameters.codecs.push_back(kTelephoneEventCodec1); parameters.codecs.push_back(kTelephoneEventCodec1);
parameters.codecs[0].id = 96; parameters.codecs[0].id = 96;
- parameters.codecs[2].id = 97;  // narrowband CN
- parameters.codecs[3].id = 98;  // DTMF
+ parameters.codecs[2].id = 97;  // wideband CN
+ parameters.codecs[4].id = 98;  // DTMF
SetSendParameters(parameters); SetSendParameters(parameters);
const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(96, send_codec_spec.payload_type); EXPECT_EQ(96, send_codec_spec.payload_type);
EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str()); EXPECT_STRCASEEQ("ISAC", send_codec_spec.format.name.c_str());
EXPECT_EQ(1u, send_codec_spec.format.num_channels); EXPECT_EQ(1u, send_codec_spec.format.num_channels);
EXPECT_EQ(97, send_codec_spec.cng_payload_type); EXPECT_EQ(97, send_codec_spec.cng_payload_type);
SetSend(true); SetSend(true);
@ -2024,20 +2070,22 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) {
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) { TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) {
EXPECT_TRUE(SetupChannel()); EXPECT_TRUE(SetupChannel());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
// TODO(juberti): cn 32000
parameters.codecs.push_back(kCn16000Codec); parameters.codecs.push_back(kCn16000Codec);
parameters.codecs.push_back(kCn8000Codec); parameters.codecs.push_back(kCn8000Codec);
parameters.codecs.push_back(kTelephoneEventCodec2); parameters.codecs.push_back(kTelephoneEventCodec2);
parameters.codecs[0].id = 96; parameters.codecs[0].id = 96;
- parameters.codecs[2].id = 97;  // narrowband CN
- parameters.codecs[3].id = 98;  // DTMF
+ parameters.codecs[2].id = 97;  // wideband CN
+ parameters.codecs[4].id = 98;  // DTMF
SetSendParameters(parameters); SetSendParameters(parameters);
EXPECT_TRUE( EXPECT_TRUE(
channel_->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrcX))); channel_->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrcX)));
const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(96, send_codec_spec.payload_type); EXPECT_EQ(96, send_codec_spec.payload_type);
EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str()); EXPECT_STRCASEEQ("ISAC", send_codec_spec.format.name.c_str());
EXPECT_EQ(1u, send_codec_spec.format.num_channels); EXPECT_EQ(1u, send_codec_spec.format.num_channels);
EXPECT_EQ(97, send_codec_spec.cng_payload_type); EXPECT_EQ(97, send_codec_spec.cng_payload_type);
SetSend(true); SetSend(true);
@ -2049,11 +2097,20 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) {
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) { TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
- // Set PCMU(8K) and CN(16K). VAD should not be activated.
- parameters.codecs.push_back(kPcmuCodec);
+ // Set ISAC(16K) and CN(16K). VAD should be activated.
+ parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kCn16000Codec); parameters.codecs.push_back(kCn16000Codec);
parameters.codecs[1].id = 97; parameters.codecs[1].id = 97;
SetSendParameters(parameters); SetSendParameters(parameters);
{
const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_STRCASEEQ("ISAC", send_codec_spec.format.name.c_str());
EXPECT_EQ(1u, send_codec_spec.format.num_channels);
EXPECT_EQ(97, send_codec_spec.cng_payload_type);
}
// Set PCMU(8K) and CN(16K). VAD should not be activated.
parameters.codecs[0] = kPcmuCodec;
SetSendParameters(parameters);
{ {
const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str()); EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
@ -2068,12 +2125,12 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
EXPECT_EQ(1u, send_codec_spec.format.num_channels); EXPECT_EQ(1u, send_codec_spec.format.num_channels);
EXPECT_EQ(13, send_codec_spec.cng_payload_type); EXPECT_EQ(13, send_codec_spec.cng_payload_type);
} }
- // Set OPUS(48K) and CN(8K). VAD should not be activated.
- parameters.codecs[0] = kOpusCodec;
+ // Set ISAC(16K) and CN(8K). VAD should not be activated.
+ parameters.codecs[0] = kIsacCodec;
SetSendParameters(parameters); SetSendParameters(parameters);
{ {
const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_STRCASEEQ("OPUS", send_codec_spec.format.name.c_str()); EXPECT_STRCASEEQ("ISAC", send_codec_spec.format.name.c_str());
EXPECT_EQ(absl::nullopt, send_codec_spec.cng_payload_type); EXPECT_EQ(absl::nullopt, send_codec_spec.cng_payload_type);
} }
} }
@ -2082,18 +2139,19 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCaseInsensitive) { TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCaseInsensitive) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
parameters.codecs.push_back(kCn16000Codec); parameters.codecs.push_back(kCn16000Codec);
parameters.codecs.push_back(kCn8000Codec); parameters.codecs.push_back(kCn8000Codec);
parameters.codecs.push_back(kTelephoneEventCodec1); parameters.codecs.push_back(kTelephoneEventCodec1);
parameters.codecs[0].name = "PcMu"; parameters.codecs[0].name = "iSaC";
parameters.codecs[0].id = 96; parameters.codecs[0].id = 96;
- parameters.codecs[2].id = 97;  // narrowband CN
- parameters.codecs[3].id = 98;  // DTMF
+ parameters.codecs[2].id = 97;  // wideband CN
+ parameters.codecs[4].id = 98;  // DTMF
SetSendParameters(parameters); SetSendParameters(parameters);
const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
EXPECT_EQ(96, send_codec_spec.payload_type); EXPECT_EQ(96, send_codec_spec.payload_type);
EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str()); EXPECT_STRCASEEQ("ISAC", send_codec_spec.format.name.c_str());
EXPECT_EQ(1u, send_codec_spec.format.num_channels); EXPECT_EQ(1u, send_codec_spec.format.num_channels);
EXPECT_EQ(97, send_codec_spec.cng_payload_type); EXPECT_EQ(97, send_codec_spec.cng_payload_type);
SetSend(true); SetSend(true);
@ -2234,25 +2292,24 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
} }
cricket::AudioSendParameters parameters; cricket::AudioSendParameters parameters;
- // Set PCMU and CN(8K). VAD should be activated.
- parameters.codecs.push_back(kPcmuCodec);
- parameters.codecs.push_back(kCn8000Codec);
+ // Set ISAC(16K) and CN(16K). VAD should be activated.
+ parameters.codecs.push_back(kIsacCodec);
+ parameters.codecs.push_back(kCn16000Codec);
parameters.codecs[1].id = 97; parameters.codecs[1].id = 97;
SetSendParameters(parameters); SetSendParameters(parameters);
- // Verify PCMU and VAD are correctly configured on all send channels.
+ // Verify ISAC and VAD are correctly configured on all send channels.
for (uint32_t ssrc : kSsrcs4) { for (uint32_t ssrc : kSsrcs4) {
ASSERT_TRUE(call_.GetAudioSendStream(ssrc) != nullptr); ASSERT_TRUE(call_.GetAudioSendStream(ssrc) != nullptr);
const auto& send_codec_spec = const auto& send_codec_spec =
*call_.GetAudioSendStream(ssrc)->GetConfig().send_codec_spec; *call_.GetAudioSendStream(ssrc)->GetConfig().send_codec_spec;
EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str()); EXPECT_STRCASEEQ("ISAC", send_codec_spec.format.name.c_str());
EXPECT_EQ(1u, send_codec_spec.format.num_channels); EXPECT_EQ(1u, send_codec_spec.format.num_channels);
EXPECT_EQ(97, send_codec_spec.cng_payload_type); EXPECT_EQ(97, send_codec_spec.cng_payload_type);
} }
// Change to PCMU(8K) and CN(16K). // Change to PCMU(8K) and CN(16K).
parameters.codecs[0] = kPcmuCodec; parameters.codecs[0] = kPcmuCodec;
parameters.codecs[1] = kCn16000Codec;
SetSendParameters(parameters); SetSendParameters(parameters);
for (uint32_t ssrc : kSsrcs4) { for (uint32_t ssrc : kSsrcs4) {
ASSERT_TRUE(call_.GetAudioSendStream(ssrc) != nullptr); ASSERT_TRUE(call_.GetAudioSendStream(ssrc) != nullptr);
@ -2802,13 +2859,13 @@ TEST_P(WebRtcVoiceEngineTestFake, AddRecvStream) {
TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamUnsupportedCodec) { TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamUnsupportedCodec) {
EXPECT_TRUE(SetupSendStream()); EXPECT_TRUE(SetupSendStream());
cricket::AudioRecvParameters parameters; cricket::AudioRecvParameters parameters;
- parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kIsacCodec);
parameters.codecs.push_back(kPcmuCodec); parameters.codecs.push_back(kPcmuCodec);
EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_TRUE(channel_->SetRecvParameters(parameters));
EXPECT_TRUE(AddRecvStream(kSsrcX)); EXPECT_TRUE(AddRecvStream(kSsrcX));
EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map, EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
(ContainerEq<std::map<int, webrtc::SdpAudioFormat>>( (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
{{0, {"PCMU", 8000, 1}}, {111, {"OPUS", 48000, 2}}}))); {{0, {"PCMU", 8000, 1}}, {103, {"ISAC", 16000, 1}}})));
} }
// Test that we properly clean up any streams that were added, even if // Test that we properly clean up any streams that were added, even if
@ -3609,7 +3666,7 @@ TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) {
} }
} }
- // Verify the payload id of common audio codecs, including CN and G722.
+ // Verify the payload id of common audio codecs, including CN, ISAC, and G722.
TEST(WebRtcVoiceEngineTest, HasCorrectPayloadTypeMapping) { TEST(WebRtcVoiceEngineTest, HasCorrectPayloadTypeMapping) {
for (bool use_null_apm : {false, true}) { for (bool use_null_apm : {false, true}) {
std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory = std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
@ -3636,6 +3693,10 @@ TEST(WebRtcVoiceEngineTest, HasCorrectPayloadTypeMapping) {
EXPECT_EQ(105, codec.id); EXPECT_EQ(105, codec.id);
} else if (is_codec("CN", 32000)) { } else if (is_codec("CN", 32000)) {
EXPECT_EQ(106, codec.id); EXPECT_EQ(106, codec.id);
} else if (is_codec("ISAC", 16000)) {
EXPECT_EQ(103, codec.id);
} else if (is_codec("ISAC", 32000)) {
EXPECT_EQ(104, codec.id);
} else if (is_codec("G722", 8000)) { } else if (is_codec("G722", 8000)) {
EXPECT_EQ(9, codec.id); EXPECT_EQ(9, codec.id);
} else if (is_codec("telephone-event", 8000)) { } else if (is_codec("telephone-event", 8000)) {
@ -381,8 +381,50 @@ rtc_library("ilbc_c") {
absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ]
} }
rtc_source_set("isac_common") {
poisonous = [ "audio_codecs" ]
sources = [
"codecs/isac/audio_decoder_isac_t.h",
"codecs/isac/audio_decoder_isac_t_impl.h",
"codecs/isac/audio_encoder_isac_t.h",
"codecs/isac/audio_encoder_isac_t_impl.h",
]
deps = [
":isac_bwinfo",
"../../api:scoped_refptr",
"../../api/audio_codecs:audio_codecs_api",
"../../api/units:time_delta",
"../../rtc_base:checks",
"../../rtc_base:safe_minmax",
"../../system_wrappers:field_trial",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("isac") {
visibility += [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"codecs/isac/main/include/audio_decoder_isac.h",
"codecs/isac/main/include/audio_encoder_isac.h",
"codecs/isac/main/source/audio_decoder_isac.cc",
"codecs/isac/main/source/audio_encoder_isac.cc",
]
deps = [
":isac_common",
"../../api/audio_codecs:audio_codecs_api",
]
public_deps = [ ":isac_c" ] # no-presubmit-check TODO(webrtc:8603)
}
rtc_source_set("isac_bwinfo") {
sources = [ "codecs/isac/bandwidth_info.h" ]
deps = []
}
rtc_library("isac_vad") { rtc_library("isac_vad") {
visibility += [ "../audio_processing/vad:*" ] visibility += webrtc_default_visibility
sources = [ sources = [
"codecs/isac/main/source/filter_functions.c", "codecs/isac/main/source/filter_functions.c",
"codecs/isac/main/source/filter_functions.h", "codecs/isac/main/source/filter_functions.h",
@ -405,9 +447,247 @@ rtc_library("isac_vad") {
] ]
} }
rtc_source_set("isac_bwinfo") { rtc_library("isac_c") {
sources = [ "codecs/isac/bandwidth_info.h" ] poisonous = [ "audio_codecs" ]
deps = [] sources = [
"codecs/isac/main/include/isac.h",
"codecs/isac/main/source/arith_routines.c",
"codecs/isac/main/source/arith_routines.h",
"codecs/isac/main/source/arith_routines_hist.c",
"codecs/isac/main/source/arith_routines_logist.c",
"codecs/isac/main/source/bandwidth_estimator.c",
"codecs/isac/main/source/bandwidth_estimator.h",
"codecs/isac/main/source/codec.h",
"codecs/isac/main/source/crc.c",
"codecs/isac/main/source/crc.h",
"codecs/isac/main/source/decode.c",
"codecs/isac/main/source/decode_bwe.c",
"codecs/isac/main/source/encode.c",
"codecs/isac/main/source/encode_lpc_swb.c",
"codecs/isac/main/source/encode_lpc_swb.h",
"codecs/isac/main/source/entropy_coding.c",
"codecs/isac/main/source/entropy_coding.h",
"codecs/isac/main/source/filterbanks.c",
"codecs/isac/main/source/intialize.c",
"codecs/isac/main/source/isac.c",
"codecs/isac/main/source/isac_float_type.h",
"codecs/isac/main/source/lattice.c",
"codecs/isac/main/source/lpc_analysis.c",
"codecs/isac/main/source/lpc_analysis.h",
"codecs/isac/main/source/lpc_gain_swb_tables.c",
"codecs/isac/main/source/lpc_gain_swb_tables.h",
"codecs/isac/main/source/lpc_shape_swb12_tables.c",
"codecs/isac/main/source/lpc_shape_swb12_tables.h",
"codecs/isac/main/source/lpc_shape_swb16_tables.c",
"codecs/isac/main/source/lpc_shape_swb16_tables.h",
"codecs/isac/main/source/lpc_tables.c",
"codecs/isac/main/source/lpc_tables.h",
"codecs/isac/main/source/pitch_gain_tables.c",
"codecs/isac/main/source/pitch_gain_tables.h",
"codecs/isac/main/source/pitch_lag_tables.c",
"codecs/isac/main/source/pitch_lag_tables.h",
"codecs/isac/main/source/spectrum_ar_model_tables.c",
"codecs/isac/main/source/spectrum_ar_model_tables.h",
"codecs/isac/main/source/transform.c",
]
if (is_linux || is_chromeos) {
libs = [ "m" ]
}
deps = [
":isac_bwinfo",
":isac_vad",
"../../common_audio",
"../../common_audio:common_audio_c",
"../../rtc_base:checks",
"../../rtc_base:compile_assert_c",
"../../rtc_base/system:arch",
"../third_party/fft",
]
}
rtc_library("isac_fix") {
visibility += [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"codecs/isac/fix/source/audio_decoder_isacfix.cc",
"codecs/isac/fix/source/audio_encoder_isacfix.cc",
]
deps = [
":isac_common",
"../../api/audio_codecs:audio_codecs_api",
"../../common_audio",
"../../system_wrappers",
]
public_deps = [ ":isac_fix_c" ] # no-presubmit-check TODO(webrtc:8603)
if (rtc_build_with_neon) {
deps += [ ":isac_neon" ]
}
}
rtc_library("isac_fix_common") {
poisonous = [ "audio_codecs" ]
sources = [
"codecs/isac/fix/source/codec.h",
"codecs/isac/fix/source/entropy_coding.h",
"codecs/isac/fix/source/fft.c",
"codecs/isac/fix/source/fft.h",
"codecs/isac/fix/source/filterbank_internal.h",
"codecs/isac/fix/source/settings.h",
"codecs/isac/fix/source/structs.h",
"codecs/isac/fix/source/transform_tables.c",
]
deps = [
":isac_bwinfo",
"../../common_audio",
"../../common_audio:common_audio_c",
]
}
rtc_source_set("isac_fix_c_arm_asm") {
poisonous = [ "audio_codecs" ]
sources = []
if (current_cpu == "arm" && arm_version >= 7) {
sources += [
"codecs/isac/fix/source/lattice_armv7.S",
"codecs/isac/fix/source/pitch_filter_armv6.S",
]
deps = [
":isac_fix_common",
"../../rtc_base/system:asm_defines",
]
}
}
rtc_library("isac_fix_c") {
poisonous = [ "audio_codecs" ]
sources = [
"codecs/isac/fix/include/audio_decoder_isacfix.h",
"codecs/isac/fix/include/audio_encoder_isacfix.h",
"codecs/isac/fix/include/isacfix.h",
"codecs/isac/fix/source/arith_routines.c",
"codecs/isac/fix/source/arith_routines_hist.c",
"codecs/isac/fix/source/arith_routines_logist.c",
"codecs/isac/fix/source/arith_routins.h",
"codecs/isac/fix/source/bandwidth_estimator.c",
"codecs/isac/fix/source/bandwidth_estimator.h",
"codecs/isac/fix/source/decode.c",
"codecs/isac/fix/source/decode_bwe.c",
"codecs/isac/fix/source/decode_plc.c",
"codecs/isac/fix/source/encode.c",
"codecs/isac/fix/source/entropy_coding.c",
"codecs/isac/fix/source/filterbank_tables.c",
"codecs/isac/fix/source/filterbank_tables.h",
"codecs/isac/fix/source/filterbanks.c",
"codecs/isac/fix/source/filters.c",
"codecs/isac/fix/source/initialize.c",
"codecs/isac/fix/source/isac_fix_type.h",
"codecs/isac/fix/source/isacfix.c",
"codecs/isac/fix/source/lattice.c",
"codecs/isac/fix/source/lattice_c.c",
"codecs/isac/fix/source/lpc_masking_model.c",
"codecs/isac/fix/source/lpc_masking_model.h",
"codecs/isac/fix/source/lpc_tables.c",
"codecs/isac/fix/source/lpc_tables.h",
"codecs/isac/fix/source/pitch_estimator.c",
"codecs/isac/fix/source/pitch_estimator.h",
"codecs/isac/fix/source/pitch_estimator_c.c",
"codecs/isac/fix/source/pitch_filter.c",
"codecs/isac/fix/source/pitch_filter_c.c",
"codecs/isac/fix/source/pitch_gain_tables.c",
"codecs/isac/fix/source/pitch_gain_tables.h",
"codecs/isac/fix/source/pitch_lag_tables.c",
"codecs/isac/fix/source/pitch_lag_tables.h",
"codecs/isac/fix/source/spectrum_ar_model_tables.c",
"codecs/isac/fix/source/spectrum_ar_model_tables.h",
"codecs/isac/fix/source/transform.c",
]
deps = [
":isac_bwinfo",
":isac_common",
":isac_fix_common",
"../../api/audio_codecs:audio_codecs_api",
"../../common_audio",
"../../common_audio:common_audio_c",
"../../rtc_base:checks",
"../../rtc_base:compile_assert_c",
"../../rtc_base:sanitizer",
"../../system_wrappers",
"../third_party/fft",
]
if (rtc_build_with_neon) {
deps += [ ":isac_neon" ]
# TODO(bugs.webrtc.org/9579): Consider moving the usage of NEON from
# pitch_estimator_c.c into the "isac_neon" target and delete this flag:
if (current_cpu != "arm64") {
suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
cflags = [ "-mfpu=neon" ]
}
}
if (current_cpu == "arm" && arm_version >= 7) {
sources -= [
"codecs/isac/fix/source/lattice_c.c",
"codecs/isac/fix/source/pitch_filter_c.c",
]
deps += [ ":isac_fix_c_arm_asm" ]
}
if (current_cpu == "mipsel") {
sources += [
"codecs/isac/fix/source/entropy_coding_mips.c",
"codecs/isac/fix/source/filters_mips.c",
"codecs/isac/fix/source/lattice_mips.c",
"codecs/isac/fix/source/pitch_estimator_mips.c",
"codecs/isac/fix/source/transform_mips.c",
]
sources -= [
"codecs/isac/fix/source/lattice_c.c",
"codecs/isac/fix/source/pitch_estimator_c.c",
]
if (mips_dsp_rev > 0) {
sources += [ "codecs/isac/fix/source/filterbanks_mips.c" ]
}
if (mips_dsp_rev > 1) {
sources += [
"codecs/isac/fix/source/lpc_masking_model_mips.c",
"codecs/isac/fix/source/pitch_filter_mips.c",
]
sources -= [ "codecs/isac/fix/source/pitch_filter_c.c" ]
}
}
}
if (rtc_build_with_neon) {
rtc_library("isac_neon") {
poisonous = [ "audio_codecs" ]
sources = [
"codecs/isac/fix/source/entropy_coding_neon.c",
"codecs/isac/fix/source/filterbanks_neon.c",
"codecs/isac/fix/source/filters_neon.c",
"codecs/isac/fix/source/lattice_neon.c",
"codecs/isac/fix/source/transform_neon.c",
]
if (current_cpu != "arm64") {
# Enable compilation for the NEON instruction set.
suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
cflags = [ "-mfpu=neon" ]
}
deps = [
":isac_fix_common",
"../../common_audio",
"../../common_audio:common_audio_c",
"../../rtc_base:checks",
]
}
} }
rtc_library("pcm16b") { rtc_library("pcm16b") {
@ -1011,6 +1291,11 @@ if (rtc_include_tests) {
if (rtc_include_opus) { if (rtc_include_opus) {
audio_coding_deps += [ ":webrtc_opus" ] audio_coding_deps += [ ":webrtc_opus" ]
} }
if (current_cpu == "arm") {
audio_coding_deps += [ ":isac_fix" ]
} else {
audio_coding_deps += [ ":isac" ]
}
if (!build_with_mozilla && !build_with_chromium) { if (!build_with_mozilla && !build_with_chromium) {
audio_coding_deps += [ ":red" ] audio_coding_deps += [ ":red" ]
} }
@ -1042,7 +1327,11 @@ if (rtc_include_tests) {
":g711_test", ":g711_test",
":g722_test", ":g722_test",
":ilbc_test", ":ilbc_test",
":isac_api_test",
":isac_switch_samprate_test",
":isac_test",
":neteq_ilbc_quality_test", ":neteq_ilbc_quality_test",
":neteq_isac_quality_test",
":neteq_opus_quality_test", ":neteq_opus_quality_test",
":neteq_pcm16b_quality_test", ":neteq_pcm16b_quality_test",
":neteq_pcmu_quality_test", ":neteq_pcmu_quality_test",
@ -1082,6 +1371,8 @@ if (rtc_include_tests) {
"test/Tester.cc", "test/Tester.cc",
"test/TwoWayCommunication.cc", "test/TwoWayCommunication.cc",
"test/TwoWayCommunication.h", "test/TwoWayCommunication.h",
"test/iSACTest.cc",
"test/iSACTest.h",
"test/target_delay_unittest.cc", "test/target_delay_unittest.cc",
] ]
deps = [ deps = [
@ -1106,6 +1397,8 @@ if (rtc_include_tests) {
"../../api/audio_codecs/g722:audio_encoder_g722", "../../api/audio_codecs/g722:audio_encoder_g722",
"../../api/audio_codecs/ilbc:audio_decoder_ilbc", "../../api/audio_codecs/ilbc:audio_decoder_ilbc",
"../../api/audio_codecs/ilbc:audio_encoder_ilbc", "../../api/audio_codecs/ilbc:audio_encoder_ilbc",
"../../api/audio_codecs/isac:audio_decoder_isac_float",
"../../api/audio_codecs/isac:audio_encoder_isac_float",
"../../api/audio_codecs/opus:audio_decoder_opus", "../../api/audio_codecs/opus:audio_decoder_opus",
"../../api/audio_codecs/opus:audio_encoder_opus", "../../api/audio_codecs/opus:audio_encoder_opus",
"../../common_audio", "../../common_audio",
@ -1220,6 +1513,8 @@ if (rtc_include_tests) {
deps = [ deps = [
":ilbc", ":ilbc",
":isac",
":isac_fix",
":neteq", ":neteq",
":neteq_input_audio_tools", ":neteq_input_audio_tools",
":neteq_tools", ":neteq_tools",
@ -1324,10 +1619,12 @@ if (rtc_include_tests) {
testonly = true testonly = true
defines = [] defines = []
deps = [ deps = [
":isac_fix_common",
"../../rtc_base:macromagic", "../../rtc_base:macromagic",
"../../test:fileutils", "../../test:fileutils",
] ]
sources = [ sources = [
"codecs/isac/fix/test/isac_speed_test.cc",
"codecs/opus/opus_speed_test.cc", "codecs/opus/opus_speed_test.cc",
"codecs/tools/audio_codec_speed_test.cc", "codecs/tools/audio_codec_speed_test.cc",
"codecs/tools/audio_codec_speed_test.h", "codecs/tools/audio_codec_speed_test.h",
@ -1350,6 +1647,7 @@ if (rtc_include_tests) {
} }
deps += [ deps += [
":isac_fix",
":webrtc_opus", ":webrtc_opus",
"../../rtc_base:checks", "../../rtc_base:checks",
"../../test:test_main", "../../test:test_main",
@ -1425,6 +1723,7 @@ if (rtc_include_tests) {
"../../api/audio_codecs/g711:audio_encoder_g711", "../../api/audio_codecs/g711:audio_encoder_g711",
"../../api/audio_codecs/g722:audio_encoder_g722", "../../api/audio_codecs/g722:audio_encoder_g722",
"../../api/audio_codecs/ilbc:audio_encoder_ilbc", "../../api/audio_codecs/ilbc:audio_encoder_ilbc",
"../../api/audio_codecs/isac:audio_encoder_isac",
"../../api/audio_codecs/opus:audio_encoder_opus", "../../api/audio_codecs/opus:audio_encoder_opus",
"../../rtc_base:safe_conversions", "../../rtc_base:safe_conversions",
"//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/flags:flag",
@ -1533,6 +1832,21 @@ if (rtc_include_tests) {
] ]
} }
rtc_executable("neteq_isac_quality_test") {
testonly = true
sources = [ "neteq/test/neteq_isac_quality_test.cc" ]
deps = [
":isac_fix",
":neteq",
":neteq_quality_test_support",
"../../test:test_main",
"//testing/gtest",
"//third_party/abseil-cpp/absl/flags:flag",
]
}
rtc_executable("neteq_pcmu_quality_test") { rtc_executable("neteq_pcmu_quality_test") {
testonly = true testonly = true
@ -1570,6 +1884,28 @@ if (rtc_include_tests) {
} }
} }
rtc_library("isac_test_util") {
testonly = true
sources = [
"codecs/isac/main/util/utility.c",
"codecs/isac/main/util/utility.h",
]
}
if (!build_with_chromium) {
rtc_executable("isac_test") {
testonly = true
sources = [ "codecs/isac/main/test/simpleKenny.c" ]
deps = [
":isac",
":isac_test_util",
"../../rtc_base:macromagic",
]
}
}
rtc_executable("g711_test") { rtc_executable("g711_test") {
testonly = true testonly = true
@ -1587,6 +1923,32 @@ if (rtc_include_tests) {
} }
if (!build_with_chromium) { if (!build_with_chromium) {
rtc_executable("isac_api_test") {
testonly = true
sources = [ "codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc" ]
deps = [
":isac",
":isac_test_util",
"../../rtc_base:macromagic",
]
}
rtc_executable("isac_switch_samprate_test") {
testonly = true
sources =
[ "codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc" ]
deps = [
":isac",
":isac_test_util",
"../../common_audio",
"../../common_audio:common_audio_c",
]
}
rtc_executable("ilbc_test") { rtc_executable("ilbc_test") {
testonly = true testonly = true
@ -1635,6 +1997,13 @@ if (rtc_include_tests) {
"codecs/cng/audio_encoder_cng_unittest.cc", "codecs/cng/audio_encoder_cng_unittest.cc",
"codecs/cng/cng_unittest.cc", "codecs/cng/cng_unittest.cc",
"codecs/ilbc/ilbc_unittest.cc", "codecs/ilbc/ilbc_unittest.cc",
"codecs/isac/fix/source/filterbanks_unittest.cc",
"codecs/isac/fix/source/filters_unittest.cc",
"codecs/isac/fix/source/lpc_masking_model_unittest.cc",
"codecs/isac/fix/source/transform_unittest.cc",
"codecs/isac/isac_webrtc_api_test.cc",
"codecs/isac/main/source/audio_encoder_isac_unittest.cc",
"codecs/isac/main/source/isac_unittest.cc",
"codecs/legacy_encoded_audio_frame_unittest.cc", "codecs/legacy_encoded_audio_frame_unittest.cc",
"codecs/opus/audio_decoder_multi_channel_opus_unittest.cc", "codecs/opus/audio_decoder_multi_channel_opus_unittest.cc",
"codecs/opus/audio_encoder_multi_channel_opus_unittest.cc", "codecs/opus/audio_encoder_multi_channel_opus_unittest.cc",
@ -1701,6 +2070,11 @@ if (rtc_include_tests) {
":default_neteq_factory", ":default_neteq_factory",
":g711", ":g711",
":ilbc", ":ilbc",
":isac",
":isac_c",
":isac_common",
":isac_fix",
":isac_fix_common",
":legacy_encoded_audio_frame", ":legacy_encoded_audio_frame",
":mocks", ":mocks",
":neteq", ":neteq",
@ -1720,6 +2094,10 @@ if (rtc_include_tests) {
"../../api/audio_codecs:audio_codecs_api", "../../api/audio_codecs:audio_codecs_api",
"../../api/audio_codecs:builtin_audio_decoder_factory", "../../api/audio_codecs:builtin_audio_decoder_factory",
"../../api/audio_codecs:builtin_audio_encoder_factory", "../../api/audio_codecs:builtin_audio_encoder_factory",
"../../api/audio_codecs/isac:audio_decoder_isac_fix",
"../../api/audio_codecs/isac:audio_decoder_isac_float",
"../../api/audio_codecs/isac:audio_encoder_isac_fix",
"../../api/audio_codecs/isac:audio_encoder_isac_float",
"../../api/audio_codecs/opus:audio_decoder_multiopus", "../../api/audio_codecs/opus:audio_decoder_multiopus",
"../../api/audio_codecs/opus:audio_decoder_opus", "../../api/audio_codecs/opus:audio_decoder_opus",
"../../api/audio_codecs/opus:audio_encoder_multiopus", "../../api/audio_codecs/opus:audio_encoder_multiopus",
@ -13,7 +13,6 @@
#include <algorithm> // std::min #include <algorithm> // std::min
#include <memory> #include <memory>
#include "absl/types/optional.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h"
#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h" #include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
@ -65,14 +64,12 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
const SdpAudioFormat& format, const SdpAudioFormat& format,
const std::map<int, int> cng_payload_types = {}) { const std::map<int, int> cng_payload_types = {}) {
// Create the speech encoder. // Create the speech encoder.
- absl::optional<AudioCodecInfo> info =
-     encoder_factory_->QueryAudioEncoder(format);
- RTC_CHECK(info.has_value());
+ AudioCodecInfo info = encoder_factory_->QueryAudioEncoder(format).value();
std::unique_ptr<AudioEncoder> enc = std::unique_ptr<AudioEncoder> enc =
encoder_factory_->MakeAudioEncoder(payload_type, format, absl::nullopt); encoder_factory_->MakeAudioEncoder(payload_type, format, absl::nullopt);
// If we have a compatible CN specification, stack a CNG on top. // If we have a compatible CN specification, stack a CNG on top.
- auto it = cng_payload_types.find(info->sample_rate_hz);
+ auto it = cng_payload_types.find(info.sample_rate_hz);
if (it != cng_payload_types.end()) { if (it != cng_payload_types.end()) {
AudioEncoderCngConfig config; AudioEncoderCngConfig config;
config.speech_encoder = std::move(enc); config.speech_encoder = std::move(enc);
@ -84,7 +81,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
// Actually start using the new encoder. // Actually start using the new encoder.
acm_->SetEncoder(std::move(enc)); acm_->SetEncoder(std::move(enc));
- return *info;
+ return info;
} }
int InsertOnePacketOfSilence(const AudioCodecInfo& info) { int InsertOnePacketOfSilence(const AudioCodecInfo& info) {
@ -151,7 +148,8 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
#define MAYBE_SampleRate SampleRate #define MAYBE_SampleRate SampleRate
#endif #endif
TEST_F(AcmReceiverTestOldApi, MAYBE_SampleRate) { TEST_F(AcmReceiverTestOldApi, MAYBE_SampleRate) {
- const std::map<int, SdpAudioFormat> codecs = {{0, {"OPUS", 48000, 2}}};
+ const std::map<int, SdpAudioFormat> codecs = {{0, {"ISAC", 16000, 1}},
+                                               {1, {"ISAC", 32000, 1}}};
receiver_->SetCodecs(codecs); receiver_->SetCodecs(codecs);
constexpr int kOutSampleRateHz = 8000; // Different than codec sample rate. constexpr int kOutSampleRateHz = 8000; // Different than codec sample rate.
@ -234,6 +232,15 @@ TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFramePCMU) {
RunVerifyAudioFrame({"PCMU", 8000, 1}); RunVerifyAudioFrame({"PCMU", 8000, 1});
} }
#if defined(WEBRTC_ANDROID)
#define MAYBE_VerifyAudioFrameISAC DISABLED_VerifyAudioFrameISAC
#else
#define MAYBE_VerifyAudioFrameISAC VerifyAudioFrameISAC
#endif
TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFrameISAC) {
RunVerifyAudioFrame({"ISAC", 16000, 1});
}
#if defined(WEBRTC_ANDROID) #if defined(WEBRTC_ANDROID)
#define MAYBE_VerifyAudioFrameOpus DISABLED_VerifyAudioFrameOpus #define MAYBE_VerifyAudioFrameOpus DISABLED_VerifyAudioFrameOpus
#else #else
@ -303,10 +310,12 @@ TEST_F(AcmReceiverTestPostDecodeVadPassiveOldApi, MAYBE_PostdecodingVad) {
#else #else
#define MAYBE_LastAudioCodec LastAudioCodec #define MAYBE_LastAudioCodec LastAudioCodec
#endif #endif
- #if defined(WEBRTC_CODEC_OPUS)
+ #if defined(WEBRTC_CODEC_ISAC)
TEST_F(AcmReceiverTestOldApi, MAYBE_LastAudioCodec) { TEST_F(AcmReceiverTestOldApi, MAYBE_LastAudioCodec) {
- const std::map<int, SdpAudioFormat> codecs = {
-     {0, {"PCMU", 8000, 1}}, {1, {"PCMA", 8000, 1}}, {2, {"L16", 32000, 1}}};
+ const std::map<int, SdpAudioFormat> codecs = {{0, {"ISAC", 16000, 1}},
+                                               {1, {"PCMA", 8000, 1}},
+                                               {2, {"ISAC", 32000, 1}},
+                                               {3, {"L16", 32000, 1}}};
const std::map<int, int> cng_payload_types = { const std::map<int, int> cng_payload_types = {
{8000, 100}, {16000, 101}, {32000, 102}}; {8000, 100}, {16000, 101}, {32000, 102}};
{ {
@ -30,6 +30,7 @@
#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h" #include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h" #include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h" #include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
#include "modules/audio_coding/include/audio_coding_module_typedefs.h" #include "modules/audio_coding/include/audio_coding_module_typedefs.h"
#include "modules/audio_coding/neteq/tools/audio_checksum.h" #include "modules/audio_coding/neteq/tools/audio_checksum.h"
#include "modules/audio_coding/neteq/tools/audio_loop.h" #include "modules/audio_coding/neteq/tools/audio_loop.h"
@ -301,6 +302,44 @@ TEST_F(AudioCodingModuleTestOldApi, TransportCallbackIsInvokedForEachPacket) {
EXPECT_EQ(AudioFrameType::kAudioFrameSpeech, packet_cb_.last_frame_type()); EXPECT_EQ(AudioFrameType::kAudioFrameSpeech, packet_cb_.last_frame_type());
} }
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
// Verifies that the RTP timestamp series is not reset when the codec is
// changed.
TEST_F(AudioCodingModuleTestOldApi, TimestampSeriesContinuesWhenCodecChanges) {
RegisterCodec(); // This registers the default codec.
uint32_t expected_ts = input_frame_.timestamp_;
int blocks_per_packet = pac_size_ / (kSampleRateHz / 100);
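// Number of 10 ms input blocks that go into one encoded packet.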
// Encode 5 packets of the first codec type.
const int kNumPackets1 = 5;
for (int j = 0; j < kNumPackets1; ++j) {
for (int i = 0; i < blocks_per_packet; ++i) {
EXPECT_EQ(j, packet_cb_.num_calls());
InsertAudio();
}
EXPECT_EQ(j + 1, packet_cb_.num_calls());
EXPECT_EQ(expected_ts, packet_cb_.last_timestamp());
expected_ts += pac_size_;
}
// Change codec.
audio_format_ = SdpAudioFormat("ISAC", kSampleRateHz, 1);
pac_size_ = 480;
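// 480 samples per packet, i.e. 30 ms at the 16 kHz test sample rate.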
RegisterCodec();
blocks_per_packet = pac_size_ / (kSampleRateHz / 100);
// Encode another 5 packets.
const int kNumPackets2 = 5;
for (int j = 0; j < kNumPackets2; ++j) {
for (int i = 0; i < blocks_per_packet; ++i) {
EXPECT_EQ(kNumPackets1 + j, packet_cb_.num_calls());
InsertAudio();
}
EXPECT_EQ(kNumPackets1 + j + 1, packet_cb_.num_calls());
EXPECT_EQ(expected_ts, packet_cb_.last_timestamp());
expected_ts += pac_size_;
}
}
#endif
// Introduce this class to set different expectations on the number of encoded // Introduce this class to set different expectations on the number of encoded
// bytes. This class expects all encoded packets to be 9 bytes (matching one // bytes. This class expects all encoded packets to be 9 bytes (matching one
// CNG SID frame) or 0 bytes. This test depends on `input_frame_` containing // CNG SID frame) or 0 bytes. This test depends on `input_frame_` containing
@ -381,7 +420,8 @@ TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
DoTest(k10MsBlocksPerPacket, kCngPayloadType); DoTest(k10MsBlocksPerPacket, kCngPayloadType);
} }
- // A multi-threaded test for ACM that uses the PCM16b 16 kHz codec.
+ // A multi-threaded test for ACM. This base class uses the PCM16b 16 kHz
+ // codec, while the derived class AcmIsacMtTest uses iSAC.
class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi { class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
protected: protected:
static const int kNumPackets = 500; static const int kNumPackets = 500;
@ -520,6 +560,272 @@ TEST_F(AudioCodingModuleMtTestOldApi, MAYBE_DoTest) {
EXPECT_TRUE(RunTest()); EXPECT_TRUE(RunTest());
} }
// This is a multi-threaded ACM test using iSAC. The test encodes audio
// from a PCM file. The most recent encoded frame is used as input to the
// receiving part. Depending on timing, it may happen that the same RTP packet
// is inserted into the receiver multiple times, but this is a valid use-case,
// and simplifies the test code a lot.
class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
protected:
static const int kNumPackets = 500;
static const int kNumPullCalls = 500;
AcmIsacMtTestOldApi()
: AudioCodingModuleMtTestOldApi(), last_packet_number_(0) {}
~AcmIsacMtTestOldApi() {}
void SetUp() override {
AudioCodingModuleTestOldApi::SetUp();
RegisterCodec(); // Must be called before the threads start below.
// Set up input audio source to read from specified file, loop after 5
// seconds, and deliver blocks of 10 ms.
const std::string input_file_name =
webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
// Generate one packet to have something to insert.
int loop_counter = 0;
while (packet_cb_.last_payload_len_bytes() == 0) {
InsertAudio();
ASSERT_LT(loop_counter++, 10);
}
// Set `last_packet_number_` to one less than `num_calls` so that the packet
// will be fetched in the next InsertPacket() call.
last_packet_number_ = packet_cb_.num_calls() - 1;
StartThreads();
}
void RegisterCodec() override {
static_assert(kSampleRateHz == 16000, "test designed for iSAC 16 kHz");
audio_format_ = SdpAudioFormat("isac", kSampleRateHz, 1);
pac_size_ = 480;
// Register iSAC codec in ACM, effectively unregistering the PCM16B codec
// registered in AudioCodingModuleTestOldApi::SetUp();
acm_->SetReceiveCodecs({{kPayloadType, *audio_format_}});
acm_->SetEncoder(CreateBuiltinAudioEncoderFactory()->MakeAudioEncoder(
kPayloadType, *audio_format_, absl::nullopt));
}
void InsertPacket() override {
int num_calls = packet_cb_.num_calls(); // Store locally for thread safety.
if (num_calls > last_packet_number_) {
// Get the new payload out from the callback handler.
// Note that since we swap buffers here instead of directly inserting
// a pointer to the data in `packet_cb_`, we avoid locking the callback
// for the duration of the IncomingPacket() call.
packet_cb_.SwapBuffers(&last_payload_vec_);
ASSERT_GT(last_payload_vec_.size(), 0u);
rtp_utility_->Forward(&rtp_header_);
last_packet_number_ = num_calls;
}
ASSERT_GT(last_payload_vec_.size(), 0u);
ASSERT_EQ(0, acm_->IncomingPacket(&last_payload_vec_[0],
last_payload_vec_.size(), rtp_header_));
}
void InsertAudio() override {
// TODO(kwiberg): Use std::copy here. Might be complications because AFAICS
// this call confuses the number of samples with the number of bytes, and
// ends up copying only half of what it should.
memcpy(input_frame_.mutable_data(), audio_loop_.GetNextBlock().data(),
kNumSamples10ms);
AudioCodingModuleTestOldApi::InsertAudio();
}
// Override the verification function with no-op, since iSAC produces variable
// payload sizes.
void VerifyEncoding() override {}
// This method is the same as AudioCodingModuleMtTestOldApi::TestDone(), but
// here it is using the constants defined in this class (i.e., shorter test
// run).
bool TestDone() override {
if (packet_cb_.num_calls() > kNumPackets) {
MutexLock lock(&mutex_);
if (pull_audio_count_ > kNumPullCalls) {
// Both conditions for completion are met. End the test.
return true;
}
}
return false;
}
int last_packet_number_;
std::vector<uint8_t> last_payload_vec_;
test::AudioLoop audio_loop_;
};
#if defined(WEBRTC_IOS)
#define MAYBE_DoTest DISABLED_DoTest
#else
#define MAYBE_DoTest DoTest
#endif
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
TEST_F(AcmIsacMtTestOldApi, MAYBE_DoTest) {
EXPECT_TRUE(RunTest());
}
#endif
class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
protected:
static const int kRegisterAfterNumPackets = 5;
static const int kNumPackets = 10;
static const int kPacketSizeMs = 30;
static const int kPacketSizeSamples = kPacketSizeMs * 16;
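// 16 samples per millisecond at the 16 kHz test sample rate.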
AcmReRegisterIsacMtTestOldApi()
: AudioCodingModuleTestOldApi(),
codec_registered_(false),
receive_packet_count_(0),
next_insert_packet_time_ms_(0),
fake_clock_(new SimulatedClock(0)) {
AudioEncoderIsacFloatImpl::Config config;
config.payload_type = kPayloadType;
isac_encoder_.reset(new AudioEncoderIsacFloatImpl(config));
clock_ = fake_clock_.get();
}
void SetUp() override {
AudioCodingModuleTestOldApi::SetUp();
// Set up input audio source to read from specified file, loop after 5
// seconds, and deliver blocks of 10 ms.
const std::string input_file_name =
webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
RegisterCodec(); // Must be called before the threads start below.
StartThreads();
}
void RegisterCodec() override {
// Register iSAC codec in ACM, effectively unregistering the PCM16B codec
// registered in AudioCodingModuleTestOldApi::SetUp();
// Only register the decoder for now. The encoder is registered later.
static_assert(kSampleRateHz == 16000, "test designed for iSAC 16 kHz");
acm_->SetReceiveCodecs({{kPayloadType, {"ISAC", kSampleRateHz, 1}}});
}
void StartThreads() {
quit_.store(false);
const auto attributes =
rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime);
receive_thread_ = rtc::PlatformThread::SpawnJoinable(
[this] {
while (!quit_.load() && CbReceiveImpl()) {
}
},
"receive", attributes);
codec_registration_thread_ = rtc::PlatformThread::SpawnJoinable(
[this] {
while (!quit_.load()) {
CbCodecRegistrationImpl();
}
},
"codec_registration", attributes);
}
void TearDown() override {
AudioCodingModuleTestOldApi::TearDown();
quit_.store(true);
receive_thread_.Finalize();
codec_registration_thread_.Finalize();
}
bool RunTest() { return test_complete_.Wait(TimeDelta::Minutes(10)); }
bool CbReceiveImpl() {
SleepMs(1);
rtc::Buffer encoded;
AudioEncoder::EncodedInfo info;
{
MutexLock lock(&mutex_);
if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
return true;
}
next_insert_packet_time_ms_ += kPacketSizeMs;
++receive_packet_count_;
// Encode new frame.
uint32_t input_timestamp = rtp_header_.timestamp;
while (info.encoded_bytes == 0) {
info = isac_encoder_->Encode(input_timestamp,
audio_loop_.GetNextBlock(), &encoded);
input_timestamp += 160; // 10 ms at 16 kHz.
}
EXPECT_EQ(rtp_header_.timestamp + kPacketSizeSamples, input_timestamp);
EXPECT_EQ(rtp_header_.timestamp, info.encoded_timestamp);
EXPECT_EQ(rtp_header_.payloadType, info.payload_type);
}
// Now we're not holding the crit sect when calling ACM.
// Insert into ACM.
EXPECT_EQ(0, acm_->IncomingPacket(encoded.data(), info.encoded_bytes,
rtp_header_));
// Pull audio.
for (int i = 0; i < rtc::CheckedDivExact(kPacketSizeMs, 10); ++i) {
AudioFrame audio_frame;
bool muted;
EXPECT_EQ(0, acm_->PlayoutData10Ms(-1 /* default output frequency */,
&audio_frame, &muted));
if (muted) {
ADD_FAILURE();
return false;
}
fake_clock_->AdvanceTimeMilliseconds(10);
}
rtp_utility_->Forward(&rtp_header_);
return true;
}
void CbCodecRegistrationImpl() {
SleepMs(1);
if (HasFatalFailure()) {
// End the test early if a fatal failure (ASSERT_*) has occurred.
test_complete_.Set();
}
MutexLock lock(&mutex_);
if (!codec_registered_ &&
receive_packet_count_ > kRegisterAfterNumPackets) {
// Register the iSAC encoder.
acm_->SetEncoder(CreateBuiltinAudioEncoderFactory()->MakeAudioEncoder(
kPayloadType, *audio_format_, absl::nullopt));
codec_registered_ = true;
}
if (codec_registered_ && receive_packet_count_ > kNumPackets) {
test_complete_.Set();
}
}
rtc::PlatformThread receive_thread_;
rtc::PlatformThread codec_registration_thread_;
// Used to force worker threads to stop looping.
std::atomic<bool> quit_;
rtc::Event test_complete_;
Mutex mutex_;
bool codec_registered_ RTC_GUARDED_BY(mutex_);
int receive_packet_count_ RTC_GUARDED_BY(mutex_);
int64_t next_insert_packet_time_ms_ RTC_GUARDED_BY(mutex_);
std::unique_ptr<AudioEncoderIsacFloatImpl> isac_encoder_;
std::unique_ptr<SimulatedClock> fake_clock_;
test::AudioLoop audio_loop_;
};
#if defined(WEBRTC_IOS)
#define MAYBE_DoTest DISABLED_DoTest
#else
#define MAYBE_DoTest DoTest
#endif
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
TEST_F(AcmReRegisterIsacMtTestOldApi, MAYBE_DoTest) {
EXPECT_TRUE(RunTest());
}
#endif
// Disabling all of these tests on iOS until file support has been added. // Disabling all of these tests on iOS until file support has been added.
// See https://code.google.com/p/webrtc/issues/detail?id=4752 for details. // See https://code.google.com/p/webrtc/issues/detail?id=4752 for details.
#if !defined(WEBRTC_IOS) #if !defined(WEBRTC_IOS)
@ -719,6 +1025,38 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
class AcmSenderBitExactnessNewApi : public AcmSenderBitExactnessOldApi {}; class AcmSenderBitExactnessNewApi : public AcmSenderBitExactnessOldApi {};
// Run bit exactness tests only for release builds.
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
defined(NDEBUG) && defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
TEST_F(AcmSenderBitExactnessOldApi, IsacWb30ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
Run(/*audio_checksum_ref=*/"37ecdabad1698a857cf811e6d1fa91df",
/*payload_checksum_ref=*/"3c79f16f34218271f3dca4e2b1dfe1bb",
/*expected_packets=*/33,
/*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
}
TEST_F(AcmSenderBitExactnessOldApi, IsacWb60ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
Run(/*audio_checksum_ref=*/"0e9078d23454901496a88362ba0740c3",
/*payload_checksum_ref=*/"9e0a0ab743ad987b55b8e14802769c56",
/*expected_packets=*/16,
/*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
}
#endif
// Run bit exactness test only for release build.
#if defined(WEBRTC_CODEC_ISAC) && defined(NDEBUG) && defined(WEBRTC_LINUX) && \
defined(WEBRTC_ARCH_X86_64)
TEST_F(AcmSenderBitExactnessOldApi, IsacSwb30ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960));
Run(/*audio_checksum_ref=*/"f4cf577f28a0dcbac33358b757518e0c",
/*payload_checksum_ref=*/"ce86106a93419aefb063097108ec94ab",
/*expected_packets=*/33,
/*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput);
}
#endif
TEST_F(AcmSenderBitExactnessOldApi, Pcm16_8000khz_10ms) { TEST_F(AcmSenderBitExactnessOldApi, Pcm16_8000khz_10ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80)); ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
Run(/*audio_checksum_ref=*/"69118ed438ac76252d023e0463819471", Run(/*audio_checksum_ref=*/"69118ed438ac76252d023e0463819471",
@ -20,6 +20,11 @@ if (rtc_opus_support_120ms_ptime) {
} else { } else {
audio_codec_defines += [ "WEBRTC_OPUS_SUPPORT_120MS_PTIME=0" ] audio_codec_defines += [ "WEBRTC_OPUS_SUPPORT_120MS_PTIME=0" ]
} }
if (current_cpu == "arm") {
audio_codec_defines += [ "WEBRTC_CODEC_ISACFX" ]
} else {
audio_codec_defines += [ "WEBRTC_CODEC_ISAC" ]
}
audio_coding_defines = audio_codec_defines audio_coding_defines = audio_codec_defines
neteq_defines = audio_codec_defines neteq_defines = audio_codec_defines
@ -75,6 +75,31 @@ TEST(AudioDecoderFactoryTest, CreateIlbc) {
adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 16000, 1), absl::nullopt)); adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 16000, 1), absl::nullopt));
} }
TEST(AudioDecoderFactoryTest, CreateIsac) {
rtc::scoped_refptr<AudioDecoderFactory> adf =
CreateBuiltinAudioDecoderFactory();
ASSERT_TRUE(adf);
// iSAC supports 16 kHz, 1 channel. The float implementation additionally
// supports 32 kHz, 1 channel.
EXPECT_FALSE(
adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 0), absl::nullopt));
EXPECT_TRUE(
adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 1), absl::nullopt));
EXPECT_FALSE(
adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 2), absl::nullopt));
EXPECT_FALSE(
adf->MakeAudioDecoder(SdpAudioFormat("isac", 8000, 1), absl::nullopt));
EXPECT_FALSE(
adf->MakeAudioDecoder(SdpAudioFormat("isac", 48000, 1), absl::nullopt));
#ifdef WEBRTC_ARCH_ARM
EXPECT_FALSE(
adf->MakeAudioDecoder(SdpAudioFormat("isac", 32000, 1), absl::nullopt));
#else
EXPECT_TRUE(
adf->MakeAudioDecoder(SdpAudioFormat("isac", 32000, 1), absl::nullopt));
#endif
}
TEST(AudioDecoderFactoryTest, CreateL16) { TEST(AudioDecoderFactoryTest, CreateL16) {
rtc::scoped_refptr<AudioDecoderFactory> adf = rtc::scoped_refptr<AudioDecoderFactory> adf =
CreateBuiltinAudioDecoderFactory(); CreateBuiltinAudioDecoderFactory();
@ -100,6 +125,9 @@ TEST(AudioDecoderFactoryTest, MaxNrOfChannels) {
#ifdef WEBRTC_CODEC_OPUS #ifdef WEBRTC_CODEC_OPUS
"opus", "opus",
#endif #endif
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
"isac",
#endif
#ifdef WEBRTC_CODEC_ILBC #ifdef WEBRTC_CODEC_ILBC
"ilbc", "ilbc",
#endif #endif
@ -0,0 +1,54 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/scoped_refptr.h"
namespace webrtc {
template <typename T>
class AudioDecoderIsacT final : public AudioDecoder {
public:
struct Config {
bool IsOk() const;
int sample_rate_hz = 16000;
};
explicit AudioDecoderIsacT(const Config& config);
virtual ~AudioDecoderIsacT() override;
AudioDecoderIsacT(const AudioDecoderIsacT&) = delete;
AudioDecoderIsacT& operator=(const AudioDecoderIsacT&) = delete;
bool HasDecodePlc() const override;
size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
void Reset() override;
int ErrorCode() override;
int SampleRateHz() const override;
size_t Channels() const override;
int DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) override;
private:
typename T::instance_type* isac_state_;
int sample_rate_hz_;
};
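// A minimal usage sketch, not part of this revert; `IsacFloat` below stands in
// for whichever codec traits type instantiates the template (see
// isac_float_type.h / isac_fix_type.h restored elsewhere in this CL):
//
//   AudioDecoderIsacT<IsacFloat>::Config config;
//   config.sample_rate_hz = 16000;  // 16000 or 32000; 32000 only with float iSAC.
//   AudioDecoderIsacT<IsacFloat> decoder(config);
//   // decoder.SampleRateHz() == 16000 and decoder.Channels() == 1.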
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
@ -0,0 +1,85 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
#include "rtc_base/checks.h"
namespace webrtc {
template <typename T>
bool AudioDecoderIsacT<T>::Config::IsOk() const {
return (sample_rate_hz == 16000 || sample_rate_hz == 32000);
}
template <typename T>
AudioDecoderIsacT<T>::AudioDecoderIsacT(const Config& config)
: sample_rate_hz_(config.sample_rate_hz) {
RTC_CHECK(config.IsOk()) << "Unsupported sample rate "
<< config.sample_rate_hz;
RTC_CHECK_EQ(0, T::Create(&isac_state_));
T::DecoderInit(isac_state_);
RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, sample_rate_hz_));
}
template <typename T>
AudioDecoderIsacT<T>::~AudioDecoderIsacT() {
RTC_CHECK_EQ(0, T::Free(isac_state_));
}
template <typename T>
int AudioDecoderIsacT<T>::DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) {
RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
int16_t temp_type = 1; // Default is speech.
int ret =
T::DecodeInternal(isac_state_, encoded, encoded_len, decoded, &temp_type);
*speech_type = ConvertSpeechType(temp_type);
return ret;
}
template <typename T>
bool AudioDecoderIsacT<T>::HasDecodePlc() const {
return false;
}
template <typename T>
size_t AudioDecoderIsacT<T>::DecodePlc(size_t num_frames, int16_t* decoded) {
return T::DecodePlc(isac_state_, decoded, num_frames);
}
template <typename T>
void AudioDecoderIsacT<T>::Reset() {
T::DecoderInit(isac_state_);
}
template <typename T>
int AudioDecoderIsacT<T>::ErrorCode() {
return T::GetErrorCode(isac_state_);
}
template <typename T>
int AudioDecoderIsacT<T>::SampleRateHz() const {
return sample_rate_hz_;
}
template <typename T>
size_t AudioDecoderIsacT<T>::Channels() const {
return 1;
}
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
@ -0,0 +1,108 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/scoped_refptr.h"
#include "api/units/time_delta.h"
#include "system_wrappers/include/field_trial.h"
namespace webrtc {
template <typename T>
class AudioEncoderIsacT final : public AudioEncoder {
public:
// Allowed combinations of sample rate, frame size, and bit rate are
// - 16000 Hz, 30 ms, 10000-32000 bps
// - 16000 Hz, 60 ms, 10000-32000 bps
// - 32000 Hz, 30 ms, 10000-56000 bps (if T has super-wideband support)
struct Config {
bool IsOk() const;
int payload_type = 103;
int sample_rate_hz = 16000;
int frame_size_ms = 30;
int bit_rate = kDefaultBitRate; // Limit on the short-term average bit
// rate, in bits/s.
int max_payload_size_bytes = -1;
int max_bit_rate = -1;
};
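// A minimal configuration sketch, not prescribed by this CL; `IsacFloat` is
// assumed to be the float codec traits type (see isac_float_type.h):
//
//   AudioEncoderIsacT<IsacFloat>::Config config;
//   config.payload_type = 103;
//   config.sample_rate_hz = 16000;  // wideband
//   config.frame_size_ms = 30;
//   config.bit_rate = 32000;        // within the 10000-32000 bps range above
//   RTC_DCHECK(config.IsOk());
//   AudioEncoderIsacT<IsacFloat> encoder(config);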
explicit AudioEncoderIsacT(const Config& config);
~AudioEncoderIsacT() override;
AudioEncoderIsacT(const AudioEncoderIsacT&) = delete;
AudioEncoderIsacT& operator=(const AudioEncoderIsacT&) = delete;
int SampleRateHz() const override;
size_t NumChannels() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
void SetTargetBitrate(int target_bps) override;
void OnReceivedTargetAudioBitrate(int target_bps) override;
void OnReceivedUplinkBandwidth(
int target_audio_bitrate_bps,
absl::optional<int64_t> bwe_period_ms) override;
void OnReceivedUplinkAllocation(BitrateAllocationUpdate update) override;
void OnReceivedOverhead(size_t overhead_bytes_per_packet) override;
EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) override;
void Reset() override;
absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
const override;
private:
// This value is taken from STREAM_SIZE_MAX_60 for iSAC float (60 ms) and
// STREAM_MAXW16_60MS for iSAC fix (60 ms).
static const size_t kSufficientEncodeBufferSizeBytes = 400;
static constexpr int kDefaultBitRate = 32000;
static constexpr int kMinBitrateBps = 10000;
static constexpr int MaxBitrateBps(int sample_rate_hz) {
return sample_rate_hz == 32000 ? 56000 : 32000;
}
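// For example, MaxBitrateBps(16000) == 32000 and MaxBitrateBps(32000) == 56000.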
void SetTargetBitrate(int target_bps, bool subtract_per_packet_overhead);
// Recreate the iSAC encoder instance with the given settings, and save them.
void RecreateEncoderInstance(const Config& config);
Config config_;
typename T::instance_type* isac_state_ = nullptr;
// Have we accepted input but not yet emitted it in a packet?
bool packet_in_progress_ = false;
// Timestamp of the first input of the currently in-progress packet.
uint32_t packet_timestamp_;
// Timestamp of the previously encoded packet.
uint32_t last_encoded_timestamp_;
// Cache the value of the "WebRTC-SendSideBwe-WithOverhead" field trial.
const bool send_side_bwe_with_overhead_ =
!field_trial::IsDisabled("WebRTC-SendSideBwe-WithOverhead");
// When we send a packet, expect this many bytes of headers to be added to it.
// Start out with a reasonable default that we can use until we receive a real
// value.
DataSize overhead_per_packet_ = DataSize::Bytes(28);
};
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
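// The allowed sample-rate/frame-size/bit-rate combinations documented on
// AudioEncoderIsacT<T>::Config can be restated as a small standalone checker.
// This is a minimal sketch for illustration only; the authoritative logic is
// Config::IsOk() in audio_encoder_isac_t_impl.h, which additionally accepts
// bit_rate == 0 as "use the default". The helper name is hypothetical.
static bool IsAllowedIsacCombination(int sample_rate_hz,
                                     int frame_size_ms,
                                     int bit_rate,
                                     bool has_super_wideband) {
  if (sample_rate_hz == 16000) {
    // Wideband: 30 or 60 ms frames, 10000-32000 bps.
    return (frame_size_ms == 30 || frame_size_ms == 60) &&
           bit_rate >= 10000 && bit_rate <= 32000;
  }
  if (sample_rate_hz == 32000) {
    // Super-wideband (only if T supports it): 30 ms frames, 10000-56000 bps.
    return has_super_wideband && frame_size_ms == 30 && bit_rate >= 10000 &&
           bit_rate <= 56000;
  }
  return false;
}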

View File

@ -0,0 +1,225 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
#include "rtc_base/checks.h"
#include "rtc_base/numerics/safe_minmax.h"
namespace webrtc {
template <typename T>
bool AudioEncoderIsacT<T>::Config::IsOk() const {
if (max_bit_rate < 32000 && max_bit_rate != -1)
return false;
if (max_payload_size_bytes < 120 && max_payload_size_bytes != -1)
return false;
switch (sample_rate_hz) {
case 16000:
if (max_bit_rate > 53400)
return false;
if (max_payload_size_bytes > 400)
return false;
return (frame_size_ms == 30 || frame_size_ms == 60) &&
(bit_rate == 0 || (bit_rate >= 10000 && bit_rate <= 32000));
case 32000:
if (max_bit_rate > 160000)
return false;
if (max_payload_size_bytes > 600)
return false;
return T::has_swb &&
(frame_size_ms == 30 &&
(bit_rate == 0 || (bit_rate >= 10000 && bit_rate <= 56000)));
default:
return false;
}
}
template <typename T>
AudioEncoderIsacT<T>::AudioEncoderIsacT(const Config& config) {
RecreateEncoderInstance(config);
}
template <typename T>
AudioEncoderIsacT<T>::~AudioEncoderIsacT() {
RTC_CHECK_EQ(0, T::Free(isac_state_));
}
template <typename T>
int AudioEncoderIsacT<T>::SampleRateHz() const {
return T::EncSampRate(isac_state_);
}
template <typename T>
size_t AudioEncoderIsacT<T>::NumChannels() const {
return 1;
}
template <typename T>
size_t AudioEncoderIsacT<T>::Num10MsFramesInNextPacket() const {
const int samples_in_next_packet = T::GetNewFrameLen(isac_state_);
return static_cast<size_t>(rtc::CheckedDivExact(
samples_in_next_packet, rtc::CheckedDivExact(SampleRateHz(), 100)));
}
template <typename T>
size_t AudioEncoderIsacT<T>::Max10MsFramesInAPacket() const {
return 6; // iSAC puts at most 60 ms in a packet.
}
template <typename T>
int AudioEncoderIsacT<T>::GetTargetBitrate() const {
return config_.bit_rate == 0 ? kDefaultBitRate : config_.bit_rate;
}
template <typename T>
void AudioEncoderIsacT<T>::SetTargetBitrate(int target_bps) {
// Set target bitrate directly without subtracting per-packet overhead,
// because that's what AudioEncoderOpus does.
SetTargetBitrate(target_bps,
/*subtract_per_packet_overhead=*/false);
}
template <typename T>
void AudioEncoderIsacT<T>::OnReceivedTargetAudioBitrate(int target_bps) {
// Set target bitrate directly without subtracting per-packet overhead,
// because that's what AudioEncoderOpus does.
SetTargetBitrate(target_bps,
/*subtract_per_packet_overhead=*/false);
}
template <typename T>
void AudioEncoderIsacT<T>::OnReceivedUplinkBandwidth(
int target_audio_bitrate_bps,
absl::optional<int64_t> /*bwe_period_ms*/) {
// Set target bitrate, subtracting the per-packet overhead if
// WebRTC-SendSideBwe-WithOverhead is enabled, because that's what
// AudioEncoderOpus does.
SetTargetBitrate(
target_audio_bitrate_bps,
/*subtract_per_packet_overhead=*/send_side_bwe_with_overhead_);
}
template <typename T>
void AudioEncoderIsacT<T>::OnReceivedUplinkAllocation(
BitrateAllocationUpdate update) {
// Set target bitrate, subtracting the per-packet overhead if
// WebRTC-SendSideBwe-WithOverhead is enabled, because that's what
// AudioEncoderOpus does.
SetTargetBitrate(
update.target_bitrate.bps<int>(),
/*subtract_per_packet_overhead=*/send_side_bwe_with_overhead_);
}
template <typename T>
void AudioEncoderIsacT<T>::OnReceivedOverhead(
size_t overhead_bytes_per_packet) {
overhead_per_packet_ = DataSize::Bytes(overhead_bytes_per_packet);
}
template <typename T>
AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeImpl(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) {
if (!packet_in_progress_) {
// Starting a new packet; remember the timestamp for later.
packet_in_progress_ = true;
packet_timestamp_ = rtp_timestamp;
}
size_t encoded_bytes = encoded->AppendData(
kSufficientEncodeBufferSizeBytes, [&](rtc::ArrayView<uint8_t> encoded) {
int r = T::Encode(isac_state_, audio.data(), encoded.data());
if (T::GetErrorCode(isac_state_) == 6450) {
// iSAC is not able to effectively compress all types of signals. This
// is a limitation of the codec that cannot be easily fixed.
r = 0;
}
RTC_CHECK_GE(r, 0) << "Encode failed (error code "
<< T::GetErrorCode(isac_state_) << ")";
return static_cast<size_t>(r);
});
if (encoded_bytes == 0)
return EncodedInfo();
// Got enough input to produce a packet. Return the saved timestamp from
// the first chunk of input that went into the packet.
packet_in_progress_ = false;
EncodedInfo info;
info.encoded_bytes = encoded_bytes;
info.encoded_timestamp = packet_timestamp_;
info.payload_type = config_.payload_type;
info.encoder_type = CodecType::kIsac;
return info;
}
template <typename T>
void AudioEncoderIsacT<T>::Reset() {
RecreateEncoderInstance(config_);
}
template <typename T>
absl::optional<std::pair<TimeDelta, TimeDelta>>
AudioEncoderIsacT<T>::GetFrameLengthRange() const {
return {{TimeDelta::Millis(config_.frame_size_ms),
TimeDelta::Millis(config_.frame_size_ms)}};
}
template <typename T>
void AudioEncoderIsacT<T>::SetTargetBitrate(int target_bps,
bool subtract_per_packet_overhead) {
if (subtract_per_packet_overhead) {
const DataRate overhead_rate =
overhead_per_packet_ / TimeDelta::Millis(config_.frame_size_ms);
target_bps -= overhead_rate.bps();
}
target_bps = rtc::SafeClamp(target_bps, kMinBitrateBps,
MaxBitrateBps(config_.sample_rate_hz));
int result = T::Control(isac_state_, target_bps, config_.frame_size_ms);
RTC_DCHECK_EQ(result, 0);
config_.bit_rate = target_bps;
}
template <typename T>
void AudioEncoderIsacT<T>::RecreateEncoderInstance(const Config& config) {
RTC_CHECK(config.IsOk());
packet_in_progress_ = false;
if (isac_state_)
RTC_CHECK_EQ(0, T::Free(isac_state_));
RTC_CHECK_EQ(0, T::Create(&isac_state_));
RTC_CHECK_EQ(0, T::EncoderInit(isac_state_, /*coding_mode=*/1));
RTC_CHECK_EQ(0, T::SetEncSampRate(isac_state_, config.sample_rate_hz));
const int bit_rate = config.bit_rate == 0 ? kDefaultBitRate : config.bit_rate;
RTC_CHECK_EQ(0, T::Control(isac_state_, bit_rate, config.frame_size_ms));
if (config.max_payload_size_bytes != -1)
RTC_CHECK_EQ(
0, T::SetMaxPayloadSize(isac_state_, config.max_payload_size_bytes));
if (config.max_bit_rate != -1)
RTC_CHECK_EQ(0, T::SetMaxRate(isac_state_, config.max_bit_rate));
// Set the decoder sample rate even though we just use the encoder. This
// doesn't appear to be necessary to produce a valid encoding, but without it
// we get an encoding that isn't bit-for-bit identical with what a combined
// encoder+decoder object produces.
RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, config.sample_rate_hz));
config_ = config;
}
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
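// A worked restatement of the overhead subtraction performed by
// AudioEncoderIsacT<T>::SetTargetBitrate(target_bps,
// subtract_per_packet_overhead) above, using plain integers instead of the
// webrtc::DataSize/DataRate helpers. The 28-byte overhead and 30 ms frame
// size mirror the defaults in this file; the function name is illustrative.
static int ClampedAudioBitrateBps(int target_bps,
                                  int overhead_bytes_per_packet,
                                  int frame_size_ms,
                                  int max_bitrate_bps) {
  // Per-packet overhead expressed as a bitrate: bytes * 8 bits / frame length.
  const int overhead_bps =
      overhead_bytes_per_packet * 8 * 1000 / frame_size_ms;
  int audio_bps = target_bps - overhead_bps;
  // Clamp to the codec range: at least 10000 bps, at most 32000 bps (16 kHz)
  // or 56000 bps (32 kHz).
  if (audio_bps < 10000) audio_bps = 10000;
  if (audio_bps > max_bitrate_bps) audio_bps = max_bitrate_bps;
  return audio_bps;
}
// Example: with the default 28-byte overhead and 30 ms frames, the overhead
// rate is 28 * 8 * 1000 / 30 = 7466 bps, so a 32000 bps allocation becomes
// ClampedAudioBitrateBps(32000, 28, 30, 32000) == 24534.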

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_
#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
#include "modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h"
namespace webrtc {
using AudioDecoderIsacFixImpl = AudioDecoderIsacT<IsacFix>;
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
#include "modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h"
namespace webrtc {
using AudioEncoderIsacFixImpl = AudioEncoderIsacT<IsacFix>;
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
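// A minimal sketch of constructing the fixed-point encoder through the alias
// above, filling in the Config fields declared in audio_encoder_isac_t.h.
// Assumes the default wideband settings (the fixed-point codec has no
// super-wideband mode); the constructor RTC_CHECKs Config::IsOk(), so
// unsupported combinations abort.
#include <memory>
static std::unique_ptr<webrtc::AudioEncoder> CreateWidebandIsacFixEncoder() {
  webrtc::AudioEncoderIsacFixImpl::Config config;
  config.sample_rate_hz = 16000;  // Wideband.
  config.frame_size_ms = 30;      // 30 or 60 ms at 16 kHz.
  config.bit_rate = 32000;        // 10000-32000 bps at 16 kHz.
  return std::make_unique<webrtc::AudioEncoderIsacFixImpl>(config);
}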

View File

@ -0,0 +1,486 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_
#include <stddef.h>
#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
typedef struct {
void* dummy;
} ISACFIX_MainStruct;
#if defined(__cplusplus)
extern "C" {
#endif
/****************************************************************************
* WebRtcIsacfix_Create(...)
*
* This function creates an ISAC instance, which will contain the state
* information for one coding/decoding channel.
*
* Input:
* - *ISAC_main_inst : a pointer to the coder instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsacfix_Create(ISACFIX_MainStruct** ISAC_main_inst);
/****************************************************************************
* WebRtcIsacfix_Free(...)
*
* This function frees the ISAC instance created at the beginning.
*
* Input:
* - ISAC_main_inst : an ISAC instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsacfix_Free(ISACFIX_MainStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsacfix_EncoderInit(...)
*
* This function initializes an ISAC instance prior to the encoder calls.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - CodingMode : 0 - Bit rate and frame length are automatically
* adjusted to available bandwidth on
* transmission channel.
* 1 - User sets a frame length and a target bit
* rate which is taken as the maximum short-term
* average bit rate.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct* ISAC_main_inst,
int16_t CodingMode);
/****************************************************************************
* WebRtcIsacfix_Encode(...)
*
* This function encodes 10ms frame(s) and inserts them into a packet.
* Input speech length has to be 160 samples (10ms). The encoder buffers those
* 10ms frames until it reaches the chosen Framesize (480 or 960 samples
* corresponding to 30 or 60 ms frames), and then proceeds to the encoding.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - speechIn : input speech vector.
*
* Output:
* - encoded : the encoded data vector
*
* Return value : >0 - Length (in bytes) of coded data
* 0 - The buffer didn't reach the chosen framesize
* so it keeps buffering speech samples.
* -1 - Error
*/
int WebRtcIsacfix_Encode(ISACFIX_MainStruct* ISAC_main_inst,
const int16_t* speechIn,
uint8_t* encoded);
/****************************************************************************
* WebRtcIsacfix_DecoderInit(...)
*
* This function initializes an ISAC instance prior to the decoder calls.
*
* Input:
* - ISAC_main_inst : ISAC instance.
*/
void WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsacfix_UpdateBwEstimate1(...)
*
* This function updates the estimate of the bandwidth.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
* - packet_size : size of the packet in bytes.
* - rtp_seq_number : the RTP number of the packet.
* - arr_ts : the arrival time of the packet (from NetEq)
* in samples.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t packet_size,
uint16_t rtp_seq_number,
uint32_t arr_ts);
/****************************************************************************
* WebRtcIsacfix_UpdateBwEstimate(...)
*
* This function updates the estimate of the bandwidth.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
* - packet_size : size of the packet in bytes.
* - rtp_seq_number : the RTP number of the packet.
* - send_ts : the send time of the packet from RTP header,
* in samples.
* - arr_ts : the arrival time of the packet (from NetEq)
* in samples.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
/****************************************************************************
* WebRtcIsacfix_Decode(...)
*
* This function decodes an ISAC frame. Output speech length
* will be a multiple of 480 samples: 480 or 960 samples,
* depending on the framesize (30 or 60 ms).
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s)
* - len : bytes in encoded vector
*
* Output:
* - decoded : The decoded vector
*
* Return value : >0 - number of samples in decoded vector
* -1 - Error
*/
int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speechType);
/****************************************************************************
* WebRtcIsacfix_DecodePlc(...)
*
* This function conducts PLC for ISAC frame(s) in wide-band (16kHz sampling).
* Output speech length will be "480*noOfLostFrames" samples
* which is equivalent to "30*noOfLostFrames" milliseconds.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - noOfLostFrames : Number of PLC frames (480 samples = 30ms)
* to produce
* NOTE! Maximum number is 2 (960 samples = 60ms)
*
* Output:
* - decoded : The decoded vector
*
* Return value : Number of samples in decoded PLC vector
*/
size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct* ISAC_main_inst,
int16_t* decoded,
size_t noOfLostFrames);
/****************************************************************************
* WebRtcIsacfix_ReadFrameLen(...)
*
* This function returns the length of the frame represented in the packet.
*
* Input:
* - encoded : Encoded bitstream
* - encoded_len_bytes : Length of the bitstream in bytes.
*
* Output:
* - frameLength : Length of frame in packet (in samples)
*
*/
int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
size_t encoded_len_bytes,
size_t* frameLength);
/****************************************************************************
* WebRtcIsacfix_Control(...)
*
* This function sets the limit on the short-term average bit rate and the
* frame length. Should be used only in Instantaneous mode.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - rate : limit on the short-term average bit rate,
* in bits/second (between 10000 and 32000)
* - framesize : number of milliseconds per frame (30 or 60)
*
* Return value : 0 - ok
* -1 - Error
*/
int16_t WebRtcIsacfix_Control(ISACFIX_MainStruct* ISAC_main_inst,
int16_t rate,
int framesize);
void WebRtcIsacfix_SetInitialBweBottleneck(ISACFIX_MainStruct* ISAC_main_inst,
int bottleneck_bits_per_second);
/****************************************************************************
* WebRtcIsacfix_ControlBwe(...)
*
* This function sets the initial values of bottleneck and frame-size if
* iSAC is used in channel-adaptive mode. Through this API, users can
* enforce a frame-size for all values of bottleneck. Then iSAC will not
* automatically change the frame-size.
*
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - rateBPS : initial value of bottleneck in bits/second
* 10000 <= rateBPS <= 32000 is accepted
* - frameSizeMs : number of milliseconds per frame (30 or 60)
* - enforceFrameSize : 1 to enforce the given frame-size throughout
* the adaptation process, 0 to let iSAC change
* the frame-size if required.
*
* Return value : 0 - ok
* -1 - Error
*/
int16_t WebRtcIsacfix_ControlBwe(ISACFIX_MainStruct* ISAC_main_inst,
int16_t rateBPS,
int frameSizeMs,
int16_t enforceFrameSize);
/****************************************************************************
* WebRtcIsacfix_version(...)
*
* This function returns the version number.
*
* Output:
* - version : Pointer to character string
*
*/
void WebRtcIsacfix_version(char* version);
/****************************************************************************
* WebRtcIsacfix_GetErrorCode(...)
*
* This function can be used to check the error code of an iSAC instance. When
* a function returns -1, an error code will be set for that instance. The
* function below extracts the code of the last error that occurred in the
* specified instance.
*
* Input:
* - ISAC_main_inst : ISAC instance
*
* Return value : Error code
*/
int16_t WebRtcIsacfix_GetErrorCode(ISACFIX_MainStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsacfix_GetUplinkBw(...)
*
* This function returns the iSAC send bitrate
*
* Input:
* - ISAC_main_inst : iSAC instance
*
* Return value : <0 Error code
* else bitrate
*/
int32_t WebRtcIsacfix_GetUplinkBw(ISACFIX_MainStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsacfix_SetMaxPayloadSize(...)
*
* This function sets a limit for the maximum payload size of iSAC. The same
* value is used both for 30 and 60 msec packets.
* The absolute max will be valid until the next time the function is called.
* NOTE! This function may override the function WebRtcIsacfix_SetMaxRate()
*
* Input:
* - ISAC_main_inst : iSAC instance
* - maxPayloadBytes : maximum size of the payload in bytes
* valid values are between 100 and 400 bytes
*
*
* Return value : 0 if successful
* -1 if an error occurs
*/
int16_t WebRtcIsacfix_SetMaxPayloadSize(ISACFIX_MainStruct* ISAC_main_inst,
int16_t maxPayloadBytes);
/****************************************************************************
* WebRtcIsacfix_SetMaxRate(...)
*
* This function sets the maximum rate which the codec may not exceed for a
* single packet. The maximum rate is set in bits per second.
* The codec has an absolute maximum rate of 53400 bits per second (200 bytes
* per 30 msec).
* It is possible to set a maximum rate between 32000 and 53400 bits per second.
*
* The rate limit is valid until next time the function is called.
*
* NOTE! Packet size will never go above the value set if calling
* WebRtcIsacfix_SetMaxPayloadSize() (default max packet size is 400 bytes).
*
* Input:
* - ISAC_main_inst : iSAC instance
* - maxRate : maximum rate in bits per second,
* valid values are 32000 to 53400 bits
*
* Return value : 0 if successful
* -1 if an error occurs
*/
int16_t WebRtcIsacfix_SetMaxRate(ISACFIX_MainStruct* ISAC_main_inst,
int32_t maxRate);
/****************************************************************************
* WebRtcIsacfix_CreateInternal(...)
*
* This function creates the memory that is used to store data in the encoder
*
* Input:
* - *ISAC_main_inst : a pointer to the coder instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsacfix_CreateInternal(ISACFIX_MainStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsacfix_FreeInternal(...)
*
* This function frees the internal memory for storing encoder data.
*
* Input:
* - ISAC_main_inst : an ISAC instance.
*
* Return value : 0 - Ok
* -1 - Error
*/
int16_t WebRtcIsacfix_FreeInternal(ISACFIX_MainStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsacfix_GetNewBitStream(...)
*
* This function returns encoded data, with the received bwe-index in the
* stream. It should always return a complete packet, i.e. it is only called
* once, even for 60 msec frames.
*
* Input:
* - ISAC_main_inst : ISAC instance.
* - bweIndex : index of bandwidth estimate to put in new
* bitstream
* - scale : factor for rate change (0.4 ~=> half the rate,
* 1 no change).
*
* Output:
* - encoded : the encoded data vector
*
* Return value : >0 - Length (in bytes) of coded data
* -1 - Error
*/
int16_t WebRtcIsacfix_GetNewBitStream(ISACFIX_MainStruct* ISAC_main_inst,
int16_t bweIndex,
float scale,
uint8_t* encoded);
/****************************************************************************
* WebRtcIsacfix_GetDownLinkBwIndex(...)
*
* This function returns an index representing the Bandwidth estimate from
* the other side to this side.
*
* Input:
* - ISAC_main_inst : iSAC struct
*
* Output:
* - rateIndex : Bandwidth estimate to transmit to other side.
*
*/
int16_t WebRtcIsacfix_GetDownLinkBwIndex(ISACFIX_MainStruct* ISAC_main_inst,
int16_t* rateIndex);
/****************************************************************************
* WebRtcIsacfix_UpdateUplinkBw(...)
*
* This function takes an index representing the Bandwidth estimate from
* this side to the other side and updates BWE.
*
* Input:
* - ISAC_main_inst : iSAC struct
* - rateIndex : Bandwidth estimate from other side.
*
*/
int16_t WebRtcIsacfix_UpdateUplinkBw(ISACFIX_MainStruct* ISAC_main_inst,
int16_t rateIndex);
/****************************************************************************
* WebRtcIsacfix_ReadBwIndex(...)
*
* This function returns the index of the Bandwidth estimate from the bitstream.
*
* Input:
* - encoded : Encoded bitstream
* - encoded_len_bytes : Length of the bitstream in bytes.
*
* Output:
* - rateIndex : Bandwidth estimate in bitstream
*
*/
int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
size_t encoded_len_bytes,
int16_t* rateIndex);
/****************************************************************************
* WebRtcIsacfix_GetNewFrameLen(...)
*
* This function returns the next frame length (in samples) of iSAC.
*
* Input:
* -ISAC_main_inst : iSAC instance
*
* Return value : frame length in samples
*/
int16_t WebRtcIsacfix_GetNewFrameLen(ISACFIX_MainStruct* ISAC_main_inst);
#if defined(__cplusplus)
}
#endif
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_ */
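// A minimal sketch of driving the C API above in instantaneous mode
// (CodingMode == 1), based only on the contracts documented in this header:
// each call to WebRtcIsacfix_Encode() takes 160 samples (10 ms at 16 kHz) and
// returns 0 while the encoder is still buffering towards the configured
// 30 ms frame. Error handling is reduced to early returns and the payload
// buffer uses the documented 400-byte maximum; the function is illustrative.
#include <stdint.h>
static int EncodeOneFrame(const int16_t pcm_10ms_blocks[3][160],
                          uint8_t payload[400]) {
  ISACFIX_MainStruct* inst = NULL;
  if (WebRtcIsacfix_Create(&inst) != 0) return -1;
  // Instantaneous mode: the caller fixes the frame length and target bit rate.
  if (WebRtcIsacfix_EncoderInit(inst, 1) != 0 ||
      WebRtcIsacfix_Control(inst, 32000, 30) != 0) {
    WebRtcIsacfix_Free(inst);
    return -1;
  }
  int payload_bytes = 0;
  for (int i = 0; i < 3 && payload_bytes == 0; ++i) {
    // Returns 0 until 30 ms (3 x 10 ms) of speech has been buffered.
    payload_bytes = WebRtcIsacfix_Encode(inst, pcm_10ms_blocks[i], payload);
    if (payload_bytes < 0) break;  // See WebRtcIsacfix_GetErrorCode().
  }
  WebRtcIsacfix_Free(inst);
  return payload_bytes;  // >0: payload length in bytes, <0: error.
}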

View File

@ -0,0 +1,122 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* arith_routins.c
*
* This C file contains a function for finalizing the bitstream
* after arithmetic coding.
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
/****************************************************************************
* WebRtcIsacfix_EncTerminate(...)
*
* Final call to the arithmetic coder for an encoder call. This function
* terminates the coding and returns the byte stream.
*
* Input:
* - streamData : in-/output struct containing bitstream
*
* Return value : number of bytes in the stream
*/
int16_t WebRtcIsacfix_EncTerminate(Bitstr_enc *streamData)
{
uint16_t *streamPtr;
uint16_t negCarry;
/* point to the right place in the stream buffer */
streamPtr = streamData->stream + streamData->stream_index;
/* find minimum length (determined by current interval width) */
if ( streamData->W_upper > 0x01FFFFFF )
{
streamData->streamval += 0x01000000;
/* if result is less than the added value we must take care of the carry */
if (streamData->streamval < 0x01000000)
{
/* propagate carry */
if (streamData->full == 0) {
/* Add value to current value */
negCarry = *streamPtr;
negCarry += 0x0100;
*streamPtr = negCarry;
/* if value is too big, propagate carry to next byte, and so on */
while (!(negCarry))
{
negCarry = *--streamPtr;
negCarry++;
*streamPtr = negCarry;
}
} else {
/* propagate carry by adding one to the previous byte in the
* stream; if that byte is 0xFFFF we need to propagate the carry
* further back in the stream */
while ( !(++(*--streamPtr)) );
}
/* put pointer back to the old value */
streamPtr = streamData->stream + streamData->stream_index;
}
/* write remaining data to bitstream, if "full == 0" first byte has data */
if (streamData->full == 0) {
*streamPtr++ += (uint16_t)(streamData->streamval >> 24);
streamData->full = 1;
} else {
*streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
streamData->full = 0;
}
}
else
{
streamData->streamval += 0x00010000;
/* if result is less than the added value we must take care of the carry */
if (streamData->streamval < 0x00010000)
{
/* propagate carry */
if (streamData->full == 0) {
/* Add value to current value */
negCarry = *streamPtr;
negCarry += 0x0100;
*streamPtr = negCarry;
/* if value is too big, propagate carry to next byte, and so on */
while (!(negCarry))
{
negCarry = *--streamPtr;
negCarry++;
*streamPtr = negCarry;
}
} else {
/* Add carry to previous byte */
while ( !(++(*--streamPtr)) );
}
/* put pointer back to the old value */
streamPtr = streamData->stream + streamData->stream_index;
}
/* write remaining data (2 bytes) to bitstream */
if (streamData->full) {
*streamPtr++ = (uint16_t)(streamData->streamval >> 16);
} else {
*streamPtr++ |= (uint16_t)(streamData->streamval >> 24);
*streamPtr = (uint16_t)(streamData->streamval >> 8) & 0xFF00;
}
}
/* calculate stream length in bytes */
return (((streamPtr - streamData->stream)<<1) + !(streamData->full));
}

View File

@ -0,0 +1,401 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* arith_routinshist.c
*
* This C file contains arithmetic encoding and decoding.
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
/****************************************************************************
* WebRtcIsacfix_EncHistMulti(...)
*
* Encode the histogram interval
*
* Input:
* - streamData : in-/output struct containing bitstream
* - data : data vector
* - cdf : array of cdf arrays
* - lenData : data vector length
*
* Return value : 0 if ok
* <0 if error detected
*/
int WebRtcIsacfix_EncHistMulti(Bitstr_enc *streamData,
const int16_t *data,
const uint16_t *const *cdf,
const int16_t lenData)
{
uint32_t W_lower;
uint32_t W_upper;
uint32_t W_upper_LSB;
uint32_t W_upper_MSB;
uint16_t *streamPtr;
uint16_t negCarry;
uint16_t *maxStreamPtr;
uint16_t *streamPtrCarry;
uint32_t cdfLo;
uint32_t cdfHi;
int k;
/* point to beginning of stream buffer
* and set maximum streamPtr value */
streamPtr = streamData->stream + streamData->stream_index;
maxStreamPtr = streamData->stream + STREAM_MAXW16_60MS - 1;
W_upper = streamData->W_upper;
for (k = lenData; k > 0; k--)
{
/* fetch cdf_lower and cdf_upper from cdf tables */
cdfLo = (uint32_t) *(*cdf + (uint32_t)*data);
cdfHi = (uint32_t) *(*cdf++ + (uint32_t)*data++ + 1);
/* update interval */
W_upper_LSB = W_upper & 0x0000FFFF;
W_upper_MSB = W_upper >> 16;
W_lower = WEBRTC_SPL_UMUL(W_upper_MSB, cdfLo);
W_lower += ((W_upper_LSB * cdfLo) >> 16);
W_upper = WEBRTC_SPL_UMUL(W_upper_MSB, cdfHi);
W_upper += ((W_upper_LSB * cdfHi) >> 16);
/* shift interval such that it begins at zero */
W_upper -= ++W_lower;
/* add integer to bitstream */
streamData->streamval += W_lower;
/* handle carry */
if (streamData->streamval < W_lower)
{
/* propagate carry */
streamPtrCarry = streamPtr;
if (streamData->full == 0) {
negCarry = *streamPtrCarry;
negCarry += 0x0100;
*streamPtrCarry = negCarry;
while (!(negCarry))
{
negCarry = *--streamPtrCarry;
negCarry++;
*streamPtrCarry = negCarry;
}
} else {
while ( !(++(*--streamPtrCarry)) );
}
}
/* renormalize interval, store most significant byte of streamval and update streamval
* W_upper < 2^24 */
while ( !(W_upper & 0xFF000000) )
{
W_upper <<= 8;
if (streamData->full == 0) {
*streamPtr++ += (uint16_t)(streamData->streamval >> 24);
streamData->full = 1;
} else {
*streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
streamData->full = 0;
}
if( streamPtr > maxStreamPtr ) {
return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
}
streamData->streamval <<= 8;
}
}
/* calculate new stream_index */
streamData->stream_index = streamPtr - streamData->stream;
streamData->W_upper = W_upper;
return 0;
}
/****************************************************************************
* WebRtcIsacfix_DecHistBisectMulti(...)
*
* Function to decode more symbols from the arithmetic bytestream, using
* the method of bisection. Cdf tables should be of size 2^k-1 (which corresponds
* to an alphabet size of 2^k-2)
*
* Input:
* - streamData : in-/output struct containing bitstream
* - cdf : array of cdf arrays
* - cdfSize : array of cdf table sizes+1 (power of two: 2^k)
* - lenData : data vector length
*
* Output:
* - data : data vector
*
* Return value : number of bytes in the stream
* <0 if error detected
*/
int16_t WebRtcIsacfix_DecHistBisectMulti(int16_t *data,
Bitstr_dec *streamData,
const uint16_t *const *cdf,
const uint16_t *cdfSize,
const int16_t lenData)
{
uint32_t W_lower = 0;
uint32_t W_upper;
uint32_t W_tmp;
uint32_t W_upper_LSB;
uint32_t W_upper_MSB;
uint32_t streamval;
const uint16_t *streamPtr;
const uint16_t *cdfPtr;
int16_t sizeTmp;
int k;
streamPtr = streamData->stream + streamData->stream_index;
W_upper = streamData->W_upper;
/* Error check: should not be possible in normal operation */
if (W_upper == 0) {
return -2;
}
/* first time decoder is called for this stream */
if (streamData->stream_index == 0)
{
/* read first word from bytestream */
streamval = (uint32_t)*streamPtr++ << 16;
streamval |= *streamPtr++;
} else {
streamval = streamData->streamval;
}
for (k = lenData; k > 0; k--)
{
/* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
W_upper_LSB = W_upper & 0x0000FFFF;
W_upper_MSB = W_upper >> 16;
/* start halfway the cdf range */
sizeTmp = *cdfSize++ / 2;
cdfPtr = *cdf + (sizeTmp - 1);
/* method of bisection */
for ( ;; )
{
W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
sizeTmp /= 2;
if (sizeTmp == 0) {
break;
}
if (streamval > W_tmp)
{
W_lower = W_tmp;
cdfPtr += sizeTmp;
} else {
W_upper = W_tmp;
cdfPtr -= sizeTmp;
}
}
if (streamval > W_tmp)
{
W_lower = W_tmp;
*data++ = cdfPtr - *cdf++;
} else {
W_upper = W_tmp;
*data++ = cdfPtr - *cdf++ - 1;
}
/* shift interval to start at zero */
W_upper -= ++W_lower;
/* add integer to bitstream */
streamval -= W_lower;
/* renormalize interval and update streamval */
/* W_upper < 2^24 */
while ( !(W_upper & 0xFF000000) )
{
/* read next byte from stream */
if (streamData->full == 0) {
streamval = (streamval << 8) | (*streamPtr++ & 0x00FF);
streamData->full = 1;
} else {
streamval = (streamval << 8) | (*streamPtr >> 8);
streamData->full = 0;
}
W_upper <<= 8;
}
/* Error check: should not be possible in normal operation */
if (W_upper == 0) {
return -2;
}
}
streamData->stream_index = streamPtr - streamData->stream;
streamData->W_upper = W_upper;
streamData->streamval = streamval;
if ( W_upper > 0x01FFFFFF ) {
return (streamData->stream_index*2 - 3 + !streamData->full);
} else {
return (streamData->stream_index*2 - 2 + !streamData->full);
}
}
/****************************************************************************
* WebRtcIsacfix_DecHistOneStepMulti(...)
*
* Function to decode more symbols from the arithmetic bytestream, taking
* single step up or down at a time.
* cdf tables can be of arbitrary size, but large tables may take a lot of
* iterations.
*
* Input:
* - streamData : in-/output struct containing bitstream
* - cdf : array of cdf arrays
* - initIndex : vector of initial cdf table search entries
* - lenData : data vector length
*
* Output:
* - data : data vector
*
* Return value : number of bytes in original stream
* <0 if error detected
*/
int16_t WebRtcIsacfix_DecHistOneStepMulti(int16_t *data,
Bitstr_dec *streamData,
const uint16_t *const *cdf,
const uint16_t *initIndex,
const int16_t lenData)
{
uint32_t W_lower;
uint32_t W_upper;
uint32_t W_tmp;
uint32_t W_upper_LSB;
uint32_t W_upper_MSB;
uint32_t streamval;
const uint16_t *streamPtr;
const uint16_t *cdfPtr;
int k;
streamPtr = streamData->stream + streamData->stream_index;
W_upper = streamData->W_upper;
/* Error check: Should not be possible in normal operation */
if (W_upper == 0) {
return -2;
}
/* Check if it is the first time decoder is called for this stream */
if (streamData->stream_index == 0)
{
/* read first word from bytestream */
streamval = (uint32_t)(*streamPtr++) << 16;
streamval |= *streamPtr++;
} else {
streamval = streamData->streamval;
}
for (k = lenData; k > 0; k--)
{
/* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
W_upper_LSB = W_upper & 0x0000FFFF;
W_upper_MSB = WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
/* start at the specified table entry */
cdfPtr = *cdf + (*initIndex++);
W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
if (streamval > W_tmp)
{
for ( ;; )
{
W_lower = W_tmp;
/* range check */
if (cdfPtr[0] == 65535) {
return -3;
}
W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *++cdfPtr);
W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
if (streamval <= W_tmp) {
break;
}
}
W_upper = W_tmp;
*data++ = cdfPtr - *cdf++ - 1;
} else {
for ( ;; )
{
W_upper = W_tmp;
--cdfPtr;
/* range check */
if (cdfPtr < *cdf) {
return -3;
}
W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
if (streamval > W_tmp) {
break;
}
}
W_lower = W_tmp;
*data++ = cdfPtr - *cdf++;
}
/* shift interval to start at zero */
W_upper -= ++W_lower;
/* add integer to bitstream */
streamval -= W_lower;
/* renormalize interval and update streamval */
/* W_upper < 2^24 */
while ( !(W_upper & 0xFF000000) )
{
/* read next byte from stream */
if (streamData->full == 0) {
streamval = (streamval << 8) | (*streamPtr++ & 0x00FF);
streamData->full = 1;
} else {
streamval = (streamval << 8) | (*streamPtr >> 8);
streamData->full = 0;
}
W_upper <<= 8;
}
}
streamData->stream_index = streamPtr - streamData->stream;
streamData->W_upper = W_upper;
streamData->streamval = streamval;
/* find number of bytes in original stream (determined by current interval width) */
if ( W_upper > 0x01FFFFFF ) {
return (streamData->stream_index*2 - 3 + !streamData->full);
} else {
return (streamData->stream_index*2 - 2 + !streamData->full);
}
}
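// A simplified sketch of the search performed per symbol by
// WebRtcIsacfix_DecHistBisectMulti() above: bisect a cumulative distribution
// table to find the symbol whose [cdf[s], cdf[s+1]) interval contains a
// target value. The fixed-point interval scaling (W_upper_MSB/W_upper_LSB)
// and stream renormalization are omitted; the table is assumed to start at
// cdf[0] == 0 and end at cdf[num_symbols] == total, with target < total.
#include <stdint.h>
static int FindSymbolByBisection(const uint16_t* cdf,
                                 int num_symbols,
                                 uint32_t target) {
  int lo = 0;                // Invariant: cdf[lo] <= target.
  int hi = num_symbols - 1;  // Invariant: target < cdf[hi + 1].
  while (lo < hi) {
    const int mid = (lo + hi + 1) / 2;
    if (target >= cdf[mid]) {
      lo = mid;      // Target lies at or above the start of symbol mid.
    } else {
      hi = mid - 1;  // Target lies below symbol mid.
    }
  }
  return lo;  // cdf[lo] <= target < cdf[lo + 1].
}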

View File

@ -0,0 +1,413 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* arith_routinslogist.c
*
* This C file contains arithmetic encode and decode logistic
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
/* Tables for piecewise linear cdf functions: y = k*x */
/* x Points for function piecewise() in Q15 */
static const int32_t kHistEdges[51] = {
-327680, -314573, -301466, -288359, -275252, -262144, -249037, -235930, -222823, -209716,
-196608, -183501, -170394, -157287, -144180, -131072, -117965, -104858, -91751, -78644,
-65536, -52429, -39322, -26215, -13108, 0, 13107, 26214, 39321, 52428,
65536, 78643, 91750, 104857, 117964, 131072, 144179, 157286, 170393, 183500,
196608, 209715, 222822, 235929, 249036, 262144, 275251, 288358, 301465, 314572,
327680
};
/* k Points for function piecewise() in Q0 */
static const uint16_t kCdfSlope[51] = {
5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 13, 23, 47, 87, 154, 315, 700, 1088,
2471, 6064, 14221, 21463, 36634, 36924, 19750, 13270, 5806, 2312,
1095, 660, 316, 145, 86, 41, 32, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 2,
0
};
/* y Points for function piecewise() in Q0 */
static const uint16_t kCdfLogistic[51] = {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
20, 22, 24, 29, 38, 57, 92, 153, 279, 559,
994, 1983, 4408, 10097, 18682, 33336, 48105, 56005, 61313, 63636,
64560, 64998, 65262, 65389, 65447, 65481, 65497, 65510, 65512, 65514,
65516, 65518, 65520, 65522, 65524, 65526, 65528, 65530, 65532, 65534,
65535
};
/****************************************************************************
* WebRtcIsacfix_Piecewise(...)
*
* Piecewise linear function
*
* Input:
* - xinQ15 : input value x in Q15
*
* Return value : corresponding y-value in Q0
*/
static __inline uint16_t WebRtcIsacfix_Piecewise(int32_t xinQ15) {
int32_t ind;
int32_t qtmp1;
uint16_t qtmp2;
/* Find index for x-value */
qtmp1 = WEBRTC_SPL_SAT(kHistEdges[50],xinQ15,kHistEdges[0]);
ind = WEBRTC_SPL_MUL(5, qtmp1 - kHistEdges[0]);
ind >>= 16;
/* Calculate corresponding y-value and return */
qtmp1 = qtmp1 - kHistEdges[ind];
qtmp2 = (uint16_t)WEBRTC_SPL_RSHIFT_U32(
WEBRTC_SPL_UMUL_32_16(qtmp1,kCdfSlope[ind]), 15);
return (kCdfLogistic[ind] + qtmp2);
}
/****************************************************************************
* WebRtcIsacfix_EncLogisticMulti2(...)
*
* Arithmetic coding of spectrum.
*
* Input:
* - streamData : in-/output struct containing bitstream
* - dataQ7 : data vector in Q7
* - envQ8 : side info vector defining the width of the pdf
* in Q8
* - lenData : data vector length
*
* Return value : 0 if ok,
* <0 otherwise.
*/
int WebRtcIsacfix_EncLogisticMulti2(Bitstr_enc *streamData,
int16_t *dataQ7,
const uint16_t *envQ8,
const int16_t lenData)
{
uint32_t W_lower;
uint32_t W_upper;
uint16_t W_upper_LSB;
uint16_t W_upper_MSB;
uint16_t *streamPtr;
uint16_t *maxStreamPtr;
uint16_t *streamPtrCarry;
uint16_t negcarry;
uint32_t cdfLo;
uint32_t cdfHi;
int k;
/* point to beginning of stream buffer
* and set maximum streamPtr value */
streamPtr = streamData->stream + streamData->stream_index;
maxStreamPtr = streamData->stream + STREAM_MAXW16_60MS - 1;
W_upper = streamData->W_upper;
for (k = 0; k < lenData; k++)
{
/* compute cdf_lower and cdf_upper by evaluating the
* WebRtcIsacfix_Piecewise linear cdf */
cdfLo = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(*dataQ7 - 64, *envQ8));
cdfHi = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(*dataQ7 + 64, *envQ8));
/* test and clip if probability gets too small */
while ((cdfLo + 1) >= cdfHi) {
/* clip */
if (*dataQ7 > 0) {
*dataQ7 -= 128;
cdfHi = cdfLo;
cdfLo = WebRtcIsacfix_Piecewise(
WEBRTC_SPL_MUL_16_U16(*dataQ7 - 64, *envQ8));
} else {
*dataQ7 += 128;
cdfLo = cdfHi;
cdfHi = WebRtcIsacfix_Piecewise(
WEBRTC_SPL_MUL_16_U16(*dataQ7 + 64, *envQ8));
}
}
dataQ7++;
/* increment only once per 4 iterations */
envQ8 += (k & 1) & (k >> 1);
/* update interval */
W_upper_LSB = (uint16_t)W_upper;
W_upper_MSB = (uint16_t)WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
W_lower = WEBRTC_SPL_UMUL_32_16(cdfLo, W_upper_MSB);
W_lower += (cdfLo * W_upper_LSB) >> 16;
W_upper = WEBRTC_SPL_UMUL_32_16(cdfHi, W_upper_MSB);
W_upper += (cdfHi * W_upper_LSB) >> 16;
/* shift interval such that it begins at zero */
W_upper -= ++W_lower;
/* add integer to bitstream */
streamData->streamval += W_lower;
/* handle carry */
if (streamData->streamval < W_lower)
{
/* propagate carry */
streamPtrCarry = streamPtr;
if (streamData->full == 0) {
negcarry = *streamPtrCarry;
negcarry += 0x0100;
*streamPtrCarry = negcarry;
while (!(negcarry))
{
negcarry = *--streamPtrCarry;
negcarry++;
*streamPtrCarry = negcarry;
}
} else {
while (!(++(*--streamPtrCarry)));
}
}
/* renormalize interval, store most significant byte of streamval and update streamval
* W_upper < 2^24 */
while ( !(W_upper & 0xFF000000) )
{
W_upper <<= 8;
if (streamData->full == 0) {
*streamPtr++ += (uint16_t) WEBRTC_SPL_RSHIFT_U32(
streamData->streamval, 24);
streamData->full = 1;
} else {
*streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
streamData->full = 0;
}
if( streamPtr > maxStreamPtr )
return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
streamData->streamval <<= 8;
}
}
/* calculate new stream_index */
streamData->stream_index = streamPtr - streamData->stream;
streamData->W_upper = W_upper;
return 0;
}
/****************************************************************************
* WebRtcIsacfix_DecLogisticMulti2(...)
*
* Arithmetic decoding of spectrum.
*
* Input:
* - streamData : in-/output struct containing bitstream
* - envQ8 : side info vector defining the width of the pdf
* in Q8
* - lenData : data vector length
*
* Input/Output:
* - dataQ7 : input: dither vector, output: data vector
*
* Return value : number of bytes in the stream so far
* -1 if error detected
*/
int WebRtcIsacfix_DecLogisticMulti2(int16_t *dataQ7,
Bitstr_dec *streamData,
const int32_t *envQ8,
const int16_t lenData)
{
uint32_t W_lower;
uint32_t W_upper;
uint32_t W_tmp;
uint16_t W_upper_LSB;
uint16_t W_upper_MSB;
uint32_t streamVal;
uint16_t cdfTmp;
int32_t res;
int32_t inSqrt;
int32_t newRes;
const uint16_t *streamPtr;
int16_t candQ7;
int16_t envCount;
uint16_t tmpARSpecQ8 = 0;
int k, i;
int offset = 0;
/* point to beginning of stream buffer */
streamPtr = streamData->stream + streamData->stream_index;
W_upper = streamData->W_upper;
/* Check if it is first time decoder is called for this stream */
if (streamData->stream_index == 0)
{
/* read first word from bytestream */
streamVal = (uint32_t)(*streamPtr++) << 16;
streamVal |= *streamPtr++;
} else {
streamVal = streamData->streamval;
}
res = 1 << (WebRtcSpl_GetSizeInBits(envQ8[0]) >> 1);
envCount = 0;
/* code assumes lenData%4 == 0 */
for (k = 0; k < lenData; k += 4)
{
int k4;
/* convert to magnitude spectrum, by doing square-roots (modified from SPLIB) */
inSqrt = envQ8[envCount];
i = 10;
/* For safety reasons */
if (inSqrt < 0)
inSqrt=-inSqrt;
newRes = (inSqrt / res + res) >> 1;
do
{
res = newRes;
newRes = (inSqrt / res + res) >> 1;
} while (newRes != res && i-- > 0);
tmpARSpecQ8 = (uint16_t)newRes;
for(k4 = 0; k4 < 4; k4++)
{
/* find the integer *data for which streamVal lies in [W_lower+1, W_upper] */
W_upper_LSB = (uint16_t) (W_upper & 0x0000FFFF);
W_upper_MSB = (uint16_t) WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
/* find first candidate by inverting the logistic cdf
* Input dither value collected from io-stream */
candQ7 = - *dataQ7 + 64;
cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
if (streamVal > W_tmp)
{
W_lower = W_tmp;
candQ7 += 128;
cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
while (streamVal > W_tmp)
{
W_lower = W_tmp;
candQ7 += 128;
cdfTmp = WebRtcIsacfix_Piecewise(
WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
/* error check */
if (W_lower == W_tmp) {
return -1;
}
}
W_upper = W_tmp;
/* Output value put in dataQ7: another sample decoded */
*dataQ7 = candQ7 - 64;
}
else
{
W_upper = W_tmp;
candQ7 -= 128;
cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
while ( !(streamVal > W_tmp) )
{
W_upper = W_tmp;
candQ7 -= 128;
cdfTmp = WebRtcIsacfix_Piecewise(
WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
/* error check */
if (W_upper == W_tmp){
return -1;
}
}
W_lower = W_tmp;
/* Output value put in dataQ7: another sample decoded */
*dataQ7 = candQ7 + 64;
}
dataQ7++;
/* shift interval to start at zero */
W_upper -= ++W_lower;
/* add integer to bitstream */
streamVal -= W_lower;
/* renormalize interval and update streamVal
* W_upper < 2^24 */
while ( !(W_upper & 0xFF000000) )
{
if (streamPtr < streamData->stream + streamData->stream_size) {
/* read next byte from stream */
if (streamData->full == 0) {
streamVal = (streamVal << 8) | (*streamPtr++ & 0x00FF);
streamData->full = 1;
} else {
streamVal = (streamVal << 8) | (*streamPtr >> 8);
streamData->full = 0;
}
} else {
/* Intending to read outside the stream. This can happen for the last
* two or three bytes. It is how the algorithm is implemented. Do
* not read from the bit stream and insert zeros instead. */
streamVal <<= 8;
if (streamData->full == 0) {
offset++; // We would have incremented the pointer in this case.
streamData->full = 1;
} else {
streamData->full = 0;
}
}
W_upper <<= 8;
}
}
envCount++;
}
streamData->stream_index = streamPtr + offset - streamData->stream;
streamData->W_upper = W_upper;
streamData->streamval = streamVal;
/* find number of bytes in original stream (determined by current interval width) */
if ( W_upper > 0x01FFFFFF )
return (streamData->stream_index*2 - 3 + !streamData->full);
else
return (streamData->stream_index*2 - 2 + !streamData->full);
}
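// A floating-point restatement of WebRtcIsacfix_Piecewise() above, to make
// the Q-format arithmetic explicit. The Q15 edges in kHistEdges are spaced by
// roughly 65536/5 (0.4 in real units), which is why the bin index is computed
// as (5 * (x - edges[0])) >> 16, and the slope product is shifted down by 15
// because kCdfSlope is Q0 while (x - edge) is Q15. The parameters stand in
// for kHistEdges, kCdfLogistic and kCdfSlope; this sketch is illustrative.
#include <algorithm>
static double PiecewiseLinearCdf(double x,
                                 const double* edges,  // Ascending, size n.
                                 const double* y,      // Cdf value at edges[i].
                                 const double* slope,  // Slope on bin i.
                                 int n) {
  // Clamp x into the supported range, as WEBRTC_SPL_SAT does for the Q15 input.
  x = std::min(std::max(x, edges[0]), edges[n - 1]);
  // Uniform spacing lets the bin index be computed directly.
  const double bin_width = edges[1] - edges[0];
  int ind = static_cast<int>((x - edges[0]) / bin_width);
  if (ind > n - 2) ind = n - 2;  // Keep the top edge inside the last bin.
  // Linear interpolation within the bin: y(x) = y[ind] + k[ind] * (x - edge).
  return y[ind] + slope[ind] * (x - edges[ind]);
}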

View File

@ -0,0 +1,149 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* arith_routins.h
*
* Functions for arithmetic coding.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
/****************************************************************************
* WebRtcIsacfix_EncLogisticMulti2(...)
*
* Arithmetic coding of spectrum.
*
* Input:
* - streamData : in-/output struct containing bitstream
* - dataQ7 : data vector in Q7
* - envQ8 : side info vector defining the width of the pdf
* in Q8
* - lenData : data vector length
*
* Return value : 0 if ok,
* <0 otherwise.
*/
int WebRtcIsacfix_EncLogisticMulti2(Bitstr_enc* streamData,
int16_t* dataQ7,
const uint16_t* env,
int16_t lenData);
/****************************************************************************
* WebRtcIsacfix_EncTerminate(...)
*
* Final call to the arithmetic coder for an encoder call. This function
* terminates the coding and returns the byte stream.
*
* Input:
* - streamData : in-/output struct containing bitstream
*
* Return value : number of bytes in the stream
*/
int16_t WebRtcIsacfix_EncTerminate(Bitstr_enc* streamData);
/****************************************************************************
* WebRtcIsacfix_DecLogisticMulti2(...)
*
* Arithmetic decoding of spectrum.
*
* Input:
* - streamData : in-/output struct containing bitstream
* - envQ8 : side info vector defining the width of the pdf
* in Q8
* - lenData : data vector length
*
* Input/Output:
* - dataQ7 : input: dither vector, output: data vector, in Q7
*
* Return value : number of bytes in the stream so far
* <0 if error detected
*/
int WebRtcIsacfix_DecLogisticMulti2(int16_t* data,
Bitstr_dec* streamData,
const int32_t* env,
int16_t lenData);
/****************************************************************************
* WebRtcIsacfix_EncHistMulti(...)
*
* Encode the histogram interval
*
* Input:
* - streamData : in-/output struct containing bitstream
* - data : data vector
* - cdf : array of cdf arrays
* - lenData : data vector length
*
* Return value : 0 if ok
* <0 if error detected
*/
int WebRtcIsacfix_EncHistMulti(Bitstr_enc* streamData,
const int16_t* data,
const uint16_t* const* cdf,
int16_t lenData);
/****************************************************************************
* WebRtcIsacfix_DecHistBisectMulti(...)
*
* Function to decode more symbols from the arithmetic bytestream, using
* method of bisection.
* Cdf tables should be of size 2^k-1 (which corresponds to an
* alphabet size of 2^k-2)
*
* Input:
* - streamData : in-/output struct containing bitstream
* - cdf : array of cdf arrays
* - cdfSize : array of cdf table sizes+1 (power of two: 2^k)
* - lenData : data vector length
*
* Output:
* - data : data vector
*
* Return value : number of bytes in the stream
* <0 if error detected
*/
int16_t WebRtcIsacfix_DecHistBisectMulti(int16_t* data,
Bitstr_dec* streamData,
const uint16_t* const* cdf,
const uint16_t* cdfSize,
int16_t lenData);
/****************************************************************************
* WebRtcIsacfix_DecHistOneStepMulti(...)
*
* Function to decode more symbols from the arithmetic bytestream, taking
* a single step up or down at a time.
* cdf tables can be of arbitrary size, but large tables may take a lot of
* iterations.
*
* Input:
* - streamData : in-/output struct containing bitstream
* - cdf : array of cdf arrays
* - initIndex : vector of initial cdf table search entries
* - lenData : data vector length
*
* Output:
* - data : data vector
*
* Return value : number of bytes in original stream
* <0 if error detected
*/
int16_t WebRtcIsacfix_DecHistOneStepMulti(int16_t* data,
Bitstr_dec* streamData,
const uint16_t* const* cdf,
const uint16_t* initIndex,
int16_t lenData);
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_ */

View File

@ -0,0 +1,20 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h"
namespace webrtc {
// Explicit instantiation:
template class AudioDecoderIsacT<IsacFix>;
} // namespace webrtc

View File

@ -0,0 +1,20 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h"
namespace webrtc {
// Explicit instantiation:
template class AudioEncoderIsacT<IsacFix>;
} // namespace webrtc

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* bandwidth_estimator.h
*
* This header file contains the API for the Bandwidth Estimator
* designed for iSAC.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
/****************************************************************************
* WebRtcIsacfix_InitBandwidthEstimator(...)
*
* This function initializes the struct for the bandwidth estimator
*
* Input/Output:
* - bwest_str : Struct containing bandwidth information.
*
* Return value : 0
*/
int32_t WebRtcIsacfix_InitBandwidthEstimator(BwEstimatorstr* bwest_str);
/****************************************************************************
* WebRtcIsacfix_UpdateUplinkBwImpl(...)
*
* This function updates the bottleneck rate received from the other side in
* the payload and calculates a new bottleneck to send to the other side.
*
* Input/Output:
* - bweStr : struct containing bandwidth information.
* - rtpNumber : value from RTP packet, from NetEq
* - frameSize : length of signal frame in ms, from iSAC decoder
* - sendTime : value in RTP header giving send time in samples
* - arrivalTime : value given by timeGetTime() time of arrival in
* samples of packet from NetEq
* - pksize : size of packet in bytes, from NetEq
* - Index : integer (range 0...23) indicating bottle neck &
* jitter as estimated by other side
*
* Return value : 0 if everything went fine,
* -1 otherwise
*/
int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr* bwest_str,
uint16_t rtp_number,
int16_t frameSize,
uint32_t send_ts,
uint32_t arr_ts,
size_t pksize,
uint16_t Index);
/* Update receiving estimates. Used when we only receive BWE index, no iSAC data
* packet. */
int16_t WebRtcIsacfix_UpdateUplinkBwRec(BwEstimatorstr* bwest_str,
int16_t Index);
/****************************************************************************
* WebRtcIsacfix_GetDownlinkBwIndexImpl(...)
*
* This function calculates and returns the bandwidth/jitter estimation code
* (integer 0...23) to put in the sending iSAC payload.
*
* Input:
* - bweStr : BWE struct
*
* Return:
* bandwidth and jitter index (0..23)
*/
uint16_t WebRtcIsacfix_GetDownlinkBwIndexImpl(BwEstimatorstr* bwest_str);
/* Returns the bandwidth estimation (in bps) */
uint16_t WebRtcIsacfix_GetDownlinkBandwidth(const BwEstimatorstr* bwest_str);
/* Returns the bandwidth (in bps) that iSAC should send with */
int16_t WebRtcIsacfix_GetUplinkBandwidth(const BwEstimatorstr* bwest_str);
/* Returns the max delay (in ms) */
int16_t WebRtcIsacfix_GetDownlinkMaxDelay(const BwEstimatorstr* bwest_str);
/* Returns the max delay value from the other side in ms */
int16_t WebRtcIsacfix_GetUplinkMaxDelay(const BwEstimatorstr* bwest_str);
/*
* update amount of data in bottle neck buffer and burst handling
* returns minimum payload size (bytes)
*/
uint16_t WebRtcIsacfix_GetMinBytes(
RateModel* State,
int16_t StreamSize, /* bytes in bitstream */
int16_t FrameLen, /* ms per frame */
int16_t BottleNeck, /* bottle neck rate; excl headers (bps) */
int16_t DelayBuildUp); /* max delay from bottle neck buffering (ms) */
/*
* update long-term average bitrate and amount of data in buffer
*/
void WebRtcIsacfix_UpdateRateModel(
RateModel* State,
int16_t StreamSize, /* bytes in bitstream */
int16_t FrameSamples, /* samples per frame */
int16_t BottleNeck); /* bottle neck rate; excl headers (bps) */
void WebRtcIsacfix_InitRateModel(RateModel* State);
/* Returns the new framelength value (input argument: bottle_neck) */
int16_t WebRtcIsacfix_GetNewFrameLength(int16_t bottle_neck,
int16_t current_framelength);
/* Returns the new SNR value (input argument: bottle_neck) */
// returns snr in Q10
int16_t WebRtcIsacfix_GetSnr(int16_t bottle_neck, int16_t framesamples);
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_ \
*/

View File

@ -0,0 +1,212 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* codec.h
*
* This header file contains the calls to the internal encoder
* and decoder functions.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
#ifdef __cplusplus
extern "C" {
#endif
int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr* bwest_str,
Bitstr_dec* streamdata,
size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
IsacFixDecoderInstance* ISACdec_obj,
size_t* current_framesamples);
void WebRtcIsacfix_DecodePlcImpl(int16_t* decoded,
IsacFixDecoderInstance* ISACdec_obj,
size_t* current_framesample);
int WebRtcIsacfix_EncodeImpl(int16_t* in,
IsacFixEncoderInstance* ISACenc_obj,
BwEstimatorstr* bw_estimatordata,
int16_t CodingMode);
int WebRtcIsacfix_EncodeStoredData(IsacFixEncoderInstance* ISACenc_obj,
int BWnumber,
float scale);
/* initialization functions */
void WebRtcIsacfix_InitMaskingEnc(MaskFiltstr_enc* maskdata);
void WebRtcIsacfix_InitMaskingDec(MaskFiltstr_dec* maskdata);
void WebRtcIsacfix_InitPreFilterbank(PreFiltBankstr* prefiltdata);
void WebRtcIsacfix_InitPostFilterbank(PostFiltBankstr* postfiltdata);
void WebRtcIsacfix_InitPitchFilter(PitchFiltstr* pitchfiltdata);
void WebRtcIsacfix_InitPitchAnalysis(PitchAnalysisStruct* State);
void WebRtcIsacfix_InitPlc(PLCstr* State);
/* transform functions */
void WebRtcIsacfix_InitTransform(void);
typedef void (*Time2Spec)(int16_t* inre1Q9,
int16_t* inre2Q9,
int16_t* outre,
int16_t* outim);
typedef void (*Spec2Time)(int16_t* inreQ7,
int16_t* inimQ7,
int32_t* outre1Q16,
int32_t* outre2Q16);
extern Time2Spec WebRtcIsacfix_Time2Spec;
extern Spec2Time WebRtcIsacfix_Spec2Time;
void WebRtcIsacfix_Time2SpecC(int16_t* inre1Q9,
int16_t* inre2Q9,
int16_t* outre,
int16_t* outim);
void WebRtcIsacfix_Spec2TimeC(int16_t* inreQ7,
int16_t* inimQ7,
int32_t* outre1Q16,
int32_t* outre2Q16);
#if defined(WEBRTC_HAS_NEON)
void WebRtcIsacfix_Time2SpecNeon(int16_t* inre1Q9,
int16_t* inre2Q9,
int16_t* outre,
int16_t* outim);
void WebRtcIsacfix_Spec2TimeNeon(int16_t* inreQ7,
int16_t* inimQ7,
int32_t* outre1Q16,
int32_t* outre2Q16);
#endif
#if defined(MIPS32_LE)
void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
int16_t* inre2Q9,
int16_t* outre,
int16_t* outim);
void WebRtcIsacfix_Spec2TimeMIPS(int16_t* inreQ7,
int16_t* inimQ7,
int32_t* outre1Q16,
int32_t* outre2Q16);
#endif
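/* WebRtcIsacfix_Time2Spec/WebRtcIsacfix_Spec2Time above are function pointers
 * so that the transform can be bound to a platform-specific implementation at
 * run time. A plausible dispatch sketch - an assumption about what
 * WebRtcIsacfix_InitTransform() does, not a copy of its actual body:
 *
 *   void WebRtcIsacfix_InitTransform(void) {
 *     WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecC;
 *     WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeC;
 *   #if defined(WEBRTC_HAS_NEON)
 *     WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecNeon;
 *     WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeNeon;
 *   #endif
 *   }
 */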
/* filterbank functions */
void WebRtcIsacfix_SplitAndFilter1(int16_t* in,
int16_t* LP16,
int16_t* HP16,
PreFiltBankstr* prefiltdata);
void WebRtcIsacfix_FilterAndCombine1(int16_t* tempin_ch1,
int16_t* tempin_ch2,
int16_t* out16,
PostFiltBankstr* postfiltdata);
/* normalized lattice filters */
void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
int32_t* stateGQ15,
int16_t* lat_inQ0,
int16_t* filt_coefQ15,
int32_t* gain_lo_hiQ17,
int16_t lo_hi,
int16_t* lat_outQ9);
void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
int16_t* stateGQ0,
int32_t* lat_inQ25,
int16_t* filt_coefQ15,
int32_t* gain_lo_hiQ17,
int16_t lo_hi,
int16_t* lat_outQ0);
/* TODO(kma): Remove the following functions into individual header files. */
/* Internal functions in both C and ARM Neon versions */
int WebRtcIsacfix_AutocorrC(int32_t* __restrict r,
const int16_t* __restrict x,
int16_t N,
int16_t order,
int16_t* __restrict scale);
void WebRtcIsacfix_FilterMaLoopC(int16_t input0,
int16_t input1,
int32_t input2,
int32_t* ptr0,
int32_t* ptr1,
int32_t* ptr2);
#if defined(WEBRTC_HAS_NEON)
int WebRtcIsacfix_AutocorrNeon(int32_t* __restrict r,
const int16_t* __restrict x,
int16_t N,
int16_t order,
int16_t* __restrict scale);
void WebRtcIsacfix_FilterMaLoopNeon(int16_t input0,
int16_t input1,
int32_t input2,
int32_t* ptr0,
int32_t* ptr1,
int32_t* ptr2);
#endif
#if defined(MIPS32_LE)
int WebRtcIsacfix_AutocorrMIPS(int32_t* __restrict r,
const int16_t* __restrict x,
int16_t N,
int16_t order,
int16_t* __restrict scale);
void WebRtcIsacfix_FilterMaLoopMIPS(int16_t input0,
int16_t input1,
int32_t input2,
int32_t* ptr0,
int32_t* ptr1,
int32_t* ptr2);
#endif
/* Function pointers associated with the above functions. */
typedef int (*AutocorrFix)(int32_t* __restrict r,
const int16_t* __restrict x,
int16_t N,
int16_t order,
int16_t* __restrict scale);
extern AutocorrFix WebRtcIsacfix_AutocorrFix;
typedef void (*FilterMaLoopFix)(int16_t input0,
int16_t input1,
int32_t input2,
int32_t* ptr0,
int32_t* ptr1,
int32_t* ptr2);
extern FilterMaLoopFix WebRtcIsacfix_FilterMaLoopFix;
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_ */

View File

@ -0,0 +1,221 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* decode.c
*
* This C file contains the internal decoding function.
*
*/
#include <string.h>
#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
IsacFixDecoderInstance* ISACdec_obj,
size_t* current_framesamples)
{
int k;
int err;
int16_t BWno;
int len = 0;
int16_t model;
int16_t Vector_Word16_1[FRAMESAMPLES/2];
int16_t Vector_Word16_2[FRAMESAMPLES/2];
int32_t Vector_Word32_1[FRAMESAMPLES/2];
int32_t Vector_Word32_2[FRAMESAMPLES/2];
int16_t lofilt_coefQ15[ORDERLO*SUBFRAMES]; //refl. coeffs
int16_t hifilt_coefQ15[ORDERHI*SUBFRAMES]; //refl. coeffs
int32_t gain_lo_hiQ17[2*SUBFRAMES];
int16_t PitchLags_Q7[PITCH_SUBFRAMES];
int16_t PitchGains_Q12[PITCH_SUBFRAMES];
int16_t AvgPitchGain_Q12;
int16_t tmp_1, tmp_2;
int32_t tmp32a;
int16_t gainQ13;
size_t frame_nb; /* counter */
size_t frame_mode; /* 0 for 30ms, 1 for 60ms */
static const size_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
/* PLC */
int16_t overlapWin[ 240 ];
(ISACdec_obj->bitstr_obj).W_upper = 0xFFFFFFFF;
(ISACdec_obj->bitstr_obj).streamval = 0;
(ISACdec_obj->bitstr_obj).stream_index = 0;
(ISACdec_obj->bitstr_obj).full = 1;
/* decode framelength and BW estimation - not used, only for stream pointer*/
err = WebRtcIsacfix_DecodeFrameLen(&ISACdec_obj->bitstr_obj, current_framesamples);
if (err<0) // error check
return err;
frame_mode = *current_framesamples / MAX_FRAMESAMPLES; /* 0, or 1 */
err = WebRtcIsacfix_DecodeSendBandwidth(&ISACdec_obj->bitstr_obj, &BWno);
if (err<0) // error check
return err;
/* one loop if it's one frame (30ms), two loops if two frames bundled together
* (60ms) */
for (frame_nb = 0; frame_nb <= frame_mode; frame_nb++) {
/* decode & dequantize pitch parameters */
err = WebRtcIsacfix_DecodePitchGain(&(ISACdec_obj->bitstr_obj), PitchGains_Q12);
if (err<0) // error check
return err;
err = WebRtcIsacfix_DecodePitchLag(&ISACdec_obj->bitstr_obj, PitchGains_Q12, PitchLags_Q7);
if (err<0) // error check
return err;
AvgPitchGain_Q12 = (int16_t)(((int32_t)PitchGains_Q12[0] + PitchGains_Q12[1] + PitchGains_Q12[2] + PitchGains_Q12[3])>>2);
/* decode & dequantize FiltCoef */
err = WebRtcIsacfix_DecodeLpc(gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15,
&ISACdec_obj->bitstr_obj, &model);
if (err<0) // error check
return err;
/* decode & dequantize spectrum */
len = WebRtcIsacfix_DecodeSpec(&ISACdec_obj->bitstr_obj, Vector_Word16_1, Vector_Word16_2, AvgPitchGain_Q12);
if (len < 0) // error check
return len;
// Why does this need Q16 in and out? /JS
WebRtcIsacfix_Spec2Time(Vector_Word16_1, Vector_Word16_2, Vector_Word32_1, Vector_Word32_2);
for (k=0; k<FRAMESAMPLES/2; k++) {
// Q16 -> Q9.
Vector_Word16_1[k] = (int16_t)((Vector_Word32_1[k] + 64) >> 7);
}
/* ---- If this is recovery frame ---- */
if( (ISACdec_obj->plcstr_obj).used == PLC_WAS_USED )
{
(ISACdec_obj->plcstr_obj).used = PLC_NOT_USED;
if( (ISACdec_obj->plcstr_obj).B < 1000 )
{
(ISACdec_obj->plcstr_obj).decayCoeffPriodic = 4000;
}
ISACdec_obj->plcstr_obj.decayCoeffPriodic = WEBRTC_SPL_WORD16_MAX; /* DECAY_RATE is in Q15 */
ISACdec_obj->plcstr_obj.decayCoeffNoise = WEBRTC_SPL_WORD16_MAX; /* DECAY_RATE is in Q15 */
ISACdec_obj->plcstr_obj.pitchCycles = 0;
PitchGains_Q12[0] = (int16_t)(PitchGains_Q12[0] * 700 >> 10);
/* ---- Add-overlap ---- */
WebRtcSpl_GetHanningWindow( overlapWin, RECOVERY_OVERLAP );
for( k = 0; k < RECOVERY_OVERLAP; k++ )
Vector_Word16_1[k] = WebRtcSpl_AddSatW16(
(int16_t)(ISACdec_obj->plcstr_obj.overlapLP[k] *
overlapWin[RECOVERY_OVERLAP - k - 1] >> 14),
(int16_t)(Vector_Word16_1[k] * overlapWin[k] >> 14));
}
/* --- Store side info --- */
if( frame_nb == frame_mode )
{
/* --- LPC info */
WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).lofilt_coefQ15, &lofilt_coefQ15[(SUBFRAMES-1)*ORDERLO], ORDERLO );
WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).hifilt_coefQ15, &hifilt_coefQ15[(SUBFRAMES-1)*ORDERHI], ORDERHI );
(ISACdec_obj->plcstr_obj).gain_lo_hiQ17[0] = gain_lo_hiQ17[(SUBFRAMES-1) * 2];
(ISACdec_obj->plcstr_obj).gain_lo_hiQ17[1] = gain_lo_hiQ17[(SUBFRAMES-1) * 2 + 1];
/* --- LTP info */
(ISACdec_obj->plcstr_obj).AvgPitchGain_Q12 = PitchGains_Q12[3];
(ISACdec_obj->plcstr_obj).lastPitchGain_Q12 = PitchGains_Q12[3];
(ISACdec_obj->plcstr_obj).lastPitchLag_Q7 = PitchLags_Q7[3];
if( PitchLags_Q7[3] < 3000 )
(ISACdec_obj->plcstr_obj).lastPitchLag_Q7 += PitchLags_Q7[3];
WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).prevPitchInvIn, Vector_Word16_1, FRAMESAMPLES/2 );
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* inverse pitch filter */
WebRtcIsacfix_PitchFilter(Vector_Word16_1, Vector_Word16_2, &ISACdec_obj->pitchfiltstr_obj, PitchLags_Q7, PitchGains_Q12, 4);
if( frame_nb == frame_mode )
{
WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).prevPitchInvOut, &(Vector_Word16_2[FRAMESAMPLES/2 - (PITCH_MAX_LAG + 10)]), PITCH_MAX_LAG );
}
/* reduce gain to compensate for pitch enhancer */
/* gain = 1.0f - 0.45f * AvgPitchGain; */
tmp32a = AvgPitchGain_Q12 * 29; // Q18
gainQ13 = (int16_t)((262144 - tmp32a) >> 5); // Q18 -> Q13.
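// Worked fixed-point derivation of the two lines above: 0.45 is approximated
// by 29/64 (i.e. 29 in Q6), so AvgPitchGain_Q12 * 29 is Q12 * Q6 = Q18;
// 262144 is 1.0 in Q18, so (262144 - tmp32a) is (1 - 0.45 * gain) in Q18,
// and the >> 5 converts Q18 to Q13.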
for (k = 0; k < FRAMESAMPLES/2; k++)
{
Vector_Word32_1[k] = (Vector_Word16_2[k] * gainQ13) * (1 << 3); // Q25
}
/* perceptual post-filtering (using normalized lattice filter) */
WebRtcIsacfix_NormLatticeFilterAr(ORDERLO, (ISACdec_obj->maskfiltstr_obj).PostStateLoGQ0,
Vector_Word32_1, lofilt_coefQ15, gain_lo_hiQ17, 0, Vector_Word16_1);
/* --- Store Highpass Residual --- */
for (k = 0; k < FRAMESAMPLES/2; k++)
Vector_Word32_1[k] = Vector_Word32_2[k] * (1 << 9); // Q16 -> Q25
for( k = 0; k < PITCH_MAX_LAG + 10; k++ )
(ISACdec_obj->plcstr_obj).prevHP[k] = Vector_Word32_1[FRAMESAMPLES/2 - (PITCH_MAX_LAG + 10) + k];
WebRtcIsacfix_NormLatticeFilterAr(ORDERHI, (ISACdec_obj->maskfiltstr_obj).PostStateHiGQ0,
Vector_Word32_1, hifilt_coefQ15, gain_lo_hiQ17, 1, Vector_Word16_2);
/* recombine the 2 bands */
/* Form the polyphase signals, and compensate for DC offset */
for (k=0;k<FRAMESAMPLES/2;k++) {
tmp_1 = (int16_t)WebRtcSpl_SatW32ToW16(((int32_t)Vector_Word16_1[k]+Vector_Word16_2[k] + 1)); /* Construct a new upper channel signal*/
tmp_2 = (int16_t)WebRtcSpl_SatW32ToW16(((int32_t)Vector_Word16_1[k]-Vector_Word16_2[k])); /* Construct a new lower channel signal*/
Vector_Word16_1[k] = tmp_1;
Vector_Word16_2[k] = tmp_2;
}
WebRtcIsacfix_FilterAndCombine1(Vector_Word16_1,
Vector_Word16_2,
signal_out16 + frame_nb * kProcessedSamples,
&ISACdec_obj->postfiltbankstr_obj);
}
return len;
}

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* decode_bwe.c
*
* This C file contains the internal decode bandwidth estimate function.
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr *bwest_str,
Bitstr_dec *streamdata,
size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts)
{
int16_t index;
size_t frame_samples;
int err;
/* decode framelength */
err = WebRtcIsacfix_DecodeFrameLen(streamdata, &frame_samples);
/* error check */
if (err<0) {
return err;
}
/* decode BW estimation */
err = WebRtcIsacfix_DecodeSendBandwidth(streamdata, &index);
/* error check */
if (err<0) {
return err;
}
/* Update BWE with received data */
err = WebRtcIsacfix_UpdateUplinkBwImpl(
bwest_str,
rtp_seq_number,
(int16_t)(frame_samples * 1000 / FS),
send_ts,
arr_ts,
packet_size, /* in bytes */
index);
/* error check */
if (err<0) {
return err;
}
/* Successful */
return 0;
}

View File

@ -0,0 +1,805 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* decode_plc.c
*
* Packet Loss Concealment.
*
*/
#include <string.h>
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#define NO_OF_PRIMES 8
#define NOISE_FILTER_LEN 30
/*
 * MA filtering with a decaying output, used by the packet-loss concealment
 * below; returns the remaining decay factor after filtering len samples.
 */
static int16_t plc_filterma_Fast(
int16_t *In, /* (i) Vector to be filtered. InOut[-orderCoef+1]
to InOut[-1] contains state */
int16_t *Out, /* (o) Filtered vector */
int16_t *B, /* (i) The filter coefficients (in Q0) */
int16_t Blen, /* (i) Number of B coefficients */
int16_t len, /* (i) Number of samples to be filtered */
int16_t reduceDecay,
int16_t decay,
int16_t rshift )
{
int i, j;
int32_t o;
int32_t lim = (1 << (15 + rshift)) - 1;
for (i = 0; i < len; i++)
{
const int16_t *b_ptr = &B[0];
const int16_t *x_ptr = &In[i];
o = (int32_t)0;
for (j = 0;j < Blen; j++)
{
o = WebRtcSpl_AddSatW32(o, *b_ptr * *x_ptr);
b_ptr++;
x_ptr--;
}
/* to round off correctly */
o = WebRtcSpl_AddSatW32(o, 1 << (rshift - 1));
/* saturate according to the domain of the filter coefficients */
o = WEBRTC_SPL_SAT((int32_t)lim, o, (int32_t)-lim);
/* o should be in the range of int16_t */
o >>= rshift;
/* decay the output signal; this is specific to plc */
*Out++ = (int16_t)((int16_t)o * decay >> 15);
/* change the decay */
decay -= reduceDecay;
if( decay < 0 )
decay = 0;
}
return( decay );
}
static __inline int32_t log2_Q8_T( uint32_t x ) {
int32_t zeros;
int16_t frac;
zeros=WebRtcSpl_NormU32(x);
frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
/* log2(magn(i)) */
return ((31 - zeros) << 8) + frac;
}
static __inline int16_t exp2_Q10_T(int16_t x) { // Both in and out in Q10
int16_t tmp16_1, tmp16_2;
tmp16_2=(int16_t)(0x0400|(x&0x03FF));
tmp16_1 = -(x >> 10);
if(tmp16_1>0)
return tmp16_2 >> tmp16_1;
else
return tmp16_2 << -tmp16_1;
}
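/* Worked examples for the two helpers above:
 *   log2_Q8_T(1024): WebRtcSpl_NormU32(1024) returns 21 (leading zeros), the
 *     fractional part is 0, so the result is (31 - 21) << 8 = 2560, i.e. 10.0
 *     in Q8 (log2(1024) = 10).
 *   exp2_Q10_T(2048) (2.0 in Q10): tmp16_2 = 0x0400 = 1024, tmp16_1 = -2, so
 *     the result is 1024 << 2 = 4096, i.e. 4.0 in Q10 (2^2 = 4). */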
/*
This is a fixed-point version of the above code with limLow = 700 and limHigh = 5000,
hard-coded. The values 700 and 5000 were experimentally obtained.
The function implements membership values for two sets. The membership functions are
of second order, corresponding to half-bell-shaped pulses.
*/
static void MemshipValQ15( int16_t in, int16_t *A, int16_t *B )
{
int16_t x;
in -= 700; /* translate the lowLim to 0, limHigh = 5000 - 700, M = 2150 */
if( in <= 2150 )
{
if( in > 0 )
{
/* b = in^2 / (2 * M^2), a = 1 - b in Q0.
We have to compute in Q15 */
/* x = in / 2150 in Q15, i.e. x = in * (32768 / 2150) = in * 15.2409 =
in*15 + (in*983)/(2^12); note that 983/2^12 = 0.23999 */
/* we are sure that x is in the range of int16_t */
x = (int16_t)(in * 15 + (in * 983 >> 12));
/* b = x^2 / 2 {in Q15} so a shift of 16 is required to
be in correct domain and one more for the division by 2 */
*B = (int16_t)((x * x + 0x00010000) >> 17);
*A = WEBRTC_SPL_WORD16_MAX - *B;
}
else
{
*B = 0;
*A = WEBRTC_SPL_WORD16_MAX;
}
}
else
{
if( in < 4300 )
{
/* This is a mirror case of the above */
in = 4300 - in;
x = (int16_t)(in * 15 + (in * 983 >> 12));
/* b = x^2 / 2 {in Q15} so a shift of 16 is required to
be in correct domain and one more for the division by 2 */
*A = (int16_t)((x * x + 0x00010000) >> 17);
*B = WEBRTC_SPL_WORD16_MAX - *A;
}
else
{
*A = 0;
*B = WEBRTC_SPL_WORD16_MAX;
}
}
}
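/* Floating-point reference for MemshipValQ15() above, written out only as an
 * interpretation of the in-code comments (limLow = 700, limHigh = 5000,
 * M = 2150), not as code taken from iSAC:
 *
 *   700 < in <= 2850:  B = ((in - 700) / 2150)^2 / 2,   A = 1 - B
 *   2850 < in < 5000:  A = ((5000 - in) / 2150)^2 / 2,  B = 1 - A
 *   in <= 700:         A = 1, B = 0
 *   in >= 5000:        A = 0, B = 1
 *
 * A and B are returned in Q15, so 1.0 corresponds to WEBRTC_SPL_WORD16_MAX. */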
static void LinearResampler(int16_t* in,
int16_t* out,
size_t lenIn,
size_t lenOut)
{
size_t n = (lenIn - 1) * RESAMP_RES;
int16_t resOut, relativePos, diff; /* */
size_t i, j;
uint16_t udiff;
if( lenIn == lenOut )
{
WEBRTC_SPL_MEMCPY_W16( out, in, lenIn );
return;
}
resOut = WebRtcSpl_DivW32W16ResW16( (int32_t)n, (int16_t)(lenOut-1) );
out[0] = in[0];
for( i = 1, j = 0, relativePos = 0; i < lenOut; i++ )
{
relativePos += resOut;
while( relativePos > RESAMP_RES )
{
j++;
relativePos -= RESAMP_RES;
}
/* an overflow may happen and the difference in sample values may
* require more than 16 bits. We would like to avoid 32-bit arithmetic
* as much as possible */
if( (in[ j ] > 0) && (in[j + 1] < 0) )
{
udiff = (uint16_t)(in[ j ] - in[j + 1]);
out[ i ] = in[ j ] - (uint16_t)( ((int32_t)( udiff * relativePos )) >> RESAMP_RES_BIT);
}
else
{
if( (in[j] < 0) && (in[j+1] > 0) )
{
udiff = (uint16_t)( in[j + 1] - in[ j ] );
out[ i ] = in[ j ] + (uint16_t)( ((int32_t)( udiff * relativePos )) >> RESAMP_RES_BIT);
}
else
{
diff = in[ j + 1 ] - in[ j ];
out[i] = in[j] + (int16_t)(diff * relativePos >> RESAMP_RES_BIT);
}
}
}
}
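/* Sketch of the index arithmetic in LinearResampler() above: the input is
 * traversed on a grid of RESAMP_RES sub-steps per input sample, the step per
 * output sample is resOut = (lenIn - 1) * RESAMP_RES / (lenOut - 1), and each
 * output sample interpolates linearly between in[j] and in[j + 1] with the
 * fractional position relativePos / RESAMP_RES (assuming
 * RESAMP_RES == 1 << RESAMP_RES_BIT, as the shift suggests; both constants
 * live in settings.h). The sign-split branches only exist to keep the
 * intermediate difference within 16 bits. */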
void WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
IsacFixDecoderInstance *ISACdec_obj,
size_t *current_framesamples )
{
int subframecnt;
int16_t* Vector_Word16_1;
int16_t Vector_Word16_Extended_1[FRAMESAMPLES_HALF + NOISE_FILTER_LEN];
int16_t* Vector_Word16_2;
int16_t Vector_Word16_Extended_2[FRAMESAMPLES_HALF + NOISE_FILTER_LEN];
int32_t Vector_Word32_1[FRAMESAMPLES_HALF];
int32_t Vector_Word32_2[FRAMESAMPLES_HALF];
int16_t lofilt_coefQ15[ORDERLO*SUBFRAMES]; //refl. coeffs
int16_t hifilt_coefQ15[ORDERHI*SUBFRAMES]; //refl. coeffs
int16_t pitchLags_Q7[PITCH_SUBFRAMES];
int16_t pitchGains_Q12[PITCH_SUBFRAMES];
int16_t tmp_1, tmp_2;
int32_t tmp32a, tmp32b;
int16_t gainQ13;
int16_t myDecayRate;
/* ---------- PLC variables ------------ */
size_t lag0, i, k;
int16_t noiseIndex;
int16_t stretchPitchLP[PITCH_MAX_LAG + 10], stretchPitchLP1[PITCH_MAX_LAG + 10];
int32_t gain_lo_hiQ17[2*SUBFRAMES];
int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16;
size_t minIdx;
int32_t nHP, pHP, wNoisyHP, wPriodicHP, corr, minCorr, maxCoeff;
int16_t noise1, rshift;
int16_t ltpGain, pitchGain, myVoiceIndicator, myAbs, maxAbs;
int32_t varIn, varOut, logVarIn, logVarOut, Q, logMaxAbs;
int rightShiftIn, rightShiftOut;
/* ------------------------------------- */
myDecayRate = (DECAY_RATE);
Vector_Word16_1 = &Vector_Word16_Extended_1[NOISE_FILTER_LEN];
Vector_Word16_2 = &Vector_Word16_Extended_2[NOISE_FILTER_LEN];
/* ----- Simply Copy Previous LPC parameters ------ */
for( subframecnt = 0; subframecnt < SUBFRAMES; subframecnt++ )
{
/* lower Band */
WEBRTC_SPL_MEMCPY_W16(&lofilt_coefQ15[ subframecnt * ORDERLO ],
(ISACdec_obj->plcstr_obj).lofilt_coefQ15, ORDERLO);
gain_lo_hiQ17[2*subframecnt] = (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[0];
/* Upper Band */
WEBRTC_SPL_MEMCPY_W16(&hifilt_coefQ15[ subframecnt * ORDERHI ],
(ISACdec_obj->plcstr_obj).hifilt_coefQ15, ORDERHI);
gain_lo_hiQ17[2*subframecnt + 1] = (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[1];
}
lag0 = (size_t)(((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1);
if( (ISACdec_obj->plcstr_obj).used != PLC_WAS_USED )
{
(ISACdec_obj->plcstr_obj).pitchCycles = 0;
(ISACdec_obj->plcstr_obj).lastPitchLP =
&((ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF - lag0]);
minCorr = WEBRTC_SPL_WORD32_MAX;
if ((FRAMESAMPLES_HALF - 10) > 2 * lag0)
{
minIdx = 11;
for( i = 0; i < 21; i++ )
{
corr = 0;
for( k = 0; k < lag0; k++ )
{
corr = WebRtcSpl_AddSatW32(corr, WEBRTC_SPL_ABS_W32(
WebRtcSpl_SubSatW16(
(ISACdec_obj->plcstr_obj).lastPitchLP[k],
(ISACdec_obj->plcstr_obj).prevPitchInvIn[
FRAMESAMPLES_HALF - 2*lag0 - 10 + i + k ] ) ) );
}
if( corr < minCorr )
{
minCorr = corr;
minIdx = i;
}
}
(ISACdec_obj->plcstr_obj).prevPitchLP =
&( (ISACdec_obj->plcstr_obj).prevPitchInvIn[
FRAMESAMPLES_HALF - lag0*2 - 10 + minIdx] );
}
else
{
(ISACdec_obj->plcstr_obj).prevPitchLP =
(ISACdec_obj->plcstr_obj).lastPitchLP;
}
pitchGain = (ISACdec_obj->plcstr_obj).lastPitchGain_Q12;
WebRtcSpl_AutoCorrelation(
&(ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF - lag0],
lag0, 0, &varIn, &rightShiftIn);
WebRtcSpl_AutoCorrelation(
&(ISACdec_obj->plcstr_obj).prevPitchInvOut[PITCH_MAX_LAG + 10 - lag0],
lag0, 0, &varOut, &rightShiftOut);
maxAbs = 0;
for( i = 0; i< lag0; i++)
{
myAbs = WEBRTC_SPL_ABS_W16(
(ISACdec_obj->plcstr_obj).prevPitchInvOut[
PITCH_MAX_LAG + 10 - lag0 + i] );
maxAbs = (myAbs > maxAbs)? myAbs:maxAbs;
}
logVarIn = log2_Q8_T( (uint32_t)( varIn ) ) +
(int32_t)(rightShiftIn << 8);
logVarOut = log2_Q8_T( (uint32_t)( varOut ) ) +
(int32_t)(rightShiftOut << 8);
logMaxAbs = log2_Q8_T( (uint32_t)( maxAbs ) );
ltpGain = (int16_t)(logVarOut - logVarIn);
Q = 2 * logMaxAbs - ( logVarOut - 1512 );
/*
* ---
* We are computing sqrt( (VarIn/lag0) / var( noise ) )
* var( noise ) is almost 256. We have already computed log2( VarIn ) in Q8,
* so we actually compute 2^( 0.5 * (log2( VarIn ) - log2( lag0 ) - log2( var( noise ) )) ).
* Note that the log function is in Q8 but the exponential function is in Q10.
* --
*/
logVarIn -= log2_Q8_T( (uint32_t)( lag0 ) );
tmp16 = (int16_t)((logVarIn<<1) - (4<<10) );
rightShiftIn = 0;
if( tmp16 > 4096 )
{
tmp16 -= 4096;
tmp16 = exp2_Q10_T( tmp16 );
tmp16 >>= 6;
}
else
tmp16 = exp2_Q10_T( tmp16 )>>10;
(ISACdec_obj->plcstr_obj).std = tmp16 - 4;
if( (ltpGain < 110) || (ltpGain > 230) )
{
if( ltpGain < 100 && (pitchGain < 1800) )
{
(ISACdec_obj->plcstr_obj).A = WEBRTC_SPL_WORD16_MAX;
}
else
{
(ISACdec_obj->plcstr_obj).A = ((ltpGain < 110) && (Q < 800)
)? WEBRTC_SPL_WORD16_MAX:0;
}
(ISACdec_obj->plcstr_obj).B = WEBRTC_SPL_WORD16_MAX -
(ISACdec_obj->plcstr_obj).A;
}
else
{
if( (pitchGain < 450) || (pitchGain > 1600) )
{
(ISACdec_obj->plcstr_obj).A = ((pitchGain < 450)
)? WEBRTC_SPL_WORD16_MAX:0;
(ISACdec_obj->plcstr_obj).B = WEBRTC_SPL_WORD16_MAX -
(ISACdec_obj->plcstr_obj).A;
}
else
{
myVoiceIndicator = ltpGain * 2 + pitchGain;
MemshipValQ15( myVoiceIndicator,
&(ISACdec_obj->plcstr_obj).A, &(ISACdec_obj->plcstr_obj).B );
}
}
myVoiceIndicator = ltpGain * 16 + pitchGain * 2 + (pitchGain >> 8);
MemshipValQ15( myVoiceIndicator,
&(ISACdec_obj->plcstr_obj).A, &(ISACdec_obj->plcstr_obj).B );
(ISACdec_obj->plcstr_obj).stretchLag = lag0;
(ISACdec_obj->plcstr_obj).pitchIndex = 0;
}
else
{
myDecayRate = (DECAY_RATE<<2);
}
if( (ISACdec_obj->plcstr_obj).B < 1000 )
{
myDecayRate += (DECAY_RATE<<3);
}
/* ------------ reconstructing the residual signal ------------------ */
LinearResampler( (ISACdec_obj->plcstr_obj).lastPitchLP,
stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
/* inverse pitch filter */
pitchLags_Q7[0] = pitchLags_Q7[1] = pitchLags_Q7[2] = pitchLags_Q7[3] =
(int16_t)((ISACdec_obj->plcstr_obj).stretchLag<<7);
pitchGains_Q12[3] = ( (ISACdec_obj->plcstr_obj).lastPitchGain_Q12);
pitchGains_Q12[2] = (int16_t)(pitchGains_Q12[3] * 1010 >> 10);
pitchGains_Q12[1] = (int16_t)(pitchGains_Q12[2] * 1010 >> 10);
pitchGains_Q12[0] = (int16_t)(pitchGains_Q12[1] * 1010 >> 10);
/* most of the time either B or A is zero, so the two cases are handled separately */
if( (ISACdec_obj->plcstr_obj).B == 0 )
{
for( i = 0; i < FRAMESAMPLES_HALF; i++ )
{
/* --- Low Pass */
(ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
(ISACdec_obj->plcstr_obj).seed );
Vector_Word16_1[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
/* --- Highpass */
(ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
(ISACdec_obj->plcstr_obj).seed );
Vector_Word16_2[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
}
for( i = 1; i < NOISE_FILTER_LEN; i++ )
{
(ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
(ISACdec_obj->plcstr_obj).seed );
Vector_Word16_Extended_1[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
(ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
(ISACdec_obj->plcstr_obj).seed );
Vector_Word16_Extended_2[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
}
plc_filterma_Fast(Vector_Word16_1, Vector_Word16_Extended_1,
&(ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF -
NOISE_FILTER_LEN], (int16_t) NOISE_FILTER_LEN,
(int16_t) FRAMESAMPLES_HALF, (int16_t)(5),
(ISACdec_obj->plcstr_obj).decayCoeffNoise, (int16_t)(6));
maxCoeff = WebRtcSpl_MaxAbsValueW32(
&(ISACdec_obj->plcstr_obj).prevHP[
PITCH_MAX_LAG + 10 - NOISE_FILTER_LEN], NOISE_FILTER_LEN );
rshift = 0;
while( maxCoeff > WEBRTC_SPL_WORD16_MAX )
{
maxCoeff >>= 1;
rshift++;
}
for( i = 0; i < NOISE_FILTER_LEN; i++ ) {
Vector_Word16_1[FRAMESAMPLES_HALF - NOISE_FILTER_LEN + i] =(int16_t)(
ISACdec_obj->plcstr_obj.prevHP[PITCH_MAX_LAG + 10 - NOISE_FILTER_LEN +
i] >> rshift);
}
(ISACdec_obj->plcstr_obj).decayCoeffNoise = plc_filterma_Fast(
Vector_Word16_2,
Vector_Word16_Extended_2,
&Vector_Word16_1[FRAMESAMPLES_HALF - NOISE_FILTER_LEN],
(int16_t) NOISE_FILTER_LEN,
(int16_t) FRAMESAMPLES_HALF,
(int16_t) (5),
(ISACdec_obj->plcstr_obj).decayCoeffNoise,
(int16_t) (7) );
for( i = 0; i < FRAMESAMPLES_HALF; i++ )
Vector_Word32_2[i] = Vector_Word16_Extended_2[i] << rshift;
Vector_Word16_1 = Vector_Word16_Extended_1;
}
else
{
if( (ISACdec_obj->plcstr_obj).A == 0 )
{
/* ------ Periodic Vector --- */
for( i = 0, noiseIndex = 0; i < FRAMESAMPLES_HALF; i++, noiseIndex++ )
{
/* --- Lowpass */
pLP = (int16_t)(stretchPitchLP[ISACdec_obj->plcstr_obj.pitchIndex] *
ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
/* --- Highpass */
pHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
(ISACdec_obj->plcstr_obj).decayCoeffPriodic,
(ISACdec_obj->plcstr_obj).prevHP[PITCH_MAX_LAG + 10 -
(ISACdec_obj->plcstr_obj).stretchLag +
(ISACdec_obj->plcstr_obj).pitchIndex] );
/* --- lower the multiplier (more decay at next sample) --- */
(ISACdec_obj->plcstr_obj).decayCoeffPriodic -= (myDecayRate);
if( (ISACdec_obj->plcstr_obj).decayCoeffPriodic < 0 )
(ISACdec_obj->plcstr_obj).decayCoeffPriodic = 0;
(ISACdec_obj->plcstr_obj).pitchIndex++;
if( (ISACdec_obj->plcstr_obj).pitchIndex ==
(ISACdec_obj->plcstr_obj).stretchLag )
{
(ISACdec_obj->plcstr_obj).pitchIndex = 0;
(ISACdec_obj->plcstr_obj).pitchCycles++;
if( (ISACdec_obj->plcstr_obj).stretchLag != (lag0 + 1) )
{
(ISACdec_obj->plcstr_obj).stretchLag = lag0 + 1;
}
else
{
(ISACdec_obj->plcstr_obj).stretchLag = lag0;
}
(ISACdec_obj->plcstr_obj).stretchLag = (
(ISACdec_obj->plcstr_obj).stretchLag > PITCH_MAX_LAG
)? (PITCH_MAX_LAG):(ISACdec_obj->plcstr_obj).stretchLag;
LinearResampler( (ISACdec_obj->plcstr_obj).lastPitchLP,
stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
LinearResampler( (ISACdec_obj->plcstr_obj).prevPitchLP,
stretchPitchLP1, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
switch( (ISACdec_obj->plcstr_obj).pitchCycles )
{
case 1:
{
for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
{
stretchPitchLP[k] = (int16_t)((
(int32_t)stretchPitchLP[k]* 3 +
(int32_t)stretchPitchLP1[k])>>2);
}
break;
}
case 2:
{
for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
{
stretchPitchLP[k] = (int16_t)((
(int32_t)stretchPitchLP[k] +
(int32_t)stretchPitchLP1[k] )>>1);
}
break;
}
case 3:
{
for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
{
stretchPitchLP[k] = (int16_t)((stretchPitchLP[k] +
(int32_t)stretchPitchLP1[k]*3 )>>2);
}
break;
}
}
if( (ISACdec_obj->plcstr_obj).pitchCycles == 3 )
{
myDecayRate += 35; //(myDecayRate>>1);
(ISACdec_obj->plcstr_obj).pitchCycles = 0;
}
}
/* ------ Sum the noisy and periodic signals ------ */
Vector_Word16_1[i] = pLP;
Vector_Word32_2[i] = pHP;
}
}
else
{
for( i = 0, noiseIndex = 0; i < FRAMESAMPLES_HALF; i++, noiseIndex++ )
{
(ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
(ISACdec_obj->plcstr_obj).seed );
noise1 = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
nLP = (int16_t)((int16_t)(noise1 * ISACdec_obj->plcstr_obj.std) *
ISACdec_obj->plcstr_obj.decayCoeffNoise >> 15);
/* --- Highpass */
(ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
(ISACdec_obj->plcstr_obj).seed );
noise1 = (ISACdec_obj->plcstr_obj.seed >> 11) - 8;
nHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
(ISACdec_obj->plcstr_obj).decayCoeffNoise,
(int32_t)(noise1*(ISACdec_obj->plcstr_obj).std) );
/* --- lower the multiplier (more decay at next sample) --- */
(ISACdec_obj->plcstr_obj).decayCoeffNoise -= (myDecayRate);
if( (ISACdec_obj->plcstr_obj).decayCoeffNoise < 0 )
(ISACdec_obj->plcstr_obj).decayCoeffNoise = 0;
/* ------ Periodic Vector --- */
/* --- Lowpass */
pLP = (int16_t)(stretchPitchLP[ISACdec_obj->plcstr_obj.pitchIndex] *
ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
/* --- Highpass */
pHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
(ISACdec_obj->plcstr_obj).decayCoeffPriodic,
(ISACdec_obj->plcstr_obj).prevHP[PITCH_MAX_LAG + 10 -
(ISACdec_obj->plcstr_obj).stretchLag +
(ISACdec_obj->plcstr_obj).pitchIndex] );
/* --- lower the multiplier (more decay at next sample) --- */
(ISACdec_obj->plcstr_obj).decayCoeffPriodic -= (myDecayRate);
if( (ISACdec_obj->plcstr_obj).decayCoeffPriodic < 0 )
{
(ISACdec_obj->plcstr_obj).decayCoeffPriodic = 0;
}
/* ------ Weighting the noisy and periodic vectors ------- */
wNoisyLP = (int16_t)(ISACdec_obj->plcstr_obj.A * nLP >> 15);
wNoisyHP = (int32_t)(WEBRTC_SPL_MUL_16_32_RSFT15(
(ISACdec_obj->plcstr_obj).A, (nHP) ) );
wPriodicLP = (int16_t)(ISACdec_obj->plcstr_obj.B * pLP >> 15);
wPriodicHP = (int32_t)(WEBRTC_SPL_MUL_16_32_RSFT15(
(ISACdec_obj->plcstr_obj).B, pHP));
(ISACdec_obj->plcstr_obj).pitchIndex++;
if((ISACdec_obj->plcstr_obj).pitchIndex ==
(ISACdec_obj->plcstr_obj).stretchLag)
{
(ISACdec_obj->plcstr_obj).pitchIndex = 0;
(ISACdec_obj->plcstr_obj).pitchCycles++;
if( (ISACdec_obj->plcstr_obj).stretchLag != (lag0 + 1) )
(ISACdec_obj->plcstr_obj).stretchLag = lag0 + 1;
else
(ISACdec_obj->plcstr_obj).stretchLag = lag0;
(ISACdec_obj->plcstr_obj).stretchLag = (
(ISACdec_obj->plcstr_obj).stretchLag > PITCH_MAX_LAG
)? (PITCH_MAX_LAG):(ISACdec_obj->plcstr_obj).stretchLag;
LinearResampler(
(ISACdec_obj->plcstr_obj).lastPitchLP,
stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
LinearResampler((ISACdec_obj->plcstr_obj).prevPitchLP,
stretchPitchLP1, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
switch((ISACdec_obj->plcstr_obj).pitchCycles)
{
case 1:
{
for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
{
stretchPitchLP[k] = (int16_t)((
(int32_t)stretchPitchLP[k]* 3 +
(int32_t)stretchPitchLP1[k] )>>2);
}
break;
}
case 2:
{
for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
{
stretchPitchLP[k] = (int16_t)((
(int32_t)stretchPitchLP[k] +
(int32_t)stretchPitchLP1[k])>>1);
}
break;
}
case 3:
{
for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
{
stretchPitchLP[k] = (int16_t)(
(stretchPitchLP[k] +
(int32_t)stretchPitchLP1[k]*3 )>>2);
}
break;
}
}
if( (ISACdec_obj->plcstr_obj).pitchCycles == 3 )
{
myDecayRate += 55; //(myDecayRate>>1);
(ISACdec_obj->plcstr_obj).pitchCycles = 0;
}
}
/* ------ Sum the noisy and periodic signals ------ */
Vector_Word16_1[i] = WebRtcSpl_AddSatW16(wNoisyLP, wPriodicLP);
Vector_Word32_2[i] = WebRtcSpl_AddSatW32(wNoisyHP, wPriodicHP);
}
}
}
/* ----------------- residual signal is reconstructed ------------------ */
k = (ISACdec_obj->plcstr_obj).pitchIndex;
/* --- Write one pitch cycle for recovery block --- */
for( i = 0; i < RECOVERY_OVERLAP; i++ )
{
ISACdec_obj->plcstr_obj.overlapLP[i] = (int16_t)(
stretchPitchLP[k] * ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
k = ( k < ((ISACdec_obj->plcstr_obj).stretchLag - 1) )? (k+1):0;
}
(ISACdec_obj->plcstr_obj).lastPitchLag_Q7 =
(int16_t)((ISACdec_obj->plcstr_obj).stretchLag << 7);
/* --- Inverse Pitch Filter --- */
WebRtcIsacfix_PitchFilter(Vector_Word16_1, Vector_Word16_2,
&ISACdec_obj->pitchfiltstr_obj, pitchLags_Q7, pitchGains_Q12, 4);
/* reduce gain to compensate for pitch enhancer */
/* gain = 1.0f - 0.45f * AvgPitchGain; */
tmp32a = ISACdec_obj->plcstr_obj.AvgPitchGain_Q12 * 29; // Q18
tmp32b = 262144 - tmp32a; // Q18
gainQ13 = (int16_t) (tmp32b >> 5); // Q13
/* perceptual post-filtering (using normalized lattice filter) */
for (k = 0; k < FRAMESAMPLES_HALF; k++)
Vector_Word32_1[k] = (Vector_Word16_2[k] * gainQ13) << 3; // Q25
WebRtcIsacfix_NormLatticeFilterAr(ORDERLO,
(ISACdec_obj->maskfiltstr_obj).PostStateLoGQ0,
Vector_Word32_1, lofilt_coefQ15, gain_lo_hiQ17, 0, Vector_Word16_1);
WebRtcIsacfix_NormLatticeFilterAr(ORDERHI,
(ISACdec_obj->maskfiltstr_obj).PostStateHiGQ0,
Vector_Word32_2, hifilt_coefQ15, gain_lo_hiQ17, 1, Vector_Word16_2);
/* recombine the 2 bands */
/* Form the polyphase signals, and compensate for DC offset */
for (k=0;k<FRAMESAMPLES_HALF;k++)
{
/* Construct a new upper channel signal*/
tmp_1 = (int16_t)WebRtcSpl_SatW32ToW16(
((int32_t)Vector_Word16_1[k]+Vector_Word16_2[k] + 1));
/* Construct a new lower channel signal*/
tmp_2 = (int16_t)WebRtcSpl_SatW32ToW16(
((int32_t)Vector_Word16_1[k]-Vector_Word16_2[k]));
Vector_Word16_1[k] = tmp_1;
Vector_Word16_2[k] = tmp_2;
}
WebRtcIsacfix_FilterAndCombine1(Vector_Word16_1,
Vector_Word16_2, signal_out16, &ISACdec_obj->postfiltbankstr_obj);
(ISACdec_obj->plcstr_obj).used = PLC_WAS_USED;
*current_framesamples = 480;
}

View File

@ -0,0 +1,635 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* encode.c
*
* Encoding function for the iSAC coder.
*
*/
#include "rtc_base/checks.h"
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include <stdio.h>
#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
#include "modules/audio_coding/codecs/isac/fix/source/lpc_tables.h"
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
#include "modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h"
#include "modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h"
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
int WebRtcIsacfix_EncodeImpl(int16_t *in,
IsacFixEncoderInstance *ISACenc_obj,
BwEstimatorstr *bw_estimatordata,
int16_t CodingMode)
{
int16_t stream_length = 0;
int16_t usefulstr_len = 0;
int k;
int16_t BWno;
int16_t lofilt_coefQ15[(ORDERLO)*SUBFRAMES];
int16_t hifilt_coefQ15[(ORDERHI)*SUBFRAMES];
int32_t gain_lo_hiQ17[2*SUBFRAMES];
int16_t LPandHP[FRAMESAMPLES/2 + QLOOKAHEAD];
int16_t LP16a[FRAMESAMPLES/2 + QLOOKAHEAD];
int16_t HP16a[FRAMESAMPLES/2 + QLOOKAHEAD];
int16_t PitchLags_Q7[PITCH_SUBFRAMES];
int16_t PitchGains_Q12[PITCH_SUBFRAMES];
int16_t AvgPitchGain_Q12;
int16_t frame_mode; /* 0 for 30ms, 1 for 60ms */
int16_t processed_samples;
int status;
int32_t bits_gainsQ11;
int16_t MinBytes;
int16_t bmodel;
transcode_obj transcodingParam;
int16_t payloadLimitBytes;
int16_t arithLenBeforeEncodingDFT;
int16_t iterCntr;
/* copy new frame length and bottle neck rate only for the first 10 ms data */
if (ISACenc_obj->buffer_index == 0) {
/* set the framelength for the next packet */
ISACenc_obj->current_framesamples = ISACenc_obj->new_framelength;
}
frame_mode = ISACenc_obj->current_framesamples/MAX_FRAMESAMPLES; /* 0 (30 ms) or 1 (60 ms) */
processed_samples = ISACenc_obj->current_framesamples/(frame_mode+1); /* 480 (30, 60 ms) */
/* buffer speech samples (by 10ms packet) until the framelength is reached (30 or 60 ms) */
/**************************************************************************************/
/* fill the buffer with 10ms input data */
for(k=0; k<FRAMESAMPLES_10ms; k++) {
ISACenc_obj->data_buffer_fix[k + ISACenc_obj->buffer_index] = in[k];
}
/* if buffersize is not equal to current framesize, and end of file is not reached yet, */
/* increase index and go back to main to get more speech samples */
if (ISACenc_obj->buffer_index + FRAMESAMPLES_10ms != processed_samples) {
ISACenc_obj->buffer_index = ISACenc_obj->buffer_index + FRAMESAMPLES_10ms;
return 0;
}
/* if buffer reached the right size, reset index and continue with encoding the frame */
ISACenc_obj->buffer_index = 0;
/* end of buffer function */
/**************************/
/* encoding */
/************/
if (frame_mode == 0 || ISACenc_obj->frame_nb == 0 )
{
/* reset bitstream */
ISACenc_obj->bitstr_obj.W_upper = 0xFFFFFFFF;
ISACenc_obj->bitstr_obj.streamval = 0;
ISACenc_obj->bitstr_obj.stream_index = 0;
ISACenc_obj->bitstr_obj.full = 1;
if (CodingMode == 0) {
ISACenc_obj->BottleNeck = WebRtcIsacfix_GetUplinkBandwidth(bw_estimatordata);
ISACenc_obj->MaxDelay = WebRtcIsacfix_GetUplinkMaxDelay(bw_estimatordata);
}
if (CodingMode == 0 && frame_mode == 0 && (ISACenc_obj->enforceFrameSize == 0)) {
ISACenc_obj->new_framelength = WebRtcIsacfix_GetNewFrameLength(ISACenc_obj->BottleNeck,
ISACenc_obj->current_framesamples);
}
// Multiply the bottleneck by 0.88 before computing the SNR; 0.88 was tuned by experimenting on TIMIT.
// 901/1024 is 0.87988281250000
ISACenc_obj->s2nr = WebRtcIsacfix_GetSnr(
(int16_t)(ISACenc_obj->BottleNeck * 901 >> 10),
ISACenc_obj->current_framesamples);
/* encode frame length */
status = WebRtcIsacfix_EncodeFrameLen(ISACenc_obj->current_framesamples, &ISACenc_obj->bitstr_obj);
if (status < 0)
{
/* Wrong frame size */
if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
{
// If this is the second 30ms of a 60ms frame reset this such that in the next call
// encoder starts fresh.
ISACenc_obj->frame_nb = 0;
}
return status;
}
/* Save framelength for multiple packets memory */
if (ISACenc_obj->SaveEnc_ptr != NULL) {
(ISACenc_obj->SaveEnc_ptr)->framelength=ISACenc_obj->current_framesamples;
}
/* bandwidth estimation and coding */
BWno = WebRtcIsacfix_GetDownlinkBwIndexImpl(bw_estimatordata);
status = WebRtcIsacfix_EncodeReceiveBandwidth(&BWno, &ISACenc_obj->bitstr_obj);
if (status < 0)
{
if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
{
// If this is the second 30ms of a 60ms frame reset this such that in the next call
// encoder starts fresh.
ISACenc_obj->frame_nb = 0;
}
return status;
}
}
/* split signal in two bands */
WebRtcIsacfix_SplitAndFilter1(ISACenc_obj->data_buffer_fix, LP16a, HP16a, &ISACenc_obj->prefiltbankstr_obj );
/* estimate pitch parameters and pitch-filter lookahead signal */
WebRtcIsacfix_PitchAnalysis(LP16a+QLOOKAHEAD, LPandHP,
&ISACenc_obj->pitchanalysisstr_obj, PitchLags_Q7, PitchGains_Q12); /* LPandHP = LP_lookahead_pfQ0, */
/* Set where to store data in multiple packets memory */
if (ISACenc_obj->SaveEnc_ptr != NULL) {
if (frame_mode == 0 || ISACenc_obj->frame_nb == 0)
{
(ISACenc_obj->SaveEnc_ptr)->startIdx = 0;
}
else
{
(ISACenc_obj->SaveEnc_ptr)->startIdx = 1;
}
}
/* quantize & encode pitch parameters */
status = WebRtcIsacfix_EncodePitchGain(PitchGains_Q12, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr);
if (status < 0)
{
if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
{
// If this is the second 30ms of a 60ms frame reset this such that in the next call
// encoder starts fresh.
ISACenc_obj->frame_nb = 0;
}
return status;
}
status = WebRtcIsacfix_EncodePitchLag(PitchLags_Q7 , PitchGains_Q12, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr);
if (status < 0)
{
if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
{
// If this is the second 30ms of a 60ms frame reset this such that in the next call
// encoder starts fresh.
ISACenc_obj->frame_nb = 0;
}
return status;
}
AvgPitchGain_Q12 = (PitchGains_Q12[0] + PitchGains_Q12[1] +
PitchGains_Q12[2] + PitchGains_Q12[3]) >> 2;
/* find coefficients for perceptual pre-filters */
WebRtcIsacfix_GetLpcCoef(LPandHP, HP16a+QLOOKAHEAD, &ISACenc_obj->maskfiltstr_obj,
ISACenc_obj->s2nr, PitchGains_Q12,
gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15); /*LPandHP = LP_lookahead_pfQ0*/
// record LPC Gains for possible bit-rate reduction
for(k = 0; k < KLT_ORDER_GAIN; k++)
{
transcodingParam.lpcGains[k] = gain_lo_hiQ17[k];
}
/* code LPC model and shape - gains not quantized yet */
status = WebRtcIsacfix_EncodeLpc(gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15,
&bmodel, &bits_gainsQ11, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr, &transcodingParam);
if (status < 0)
{
if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
{
// If this is the second 30ms of a 60ms frame reset this such that in the next call
// encoder starts fresh.
ISACenc_obj->frame_nb = 0;
}
return status;
}
arithLenBeforeEncodingDFT = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full);
/* low-band filtering */
WebRtcIsacfix_NormLatticeFilterMa(ORDERLO, ISACenc_obj->maskfiltstr_obj.PreStateLoGQ15,
LP16a, lofilt_coefQ15, gain_lo_hiQ17, 0, LPandHP);/* LPandHP = LP16b */
/* pitch filter */
WebRtcIsacfix_PitchFilter(LPandHP, LP16a, &ISACenc_obj->pitchfiltstr_obj, PitchLags_Q7, PitchGains_Q12, 1);/* LPandHP = LP16b */
/* high-band filtering */
WebRtcIsacfix_NormLatticeFilterMa(ORDERHI, ISACenc_obj->maskfiltstr_obj.PreStateHiGQ15,
HP16a, hifilt_coefQ15, gain_lo_hiQ17, 1, LPandHP);/*LPandHP = HP16b*/
/* transform */
WebRtcIsacfix_Time2Spec(LP16a, LPandHP, LP16a, LPandHP); /*LPandHP = HP16b*/
/* Save data for multiple packets memory */
if (ISACenc_obj->SaveEnc_ptr != NULL) {
for (k = 0; k < FRAMESAMPLES_HALF; k++) {
(ISACenc_obj->SaveEnc_ptr)->fre[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LP16a[k];
(ISACenc_obj->SaveEnc_ptr)->fim[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LPandHP[k];
}
(ISACenc_obj->SaveEnc_ptr)->AvgPitchGain[(ISACenc_obj->SaveEnc_ptr)->startIdx] = AvgPitchGain_Q12;
}
/* quantization and lossless coding */
status = WebRtcIsacfix_EncodeSpec(LP16a, LPandHP, &ISACenc_obj->bitstr_obj, AvgPitchGain_Q12);
if((status <= -1) && (status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) /*LPandHP = HP16b*/
{
if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
{
// If this is the second 30ms of a 60ms frame reset this such that in the next call
// encoder starts fresh.
ISACenc_obj->frame_nb = 0;
}
return status;
}
if((frame_mode == 1) && (ISACenc_obj->frame_nb == 0))
{
// it is a 60 ms frame and we are in its first 30 ms,
// so the limit at this point should be half of the assigned value
payloadLimitBytes = ISACenc_obj->payloadLimitBytes60 >> 1;
}
else if (frame_mode == 0)
{
// it is a 30ms frame
payloadLimitBytes = (ISACenc_obj->payloadLimitBytes30) - 3;
}
else
{
// this is the second half of a 60ms frame.
payloadLimitBytes = ISACenc_obj->payloadLimitBytes60 - 3; // subtract 3 because the termination process may add 3 bytes
}
iterCntr = 0;
while((((ISACenc_obj->bitstr_obj.stream_index) << 1) > payloadLimitBytes) ||
(status == -ISAC_DISALLOWED_BITSTREAM_LENGTH))
{
int16_t arithLenDFTByte;
int16_t bytesLeftQ5;
int16_t ratioQ5[8] = {0, 6, 9, 12, 16, 19, 22, 25};
// According to experiments on TIMIT the following is proper for audio, but it is not aggressive enough for tonal inputs
// such as DTMF, sweep-sine, ...
//
// (0.55 - (0.8 - ratio[i]/32) * 5 / 6) * 2^14
// int16_t scaleQ14[8] = {0, 648, 1928, 3208, 4915, 6195, 7475, 8755};
// This is a super-aggressive scaling that passed the tonal-input tests with one iteration for a payload limit
// of 120 bytes (32 kbps bottleneck); the number of frames that needed a rate reduction was 58403
//
int16_t scaleQ14[8] = {0, 348, 828, 1408, 2015, 3195, 3500, 3500};
int16_t idx;
if(iterCntr >= MAX_PAYLOAD_LIMIT_ITERATION)
{
// We were not able to limit the payload size
if((frame_mode == 1) && (ISACenc_obj->frame_nb == 0))
{
// This was the first 30ms of a 60ms frame. Although the payload is larger than it
// should be, we let the second 30ms be encoded. Maybe together they won't exceed
// the limit.
ISACenc_obj->frame_nb = 1;
return 0;
}
else if((frame_mode == 1) && (ISACenc_obj->frame_nb == 1))
{
ISACenc_obj->frame_nb = 0;
}
if(status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)
{
return -ISAC_PAYLOAD_LARGER_THAN_LIMIT;
}
else
{
return status;
}
}
if(status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)
{
arithLenDFTByte = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full) - arithLenBeforeEncodingDFT;
bytesLeftQ5 = (payloadLimitBytes - arithLenBeforeEncodingDFT) << 5;
// bytesLeft / arithLenDFTBytes indicates how much scaling is required; this is a rough (aggressive) estimate
// scale = 0.55 - (0.8 - bytesLeft / arithLenDFTBytes) * 5 / 6
// bytesLeft / arithLenDFTBytes below 0.2 will have a scale of zero and above 0.8 are treated as 0.8
// to avoid division we do more simplification.
//
// values of (bytesLeft / arithLenDFTBytes)*32 between ratioQ5[i] and ratioQ5[i+1] are rounded to ratioQ5[i]
// and the corresponding scale is chosen
// we compare bytesLeftQ5 with ratioQ5[]*arithLenDFTByte;
idx = 4;
idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 2 : -2;
idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 1 : -1;
idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 0 : -1;
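// The three updates above are a branch-free binary search over ratioQ5[]:
// starting from the middle (idx = 4), each comparison of bytesLeftQ5 against
// ratioQ5[idx] * arithLenDFTByte halves the remaining range. Example with
// hypothetical numbers, bytesLeftQ5 == 10 * arithLenDFTByte:
//   10 < ratioQ5[4] = 16  -> idx = 2
//   10 >= ratioQ5[2] = 9  -> idx = 3
//   10 < ratioQ5[3] = 12  -> idx = 2
// so scaleQ14[2] is used for the rescaling below.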
}
else
{
// We are here because the bit-stream did not fit into the buffer. In this case the stream_index is not
// trustworthy, especially if this is the first 30ms of a packet. Therefore, we will go for the most aggressive
// case.
idx = 0;
}
// scale FFT coefficients to reduce the bit-rate
for(k = 0; k < FRAMESAMPLES_HALF; k++)
{
LP16a[k] = (int16_t)(LP16a[k] * scaleQ14[idx] >> 14);
LPandHP[k] = (int16_t)(LPandHP[k] * scaleQ14[idx] >> 14);
}
// Save data for multiple packets memory
if (ISACenc_obj->SaveEnc_ptr != NULL)
{
for(k = 0; k < FRAMESAMPLES_HALF; k++)
{
(ISACenc_obj->SaveEnc_ptr)->fre[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LP16a[k];
(ISACenc_obj->SaveEnc_ptr)->fim[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LPandHP[k];
}
}
// scale the unquantized LPC gains and save the scaled version for the future use
for(k = 0; k < KLT_ORDER_GAIN; k++)
{
gain_lo_hiQ17[k] = WEBRTC_SPL_MUL_16_32_RSFT14(scaleQ14[idx], transcodingParam.lpcGains[k]);//transcodingParam.lpcGains[k]; //
transcodingParam.lpcGains[k] = gain_lo_hiQ17[k];
}
// reset the bit-stream object to the state which it had before encoding LPC Gains
ISACenc_obj->bitstr_obj.full = transcodingParam.full;
ISACenc_obj->bitstr_obj.stream_index = transcodingParam.stream_index;
ISACenc_obj->bitstr_obj.streamval = transcodingParam.streamval;
ISACenc_obj->bitstr_obj.W_upper = transcodingParam.W_upper;
ISACenc_obj->bitstr_obj.stream[transcodingParam.stream_index-1] = transcodingParam.beforeLastWord;
ISACenc_obj->bitstr_obj.stream[transcodingParam.stream_index] = transcodingParam.lastWord;
// quantize and encode LPC gain
WebRtcIsacfix_EstCodeLpcGain(gain_lo_hiQ17, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr);
arithLenBeforeEncodingDFT = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full);
status = WebRtcIsacfix_EncodeSpec(LP16a, LPandHP, &ISACenc_obj->bitstr_obj, AvgPitchGain_Q12);
if((status <= -1) && (status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) /*LPandHP = HP16b*/
{
if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
{
// If this is the second 30ms of a 60ms frame reset this such that in the next call
// encoder starts fresh.
ISACenc_obj->frame_nb = 0;
}
return status;
}
iterCntr++;
}
if (frame_mode == 1 && ISACenc_obj->frame_nb == 0)
/* i.e. 60 ms framesize and just processed the first 30ms, */
/* go back to main function to buffer the other 30ms speech frame */
{
ISACenc_obj->frame_nb = 1;
return 0;
}
else if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
{
ISACenc_obj->frame_nb = 0;
/* also update the framelength for next packet, in Adaptive mode only */
if (CodingMode == 0 && (ISACenc_obj->enforceFrameSize == 0)) {
ISACenc_obj->new_framelength = WebRtcIsacfix_GetNewFrameLength(ISACenc_obj->BottleNeck,
ISACenc_obj->current_framesamples);
}
}
/* complete arithmetic coding */
stream_length = WebRtcIsacfix_EncTerminate(&ISACenc_obj->bitstr_obj);
/* can this be negative? */
if(CodingMode == 0)
{
/* update rate model and get minimum number of bytes in this packet */
MinBytes = WebRtcIsacfix_GetMinBytes(&ISACenc_obj->rate_data_obj, (int16_t) stream_length,
ISACenc_obj->current_framesamples, ISACenc_obj->BottleNeck, ISACenc_obj->MaxDelay);
/* if bitstream is too short, add garbage at the end */
/* Store length of coded data */
usefulstr_len = stream_length;
/* Make sure MinBytes does not exceed packet size limit */
if ((ISACenc_obj->frame_nb == 0) && (MinBytes > ISACenc_obj->payloadLimitBytes30)) {
MinBytes = ISACenc_obj->payloadLimitBytes30;
} else if ((ISACenc_obj->frame_nb == 1) && (MinBytes > ISACenc_obj->payloadLimitBytes60)) {
MinBytes = ISACenc_obj->payloadLimitBytes60;
}
/* Make sure we don't allow more than 255 bytes of garbage data.
We store the length of the garbage data in 8 bits in the bitstream,
255 is the max garbage length we can signal using 8 bits. */
if( MinBytes > usefulstr_len + 255 ) {
MinBytes = usefulstr_len + 255;
}
/* Save data for creation of multiple bitstreams */
if (ISACenc_obj->SaveEnc_ptr != NULL) {
(ISACenc_obj->SaveEnc_ptr)->minBytes = MinBytes;
}
while (stream_length < MinBytes)
{
RTC_DCHECK_GE(stream_length, 0);
if (stream_length & 0x0001){
ISACenc_obj->bitstr_seed = WEBRTC_SPL_RAND( ISACenc_obj->bitstr_seed );
ISACenc_obj->bitstr_obj.stream[stream_length / 2] |=
(uint16_t)(ISACenc_obj->bitstr_seed & 0xFF);
} else {
ISACenc_obj->bitstr_seed = WEBRTC_SPL_RAND( ISACenc_obj->bitstr_seed );
ISACenc_obj->bitstr_obj.stream[stream_length / 2] =
((uint16_t)ISACenc_obj->bitstr_seed << 8);
}
stream_length++;
}
/* to get the real stream_length, without garbage */
if (usefulstr_len & 0x0001) {
ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] &= 0xFF00;
ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] += (MinBytes - usefulstr_len) & 0x00FF;
}
else {
ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] &= 0x00FF;
ISACenc_obj->bitstr_obj.stream[usefulstr_len >> 1] +=
((uint16_t)((MinBytes - usefulstr_len) & 0x00FF) << 8);
}
}
else
{
/* update rate model */
WebRtcIsacfix_UpdateRateModel(&ISACenc_obj->rate_data_obj, (int16_t) stream_length,
ISACenc_obj->current_framesamples, ISACenc_obj->BottleNeck);
}
return stream_length;
}
/* This function is used to create a new bitstream with new BWE.
The same data as previously encoded with the function WebRtcIsacfix_EncodeImpl()
is used. The data needed is taken from the struct, where it was stored
when calling the encoder. */
int WebRtcIsacfix_EncodeStoredData(IsacFixEncoderInstance *ISACenc_obj,
int BWnumber,
float scale)
{
int ii;
int status;
int16_t BWno = (int16_t)BWnumber;
int stream_length = 0;
int16_t model;
const uint16_t *Q_PitchGain_cdf_ptr[1];
const uint16_t **cdf;
const IsacSaveEncoderData *SaveEnc_str;
int32_t tmpLPCcoeffs_g[KLT_ORDER_GAIN<<1];
int16_t tmpLPCindex_g[KLT_ORDER_GAIN<<1];
int16_t tmp_fre[FRAMESAMPLES];
int16_t tmp_fim[FRAMESAMPLES];
SaveEnc_str = ISACenc_obj->SaveEnc_ptr;
/* Check if SaveEnc memory exists */
if (SaveEnc_str == NULL) {
return (-1);
}
/* Sanity check - possible values for BWnumber are 0 - 23 */
if ((BWnumber < 0) || (BWnumber > 23)) {
return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
}
/* reset bitstream */
ISACenc_obj->bitstr_obj.W_upper = 0xFFFFFFFF;
ISACenc_obj->bitstr_obj.streamval = 0;
ISACenc_obj->bitstr_obj.stream_index = 0;
ISACenc_obj->bitstr_obj.full = 1;
/* encode frame length */
status = WebRtcIsacfix_EncodeFrameLen(SaveEnc_str->framelength, &ISACenc_obj->bitstr_obj);
if (status < 0) {
/* Wrong frame size */
return status;
}
/* encode bandwidth estimate */
status = WebRtcIsacfix_EncodeReceiveBandwidth(&BWno, &ISACenc_obj->bitstr_obj);
if (status < 0) {
return status;
}
/* Transcoding */
/* If scale < 1, rescale data to produce lower bitrate signal */
if ((0.0 < scale) && (scale < 1.0)) {
/* Compensate LPC gain */
for (ii = 0; ii < (KLT_ORDER_GAIN*(1+SaveEnc_str->startIdx)); ii++) {
tmpLPCcoeffs_g[ii] = (int32_t) ((scale) * (float) SaveEnc_str->LPCcoeffs_g[ii]);
}
/* Scale DFT */
for (ii = 0; ii < (FRAMESAMPLES_HALF*(1+SaveEnc_str->startIdx)); ii++) {
tmp_fre[ii] = (int16_t) ((scale) * (float) SaveEnc_str->fre[ii]) ;
tmp_fim[ii] = (int16_t) ((scale) * (float) SaveEnc_str->fim[ii]) ;
}
} else {
for (ii = 0; ii < (KLT_ORDER_GAIN*(1+SaveEnc_str->startIdx)); ii++) {
tmpLPCindex_g[ii] = SaveEnc_str->LPCindex_g[ii];
}
for (ii = 0; ii < (FRAMESAMPLES_HALF*(1+SaveEnc_str->startIdx)); ii++) {
tmp_fre[ii] = SaveEnc_str->fre[ii];
tmp_fim[ii] = SaveEnc_str->fim[ii];
}
}
/* Loop over number of 30 msec */
for (ii = 0; ii <= SaveEnc_str->startIdx; ii++)
{
/* encode pitch gains */
*Q_PitchGain_cdf_ptr = WebRtcIsacfix_kPitchGainCdf;
status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &SaveEnc_str->pitchGain_index[ii],
Q_PitchGain_cdf_ptr, 1);
if (status < 0) {
return status;
}
/* entropy coding of quantization pitch lags */
/* voicing classification */
if (SaveEnc_str->meanGain[ii] <= 819) {
cdf = WebRtcIsacfix_kPitchLagPtrLo;
} else if (SaveEnc_str->meanGain[ii] <= 1638) {
cdf = WebRtcIsacfix_kPitchLagPtrMid;
} else {
cdf = WebRtcIsacfix_kPitchLagPtrHi;
}
status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj,
&SaveEnc_str->pitchIndex[PITCH_SUBFRAMES*ii], cdf, PITCH_SUBFRAMES);
if (status < 0) {
return status;
}
/* LPC */
/* entropy coding of model number */
model = 0;
status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &model,
WebRtcIsacfix_kModelCdfPtr, 1);
if (status < 0) {
return status;
}
/* entropy coding of quantization indices - LPC shape only */
status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &SaveEnc_str->LPCindex_s[KLT_ORDER_SHAPE*ii],
WebRtcIsacfix_kCdfShapePtr[0], KLT_ORDER_SHAPE);
if (status < 0) {
return status;
}
/* If transcoding, get new LPC gain indices */
if (scale < 1.0) {
WebRtcIsacfix_TranscodeLpcCoef(&tmpLPCcoeffs_g[KLT_ORDER_GAIN*ii], &tmpLPCindex_g[KLT_ORDER_GAIN*ii]);
}
/* entropy coding of quantization indices - LPC gain */
status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &tmpLPCindex_g[KLT_ORDER_GAIN*ii],
WebRtcIsacfix_kCdfGainPtr[0], KLT_ORDER_GAIN);
if (status < 0) {
return status;
}
/* quantization and lossless coding */
status = WebRtcIsacfix_EncodeSpec(&tmp_fre[ii*FRAMESAMPLES_HALF], &tmp_fim[ii*FRAMESAMPLES_HALF],
&ISACenc_obj->bitstr_obj, SaveEnc_str->AvgPitchGain[ii]);
if (status < 0) {
return status;
}
}
/* complete arithmetic coding */
stream_length = WebRtcIsacfix_EncTerminate(&ISACenc_obj->bitstr_obj);
return stream_length;
}
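/* Illustrative call (the encoder-instance and index variables below are
 * hypothetical): after a frame has been encoded with
 * WebRtcIsacfix_EncodeImpl(), the stored data can be re-encoded for another
 * receive-bandwidth index and, optionally, scaled down in rate:
 *
 *   int len = WebRtcIsacfix_EncodeStoredData(enc_inst,   // IsacFixEncoderInstance*
 *                                            bw_index,   // 0..23
 *                                            0.8f);      // scale < 1 lowers the rate
 *   if (len < 0) { ... }                                  // error code
 */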

File diff suppressed because it is too large

View File

@ -0,0 +1,177 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* entropy_coding.h
*
* This header file contains all of the functions used to arithmetically
* encode the iSAC bitstream
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
/* decode complex spectrum (return number of bytes in stream) */
int WebRtcIsacfix_DecodeSpec(Bitstr_dec* streamdata,
int16_t* frQ7,
int16_t* fiQ7,
int16_t AvgPitchGain_Q12);
/* encode complex spectrum */
int WebRtcIsacfix_EncodeSpec(const int16_t* fr,
const int16_t* fi,
Bitstr_enc* streamdata,
int16_t AvgPitchGain_Q12);
/* decode & dequantize LPC Coef */
int WebRtcIsacfix_DecodeLpcCoef(Bitstr_dec* streamdata,
int32_t* LPCCoefQ17,
int32_t* gain_lo_hiQ17,
int16_t* outmodel);
int WebRtcIsacfix_DecodeLpc(int32_t* gain_lo_hiQ17,
int16_t* LPCCoef_loQ15,
int16_t* LPCCoef_hiQ15,
Bitstr_dec* streamdata,
int16_t* outmodel);
/* quantize & code LPC Coef */
int WebRtcIsacfix_EncodeLpc(int32_t* gain_lo_hiQ17,
int16_t* LPCCoef_loQ15,
int16_t* LPCCoef_hiQ15,
int16_t* model,
int32_t* sizeQ11,
Bitstr_enc* streamdata,
IsacSaveEncoderData* encData,
transcode_obj* transcodeParam);
int WebRtcIsacfix_EstCodeLpcGain(int32_t* gain_lo_hiQ17,
Bitstr_enc* streamdata,
IsacSaveEncoderData* encData);
/* decode & dequantize RC */
int WebRtcIsacfix_DecodeRcCoef(Bitstr_dec* streamdata, int16_t* RCQ15);
/* quantize & code RC */
int WebRtcIsacfix_EncodeRcCoef(int16_t* RCQ15, Bitstr_enc* streamdata);
/* decode & dequantize squared Gain */
int WebRtcIsacfix_DecodeGain2(Bitstr_dec* streamdata, int32_t* Gain2);
/* quantize & code squared Gain (input is squared gain) */
int WebRtcIsacfix_EncodeGain2(int32_t* gain2, Bitstr_enc* streamdata);
int WebRtcIsacfix_EncodePitchGain(int16_t* PitchGains_Q12,
Bitstr_enc* streamdata,
IsacSaveEncoderData* encData);
int WebRtcIsacfix_EncodePitchLag(int16_t* PitchLagQ7,
int16_t* PitchGain_Q12,
Bitstr_enc* streamdata,
IsacSaveEncoderData* encData);
int WebRtcIsacfix_DecodePitchGain(Bitstr_dec* streamdata,
int16_t* PitchGain_Q12);
int WebRtcIsacfix_DecodePitchLag(Bitstr_dec* streamdata,
int16_t* PitchGain_Q12,
int16_t* PitchLagQ7);
int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec* streamdata, size_t* framelength);
int WebRtcIsacfix_EncodeFrameLen(int16_t framelength, Bitstr_enc* streamdata);
int WebRtcIsacfix_DecodeSendBandwidth(Bitstr_dec* streamdata, int16_t* BWno);
int WebRtcIsacfix_EncodeReceiveBandwidth(int16_t* BWno, Bitstr_enc* streamdata);
void WebRtcIsacfix_TranscodeLpcCoef(int32_t* tmpcoeffs_gQ6, int16_t* index_gQQ);
// Pointer functions for LPC transforms.
typedef void (*MatrixProduct1)(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
int matrix1_index_factor1,
int matrix0_index_factor1,
int matrix1_index_init_case,
int matrix1_index_step,
int matrix0_index_step,
int inner_loop_count,
int mid_loop_count,
int shift);
typedef void (*MatrixProduct2)(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
int matrix0_index_factor,
int matrix0_index_step);
extern MatrixProduct1 WebRtcIsacfix_MatrixProduct1;
extern MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
void WebRtcIsacfix_MatrixProduct1C(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
int matrix1_index_factor1,
int matrix0_index_factor1,
int matrix1_index_init_case,
int matrix1_index_step,
int matrix0_index_step,
int inner_loop_count,
int mid_loop_count,
int shift);
void WebRtcIsacfix_MatrixProduct2C(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
int matrix0_index_factor,
int matrix0_index_step);
#if defined(WEBRTC_HAS_NEON)
void WebRtcIsacfix_MatrixProduct1Neon(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
int matrix1_index_factor1,
int matrix0_index_factor1,
int matrix1_index_init_case,
int matrix1_index_step,
int matrix0_index_step,
int inner_loop_count,
int mid_loop_count,
int shift);
void WebRtcIsacfix_MatrixProduct2Neon(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
int matrix0_index_factor,
int matrix0_index_step);
#endif
#if defined(MIPS32_LE)
void WebRtcIsacfix_MatrixProduct1MIPS(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
int matrix1_index_factor1,
int matrix0_index_factor1,
int matrix1_index_init_case,
int matrix1_index_step,
int matrix0_index_step,
int inner_loop_count,
int mid_loop_count,
int shift);
void WebRtcIsacfix_MatrixProduct2MIPS(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
int matrix0_index_factor,
int matrix0_index_step);
#endif
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
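The WebRtcIsacfix_MatrixProduct1/2 symbols declared above are function pointers, so the codec can select a platform-specific routine at run time. A minimal sketch of that dispatch, using only names declared in this header; the helper name is hypothetical, and the real wiring lives in the iSAC initialization code rather than here.
#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
// Illustrative only: point the extern function pointers at the fastest
// implementation that was compiled in, falling back to the generic C code.
static void InitMatrixProductPointers(void) {
  WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1C;
  WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C;
#if defined(WEBRTC_HAS_NEON)
  WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1Neon;
  WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2Neon;
#elif defined(MIPS32_LE)
  WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1MIPS;
  WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2MIPS;
#endif
}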

View File

@ -0,0 +1,249 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
// MIPS optimization of the function WebRtcIsacfix_MatrixProduct1.
// Bit-exact with the function WebRtcIsacfix_MatrixProduct1C from
// entropy_coding.c file.
void WebRtcIsacfix_MatrixProduct1MIPS(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
const int matrix1_index_factor1,
const int matrix0_index_factor1,
const int matrix1_index_init_case,
const int matrix1_index_step,
const int matrix0_index_step,
const int inner_loop_count,
const int mid_loop_count,
const int shift) {
if (matrix1_index_init_case != 0) {
int j = SUBFRAMES, k = 0, n = 0;
int32_t r0, r1, r2, sum32;
int32_t* product_start = matrix_product;
int32_t* product_ptr;
const uint32_t product_step = 4 * mid_loop_count;
const uint32_t matrix0_step = 2 * matrix0_index_step;
const uint32_t matrix1_step = 4 * matrix1_index_step;
const uint32_t matrix0_step2 = 2 * matrix0_index_factor1;
const uint32_t matrix1_step2 = 4 * matrix1_index_factor1;
const int16_t* matrix0_start = matrix0;
const int32_t* matrix1_start = matrix1;
int16_t* matrix0_ptr;
int32_t* matrix1_ptr;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"1: \n\t"
"addu %[product_ptr], %[product_start], $0 \n\t"
"addu %[k], %[product_step], $0 \n\t"
"addiu %[j], %[j], -1 \n\t"
"addu %[matrix1_start], %[matrix1], $0 \n\t"
"2: \n\t"
"addu %[matrix1_ptr], %[matrix1_start], $0 \n\t"
"addu %[matrix0_ptr], %[matrix0_start], $0 \n\t"
"addu %[n], %[inner_loop_count], $0 \n\t"
"mul %[sum32], $0, $0 \n\t"
"3: \n\t"
"lw %[r0], 0(%[matrix1_ptr]) \n\t"
"lh %[r1], 0(%[matrix0_ptr]) \n\t"
"addu %[matrix1_ptr], %[matrix1_ptr], %[matrix1_step] \n\t"
"sllv %[r0], %[r0], %[shift] \n\t"
"andi %[r2], %[r0], 0xffff \n\t"
"sra %[r2], %[r2], 1 \n\t"
"mul %[r2], %[r2], %[r1] \n\t"
"sra %[r0], %[r0], 16 \n\t"
"mul %[r0], %[r0], %[r1] \n\t"
"addu %[matrix0_ptr], %[matrix0_ptr], %[matrix0_step] \n\t"
"addiu %[n], %[n], -1 \n\t"
#if defined(MIPS_DSP_R1_LE)
"shra_r.w %[r2], %[r2], 15 \n\t"
#else
"addiu %[r2], %[r2], 0x4000 \n\t"
"sra %[r2], %[r2], 15 \n\t"
#endif
"addu %[sum32], %[sum32], %[r2] \n\t"
"bgtz %[n], 3b \n\t"
" addu %[sum32], %[sum32], %[r0] \n\t"
"addiu %[k], %[k], -4 \n\t"
"addu %[matrix1_start], %[matrix1_start], %[matrix1_step2] \n\t"
"sw %[sum32], 0(%[product_ptr]) \n\t"
"bgtz %[k], 2b \n\t"
" addiu %[product_ptr], %[product_ptr], 4 \n\t"
"addu %[matrix0_start], %[matrix0_start], %[matrix0_step2] \n\t"
"bgtz %[j], 1b \n\t"
" addu %[product_start], %[product_start], %[product_step] \n\t"
".set pop \n\t"
: [product_ptr] "=&r" (product_ptr), [product_start] "+r" (product_start),
[k] "=&r" (k), [j] "+r" (j), [matrix1_start] "=&r"(matrix1_start),
[matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
[matrix0_start] "+r" (matrix0_start), [n] "=&r" (n), [r0] "=&r" (r0),
[sum32] "=&r" (sum32), [r1] "=&r" (r1),[r2] "=&r" (r2)
: [product_step] "r" (product_step), [matrix1] "r" (matrix1),
[inner_loop_count] "r" (inner_loop_count),
[matrix1_step] "r" (matrix1_step), [shift] "r" (shift),
[matrix0_step] "r" (matrix0_step), [matrix1_step2] "r" (matrix1_step2),
[matrix0_step2] "r" (matrix0_step2)
: "hi", "lo", "memory"
);
} else {
int j = SUBFRAMES, k = 0, n = 0;
int32_t r0, r1, r2, sum32;
int32_t* product_start = matrix_product;
int32_t* product_ptr;
const uint32_t product_step = 4 * mid_loop_count;
const uint32_t matrix0_step = 2 * matrix0_index_step;
const uint32_t matrix1_step = 4 * matrix1_index_step;
const uint32_t matrix0_step2 = 2 * matrix0_index_factor1;
const uint32_t matrix1_step2 = 4 * matrix1_index_factor1;
const int16_t* matrix0_start = matrix0;
const int32_t* matrix1_start = matrix1;
int16_t* matrix0_ptr;
int32_t* matrix1_ptr;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"1: \n\t"
"addu %[product_ptr], %[product_start], $0 \n\t"
"addu %[k], %[product_step], $0 \n\t"
"addiu %[j], %[j], -1 \n\t"
"addu %[matrix0_start], %[matrix0], $0 \n\t"
"2: \n\t"
"addu %[matrix1_ptr], %[matrix1_start], $0 \n\t"
"addu %[matrix0_ptr], %[matrix0_start], $0 \n\t"
"addu %[n], %[inner_loop_count], $0 \n\t"
"mul %[sum32], $0, $0 \n\t"
"3: \n\t"
"lw %[r0], 0(%[matrix1_ptr]) \n\t"
"lh %[r1], 0(%[matrix0_ptr]) \n\t"
"addu %[matrix1_ptr], %[matrix1_ptr], %[matrix1_step] \n\t"
"sllv %[r0], %[r0], %[shift] \n\t"
"andi %[r2], %[r0], 0xffff \n\t"
"sra %[r2], %[r2], 1 \n\t"
"mul %[r2], %[r2], %[r1] \n\t"
"sra %[r0], %[r0], 16 \n\t"
"mul %[r0], %[r0], %[r1] \n\t"
"addu %[matrix0_ptr], %[matrix0_ptr], %[matrix0_step] \n\t"
"addiu %[n], %[n], -1 \n\t"
#if defined(MIPS_DSP_R1_LE)
"shra_r.w %[r2], %[r2], 15 \n\t"
#else
"addiu %[r2], %[r2], 0x4000 \n\t"
"sra %[r2], %[r2], 15 \n\t"
#endif
"addu %[sum32], %[sum32], %[r2] \n\t"
"bgtz %[n], 3b \n\t"
" addu %[sum32], %[sum32], %[r0] \n\t"
"addiu %[k], %[k], -4 \n\t"
"addu %[matrix0_start], %[matrix0_start], %[matrix0_step2] \n\t"
"sw %[sum32], 0(%[product_ptr]) \n\t"
"bgtz %[k], 2b \n\t"
" addiu %[product_ptr], %[product_ptr], 4 \n\t"
"addu %[matrix1_start], %[matrix1_start], %[matrix1_step2] \n\t"
"bgtz %[j], 1b \n\t"
" addu %[product_start], %[product_start], %[product_step] \n\t"
".set pop \n\t"
: [product_ptr] "=&r" (product_ptr), [product_start] "+r" (product_start),
[k] "=&r" (k), [j] "+r" (j), [matrix1_start] "+r"(matrix1_start),
[matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
[matrix0_start] "=&r" (matrix0_start), [n] "=&r" (n), [r0] "=&r" (r0),
[sum32] "=&r" (sum32), [r1] "=&r" (r1),[r2] "=&r" (r2)
: [product_step] "r" (product_step), [matrix0] "r" (matrix0),
[inner_loop_count] "r" (inner_loop_count),
[matrix1_step] "r" (matrix1_step), [shift] "r" (shift),
[matrix0_step] "r" (matrix0_step), [matrix1_step2] "r" (matrix1_step2),
[matrix0_step2] "r" (matrix0_step2)
: "hi", "lo", "memory"
);
}
}
// MIPS optimization of the function WebRtcIsacfix_MatrixProduct2.
// Bit-exact with the function WebRtcIsacfix_MatrixProduct2C from
// entropy_coding.c file.
void WebRtcIsacfix_MatrixProduct2MIPS(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
const int matrix0_index_factor,
const int matrix0_index_step) {
int j = 0, n = 0;
int loop_count = SUBFRAMES;
const int16_t* matrix0_ptr;
const int32_t* matrix1_ptr;
const int16_t* matrix0_start = matrix0;
const int matrix0_step = 2 * matrix0_index_step;
const int matrix0_step2 = 2 * matrix0_index_factor;
int32_t r0, r1, r2, r3, r4, sum32, sum32_2;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"addu %[j], %[loop_count], $0 \n\t"
"addu %[matrix0_start], %[matrix0], $0 \n\t"
"1: \n\t"
"addu %[matrix1_ptr], %[matrix1], $0 \n\t"
"addu %[matrix0_ptr], %[matrix0_start], $0 \n\t"
"addu %[n], %[loop_count], $0 \n\t"
"mul %[sum32], $0, $0 \n\t"
"mul %[sum32_2], $0, $0 \n\t"
"2: \n\t"
"lw %[r0], 0(%[matrix1_ptr]) \n\t"
"lw %[r1], 4(%[matrix1_ptr]) \n\t"
"lh %[r2], 0(%[matrix0_ptr]) \n\t"
"andi %[r3], %[r0], 0xffff \n\t"
"sra %[r3], %[r3], 1 \n\t"
"mul %[r3], %[r3], %[r2] \n\t"
"andi %[r4], %[r1], 0xffff \n\t"
"sra %[r4], %[r4], 1 \n\t"
"mul %[r4], %[r4], %[r2] \n\t"
"sra %[r0], %[r0], 16 \n\t"
"mul %[r0], %[r0], %[r2] \n\t"
"sra %[r1], %[r1], 16 \n\t"
"mul %[r1], %[r1], %[r2] \n\t"
#if defined(MIPS_DSP_R1_LE)
"shra_r.w %[r3], %[r3], 15 \n\t"
"shra_r.w %[r4], %[r4], 15 \n\t"
#else
"addiu %[r3], %[r3], 0x4000 \n\t"
"sra %[r3], %[r3], 15 \n\t"
"addiu %[r4], %[r4], 0x4000 \n\t"
"sra %[r4], %[r4], 15 \n\t"
#endif
"addiu %[matrix1_ptr], %[matrix1_ptr], 8 \n\t"
"addu %[matrix0_ptr], %[matrix0_ptr], %[matrix0_step] \n\t"
"addiu %[n], %[n], -1 \n\t"
"addu %[sum32], %[sum32], %[r3] \n\t"
"addu %[sum32_2], %[sum32_2], %[r4] \n\t"
"addu %[sum32], %[sum32], %[r0] \n\t"
"bgtz %[n], 2b \n\t"
" addu %[sum32_2], %[sum32_2], %[r1] \n\t"
"sra %[sum32], %[sum32], 3 \n\t"
"sra %[sum32_2], %[sum32_2], 3 \n\t"
"addiu %[j], %[j], -1 \n\t"
"addu %[matrix0_start], %[matrix0_start], %[matrix0_step2] \n\t"
"sw %[sum32], 0(%[matrix_product]) \n\t"
"sw %[sum32_2], 4(%[matrix_product]) \n\t"
"bgtz %[j], 1b \n\t"
" addiu %[matrix_product], %[matrix_product], 8 \n\t"
".set pop \n\t"
: [j] "=&r" (j), [matrix0_start] "=&r" (matrix0_start),
[matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
[n] "=&r" (n), [sum32] "=&r" (sum32), [sum32_2] "=&r" (sum32_2),
[r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
[r4] "=&r" (r4), [matrix_product] "+r" (matrix_product)
: [loop_count] "r" (loop_count), [matrix0] "r" (matrix0),
[matrix1] "r" (matrix1), [matrix0_step] "r" (matrix0_step),
[matrix0_step2] "r" (matrix0_step2)
: "hi", "lo", "memory"
);
}

View File

@ -0,0 +1,217 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/* This file contains WebRtcIsacfix_MatrixProduct1Neon() and
* WebRtcIsacfix_MatrixProduct2Neon() for the ARM Neon platform. The APIs are in
* entropy_coding.c. Results are bit-exact with the C code for
* generic platforms.
*/
#include <arm_neon.h>
#include <stddef.h>
#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "rtc_base/checks.h"
void WebRtcIsacfix_MatrixProduct1Neon(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
const int matrix1_index_factor1,
const int matrix0_index_factor1,
const int matrix1_index_init_case,
const int matrix1_index_step,
const int matrix0_index_step,
const int inner_loop_count,
const int mid_loop_count,
const int shift) {
int j = 0, k = 0, n = 0;
int matrix1_index = 0, matrix0_index = 0, matrix_prod_index = 0;
int* matrix1_index_factor2 = &j;
int* matrix0_index_factor2 = &k;
if (matrix1_index_init_case != 0) {
matrix1_index_factor2 = &k;
matrix0_index_factor2 = &j;
}
int32x4_t shift32x4 = vdupq_n_s32(shift);
int32x2_t shift32x2 = vdup_n_s32(shift);
int32x4_t sum_32x4 = vdupq_n_s32(0);
int32x2_t sum_32x2 = vdup_n_s32(0);
RTC_DCHECK_EQ(0, inner_loop_count % 2);
RTC_DCHECK_EQ(0, mid_loop_count % 2);
if (matrix1_index_init_case != 0 && matrix1_index_factor1 == 1) {
for (j = 0; j < SUBFRAMES; j++) {
matrix_prod_index = mid_loop_count * j;
for (k = 0; k < (mid_loop_count >> 2) << 2; k += 4) {
sum_32x4 = veorq_s32(sum_32x4, sum_32x4); // Initialize to zeros.
matrix1_index = k;
matrix0_index = matrix0_index_factor1 * j;
for (n = 0; n < inner_loop_count; n++) {
int32x4_t matrix0_32x4 =
vdupq_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
int32x4_t matrix1_32x4 =
vshlq_s32(vld1q_s32(&matrix1[matrix1_index]), shift32x4);
int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
matrix1_index += matrix1_index_step;
matrix0_index += matrix0_index_step;
}
vst1q_s32(&matrix_product[matrix_prod_index], sum_32x4);
matrix_prod_index += 4;
}
if (mid_loop_count % 4 > 1) {
sum_32x2 = veor_s32(sum_32x2, sum_32x2); // Initialize to zeros.
matrix1_index = k;
k += 2;
matrix0_index = matrix0_index_factor1 * j;
for (n = 0; n < inner_loop_count; n++) {
int32x2_t matrix0_32x2 =
vdup_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
int32x2_t matrix1_32x2 =
vshl_s32(vld1_s32(&matrix1[matrix1_index]), shift32x2);
int32x2_t multi_32x2 = vqdmulh_s32(matrix0_32x2, matrix1_32x2);
sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
matrix1_index += matrix1_index_step;
matrix0_index += matrix0_index_step;
}
vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
matrix_prod_index += 2;
}
}
}
else if (matrix1_index_init_case == 0 && matrix0_index_factor1 == 1) {
int32x2_t multi_32x2 = vdup_n_s32(0);
int32x2_t matrix0_32x2 = vdup_n_s32(0);
for (j = 0; j < SUBFRAMES; j++) {
matrix_prod_index = mid_loop_count * j;
for (k = 0; k < (mid_loop_count >> 2) << 2; k += 4) {
sum_32x4 = veorq_s32(sum_32x4, sum_32x4); // Initialize to zeros.
matrix1_index = matrix1_index_factor1 * j;
matrix0_index = k;
for (n = 0; n < inner_loop_count; n++) {
int32x4_t matrix1_32x4 = vdupq_n_s32(matrix1[matrix1_index] << shift);
int32x4_t matrix0_32x4 =
vshll_n_s16(vld1_s16(&matrix0[matrix0_index]), 15);
int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
matrix1_index += matrix1_index_step;
matrix0_index += matrix0_index_step;
}
vst1q_s32(&matrix_product[matrix_prod_index], sum_32x4);
matrix_prod_index += 4;
}
if (mid_loop_count % 4 > 1) {
sum_32x2 = veor_s32(sum_32x2, sum_32x2); // Initialize to zeros.
matrix1_index = matrix1_index_factor1 * j;
matrix0_index = k;
for (n = 0; n < inner_loop_count; n++) {
int32x2_t matrix1_32x2 = vdup_n_s32(matrix1[matrix1_index] << shift);
matrix0_32x2 =
vset_lane_s32((int32_t)matrix0[matrix0_index], matrix0_32x2, 0);
matrix0_32x2 = vset_lane_s32((int32_t)matrix0[matrix0_index + 1],
matrix0_32x2, 1);
matrix0_32x2 = vshl_n_s32(matrix0_32x2, 15);
multi_32x2 = vqdmulh_s32(matrix1_32x2, matrix0_32x2);
sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
matrix1_index += matrix1_index_step;
matrix0_index += matrix0_index_step;
}
vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
matrix_prod_index += 2;
}
}
}
else if (matrix1_index_init_case == 0 &&
matrix1_index_step == 1 &&
matrix0_index_step == 1) {
int32x2_t multi_32x2 = vdup_n_s32(0);
int32x2_t matrix0_32x2 = vdup_n_s32(0);
for (j = 0; j < SUBFRAMES; j++) {
matrix_prod_index = mid_loop_count * j;
for (k = 0; k < mid_loop_count; k++) {
sum_32x4 = veorq_s32(sum_32x4, sum_32x4); // Initialize to zeros.
matrix1_index = matrix1_index_factor1 * j;
matrix0_index = matrix0_index_factor1 * k;
for (n = 0; n < (inner_loop_count >> 2) << 2; n += 4) {
int32x4_t matrix1_32x4 =
vshlq_s32(vld1q_s32(&matrix1[matrix1_index]), shift32x4);
int32x4_t matrix0_32x4 =
vshll_n_s16(vld1_s16(&matrix0[matrix0_index]), 15);
int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
matrix1_index += 4;
matrix0_index += 4;
}
sum_32x2 = vqadd_s32(vget_low_s32(sum_32x4), vget_high_s32(sum_32x4));
if (inner_loop_count % 4 > 1) {
int32x2_t matrix1_32x2 =
vshl_s32(vld1_s32(&matrix1[matrix1_index]), shift32x2);
matrix0_32x2 =
vset_lane_s32((int32_t)matrix0[matrix0_index], matrix0_32x2, 0);
matrix0_32x2 = vset_lane_s32((int32_t)matrix0[matrix0_index + 1],
matrix0_32x2, 1);
matrix0_32x2 = vshl_n_s32(matrix0_32x2, 15);
multi_32x2 = vqdmulh_s32(matrix1_32x2, matrix0_32x2);
sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
}
sum_32x2 = vpadd_s32(sum_32x2, sum_32x2);
vst1_lane_s32(&matrix_product[matrix_prod_index], sum_32x2, 0);
matrix_prod_index++;
}
}
}
else {
for (j = 0; j < SUBFRAMES; j++) {
matrix_prod_index = mid_loop_count * j;
for (k=0; k < mid_loop_count; k++) {
int32_t sum32 = 0;
matrix1_index = matrix1_index_factor1 * (*matrix1_index_factor2);
matrix0_index = matrix0_index_factor1 * (*matrix0_index_factor2);
for (n = 0; n < inner_loop_count; n++) {
sum32 += (WEBRTC_SPL_MUL_16_32_RSFT16(matrix0[matrix0_index],
matrix1[matrix1_index] << shift));
matrix1_index += matrix1_index_step;
matrix0_index += matrix0_index_step;
}
matrix_product[matrix_prod_index] = sum32;
matrix_prod_index++;
}
}
}
}
void WebRtcIsacfix_MatrixProduct2Neon(const int16_t matrix0[],
const int32_t matrix1[],
int32_t matrix_product[],
const int matrix0_index_factor,
const int matrix0_index_step) {
int j = 0, n = 0;
int matrix1_index = 0, matrix0_index = 0, matrix_prod_index = 0;
int32x2_t sum_32x2 = vdup_n_s32(0);
for (j = 0; j < SUBFRAMES; j++) {
sum_32x2 = veor_s32(sum_32x2, sum_32x2); // Initialize to zeros.
matrix1_index = 0;
matrix0_index = matrix0_index_factor * j;
for (n = SUBFRAMES; n > 0; n--) {
int32x2_t matrix0_32x2 =
vdup_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
int32x2_t matrix1_32x2 = vld1_s32(&matrix1[matrix1_index]);
int32x2_t multi_32x2 = vqdmulh_s32(matrix0_32x2, matrix1_32x2);
sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
matrix1_index += 2;
matrix0_index += matrix0_index_step;
}
sum_32x2 = vshr_n_s32(sum_32x2, 3);
vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
matrix_prod_index += 2;
}
}

View File

@ -0,0 +1,415 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* fft.c
*
* Fast Fourier Transform
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
static const int16_t kSortTabFft[240] = {
0, 60, 120, 180, 20, 80, 140, 200, 40, 100, 160, 220,
4, 64, 124, 184, 24, 84, 144, 204, 44, 104, 164, 224,
8, 68, 128, 188, 28, 88, 148, 208, 48, 108, 168, 228,
12, 72, 132, 192, 32, 92, 152, 212, 52, 112, 172, 232,
16, 76, 136, 196, 36, 96, 156, 216, 56, 116, 176, 236,
1, 61, 121, 181, 21, 81, 141, 201, 41, 101, 161, 221,
5, 65, 125, 185, 25, 85, 145, 205, 45, 105, 165, 225,
9, 69, 129, 189, 29, 89, 149, 209, 49, 109, 169, 229,
13, 73, 133, 193, 33, 93, 153, 213, 53, 113, 173, 233,
17, 77, 137, 197, 37, 97, 157, 217, 57, 117, 177, 237,
2, 62, 122, 182, 22, 82, 142, 202, 42, 102, 162, 222,
6, 66, 126, 186, 26, 86, 146, 206, 46, 106, 166, 226,
10, 70, 130, 190, 30, 90, 150, 210, 50, 110, 170, 230,
14, 74, 134, 194, 34, 94, 154, 214, 54, 114, 174, 234,
18, 78, 138, 198, 38, 98, 158, 218, 58, 118, 178, 238,
3, 63, 123, 183, 23, 83, 143, 203, 43, 103, 163, 223,
7, 67, 127, 187, 27, 87, 147, 207, 47, 107, 167, 227,
11, 71, 131, 191, 31, 91, 151, 211, 51, 111, 171, 231,
15, 75, 135, 195, 35, 95, 155, 215, 55, 115, 175, 235,
19, 79, 139, 199, 39, 99, 159, 219, 59, 119, 179, 239
};
/* Cosine table in Q14 */
static const int16_t kCosTabFfftQ14[240] = {
16384, 16378, 16362, 16333, 16294, 16244, 16182, 16110, 16026, 15931, 15826, 15709,
15582, 15444, 15296, 15137, 14968, 14788, 14598, 14399, 14189, 13970, 13741, 13502,
13255, 12998, 12733, 12458, 12176, 11885, 11585, 11278, 10963, 10641, 10311, 9974,
9630, 9280, 8923, 8561, 8192, 7818, 7438, 7053, 6664, 6270, 5872, 5469,
5063, 4653, 4240, 3825, 3406, 2986, 2563, 2139, 1713, 1285, 857, 429,
0, -429, -857, -1285, -1713, -2139, -2563, -2986, -3406, -3825, -4240, -4653,
-5063, -5469, -5872, -6270, -6664, -7053, -7438, -7818, -8192, -8561, -8923, -9280,
-9630, -9974, -10311, -10641, -10963, -11278, -11585, -11885, -12176, -12458, -12733, -12998,
-13255, -13502, -13741, -13970, -14189, -14399, -14598, -14788, -14968, -15137, -15296, -15444,
-15582, -15709, -15826, -15931, -16026, -16110, -16182, -16244, -16294, -16333, -16362, -16378,
-16384, -16378, -16362, -16333, -16294, -16244, -16182, -16110, -16026, -15931, -15826, -15709,
-15582, -15444, -15296, -15137, -14968, -14788, -14598, -14399, -14189, -13970, -13741, -13502,
-13255, -12998, -12733, -12458, -12176, -11885, -11585, -11278, -10963, -10641, -10311, -9974,
-9630, -9280, -8923, -8561, -8192, -7818, -7438, -7053, -6664, -6270, -5872, -5469,
-5063, -4653, -4240, -3825, -3406, -2986, -2563, -2139, -1713, -1285, -857, -429,
0, 429, 857, 1285, 1713, 2139, 2563, 2986, 3406, 3825, 4240, 4653,
5063, 5469, 5872, 6270, 6664, 7053, 7438, 7818, 8192, 8561, 8923, 9280,
9630, 9974, 10311, 10641, 10963, 11278, 11585, 11885, 12176, 12458, 12733, 12998,
13255, 13502, 13741, 13970, 14189, 14399, 14598, 14788, 14968, 15137, 15296, 15444,
15582, 15709, 15826, 15931, 16026, 16110, 16182, 16244, 16294, 16333, 16362, 16378
};
/* Uses 16x16 mul, without rounding, which is faster. Uses WEBRTC_SPL_MUL_16_16_RSFT */
int16_t WebRtcIsacfix_FftRadix16Fastest(int16_t RexQx[], int16_t ImxQx[], int16_t iSign) {
int16_t dd, ee, ff, gg, hh, ii;
int16_t k0, k1, k2, k3, k4, kk;
int16_t tmp116, tmp216;
int16_t ccc1Q14, ccc2Q14, ccc3Q14, sss1Q14, sss2Q14, sss3Q14;
int16_t sss60Q14, ccc72Q14, sss72Q14;
int16_t aaQx, ajQx, akQx, ajmQx, ajpQx, akmQx, akpQx;
int16_t bbQx, bjQx, bkQx, bjmQx, bjpQx, bkmQx, bkpQx;
int16_t ReDATAQx[240], ImDATAQx[240];
sss60Q14 = kCosTabFfftQ14[20];
ccc72Q14 = kCosTabFfftQ14[48];
sss72Q14 = kCosTabFfftQ14[12];
if (iSign < 0) {
sss72Q14 = -sss72Q14;
sss60Q14 = -sss60Q14;
}
/* Complexity is: 10 cycles */
/* compute fourier transform */
// transform for factor of 4
for (kk=0; kk<60; kk++) {
k0 = kk;
k1 = k0 + 60;
k2 = k1 + 60;
k3 = k2 + 60;
akpQx = RexQx[k0] + RexQx[k2];
akmQx = RexQx[k0] - RexQx[k2];
ajpQx = RexQx[k1] + RexQx[k3];
ajmQx = RexQx[k1] - RexQx[k3];
bkpQx = ImxQx[k0] + ImxQx[k2];
bkmQx = ImxQx[k0] - ImxQx[k2];
bjpQx = ImxQx[k1] + ImxQx[k3];
bjmQx = ImxQx[k1] - ImxQx[k3];
RexQx[k0] = akpQx + ajpQx;
ImxQx[k0] = bkpQx + bjpQx;
ajpQx = akpQx - ajpQx;
bjpQx = bkpQx - bjpQx;
if (iSign < 0) {
akpQx = akmQx + bjmQx;
bkpQx = bkmQx - ajmQx;
akmQx -= bjmQx;
bkmQx += ajmQx;
} else {
akpQx = akmQx - bjmQx;
bkpQx = bkmQx + ajmQx;
akmQx += bjmQx;
bkmQx -= ajmQx;
}
ccc1Q14 = kCosTabFfftQ14[kk];
ccc2Q14 = kCosTabFfftQ14[2 * kk];
ccc3Q14 = kCosTabFfftQ14[3 * kk];
sss1Q14 = kCosTabFfftQ14[kk + 60];
sss2Q14 = kCosTabFfftQ14[2 * kk + 60];
sss3Q14 = kCosTabFfftQ14[3 * kk + 60];
if (iSign==1) {
sss1Q14 = -sss1Q14;
sss2Q14 = -sss2Q14;
sss3Q14 = -sss3Q14;
}
//Do several multiplications like Q14*Q16>>14 = Q16
// RexQ16[k1] = akpQ16 * ccc1Q14 - bkpQ16 * sss1Q14;
// RexQ16[k2] = ajpQ16 * ccc2Q14 - bjpQ16 * sss2Q14;
// RexQ16[k3] = akmQ16 * ccc3Q14 - bkmQ16 * sss3Q14;
// ImxQ16[k1] = akpQ16 * sss1Q14 + bkpQ16 * ccc1Q14;
// ImxQ16[k2] = ajpQ16 * sss2Q14 + bjpQ16 * ccc2Q14;
// ImxQ16[k3] = akmQ16 * sss3Q14 + bkmQ16 * ccc3Q14;
RexQx[k1] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc1Q14, akpQx, 14) -
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss1Q14, bkpQx, 14); // 6 non-mul + 2 mul cycles, i.e. 8 cycles (6+2*7=20 cycles if 16x32mul)
RexQx[k2] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, ajpQx, 14) -
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bjpQx, 14);
RexQx[k3] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc3Q14, akmQx, 14) -
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss3Q14, bkmQx, 14);
ImxQx[k1] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss1Q14, akpQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc1Q14, bkpQx, 14);
ImxQx[k2] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, ajpQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bjpQx, 14);
ImxQx[k3] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss3Q14, akmQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc3Q14, bkmQx, 14);
//This mul segment needs 6*8 = 48 cycles for 16x16 muls, but 6*20 = 120 cycles for 16x32 muls
}
/* Complexity is: 51+48 = 99 cycles for 16x16 muls, but 51+120 = 171 cycles for 16x32 muls*/
// transform for factor of 3
kk=0;
k1=20;
k2=40;
for (hh=0; hh<4; hh++) {
for (ii=0; ii<20; ii++) {
akQx = RexQx[kk];
bkQx = ImxQx[kk];
ajQx = RexQx[k1] + RexQx[k2];
bjQx = ImxQx[k1] + ImxQx[k2];
RexQx[kk] = akQx + ajQx;
ImxQx[kk] = bkQx + bjQx;
tmp116 = ajQx >> 1;
tmp216 = bjQx >> 1;
akQx = akQx - tmp116;
bkQx = bkQx - tmp216;
tmp116 = RexQx[k1] - RexQx[k2];
tmp216 = ImxQx[k1] - ImxQx[k2];
ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss60Q14, tmp116, 14); // Q14*Qx>>14 = Qx
bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss60Q14, tmp216, 14); // Q14*Qx>>14 = Qx
RexQx[k1] = akQx - bjQx;
RexQx[k2] = akQx + bjQx;
ImxQx[k1] = bkQx + ajQx;
ImxQx[k2] = bkQx - ajQx;
kk++;
k1++;
k2++;
}
/* Complexity : (31+6)*20 = 740 cycles for 16x16 muls, but (31+18)*20 = 980 cycles for 16x32 muls*/
kk=kk+40;
k1=k1+40;
k2=k2+40;
}
/* Complexity : 4*(740+3) = 2972 cycles for 16x16 muls, but 4*(980+3) = 3932 cycles for 16x32 muls*/
/* multiply by rotation factor for odd factor 3 or 5 (not for 4)
Same code (duplicated) for both ii=2 and ii=3 */
kk = 1;
ee = 0;
ff = 0;
for (gg=0; gg<19; gg++) {
kk += 20;
ff = ff+4;
for (hh=0; hh<2; hh++) {
ee = ff + hh * ff;
dd = ee + 60;
ccc2Q14 = kCosTabFfftQ14[ee];
sss2Q14 = kCosTabFfftQ14[dd];
if (iSign==1) {
sss2Q14 = -sss2Q14;
}
for (ii=0; ii<4; ii++) {
akQx = RexQx[kk];
bkQx = ImxQx[kk];
RexQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akQx, 14) - // Q14*Qx>>14 = Qx
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkQx, 14);
ImxQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akQx, 14) + // Q14*Qx>>14 = Qx
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkQx, 14);
kk += 60;
}
kk = kk - 220;
}
// Complexity: 2*(13+5+4*13+2) = 144 for 16x16 muls, but 2*(13+5+4*33+2) = 304 cycles for 16x32 muls
kk = kk - 59;
}
// Complexity: 19*144 = 2736 for 16x16 muls, but 19*304 = 5776 cycles for 16x32 muls
// transform for factor of 5
kk = 0;
ccc2Q14 = kCosTabFfftQ14[96];
sss2Q14 = kCosTabFfftQ14[84];
if (iSign==1) {
sss2Q14 = -sss2Q14;
}
for (hh=0; hh<4; hh++) {
for (ii=0; ii<12; ii++) {
k1 = kk + 4;
k2 = k1 + 4;
k3 = k2 + 4;
k4 = k3 + 4;
akpQx = RexQx[k1] + RexQx[k4];
akmQx = RexQx[k1] - RexQx[k4];
bkpQx = ImxQx[k1] + ImxQx[k4];
bkmQx = ImxQx[k1] - ImxQx[k4];
ajpQx = RexQx[k2] + RexQx[k3];
ajmQx = RexQx[k2] - RexQx[k3];
bjpQx = ImxQx[k2] + ImxQx[k3];
bjmQx = ImxQx[k2] - ImxQx[k3];
aaQx = RexQx[kk];
bbQx = ImxQx[kk];
RexQx[kk] = aaQx + akpQx + ajpQx;
ImxQx[kk] = bbQx + bkpQx + bjpQx;
akQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, akpQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, ajpQx, 14) + aaQx;
bkQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, bkpQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bjpQx, 14) + bbQx;
ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, akmQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, ajmQx, 14);
bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, bkmQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bjmQx, 14);
// 32+4*8=64 or 32+4*20=112
RexQx[k1] = akQx - bjQx;
RexQx[k4] = akQx + bjQx;
ImxQx[k1] = bkQx + ajQx;
ImxQx[k4] = bkQx - ajQx;
akQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akpQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, ajpQx, 14) + aaQx;
bkQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkpQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, bjpQx, 14) + bbQx;
ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akmQx, 14) -
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, ajmQx, 14);
bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkmQx, 14) -
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, bjmQx, 14);
// 8+4*8=40 or 8+4*20=88
RexQx[k2] = akQx - bjQx;
RexQx[k3] = akQx + bjQx;
ImxQx[k2] = bkQx + ajQx;
ImxQx[k3] = bkQx - ajQx;
kk = k4 + 4;
}
// Complexity: 12*(64+40+10) = 1368 for 16x16 muls, but 12*(112+88+10) = 2520 cycles for 16x32 muls
kk -= 239;
}
// Complexity: 4*1368 = 5472 for 16x16 muls, but 4*2520 = 10080 cycles for 16x32 muls
/* multiply by rotation factor for odd factor 3 or 5 (not for 4)
Same code (duplicated) for both ii=2 and ii=3 */
kk = 1;
ee=0;
for (gg=0; gg<3; gg++) {
kk += 4;
dd = 12 + 12 * gg;
ff = 0;
for (hh=0; hh<4; hh++) {
ff = ff+dd;
ee = ff+60;
for (ii=0; ii<12; ii++) {
akQx = RexQx[kk];
bkQx = ImxQx[kk];
ccc2Q14 = kCosTabFfftQ14[ff];
sss2Q14 = kCosTabFfftQ14[ee];
if (iSign==1) {
sss2Q14 = -sss2Q14;
}
RexQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akQx, 14) -
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkQx, 14);
ImxQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akQx, 14) +
(int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkQx, 14);
kk += 20;
}
kk = kk - 236;
// Complexity: 12*(12+12) = 288 for 16x16 muls, but 12*(12+32) = 528 cycles for 16x32 muls
}
kk = kk - 19;
// Complexity: 4*288+6 for 16x16 muls, but 4*528+6 cycles for 16x32 muls
}
// Complexity: 3*4*288+6 = 3462 for 16x16 muls, but 3*4*528+6 = 6342 cycles for 16x32 muls
// last transform for factor of 4
for (kk=0; kk<240; kk=kk+4) {
k1 = kk + 1;
k2 = k1 + 1;
k3 = k2 + 1;
akpQx = RexQx[kk] + RexQx[k2];
akmQx = RexQx[kk] - RexQx[k2];
ajpQx = RexQx[k1] + RexQx[k3];
ajmQx = RexQx[k1] - RexQx[k3];
bkpQx = ImxQx[kk] + ImxQx[k2];
bkmQx = ImxQx[kk] - ImxQx[k2];
bjpQx = ImxQx[k1] + ImxQx[k3];
bjmQx = ImxQx[k1] - ImxQx[k3];
RexQx[kk] = akpQx + ajpQx;
ImxQx[kk] = bkpQx + bjpQx;
ajpQx = akpQx - ajpQx;
bjpQx = bkpQx - bjpQx;
if (iSign < 0) {
akpQx = akmQx + bjmQx;
bkpQx = bkmQx - ajmQx;
akmQx -= bjmQx;
bkmQx += ajmQx;
} else {
akpQx = akmQx - bjmQx;
bkpQx = bkmQx + ajmQx;
akmQx += bjmQx;
bkmQx -= ajmQx;
}
RexQx[k1] = akpQx;
RexQx[k2] = ajpQx;
RexQx[k3] = akmQx;
ImxQx[k1] = bkpQx;
ImxQx[k2] = bjpQx;
ImxQx[k3] = bkmQx;
}
// Complexity: 60*45 = 2700 for 16x16 muls, but 60*45 = 2700 cycles for 16x32 muls
/* permute the results to normal order */
for (ii=0; ii<240; ii++) {
ReDATAQx[ii]=RexQx[ii];
ImDATAQx[ii]=ImxQx[ii];
}
// Complexity: 240*2=480 cycles
for (ii=0; ii<240; ii++) {
RexQx[ii]=ReDATAQx[kSortTabFft[ii]];
ImxQx[ii]=ImDATAQx[kSortTabFft[ii]];
}
// Complexity: 240*2*2=960 cycles
// Total complexity:
// 16x16 16x32
// Complexity: 10 10
// Complexity: 99 171
// Complexity: 2972 3932
// Complexity: 2736 5776
// Complexity: 5472 10080
// Complexity: 3462 6342
// Complexity: 2700 2700
// Complexity: 480 480
// Complexity: 960 960
// =======================
// 18891 30451
//
// If this FFT is called 2 time each frame, i.e. 67 times per second, it will correspond to
// a C54 complexity of 67*18891/1000000 = 1.27 MIPS with 16x16-muls, and 67*30451/1000000 =
// = 2.04 MIPS with 16x32-muls. Note that this routine is sometimes called 6 times during the
// encoding of a frame, i.e. the max complexity would be 7/2*1.27 = 4.4 MIPS for the 16x16 mul case.
return 0;
}
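For readability, the Q14 twiddle multiplies used throughout the transform reduce to plain integer arithmetic; a minimal sketch, assuming WEBRTC_SPL_MUL_16_16_RSFT(a, b, 14) evaluates to ((int32_t)a * b) >> 14 as its name suggests. The helper name is illustrative.
#include <stdint.h>
// Rotate one complex sample by a twiddle factor given as Q14 cosine/sine
// values, mirroring the RexQx[k1]/ImxQx[k1] updates in the radix-4 stage:
// each product is shifted down by 14 before the add/subtract, with truncation
// rather than rounding.
static void RotateByQ14Twiddle(int16_t* re, int16_t* im,
                               int16_t cos_q14, int16_t sin_q14) {
  int16_t new_re = (int16_t)(((int32_t)cos_q14 * *re) >> 14) -
                   (int16_t)(((int32_t)sin_q14 * *im) >> 14);
  int16_t new_im = (int16_t)(((int32_t)sin_q14 * *re) >> 14) +
                   (int16_t)(((int32_t)cos_q14 * *im) >> 14);
  *re = new_re;
  *im = new_im;
}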

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*--------------------------------*-C-*---------------------------------*
* File:
* fft.h
* ---------------------------------------------------------------------*
* Re[]: real value array
* Im[]: imaginary value array
* nTotal: total number of complex values
* nPass: number of elements involved in this pass of transform
* nSpan: nspan/nPass = number of bytes to increment pointer
* in Re[] and Im[]
* isign: exponent: +1 = forward -1 = reverse
* scaling: normalizing constant by which the final result is *divided*
* scaling == -1, normalize by total dimension of the transform
* scaling < -1, normalize by the square-root of the total dimension
*
* ----------------------------------------------------------------------
* See the comments in the code for correct usage!
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
int16_t WebRtcIsacfix_FftRadix16Fastest(int16_t RexQx[],
int16_t ImxQx[],
int16_t iSign);
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_ */
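A minimal usage sketch for the declaration above, assuming the 240-point in-place buffers used by the implementation in fft.c and taking the comment block's convention that iSign = +1 selects the forward transform; the wrapper name is illustrative.
#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
// Forward 240-point transform, computed in place on the fixed-point buffers.
static void ForwardFft240(int16_t re_qx[240], int16_t im_qx[240]) {
  (void)WebRtcIsacfix_FftRadix16Fastest(re_qx, im_qx, 1);
}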

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_
#include <stdint.h>
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* Arguments:
* io: Input/output, in Q0.
* len: Input, sample length.
* coefficient: Input.
* state: Input/output, filter state, in Q4.
*/
typedef void (*HighpassFilterFixDec32)(int16_t* io,
int16_t len,
const int16_t* coefficient,
int32_t* state);
extern HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t* io,
int16_t len,
const int16_t* coefficient,
int32_t* state);
#if defined(MIPS_DSP_R1_LE)
void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
int16_t len,
const int16_t* coefficient,
int32_t* state);
#endif
typedef void (*AllpassFilter2FixDec16)(
int16_t* data_ch1, // Input and output in channel 1, in Q0
int16_t* data_ch2, // Input and output in channel 2, in Q0
const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15
const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15
int length, // Length of the data buffers
int32_t* filter_state_ch1, // Filter state for channel 1, in Q16
int32_t* filter_state_ch2); // Filter state for channel 2, in Q16
extern AllpassFilter2FixDec16 WebRtcIsacfix_AllpassFilter2FixDec16;
void WebRtcIsacfix_AllpassFilter2FixDec16C(int16_t* data_ch1,
int16_t* data_ch2,
const int16_t* factor_ch1,
const int16_t* factor_ch2,
int length,
int32_t* filter_state_ch1,
int32_t* filter_state_ch2);
#if defined(WEBRTC_HAS_NEON)
void WebRtcIsacfix_AllpassFilter2FixDec16Neon(int16_t* data_ch1,
int16_t* data_ch2,
const int16_t* factor_ch1,
const int16_t* factor_ch2,
int length,
int32_t* filter_state_ch1,
int32_t* filter_state_ch2);
#endif
#if defined(MIPS_DSP_R1_LE)
void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(int16_t* data_ch1,
int16_t* data_ch2,
const int16_t* factor_ch1,
const int16_t* factor_ch2,
int length,
int32_t* filter_state_ch1,
int32_t* filter_state_ch2);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
/* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_ */

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* filterbank_tables.c
*
* This file contains variables that are used in
* filterbanks.c
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
/* HPstcoeff_in_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
* In float, they are: {-1.94895953203325f, 0.94984516000000f,
* -0.05101826139794f, 0.05015484000000f};
*/
const int16_t WebRtcIsacfix_kHpStCoeffInQ30[8] = {
16189, -31932, /* Q30 lo/hi pair */
17243, 15562, /* Q30 lo/hi pair */
-17186, -26748, /* Q35 lo/hi pair */
-27476, 26296 /* Q35 lo/hi pair */
};
/* HPstcoeff_out_1_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
* In float, they are: {-1.99701049409000f, 0.99714204490000f,
* 0.01701049409000f, -0.01704204490000f};
*/
const int16_t WebRtcIsacfix_kHPStCoeffOut1Q30[8] = {
-1306, -32719, /* Q30 lo/hi pair */
11486, 16337, /* Q30 lo/hi pair */
26078, 8918, /* Q35 lo/hi pair */
3956, -8935 /* Q35 lo/hi pair */
};
/* HPstcoeff_out_2_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
* In float, they are: {-1.98645294509837f, 0.98672435560000f,
* 0.00645294509837f, -0.00662435560000f};
*/
const int16_t WebRtcIsacfix_kHPStCoeffOut2Q30[8] = {
-2953, -32546, /* Q30 lo/hi pair */
32233, 16166, /* Q30 lo/hi pair */
13217, 3383, /* Q35 lo/hi pair */
-4597, -3473 /* Q35 lo/hi pair */
};
/* The upper channel all-pass filter factors */
const int16_t WebRtcIsacfix_kUpperApFactorsQ15[2] = {
1137, 12537
};
/* The lower channel all-pass filter factors */
const int16_t WebRtcIsacfix_kLowerApFactorsQ15[2] = {
5059, 24379
};
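The all-pass factors above are stored in Q15, so dividing by 2^15 recovers the underlying floating-point coefficients (for example 1137 / 32768 ≈ 0.0347 and 12537 / 32768 ≈ 0.3826). A small sketch that prints them, purely for inspection:
#include <stdio.h>
#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
// Print the Q15 all-pass factors as floats; handy when comparing the
// fixed-point tables against a floating-point reference filterbank.
int main(void) {
  for (int i = 0; i < 2; ++i) {
    printf("upper[%d] = %.4f  lower[%d] = %.4f\n", i,
           WebRtcIsacfix_kUpperApFactorsQ15[i] / 32768.0, i,
           WebRtcIsacfix_kLowerApFactorsQ15[i] / 32768.0);
  }
  return 0;
}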

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* filterbank_tables.h
*
* Header file for variables that are defined in
* filterbank_tables.c.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_
#include <stdint.h>
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/********************* Coefficient Tables ************************/
/* HPstcoeff_in_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
extern const int16_t WebRtcIsacfix_kHpStCoeffInQ30[8];
/* HPstcoeff_out_1_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
extern const int16_t WebRtcIsacfix_kHPStCoeffOut1Q30[8];
/* HPstcoeff_out_2_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
extern const int16_t WebRtcIsacfix_kHPStCoeffOut2Q30[8];
/* The upper channel all-pass filter factors */
extern const int16_t WebRtcIsacfix_kUpperApFactorsQ15[2];
/* The lower channel all-pass filter factors */
extern const int16_t WebRtcIsacfix_kLowerApFactorsQ15[2];
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_ */

View File

@ -0,0 +1,297 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* filterbanks.c
*
* This file contains the functions
* WebRtcIsacfix_SplitAndFilter and WebRtcIsacfix_FilterAndCombine,
* which implement filterbanks that produce decimated lowpass and
* highpass versions of a signal and perform reconstruction.
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "rtc_base/checks.h"
// Declare a function pointer.
AllpassFilter2FixDec16 WebRtcIsacfix_AllpassFilter2FixDec16;
void WebRtcIsacfix_AllpassFilter2FixDec16C(
int16_t *data_ch1, // Input and output in channel 1, in Q0
int16_t *data_ch2, // Input and output in channel 2, in Q0
const int16_t *factor_ch1, // Scaling factor for channel 1, in Q15
const int16_t *factor_ch2, // Scaling factor for channel 2, in Q15
const int length, // Length of the data buffers
int32_t *filter_state_ch1, // Filter state for channel 1, in Q16
int32_t *filter_state_ch2) { // Filter state for channel 2, in Q16
int n = 0;
int32_t state0_ch1 = filter_state_ch1[0], state1_ch1 = filter_state_ch1[1];
int32_t state0_ch2 = filter_state_ch2[0], state1_ch2 = filter_state_ch2[1];
int16_t in_out = 0;
int32_t a = 0, b = 0;
// Assembly file assumption.
RTC_DCHECK_EQ(0, length % 2);
for (n = 0; n < length; n++) {
// Process channel 1:
in_out = data_ch1[n];
a = factor_ch1[0] * in_out; // Q15 * Q0 = Q15
a *= 1 << 1; // Q15 -> Q16
b = WebRtcSpl_AddSatW32(a, state0_ch1);
a = -factor_ch1[0] * (int16_t)(b >> 16); // Q15
state0_ch1 =
WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16)); // Q16
in_out = (int16_t) (b >> 16); // Save as Q0
a = factor_ch1[1] * in_out; // Q15 * Q0 = Q15
a *= 1 << 1; // Q15 -> Q16
b = WebRtcSpl_AddSatW32(a, state1_ch1); // Q16
a = -factor_ch1[1] * (int16_t)(b >> 16); // Q15
state1_ch1 =
WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16)); // Q16
data_ch1[n] = (int16_t) (b >> 16); // Save as Q0
// Process channel 2:
in_out = data_ch2[n];
a = factor_ch2[0] * in_out; // Q15 * Q0 = Q15
a *= 1 << 1; // Q15 -> Q16
b = WebRtcSpl_AddSatW32(a, state0_ch2); // Q16
a = -factor_ch2[0] * (int16_t)(b >> 16); // Q15
state0_ch2 =
WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16)); // Q16
in_out = (int16_t) (b >> 16); // Save as Q0
a = factor_ch2[1] * in_out; // Q15 * Q0 = Q15
a *= (1 << 1); // Q15 -> Q16
b = WebRtcSpl_AddSatW32(a, state1_ch2); // Q16
a = -factor_ch2[1] * (int16_t)(b >> 16); // Q15
state1_ch2 =
WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16)); // Q16
data_ch2[n] = (int16_t) (b >> 16); // Save as Q0
}
filter_state_ch1[0] = state0_ch1;
filter_state_ch1[1] = state1_ch1;
filter_state_ch2[0] = state0_ch2;
filter_state_ch2[1] = state1_ch2;
}
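Stripped of the Q15/Q16 bookkeeping, each first-order section above reduces, up to saturation and truncation, to y = f*x + s followed by s = x - f*y, i.e. a classic all-pass section with transfer function (f + z^-1) / (1 + f*z^-1). A minimal floating-point sketch of one channel under that reading; the helper name is illustrative and not part of the codec.
// Floating-point rendering of one two-section all-pass channel, mirroring the
// cascaded structure of WebRtcIsacfix_AllpassFilter2FixDec16C above.
static void AllpassChannelFloat(float* data, int length,
                                const float factor[2], float state[2]) {
  for (int n = 0; n < length; ++n) {
    float x = data[n];
    float y0 = factor[0] * x + state[0];   // First section output.
    state[0] = x - factor[0] * y0;
    float y1 = factor[1] * y0 + state[1];  // Second section output.
    state[1] = y0 - factor[1] * y1;
    data[n] = y1;
  }
}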
// Declare a function pointer.
HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t *io,
int16_t len,
const int16_t *coefficient,
int32_t *state)
{
int k;
int32_t a1 = 0, b1 = 0, c = 0, in = 0;
int32_t a2 = 0, b2 = 0;
int32_t state0 = state[0];
int32_t state1 = state[1];
for (k=0; k<len; k++) {
in = (int32_t)io[k];
#ifdef WEBRTC_ARCH_ARM_V7
{
register int tmp_coeff0;
register int tmp_coeff1;
__asm __volatile(
"ldr %[tmp_coeff0], [%[coeff]]\n\t"
"ldr %[tmp_coeff1], [%[coeff], #4]\n\t"
"smmulr %[a2], %[tmp_coeff0], %[state0]\n\t"
"smmulr %[b2], %[tmp_coeff1], %[state1]\n\t"
"ldr %[tmp_coeff0], [%[coeff], #8]\n\t"
"ldr %[tmp_coeff1], [%[coeff], #12]\n\t"
"smmulr %[a1], %[tmp_coeff0], %[state0]\n\t"
"smmulr %[b1], %[tmp_coeff1], %[state1]\n\t"
:[a2]"=&r"(a2),
[b2]"=&r"(b2),
[a1]"=&r"(a1),
[b1]"=r"(b1),
[tmp_coeff0]"=&r"(tmp_coeff0),
[tmp_coeff1]"=&r"(tmp_coeff1)
:[coeff]"r"(coefficient),
[state0]"r"(state0),
[state1]"r"(state1)
);
}
#else
/* Q35 * Q4 = Q39 ; shift 32 bit => Q7 */
a1 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[5], state0) +
(WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[4], state0) >> 16);
b1 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[7], state1) +
(WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[6], state1) >> 16);
/* Q30 * Q4 = Q34 ; shift 32 bit => Q2 */
a2 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[1], state0) +
(WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[0], state0) >> 16);
b2 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[3], state1) +
(WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[2], state1) >> 16);
#endif
c = in + ((a1 + b1) >> 7); // Q0.
io[k] = (int16_t)WebRtcSpl_SatW32ToW16(c); // Write output as Q0.
c = in * (1 << 2) - a2 - b2; // In Q2.
c = (int32_t)WEBRTC_SPL_SAT(536870911, c, -536870912);
state1 = state0;
state0 = c * (1 << 2); // Write state as Q4
}
state[0] = state0;
state[1] = state1;
}
void WebRtcIsacfix_SplitAndFilter1(int16_t *pin,
int16_t *LP16,
int16_t *HP16,
PreFiltBankstr *prefiltdata)
{
/* Function WebRtcIsacfix_SplitAndFilter */
/* This function creates low-pass and high-pass decimated versions of part of
the input signal, and part of the signal in the input 'lookahead buffer'. */
int k;
int16_t tempin_ch1[FRAMESAMPLES/2 + QLOOKAHEAD];
int16_t tempin_ch2[FRAMESAMPLES/2 + QLOOKAHEAD];
int32_t tmpState_ch1[2 * (QORDER-1)]; /* 4 */
int32_t tmpState_ch2[2 * (QORDER-1)]; /* 4 */
/* High pass filter */
WebRtcIsacfix_HighpassFilterFixDec32(pin, FRAMESAMPLES, WebRtcIsacfix_kHpStCoeffInQ30, prefiltdata->HPstates_fix);
/* First Channel */
for (k=0;k<FRAMESAMPLES/2;k++) {
tempin_ch1[QLOOKAHEAD + k] = pin[1 + 2 * k];
}
for (k=0;k<QLOOKAHEAD;k++) {
tempin_ch1[k]=prefiltdata->INLABUF1_fix[k];
prefiltdata->INLABUF1_fix[k] = pin[FRAMESAMPLES + 1 - 2 * (QLOOKAHEAD - k)];
}
/* Second Channel. This is exactly like the first channel, except that the
even samples are now filtered instead (lower channel). */
for (k=0;k<FRAMESAMPLES/2;k++) {
tempin_ch2[QLOOKAHEAD + k] = pin[2 * k];
}
for (k=0;k<QLOOKAHEAD;k++) {
tempin_ch2[k]=prefiltdata->INLABUF2_fix[k];
prefiltdata->INLABUF2_fix[k] = pin[FRAMESAMPLES - 2 * (QLOOKAHEAD - k)];
}
/*obtain polyphase components by forward all-pass filtering through each channel */
/* The all pass filtering automatically updates the filter states which are exported in the
prefiltdata structure */
WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1,
tempin_ch2,
WebRtcIsacfix_kUpperApFactorsQ15,
WebRtcIsacfix_kLowerApFactorsQ15,
FRAMESAMPLES/2,
prefiltdata->INSTAT1_fix,
prefiltdata->INSTAT2_fix);
for (k = 0; k < 2 * (QORDER - 1); k++) {
tmpState_ch1[k] = prefiltdata->INSTAT1_fix[k];
tmpState_ch2[k] = prefiltdata->INSTAT2_fix[k];
}
WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1 + FRAMESAMPLES/2,
tempin_ch2 + FRAMESAMPLES/2,
WebRtcIsacfix_kUpperApFactorsQ15,
WebRtcIsacfix_kLowerApFactorsQ15,
QLOOKAHEAD,
tmpState_ch1,
tmpState_ch2);
/* Now Construct low-pass and high-pass signals as combinations of polyphase components */
for (k=0; k<FRAMESAMPLES/2 + QLOOKAHEAD; k++) {
int32_t tmp1, tmp2, tmp3;
tmp1 = (int32_t)tempin_ch1[k]; // Q0 -> Q0
tmp2 = (int32_t)tempin_ch2[k]; // Q0 -> Q0
tmp3 = (tmp1 + tmp2) >> 1; /* Low pass signal. */
LP16[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp3); /*low pass */
tmp3 = (tmp1 - tmp2) >> 1; /* High pass signal. */
HP16[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp3); /*high pass */
}
}/*end of WebRtcIsacfix_SplitAndFilter */
//////////////////////////////////////////////////////////
////////// Combining
/* Function WebRtcIsacfix_FilterAndCombine */
/* This is a decoder function that takes the decimated
length FRAMESAMPLES/2 input low-pass and
high-pass signals and creates a reconstructed fullband
output signal of length FRAMESAMPLES. WebRtcIsacfix_FilterAndCombine
is the sibling function of WebRtcIsacfix_SplitAndFilter */
/* INPUTS:
inLP: a length FRAMESAMPLES/2 array of input low-pass
samples.
inHP: a length FRAMESAMPLES/2 array of input high-pass
samples.
postfiltdata: input data structure containing the filterbank
states from the previous decoding iteration.
OUTPUTS:
Out: a length FRAMESAMPLES array of output reconstructed
samples (fullband) based on the input low-pass and
high-pass signals.
postfiltdata: the input data structure containing the filterbank
states is updated for the next decoding iteration */
void WebRtcIsacfix_FilterAndCombine1(int16_t *tempin_ch1,
int16_t *tempin_ch2,
int16_t *out16,
PostFiltBankstr *postfiltdata)
{
int k;
int16_t in[FRAMESAMPLES];
/* all-pass filter the new upper and lower channel signal.
For upper channel, use the all-pass filter factors that were used as a
lower channel at the encoding side. So at the decoder, the corresponding
all-pass filter factors for each channel are swapped.
For lower channel signal, since all-pass filter factors at the decoder are
swapped from the ones at the encoder, the 'upper' channel all-pass filter
factors (kUpperApFactors) are used to filter this new lower channel signal.
*/
WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1,
tempin_ch2,
WebRtcIsacfix_kLowerApFactorsQ15,
WebRtcIsacfix_kUpperApFactorsQ15,
FRAMESAMPLES/2,
postfiltdata->STATE_0_UPPER_fix,
postfiltdata->STATE_0_LOWER_fix);
/* Merge outputs to form the full length output signal.*/
for (k=0;k<FRAMESAMPLES/2;k++) {
in[2 * k] = tempin_ch2[k];
in[2 * k + 1] = tempin_ch1[k];
}
/* High pass filter */
WebRtcIsacfix_HighpassFilterFixDec32(in, FRAMESAMPLES, WebRtcIsacfix_kHPStCoeffOut1Q30, postfiltdata->HPstates1_fix);
WebRtcIsacfix_HighpassFilterFixDec32(in, FRAMESAMPLES, WebRtcIsacfix_kHPStCoeffOut2Q30, postfiltdata->HPstates2_fix);
for (k=0;k<FRAMESAMPLES;k++) {
out16[k] = in[k];
}
}

View File

@ -0,0 +1,242 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
// WebRtcIsacfix_AllpassFilter2FixDec16 function optimized for MIPSDSP platform.
// Bit-exact with WebRtcIsacfix_AllpassFilter2FixDec16C from filterbanks.c.
void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
int16_t* data_ch1, // Input and output in channel 1, in Q0.
int16_t* data_ch2, // Input and output in channel 2, in Q0.
const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15.
const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15.
const int length, // Length of the data buffers.
int32_t* filter_state_ch1, // Filter state for channel 1, in Q16.
int32_t* filter_state_ch2) { // Filter state for channel 2, in Q16.
int32_t st0_ch1, st1_ch1; // channel1 state variables.
int32_t st0_ch2, st1_ch2; // channel2 state variables.
int32_t f_ch10, f_ch11, f_ch20, f_ch21; // factor variables.
int32_t r0, r1, r2, r3, r4, r5; // temporary register variables.
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
// Load all the state and factor variables.
"lh %[f_ch10], 0(%[factor_ch1]) \n\t"
"lh %[f_ch20], 0(%[factor_ch2]) \n\t"
"lh %[f_ch11], 2(%[factor_ch1]) \n\t"
"lh %[f_ch21], 2(%[factor_ch2]) \n\t"
"lw %[st0_ch1], 0(%[filter_state_ch1]) \n\t"
"lw %[st1_ch1], 4(%[filter_state_ch1]) \n\t"
"lw %[st0_ch2], 0(%[filter_state_ch2]) \n\t"
"lw %[st1_ch2], 4(%[filter_state_ch2]) \n\t"
// Allpass filtering loop.
"1: \n\t"
"lh %[r0], 0(%[data_ch1]) \n\t"
"lh %[r1], 0(%[data_ch2]) \n\t"
"addiu %[length], %[length], -1 \n\t"
"mul %[r2], %[r0], %[f_ch10] \n\t"
"mul %[r3], %[r1], %[f_ch20] \n\t"
"sll %[r0], %[r0], 16 \n\t"
"sll %[r1], %[r1], 16 \n\t"
"sll %[r2], %[r2], 1 \n\t"
"addq_s.w %[r2], %[r2], %[st0_ch1] \n\t"
"sll %[r3], %[r3], 1 \n\t"
"addq_s.w %[r3], %[r3], %[st0_ch2] \n\t"
"sra %[r2], %[r2], 16 \n\t"
"mul %[st0_ch1], %[f_ch10], %[r2] \n\t"
"sra %[r3], %[r3], 16 \n\t"
"mul %[st0_ch2], %[f_ch20], %[r3] \n\t"
"mul %[r4], %[r2], %[f_ch11] \n\t"
"mul %[r5], %[r3], %[f_ch21] \n\t"
"sll %[st0_ch1], %[st0_ch1], 1 \n\t"
"subq_s.w %[st0_ch1], %[r0], %[st0_ch1] \n\t"
"sll %[st0_ch2], %[st0_ch2], 1 \n\t"
"subq_s.w %[st0_ch2], %[r1], %[st0_ch2] \n\t"
"sll %[r4], %[r4], 1 \n\t"
"addq_s.w %[r4], %[r4], %[st1_ch1] \n\t"
"sll %[r5], %[r5], 1 \n\t"
"addq_s.w %[r5], %[r5], %[st1_ch2] \n\t"
"sra %[r4], %[r4], 16 \n\t"
"mul %[r0], %[r4], %[f_ch11] \n\t"
"sra %[r5], %[r5], 16 \n\t"
"mul %[r1], %[r5], %[f_ch21] \n\t"
"sh %[r4], 0(%[data_ch1]) \n\t"
"sh %[r5], 0(%[data_ch2]) \n\t"
"addiu %[data_ch1], %[data_ch1], 2 \n\t"
"sll %[r2], %[r2], 16 \n\t"
"sll %[r0], %[r0], 1 \n\t"
"subq_s.w %[st1_ch1], %[r2], %[r0] \n\t"
"sll %[r3], %[r3], 16 \n\t"
"sll %[r1], %[r1], 1 \n\t"
"subq_s.w %[st1_ch2], %[r3], %[r1] \n\t"
"bgtz %[length], 1b \n\t"
" addiu %[data_ch2], %[data_ch2], 2 \n\t"
// Store channel states.
"sw %[st0_ch1], 0(%[filter_state_ch1]) \n\t"
"sw %[st1_ch1], 4(%[filter_state_ch1]) \n\t"
"sw %[st0_ch2], 0(%[filter_state_ch2]) \n\t"
"sw %[st1_ch2], 4(%[filter_state_ch2]) \n\t"
".set pop \n\t"
: [f_ch10] "=&r" (f_ch10), [f_ch20] "=&r" (f_ch20),
[f_ch11] "=&r" (f_ch11), [f_ch21] "=&r" (f_ch21),
[st0_ch1] "=&r" (st0_ch1), [st1_ch1] "=&r" (st1_ch1),
[st0_ch2] "=&r" (st0_ch2), [st1_ch2] "=&r" (st1_ch2),
[r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
[r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5)
: [factor_ch1] "r" (factor_ch1), [factor_ch2] "r" (factor_ch2),
[filter_state_ch1] "r" (filter_state_ch1),
[filter_state_ch2] "r" (filter_state_ch2),
[data_ch1] "r" (data_ch1), [data_ch2] "r" (data_ch2),
[length] "r" (length)
: "memory", "hi", "lo"
);
}
// WebRtcIsacfix_HighpassFilterFixDec32 function optimized for MIPSDSP platform.
// Bit-exact with WebRtcIsacfix_HighpassFilterFixDec32C from filterbanks.c.
void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
int16_t len,
const int16_t* coefficient,
int32_t* state) {
int k;
int32_t a1, a2, b1, b2, in;
int32_t state0 = state[0];
int32_t state1 = state[1];
int32_t c0, c1, c2, c3;
int32_t c4, c5, c6, c7;
int32_t state0_lo, state0_hi;
int32_t state1_lo, state1_hi;
int32_t t0, t1, t2, t3, t4, t5;
__asm __volatile (
"lh %[c0], 0(%[coeff_ptr]) \n\t"
"lh %[c1], 2(%[coeff_ptr]) \n\t"
"lh %[c2], 4(%[coeff_ptr]) \n\t"
"lh %[c3], 6(%[coeff_ptr]) \n\t"
"sra %[state0_hi], %[state0], 16 \n\t"
"sra %[state1_hi], %[state1], 16 \n\t"
"andi %[state0_lo], %[state0], 0xFFFF \n\t"
"andi %[state1_lo], %[state1], 0xFFFF \n\t"
"lh %[c4], 8(%[coeff_ptr]) \n\t"
"lh %[c5], 10(%[coeff_ptr]) \n\t"
"lh %[c6], 12(%[coeff_ptr]) \n\t"
"lh %[c7], 14(%[coeff_ptr]) \n\t"
"sra %[state0_lo], %[state0_lo], 1 \n\t"
"sra %[state1_lo], %[state1_lo], 1 \n\t"
: [c0] "=&r" (c0), [c1] "=&r" (c1), [c2] "=&r" (c2), [c3] "=&r" (c3),
[c4] "=&r" (c4), [c5] "=&r" (c5), [c6] "=&r" (c6), [c7] "=&r" (c7),
[state0_hi] "=&r" (state0_hi), [state0_lo] "=&r" (state0_lo),
[state1_hi] "=&r" (state1_hi), [state1_lo] "=&r" (state1_lo)
: [coeff_ptr] "r" (coefficient), [state0] "r" (state0),
[state1] "r" (state1)
: "memory"
);
for (k = 0; k < len; k++) {
in = (int32_t)io[k];
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"mul %[t2], %[c4], %[state0_lo] \n\t"
"mul %[t0], %[c5], %[state0_lo] \n\t"
"mul %[t1], %[c4], %[state0_hi] \n\t"
"mul %[a1], %[c5], %[state0_hi] \n\t"
"mul %[t5], %[c6], %[state1_lo] \n\t"
"mul %[t3], %[c7], %[state1_lo] \n\t"
"mul %[t4], %[c6], %[state1_hi] \n\t"
"mul %[b1], %[c7], %[state1_hi] \n\t"
"shra_r.w %[t2], %[t2], 15 \n\t"
"shra_r.w %[t0], %[t0], 15 \n\t"
"addu %[t1], %[t1], %[t2] \n\t"
"addu %[a1], %[a1], %[t0] \n\t"
"sra %[t1], %[t1], 16 \n\t"
"addu %[a1], %[a1], %[t1] \n\t"
"shra_r.w %[t5], %[t5], 15 \n\t"
"shra_r.w %[t3], %[t3], 15 \n\t"
"addu %[t4], %[t4], %[t5] \n\t"
"addu %[b1], %[b1], %[t3] \n\t"
"sra %[t4], %[t4], 16 \n\t"
"addu %[b1], %[b1], %[t4] \n\t"
"mul %[t2], %[c0], %[state0_lo] \n\t"
"mul %[t0], %[c1], %[state0_lo] \n\t"
"mul %[t1], %[c0], %[state0_hi] \n\t"
"mul %[a2], %[c1], %[state0_hi] \n\t"
"mul %[t5], %[c2], %[state1_lo] \n\t"
"mul %[t3], %[c3], %[state1_lo] \n\t"
"mul %[t4], %[c2], %[state1_hi] \n\t"
"mul %[b2], %[c3], %[state1_hi] \n\t"
"shra_r.w %[t2], %[t2], 15 \n\t"
"shra_r.w %[t0], %[t0], 15 \n\t"
"addu %[t1], %[t1], %[t2] \n\t"
"addu %[a2], %[a2], %[t0] \n\t"
"sra %[t1], %[t1], 16 \n\t"
"addu %[a2], %[a2], %[t1] \n\t"
"shra_r.w %[t5], %[t5], 15 \n\t"
"shra_r.w %[t3], %[t3], 15 \n\t"
"addu %[t4], %[t4], %[t5] \n\t"
"addu %[b2], %[b2], %[t3] \n\t"
"sra %[t4], %[t4], 16 \n\t"
"addu %[b2], %[b2], %[t4] \n\t"
"addu %[a1], %[a1], %[b1] \n\t"
"sra %[a1], %[a1], 7 \n\t"
"addu %[a1], %[a1], %[in] \n\t"
"sll %[t0], %[in], 2 \n\t"
"addu %[a2], %[a2], %[b2] \n\t"
"subu %[t0], %[t0], %[a2] \n\t"
"shll_s.w %[a1], %[a1], 16 \n\t"
"shll_s.w %[t0], %[t0], 2 \n\t"
"sra %[a1], %[a1], 16 \n\t"
"addu %[state1_hi], %[state0_hi], $0 \n\t"
"addu %[state1_lo], %[state0_lo], $0 \n\t"
"sra %[state0_hi], %[t0], 16 \n\t"
"andi %[state0_lo], %[t0], 0xFFFF \n\t"
"sra %[state0_lo], %[state0_lo], 1 \n\t"
".set pop \n\t"
: [a1] "=&r" (a1), [b1] "=&r" (b1), [a2] "=&r" (a2), [b2] "=&r" (b2),
[state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
[state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo),
[t0] "=&r" (t0), [t1] "=&r" (t1), [t2] "=&r" (t2),
[t3] "=&r" (t3), [t4] "=&r" (t4), [t5] "=&r" (t5)
: [c0] "r" (c0), [c1] "r" (c1), [c2] "r" (c2), [c3] "r" (c3),
[c4] "r" (c4), [c5] "r" (c5), [c6] "r" (c6), [c7] "r" (c7),
[in] "r" (in)
: "hi", "lo"
);
io[k] = (int16_t)a1;
}
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
#if !defined(MIPS_DSP_R2_LE)
"sll %[state0_hi], %[state0_hi], 16 \n\t"
"sll %[state0_lo], %[state0_lo], 1 \n\t"
"sll %[state1_hi], %[state1_hi], 16 \n\t"
"sll %[state1_lo], %[state1_lo], 1 \n\t"
"or %[state0_hi], %[state0_hi], %[state0_lo] \n\t"
"or %[state1_hi], %[state1_hi], %[state1_lo] \n\t"
#else
"sll %[state0_lo], %[state0_lo], 1 \n\t"
"sll %[state1_lo], %[state1_lo], 1 \n\t"
"precr_sra.ph.w %[state0_hi], %[state0_lo], 0 \n\t"
"precr_sra.ph.w %[state1_hi], %[state1_lo], 0 \n\t"
#endif
"sw %[state0_hi], 0(%[state]) \n\t"
"sw %[state1_hi], 4(%[state]) \n\t"
".set pop \n\t"
: [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
[state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo)
: [state] "r" (state)
: "memory"
);
}

View File

@ -0,0 +1,278 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Contains a function for WebRtcIsacfix_AllpassFilter2FixDec16Neon()
// in the iSAC codec, optimized for the ARM Neon platform. Bit-exact with the
// function WebRtcIsacfix_AllpassFilter2FixDec16C() in filterbanks.c. The
// prototype C code is at the end of this file.
#include <arm_neon.h>
#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
#include "rtc_base/checks.h"
void WebRtcIsacfix_AllpassFilter2FixDec16Neon(
int16_t* data_ch1, // Input and output in channel 1, in Q0
int16_t* data_ch2, // Input and output in channel 2, in Q0
const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15
const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15
const int length, // Length of the data buffers
int32_t* filter_state_ch1, // Filter state for channel 1, in Q16
int32_t* filter_state_ch2) { // Filter state for channel 2, in Q16
RTC_DCHECK_EQ(0, length % 2);
int n = 0;
int16x4_t factorv;
int16x4_t datav;
int32x4_t statev;
// Load factor_ch1 and factor_ch2.
factorv = vld1_dup_s16(factor_ch1);
factorv = vld1_lane_s16(factor_ch1 + 1, factorv, 1);
factorv = vld1_lane_s16(factor_ch2, factorv, 2);
factorv = vld1_lane_s16(factor_ch2 + 1, factorv, 3);
// Load filter_state_ch1[0] and filter_state_ch2[0].
statev = vld1q_dup_s32(filter_state_ch1);
statev = vld1q_lane_s32(filter_state_ch2, statev, 2);
// Loop unrolling preprocessing.
int32x4_t a;
int16x4_t tmp1, tmp2;
// Load data_ch1[0] and data_ch2[0].
datav = vld1_dup_s16(data_ch1);
datav = vld1_lane_s16(data_ch2, datav, 2);
a = vqdmlal_s16(statev, datav, factorv);
tmp1 = vshrn_n_s32(a, 16);
// Update filter_state_ch1[0] and filter_state_ch2[0].
statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
// Load filter_state_ch1[1] and filter_state_ch2[1].
statev = vld1q_lane_s32(filter_state_ch1 + 1, statev, 1);
statev = vld1q_lane_s32(filter_state_ch2 + 1, statev, 3);
// Load data_ch1[1] and data_ch2[1].
tmp1 = vld1_lane_s16(data_ch1 + 1, tmp1, 1);
tmp1 = vld1_lane_s16(data_ch2 + 1, tmp1, 3);
datav = vrev32_s16(tmp1);
// Loop unrolling processing.
for (n = 0; n < length - 2; n += 2) {
a = vqdmlal_s16(statev, datav, factorv);
tmp1 = vshrn_n_s32(a, 16);
// Store data_ch1[n] and data_ch2[n].
vst1_lane_s16(data_ch1 + n, tmp1, 1);
vst1_lane_s16(data_ch2 + n, tmp1, 3);
// Update filter_state_ch1[0], filter_state_ch1[1]
// and filter_state_ch2[0], filter_state_ch2[1].
statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
// Load data_ch1[n + 2] and data_ch2[n + 2].
tmp1 = vld1_lane_s16(data_ch1 + n + 2, tmp1, 1);
tmp1 = vld1_lane_s16(data_ch2 + n + 2, tmp1, 3);
datav = vrev32_s16(tmp1);
a = vqdmlal_s16(statev, datav, factorv);
tmp2 = vshrn_n_s32(a, 16);
// Store data_ch1[n + 1] and data_ch2[n + 1].
vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);
// Update filter_state_ch1[0], filter_state_ch1[1]
// and filter_state_ch2[0], filter_state_ch2[1].
statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);
// Load data_ch1[n + 3] and data_ch2[n + 3].
tmp2 = vld1_lane_s16(data_ch1 + n + 3, tmp2, 1);
tmp2 = vld1_lane_s16(data_ch2 + n + 3, tmp2, 3);
datav = vrev32_s16(tmp2);
}
// Loop unrolling post-processing.
a = vqdmlal_s16(statev, datav, factorv);
tmp1 = vshrn_n_s32(a, 16);
// Store data_ch1[n] and data_ch2[n].
vst1_lane_s16(data_ch1 + n, tmp1, 1);
vst1_lane_s16(data_ch2 + n, tmp1, 3);
// Update filter_state_ch1[0], filter_state_ch1[1]
// and filter_state_ch2[0], filter_state_ch2[1].
statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
// Store filter_state_ch1[0] and filter_state_ch2[0].
vst1q_lane_s32(filter_state_ch1, statev, 0);
vst1q_lane_s32(filter_state_ch2, statev, 2);
datav = vrev32_s16(tmp1);
a = vqdmlal_s16(statev, datav, factorv);
tmp2 = vshrn_n_s32(a, 16);
// Store data_ch1[n + 1] and data_ch2[n + 1].
vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);
// Update filter_state_ch1[1] and filter_state_ch2[1].
statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);
// Store filter_state_ch1[1] and filter_state_ch2[1].
vst1q_lane_s32(filter_state_ch1 + 1, statev, 1);
vst1q_lane_s32(filter_state_ch2 + 1, statev, 3);
}
// This function is the C prototype for the Neon-optimized function above.
//void AllpassFilter2FixDec16BothChannels(
// int16_t *data_ch1, // Input and output in channel 1, in Q0
// int16_t *data_ch2, // Input and output in channel 2, in Q0
// const int16_t *factor_ch1, // Scaling factor for channel 1, in Q15
// const int16_t *factor_ch2, // Scaling factor for channel 2, in Q15
// const int length, // Length of the data buffers
// int32_t *filter_state_ch1, // Filter state for channel 1, in Q16
// int32_t *filter_state_ch2) { // Filter state for channel 2, in Q16
// int n = 0;
// int32_t state0_ch1 = filter_state_ch1[0], state1_ch1 = filter_state_ch1[1];
// int32_t state0_ch2 = filter_state_ch2[0], state1_ch2 = filter_state_ch2[1];
// int16_t sample0_ch1 = 0, sample0_ch2 = 0;
// int16_t sample1_ch1 = 0, sample1_ch2 = 0;
// int32_t a0_ch1 = 0, a0_ch2 = 0;
// int32_t b0_ch1 = 0, b0_ch2 = 0;
//
// int32_t a1_ch1 = 0, a1_ch2 = 0;
// int32_t b1_ch1 = 0, b1_ch2 = 0;
// int32_t b2_ch1 = 0, b2_ch2 = 0;
//
// // Loop unrolling preprocessing.
//
// sample0_ch1 = data_ch1[n];
// sample0_ch2 = data_ch2[n];
//
// a0_ch1 = (factor_ch1[0] * sample0_ch1) << 1;
// a0_ch2 = (factor_ch2[0] * sample0_ch2) << 1;
//
// b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state0_ch1);
// b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state0_ch2); //Q16+Q16=Q16
//
// a0_ch1 = -factor_ch1[0] * (int16_t)(b0_ch1 >> 16);
// a0_ch2 = -factor_ch2[0] * (int16_t)(b0_ch2 >> 16);
//
// state0_ch1 = WebRtcSpl_AddSatW32(a0_ch1 <<1, (uint32_t)sample0_ch1 << 16);
// state0_ch2 = WebRtcSpl_AddSatW32(a0_ch2 <<1, (uint32_t)sample0_ch2 << 16);
//
// sample1_ch1 = data_ch1[n + 1];
// sample0_ch1 = (int16_t) (b0_ch1 >> 16); //Save as Q0
// sample1_ch2 = data_ch2[n + 1];
// sample0_ch2 = (int16_t) (b0_ch2 >> 16); //Save as Q0
//
//
// for (n = 0; n < length - 2; n += 2) {
// a1_ch1 = (factor_ch1[0] * sample1_ch1) << 1;
// a0_ch1 = (factor_ch1[1] * sample0_ch1) << 1;
// a1_ch2 = (factor_ch2[0] * sample1_ch2) << 1;
// a0_ch2 = (factor_ch2[1] * sample0_ch2) << 1;
//
// b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state0_ch1);
// b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state1_ch1); //Q16+Q16=Q16
// b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state0_ch2); //Q16+Q16=Q16
// b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state1_ch2); //Q16+Q16=Q16
//
// a1_ch1 = -factor_ch1[0] * (int16_t)(b1_ch1 >> 16);
// a0_ch1 = -factor_ch1[1] * (int16_t)(b0_ch1 >> 16);
// a1_ch2 = -factor_ch2[0] * (int16_t)(b1_ch2 >> 16);
// a0_ch2 = -factor_ch2[1] * (int16_t)(b0_ch2 >> 16);
//
// state0_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1 <<16);
// state1_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1 <<16);
// state0_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2 <<16);
// state1_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2 <<16);
//
// sample0_ch1 = data_ch1[n + 2];
// sample1_ch1 = (int16_t) (b1_ch1 >> 16); //Save as Q0
// sample0_ch2 = data_ch2[n + 2];
// sample1_ch2 = (int16_t) (b1_ch2 >> 16); //Save as Q0
//
// a0_ch1 = (factor_ch1[0] * sample0_ch1) << 1;
// a1_ch1 = (factor_ch1[1] * sample1_ch1) << 1;
// a0_ch2 = (factor_ch2[0] * sample0_ch2) << 1;
// a1_ch2 = (factor_ch2[1] * sample1_ch2) << 1;
//
// b2_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state0_ch1);
// b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state1_ch1); //Q16+Q16=Q16
// b2_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state0_ch2); //Q16+Q16=Q16
// b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state1_ch2); //Q16+Q16=Q16
//
// a0_ch1 = -factor_ch1[0] * (int16_t)(b2_ch1 >> 16);
// a1_ch1 = -factor_ch1[1] * (int16_t)(b1_ch1 >> 16);
// a0_ch2 = -factor_ch2[0] * (int16_t)(b2_ch2 >> 16);
// a1_ch2 = -factor_ch2[1] * (int16_t)(b1_ch2 >> 16);
//
// state0_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1<<16);
// state1_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1<<16);
// state0_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2<<16);
// state1_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2<<16);
//
//
// sample1_ch1 = data_ch1[n + 3];
// sample0_ch1 = (int16_t) (b2_ch1 >> 16); //Save as Q0
// sample1_ch2 = data_ch2[n + 3];
// sample0_ch2 = (int16_t) (b2_ch2 >> 16); //Save as Q0
//
// data_ch1[n] = (int16_t) (b0_ch1 >> 16); //Save as Q0
// data_ch1[n + 1] = (int16_t) (b1_ch1 >> 16); //Save as Q0
// data_ch2[n] = (int16_t) (b0_ch2 >> 16);
// data_ch2[n + 1] = (int16_t) (b1_ch2 >> 16);
// }
//
// // Loop unrolling post-processing.
//
// a1_ch1 = (factor_ch1[0] * sample1_ch1) << 1;
// a0_ch1 = (factor_ch1[1] * sample0_ch1) << 1;
// a1_ch2 = (factor_ch2[0] * sample1_ch2) << 1;
// a0_ch2 = (factor_ch2[1] * sample0_ch2) << 1;
//
// b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state0_ch1);
// b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state1_ch1);
// b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state0_ch2);
// b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state1_ch2);
//
// a1_ch1 = -factor_ch1[0] * (int16_t)(b1_ch1 >> 16);
// a0_ch1 = -factor_ch1[1] * (int16_t)(b0_ch1 >> 16);
// a1_ch2 = -factor_ch2[0] * (int16_t)(b1_ch2 >> 16);
// a0_ch2 = -factor_ch2[1] * (int16_t)(b0_ch2 >> 16);
//
// state0_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1 << 16);
// state1_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1 << 16);
// state0_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2 << 16);
// state1_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2 << 16);
//
// data_ch1[n] = (int16_t) (b0_ch1 >> 16); //Save as Q0
// data_ch2[n] = (int16_t) (b0_ch2 >> 16);
//
// sample1_ch1 = (int16_t) (b1_ch1 >> 16); //Save as Q0
// sample1_ch2 = (int16_t) (b1_ch2 >> 16); //Save as Q0
//
// a1_ch1 = (factor_ch1[1] * sample1_ch1) << 1;
// a1_ch2 = (factor_ch2[1] * sample1_ch2) << 1;
//
// b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state1_ch1); //Q16+Q16=Q16
// b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state1_ch2); //Q16+Q16=Q16
//
// a1_ch1 = -factor_ch1[1] * (int16_t)(b1_ch1 >> 16);
// a1_ch2 = -factor_ch2[1] * (int16_t)(b1_ch2 >> 16);
//
// state1_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1<<16);
// state1_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2<<16);
//
// data_ch1[n + 1] = (int16_t) (b1_ch1 >> 16); //Save as Q0
// data_ch2[n + 1] = (int16_t) (b1_ch2 >> 16);
//
// filter_state_ch1[0] = state0_ch1;
// filter_state_ch1[1] = state1_ch1;
// filter_state_ch2[0] = state0_ch2;
// filter_state_ch2[1] = state1_ch2;
//}
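// Added note (not part of the original file): in floating point, each of the
// two cascaded sections that the code above applies per channel reduces to
// the first-order allpass
//   y[n] = c * x[n] + s[n],   s[n + 1] = x[n] - c * y[n]
// i.e. H(z) = (c + z^-1) / (1 + c * z^-1), with c = factor / 32768.0 (Q15).
// A minimal sketch of one such section (hypothetical helper, for orientation
// only):
//
// static void AllpassSectionFloat(float* io, int len, float c, float* s) {
//   for (int n = 0; n < len; n++) {
//     float y = c * io[n] + *s;  // y[n] = c*x[n] + s[n]
//     *s = io[n] - c * y;        // s[n+1] = x[n] - c*y[n]
//     io[n] = y;
//   }
// }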

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "rtc_base/sanitizer.h"
#include "system_wrappers/include/cpu_features_wrapper.h"
#include "test/gtest.h"
class FilterBanksTest : public ::testing::Test {
protected:
// Pass a function pointer to the Tester function.
void RTC_NO_SANITIZE("signed-integer-overflow") // bugs.webrtc.org/5513
CalculateResidualEnergyTester(
AllpassFilter2FixDec16 AllpassFilter2FixDec16Function) {
const int kSamples = QLOOKAHEAD;
const int kState = 2;
int16_t data_ch1[kSamples] = {0};
int16_t data_ch2[kSamples] = {0};
int32_t state_ch1[kState] = {0};
int32_t state_ch2[kState] = {0};
const int32_t out_state_ch1[kState] = {-809122714, 1645972152};
const int32_t out_state_ch2[kState] = {428019288, 1057309936};
const int32_t out_data_ch1[kSamples] = {
0, 0, 347, 10618, 16718, -7089, 32767, 16913,
27042, 8377, -22973, -28372, -27603, -14804, 398, -25332,
-11200, 18044, 25223, -6839, 1116, -23984, 32717, 7364};
const int32_t out_data_ch2[kSamples] = {
0, 0, 3010, 22351, 21106, 16969, -2095, -664,
3513, -30980, 32767, -23839, 13335, 20289, -6831, 339,
-17207, 32767, 4959, 6177, 32767, 16599, -4747, 20504};
int sign = 1;
for (int i = 0; i < kSamples; i++) {
sign *= -1;
data_ch1[i] = sign * WEBRTC_SPL_WORD32_MAX / (i * i + 1);
data_ch2[i] = sign * WEBRTC_SPL_WORD32_MIN / (i * i + 1);
// UBSan: -1 * -2147483648 cannot be represented in type 'int'
}
AllpassFilter2FixDec16Function(
data_ch1, data_ch2, WebRtcIsacfix_kUpperApFactorsQ15,
WebRtcIsacfix_kLowerApFactorsQ15, kSamples, state_ch1, state_ch2);
for (int i = 0; i < kSamples; i++) {
EXPECT_EQ(out_data_ch1[i], data_ch1[i]);
EXPECT_EQ(out_data_ch2[i], data_ch2[i]);
}
for (int i = 0; i < kState; i++) {
EXPECT_EQ(out_state_ch1[i], state_ch1[i]);
EXPECT_EQ(out_state_ch2[i], state_ch2[i]);
}
}
};
TEST_F(FilterBanksTest, AllpassFilter2FixDec16Test) {
CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16C);
#if defined(WEBRTC_HAS_NEON)
CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16Neon);
#endif
}
TEST_F(FilterBanksTest, HighpassFilterFixDec32Test) {
const int kSamples = 20;
int16_t in[kSamples];
int32_t state[2] = {12345, 987654};
#ifdef WEBRTC_ARCH_ARM_V7
int32_t out[kSamples] = {-1040, -1035, -22875, -1397, -27604, 20018, 7917,
-1279, -8552, -14494, -7558, -23537, -27258, -30554,
-32768, -3432, -32768, 25215, -27536, 22436};
#else
int32_t out[kSamples] = {-1040, -1035, -22875, -1397, -27604, 20017, 7915,
-1280, -8554, -14496, -7561, -23541, -27263, -30560,
-32768, -3441, -32768, 25203, -27550, 22419};
#endif
HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
#if defined(MIPS_DSP_R1_LE)
WebRtcIsacfix_HighpassFilterFixDec32 =
WebRtcIsacfix_HighpassFilterFixDec32MIPS;
#else
WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
#endif
for (int i = 0; i < kSamples; i++) {
in[i] = WEBRTC_SPL_WORD32_MAX / (i + 1);
}
WebRtcIsacfix_HighpassFilterFixDec32(in, kSamples,
WebRtcIsacfix_kHPStCoeffOut1Q30, state);
for (int i = 0; i < kSamples; i++) {
EXPECT_EQ(out[i], in[i]);
}
}

View File

@ -0,0 +1,112 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "rtc_base/checks.h"
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
// Autocorrelation function in fixed point.
// NOTE! Different from SPLIB-version in how it scales the signal.
int WebRtcIsacfix_AutocorrC(int32_t* __restrict r,
const int16_t* __restrict x,
int16_t N,
int16_t order,
int16_t* __restrict scale) {
int i = 0;
int j = 0;
int16_t scaling = 0;
int32_t sum = 0;
uint32_t temp = 0;
int64_t prod = 0;
// Assumptions made by the ARM assembly code.
RTC_DCHECK_EQ(0, N % 4);
RTC_DCHECK_GE(N, 8);
// Calculate r[0].
for (i = 0; i < N; i++) {
prod += x[i] * x[i];
}
// Calculate scaling (the value of shifting).
temp = (uint32_t)(prod >> 31);
if (temp == 0) {
scaling = 0;
} else {
scaling = 32 - WebRtcSpl_NormU32(temp);
}
r[0] = (int32_t)(prod >> scaling);
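// Worked example (added comment, not in the original file): if prod == 2^40,
// then temp == prod >> 31 == 512, WebRtcSpl_NormU32(512) == 22, so
// scaling == 10 and r[0] == prod >> 10 == 2^30, which fits in a signed 32-bit
// word. In general, scaling is the smallest right shift that brings prod
// below 2^31.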
// Perform the actual correlation calculation.
for (i = 1; i < order + 1; i++) {
prod = 0;
for (j = 0; j < N - i; j++) {
prod += x[j] * x[i + j];
}
sum = (int32_t)(prod >> scaling);
r[i] = sum;
}
*scale = scaling;
return order + 1;
}
static const int32_t kApUpperQ15[ALLPASSSECTIONS] = { 1137, 12537 };
static const int32_t kApLowerQ15[ALLPASSSECTIONS] = { 5059, 24379 };
static void AllpassFilterForDec32(int16_t *InOut16, //Q0
const int32_t *APSectionFactors, //Q15
int16_t lengthInOut,
int32_t *FilterState) //Q16
{
int n, j;
int32_t a, b;
for (j=0; j<ALLPASSSECTIONS; j++) {
for (n=0;n<lengthInOut;n+=2){
a = WEBRTC_SPL_MUL_16_32_RSFT16(InOut16[n], APSectionFactors[j]); //Q0*Q31=Q31 shifted 16 gives Q15
a <<= 1; // Q15 -> Q16
b = WebRtcSpl_AddSatW32(a, FilterState[j]); //Q16+Q16=Q16
// `a` in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
a = WEBRTC_SPL_MUL_16_32_RSFT16(b >> 16, -APSectionFactors[j]);
// FilterState[j]: Q15<<1 + Q0<<16 = Q16 + Q16 = Q16
FilterState[j] = WebRtcSpl_AddSatW32(a << 1, (uint32_t)InOut16[n] << 16);
InOut16[n] = (int16_t)(b >> 16); // Save as Q0.
}
}
}
void WebRtcIsacfix_DecimateAllpass32(const int16_t *in,
int32_t *state_in, /* array of size: 2*ALLPASSSECTIONS+1 */
int16_t N, /* number of input samples */
int16_t *out) /* array of size N/2 */
{
int n;
int16_t data_vec[PITCH_FRAME_LEN];
/* copy input */
memcpy(data_vec + 1, in, sizeof(int16_t) * (N - 1));
data_vec[0] = (int16_t)(state_in[2 * ALLPASSSECTIONS] >> 16); // z^-1 state.
state_in[2 * ALLPASSSECTIONS] = (uint32_t)in[N - 1] << 16;
AllpassFilterForDec32(data_vec+1, kApUpperQ15, N, state_in);
AllpassFilterForDec32(data_vec, kApLowerQ15, N, state_in+ALLPASSSECTIONS);
for (n=0;n<N/2;n++) {
out[n] = WebRtcSpl_AddSatW16(data_vec[2 * n], data_vec[2 * n + 1]);
}
}

View File

@ -0,0 +1,365 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
// MIPS optimized implementation of the Autocorrelation function in fixed point.
// NOTE! Different from SPLIB-version in how it scales the signal.
int WebRtcIsacfix_AutocorrMIPS(int32_t* __restrict r,
const int16_t* __restrict x,
int16_t N,
int16_t order,
int16_t* __restrict scale) {
int i = 0;
int16_t scaling = 0;
int16_t* in = (int16_t*)x;
int loop_size = (int)(N >> 3);
int count = (int)(N & 7);
// Declare temporary variables used as register values.
int32_t r0, r1, r2, r3;
#if !defined(MIPS_DSP_R2_LE)
// For non-DSPR2 optimizations 4 more registers are used.
int32_t r4, r5, r6, r7;
#endif
// Calculate r[0] and scaling needed.
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"mult $0, $0 \n\t"
// Loop is unrolled 8 times, set accumulator to zero in branch delay slot.
"beqz %[loop_size], 2f \n\t"
" mult $0, $0 \n\t"
"1: \n\t"
// Load 8 samples per loop iteration.
#if defined(MIPS_DSP_R2_LE)
"ulw %[r0], 0(%[in]) \n\t"
"ulw %[r1], 4(%[in]) \n\t"
"ulw %[r2], 8(%[in]) \n\t"
"ulw %[r3], 12(%[in]) \n\t"
#else
"lh %[r0], 0(%[in]) \n\t"
"lh %[r1], 2(%[in]) \n\t"
"lh %[r2], 4(%[in]) \n\t"
"lh %[r3], 6(%[in]) \n\t"
"lh %[r4], 8(%[in]) \n\t"
"lh %[r5], 10(%[in]) \n\t"
"lh %[r6], 12(%[in]) \n\t"
"lh %[r7], 14(%[in]) \n\t"
#endif
"addiu %[loop_size], %[loop_size], -1 \n\t"
// Multiply and accumulate.
#if defined(MIPS_DSP_R2_LE)
"dpa.w.ph $ac0, %[r0], %[r0] \n\t"
"dpa.w.ph $ac0, %[r1], %[r1] \n\t"
"dpa.w.ph $ac0, %[r2], %[r2] \n\t"
"dpa.w.ph $ac0, %[r3], %[r3] \n\t"
#else
"madd %[r0], %[r0] \n\t"
"madd %[r1], %[r1] \n\t"
"madd %[r2], %[r2] \n\t"
"madd %[r3], %[r3] \n\t"
"madd %[r4], %[r4] \n\t"
"madd %[r5], %[r5] \n\t"
"madd %[r6], %[r6] \n\t"
"madd %[r7], %[r7] \n\t"
#endif
"bnez %[loop_size], 1b \n\t"
" addiu %[in], %[in], 16 \n\t"
"2: \n\t"
"beqz %[count], 4f \n\t"
#if defined(MIPS_DSP_R1_LE)
" extr.w %[r0], $ac0, 31 \n\t"
#else
" mfhi %[r2] \n\t"
#endif
// Process remaining samples (if any).
"3: \n\t"
"lh %[r0], 0(%[in]) \n\t"
"addiu %[count], %[count], -1 \n\t"
"madd %[r0], %[r0] \n\t"
"bnez %[count], 3b \n\t"
" addiu %[in], %[in], 2 \n\t"
#if defined(MIPS_DSP_R1_LE)
"extr.w %[r0], $ac0, 31 \n\t"
#else
"mfhi %[r2] \n\t"
#endif
"4: \n\t"
#if !defined(MIPS_DSP_R1_LE)
"mflo %[r3] \n\t"
"sll %[r0], %[r2], 1 \n\t"
"srl %[r1], %[r3], 31 \n\t"
"addu %[r0], %[r0], %[r1] \n\t"
#endif
// Calculate scaling (the value of shifting).
"clz %[r1], %[r0] \n\t"
"addiu %[r1], %[r1], -32 \n\t"
"subu %[scaling], $0, %[r1] \n\t"
"slti %[r1], %[r0], 0x1 \n\t"
"movn %[scaling], $0, %[r1] \n\t"
#if defined(MIPS_DSP_R1_LE)
"extrv.w %[r0], $ac0, %[scaling] \n\t"
"mfhi %[r2], $ac0 \n\t"
#else
"addiu %[r1], %[scaling], -32 \n\t"
"subu %[r1], $0, %[r1] \n\t"
"sllv %[r1], %[r2], %[r1] \n\t"
"srlv %[r0], %[r3], %[scaling] \n\t"
"addu %[r0], %[r0], %[r1] \n\t"
#endif
"slti %[r1], %[scaling], 32 \n\t"
"movz %[r0], %[r2], %[r1] \n\t"
".set pop \n\t"
: [loop_size] "+r" (loop_size), [in] "+r" (in), [r0] "=&r" (r0),
[r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
#if !defined(MIPS_DSP_R2_LE)
[r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
#endif
[count] "+r" (count), [scaling] "=r" (scaling)
: [N] "r" (N)
: "memory", "hi", "lo"
);
r[0] = r0;
// Correlation calculation is divided in 3 cases depending on the scaling
// value (different accumulator manipulation needed). Three slightly different
// loops are written in order to avoid branches inside the loop.
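// Added note (sketch, not in the original file): in scalar form, each of the
// three variants below computes, for lag i,
//   int64_t acc = 0;
//   for (j = 0; j < N - i; j++) acc += x[j] * x[i + j];
//   r[i] = (int32_t)(acc >> scaling);
// Only the way the result is read back from the HI/LO accumulator pair
// differs between the three cases.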
if (scaling == 0) {
// In this case, the result will be in the low part of the accumulator.
for (i = 1; i < order + 1; i++) {
in = (int16_t*)x;
int16_t* in1 = (int16_t*)x + i;
count = N - i;
loop_size = (count) >> 2;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"mult $0, $0 \n\t"
"beqz %[loop_size], 2f \n\t"
" andi %[count], %[count], 0x3 \n\t"
// Loop processing 4 pairs of samples per iteration.
"1: \n\t"
#if defined(MIPS_DSP_R2_LE)
"ulw %[r0], 0(%[in]) \n\t"
"ulw %[r1], 0(%[in1]) \n\t"
"ulw %[r2], 4(%[in]) \n\t"
"ulw %[r3], 4(%[in1]) \n\t"
#else
"lh %[r0], 0(%[in]) \n\t"
"lh %[r1], 0(%[in1]) \n\t"
"lh %[r2], 2(%[in]) \n\t"
"lh %[r3], 2(%[in1]) \n\t"
"lh %[r4], 4(%[in]) \n\t"
"lh %[r5], 4(%[in1]) \n\t"
"lh %[r6], 6(%[in]) \n\t"
"lh %[r7], 6(%[in1]) \n\t"
#endif
"addiu %[loop_size], %[loop_size], -1 \n\t"
#if defined(MIPS_DSP_R2_LE)
"dpa.w.ph $ac0, %[r0], %[r1] \n\t"
"dpa.w.ph $ac0, %[r2], %[r3] \n\t"
#else
"madd %[r0], %[r1] \n\t"
"madd %[r2], %[r3] \n\t"
"madd %[r4], %[r5] \n\t"
"madd %[r6], %[r7] \n\t"
#endif
"addiu %[in], %[in], 8 \n\t"
"bnez %[loop_size], 1b \n\t"
" addiu %[in1], %[in1], 8 \n\t"
"2: \n\t"
"beqz %[count], 4f \n\t"
" mflo %[r0] \n\t"
// Process remaining samples (if any).
"3: \n\t"
"lh %[r0], 0(%[in]) \n\t"
"lh %[r1], 0(%[in1]) \n\t"
"addiu %[count], %[count], -1 \n\t"
"addiu %[in], %[in], 2 \n\t"
"madd %[r0], %[r1] \n\t"
"bnez %[count], 3b \n\t"
" addiu %[in1], %[in1], 2 \n\t"
"mflo %[r0] \n\t"
"4: \n\t"
".set pop \n\t"
: [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
#if !defined(MIPS_DSP_R2_LE)
[r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
#endif
[r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
[count] "+r" (count)
:
: "memory", "hi", "lo"
);
r[i] = r0;
}
} else if (scaling == 32) {
// In this case, the result will be in the high part of the accumulator.
for (i = 1; i < order + 1; i++) {
in = (int16_t*)x;
int16_t* in1 = (int16_t*)x + i;
count = N - i;
loop_size = (count) >> 2;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"mult $0, $0 \n\t"
"beqz %[loop_size], 2f \n\t"
" andi %[count], %[count], 0x3 \n\t"
// Loop processing 4 pairs of samples per iteration.
"1: \n\t"
#if defined(MIPS_DSP_R2_LE)
"ulw %[r0], 0(%[in]) \n\t"
"ulw %[r1], 0(%[in1]) \n\t"
"ulw %[r2], 4(%[in]) \n\t"
"ulw %[r3], 4(%[in1]) \n\t"
#else
"lh %[r0], 0(%[in]) \n\t"
"lh %[r1], 0(%[in1]) \n\t"
"lh %[r2], 2(%[in]) \n\t"
"lh %[r3], 2(%[in1]) \n\t"
"lh %[r4], 4(%[in]) \n\t"
"lh %[r5], 4(%[in1]) \n\t"
"lh %[r6], 6(%[in]) \n\t"
"lh %[r7], 6(%[in1]) \n\t"
#endif
"addiu %[loop_size], %[loop_size], -1 \n\t"
#if defined(MIPS_DSP_R2_LE)
"dpa.w.ph $ac0, %[r0], %[r1] \n\t"
"dpa.w.ph $ac0, %[r2], %[r3] \n\t"
#else
"madd %[r0], %[r1] \n\t"
"madd %[r2], %[r3] \n\t"
"madd %[r4], %[r5] \n\t"
"madd %[r6], %[r7] \n\t"
#endif
"addiu %[in], %[in], 8 \n\t"
"bnez %[loop_size], 1b \n\t"
" addiu %[in1], %[in1], 8 \n\t"
"2: \n\t"
"beqz %[count], 4f \n\t"
" mfhi %[r0] \n\t"
// Process remaining samples (if any).
"3: \n\t"
"lh %[r0], 0(%[in]) \n\t"
"lh %[r1], 0(%[in1]) \n\t"
"addiu %[count], %[count], -1 \n\t"
"addiu %[in], %[in], 2 \n\t"
"madd %[r0], %[r1] \n\t"
"bnez %[count], 3b \n\t"
" addiu %[in1], %[in1], 2 \n\t"
"mfhi %[r0] \n\t"
"4: \n\t"
".set pop \n\t"
: [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
#if !defined(MIPS_DSP_R2_LE)
[r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
#endif
[r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
[count] "+r" (count)
:
: "memory", "hi", "lo"
);
r[i] = r0;
}
} else {
// In this case, the result is obtained by combining the low and high parts
// of the accumulator.
#if !defined(MIPS_DSP_R1_LE)
int32_t tmp_shift = 32 - scaling;
#endif
for (i = 1; i < order + 1; i++) {
in = (int16_t*)x;
int16_t* in1 = (int16_t*)x + i;
count = N - i;
loop_size = (count) >> 2;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"mult $0, $0 \n\t"
"beqz %[loop_size], 2f \n\t"
" andi %[count], %[count], 0x3 \n\t"
"1: \n\t"
#if defined(MIPS_DSP_R2_LE)
"ulw %[r0], 0(%[in]) \n\t"
"ulw %[r1], 0(%[in1]) \n\t"
"ulw %[r2], 4(%[in]) \n\t"
"ulw %[r3], 4(%[in1]) \n\t"
#else
"lh %[r0], 0(%[in]) \n\t"
"lh %[r1], 0(%[in1]) \n\t"
"lh %[r2], 2(%[in]) \n\t"
"lh %[r3], 2(%[in1]) \n\t"
"lh %[r4], 4(%[in]) \n\t"
"lh %[r5], 4(%[in1]) \n\t"
"lh %[r6], 6(%[in]) \n\t"
"lh %[r7], 6(%[in1]) \n\t"
#endif
"addiu %[loop_size], %[loop_size], -1 \n\t"
#if defined(MIPS_DSP_R2_LE)
"dpa.w.ph $ac0, %[r0], %[r1] \n\t"
"dpa.w.ph $ac0, %[r2], %[r3] \n\t"
#else
"madd %[r0], %[r1] \n\t"
"madd %[r2], %[r3] \n\t"
"madd %[r4], %[r5] \n\t"
"madd %[r6], %[r7] \n\t"
#endif
"addiu %[in], %[in], 8 \n\t"
"bnez %[loop_size], 1b \n\t"
" addiu %[in1], %[in1], 8 \n\t"
"2: \n\t"
"beqz %[count], 4f \n\t"
#if defined(MIPS_DSP_R1_LE)
" extrv.w %[r0], $ac0, %[scaling] \n\t"
#else
" mfhi %[r0] \n\t"
#endif
"3: \n\t"
"lh %[r0], 0(%[in]) \n\t"
"lh %[r1], 0(%[in1]) \n\t"
"addiu %[count], %[count], -1 \n\t"
"addiu %[in], %[in], 2 \n\t"
"madd %[r0], %[r1] \n\t"
"bnez %[count], 3b \n\t"
" addiu %[in1], %[in1], 2 \n\t"
#if defined(MIPS_DSP_R1_LE)
"extrv.w %[r0], $ac0, %[scaling] \n\t"
#else
"mfhi %[r0] \n\t"
#endif
"4: \n\t"
#if !defined(MIPS_DSP_R1_LE)
"mflo %[r1] \n\t"
"sllv %[r0], %[r0], %[tmp_shift] \n\t"
"srlv %[r1], %[r1], %[scaling] \n\t"
"addu %[r0], %[r0], %[r1] \n\t"
#endif
".set pop \n\t"
: [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
#if !defined(MIPS_DSP_R2_LE)
[r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
#endif
[r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
[count] "+r" (count)
: [scaling] "r" (scaling)
#if !defined(MIPS_DSP_R1_LE)
, [tmp_shift] "r" (tmp_shift)
#endif
: "memory", "hi", "lo"
);
r[i] = r0;
}
}
*scale = scaling;
return (order + 1);
}

View File

@ -0,0 +1,114 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "rtc_base/checks.h"
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
// Autocorrelation function in fixed point.
// NOTE! Different from SPLIB-version in how it scales the signal.
int WebRtcIsacfix_AutocorrNeon(int32_t* __restrict r,
const int16_t* x,
int16_t n,
int16_t order,
int16_t* __restrict scale) {
int i = 0;
int16_t scaling = 0;
uint32_t temp = 0;
int64_t prod = 0;
int64_t prod_tail = 0;
RTC_DCHECK_EQ(0, n % 4);
RTC_DCHECK_GE(n, 8);
// Calculate r[0].
int16x4_t x0_v;
int32x4_t tmpa0_v;
int64x2_t tmpb_v;
tmpb_v = vdupq_n_s64(0);
const int16_t* x_start = x;
const int16_t* x_end0 = x_start + n;
while (x_start < x_end0) {
x0_v = vld1_s16(x_start);
tmpa0_v = vmull_s16(x0_v, x0_v);
tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
x_start += 4;
}
#ifdef WEBRTC_ARCH_ARM64
prod = vaddvq_s64(tmpb_v);
#else
prod = vget_lane_s64(vadd_s64(vget_low_s64(tmpb_v), vget_high_s64(tmpb_v)),
0);
#endif
// Calculate scaling (the value of shifting).
temp = (uint32_t)(prod >> 31);
scaling = temp ? 32 - WebRtcSpl_NormU32(temp) : 0;
r[0] = (int32_t)(prod >> scaling);
int16x8_t x1_v;
int16x8_t y_v;
int32x4_t tmpa1_v;
// Perform the actual correlation calculation.
for (i = 1; i < order + 1; i++) {
tmpb_v = vdupq_n_s64(0);
int rest = (n - i) % 8;
x_start = x;
x_end0 = x_start + n - i - rest;
const int16_t* y_start = x_start + i;
while (x_start < x_end0) {
x1_v = vld1q_s16(x_start);
y_v = vld1q_s16(y_start);
tmpa0_v = vmull_s16(vget_low_s16(x1_v), vget_low_s16(y_v));
#ifdef WEBRTC_ARCH_ARM64
tmpa1_v = vmull_high_s16(x1_v, y_v);
#else
tmpa1_v = vmull_s16(vget_high_s16(x1_v), vget_high_s16(y_v));
#endif
tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
tmpb_v = vpadalq_s32(tmpb_v, tmpa1_v);
x_start += 8;
y_start += 8;
}
// The remaining calculation.
const int16_t* x_end1 = x + n - i;
if (rest >= 4) {
int16x4_t x2_v = vld1_s16(x_start);
int16x4_t y2_v = vld1_s16(y_start);
tmpa0_v = vmull_s16(x2_v, y2_v);
tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
x_start += 4;
y_start += 4;
}
#ifdef WEBRTC_ARCH_ARM64
prod = vaddvq_s64(tmpb_v);
#else
prod = vget_lane_s64(vadd_s64(vget_low_s64(tmpb_v), vget_high_s64(tmpb_v)),
0);
#endif
prod_tail = 0;
while (x_start < x_end1) {
prod_tail += *x_start * *y_start;
++x_start;
++y_start;
}
r[i] = (int32_t)((prod + prod_tail) >> scaling);
}
*scale = scaling;
return order + 1;
}

View File

@ -0,0 +1,68 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "system_wrappers/include/cpu_features_wrapper.h"
#include "test/gtest.h"
class FiltersTest : public ::testing::Test {
protected:
// Pass a function pointer to the Tester function.
void FiltersTester(AutocorrFix WebRtcIsacfix_AutocorrFixFunction) {
const int kOrder = 12;
const int kBuffer = 40;
int16_t scale = 0;
int32_t r_buffer[kOrder + 2] = {0};
// Test an overflow case.
const int16_t x_buffer_0[kBuffer] = {
0, 0, 3010, 22351, 21106, 16969, -2095, -664,
3513, -30980, 32767, -23839, 13335, 20289, -6831, 339,
-17207, 32767, 4959, 6177, 32767, 16599, -4747, 20504,
3513, -30980, 32767, -23839, 13335, 20289, 0, -16969,
-2095, -664, 3513, 31981, 32767, -13839, 23336, 30281};
const int32_t r_expected_0[kOrder + 2] = {
1872498461, -224288754, 203789985, 483400487, -208272635,
2436500, 137785322, 266600814, -208486262, 329510080,
137949184, -161738972, -26894267, 237630192};
WebRtcIsacfix_AutocorrFixFunction(r_buffer, x_buffer_0, kBuffer, kOrder + 1,
&scale);
for (int i = 0; i < kOrder + 2; i++) {
EXPECT_EQ(r_expected_0[i], r_buffer[i]);
}
EXPECT_EQ(3, scale);
// Test a no-overflow case.
const int16_t x_buffer_1[kBuffer] = {
0, 0, 300, 21, 206, 169, -295, -664, 3513, -300,
327, -29, 15, 289, -6831, 339, -107, 37, 59, 6177,
327, 169, -4747, 204, 313, -980, 767, -9, 135, 289,
0, -6969, -2095, -664, 0, 1, 7, -39, 236, 281};
const int32_t r_expected_1[kOrder + 2] = {
176253864, 8126617, 1983287, -26196788, -3487363,
-42839676, -24644043, 3469813, 30559879, 31905045,
5101567, 29328896, -55787438, -13163978};
WebRtcIsacfix_AutocorrFixFunction(r_buffer, x_buffer_1, kBuffer, kOrder + 1,
&scale);
for (int i = 0; i < kOrder + 2; i++) {
EXPECT_EQ(r_expected_1[i], r_buffer[i]);
}
EXPECT_EQ(0, scale);
}
};
TEST_F(FiltersTest, AutocorrFixTest) {
FiltersTester(WebRtcIsacfix_AutocorrC);
#if defined(WEBRTC_HAS_NEON)
FiltersTester(WebRtcIsacfix_AutocorrNeon);
#endif
}

View File

@ -0,0 +1,173 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* initialize.c
*
* Internal initialization functions
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
void WebRtcIsacfix_InitMaskingEnc(MaskFiltstr_enc *maskdata) {
int k;
for (k = 0; k < WINLEN; k++) {
maskdata->DataBufferLoQ0[k] = (int16_t) 0;
maskdata->DataBufferHiQ0[k] = (int16_t) 0;
}
for (k = 0; k < ORDERLO+1; k++) {
maskdata->CorrBufLoQQ[k] = (int32_t) 0;
maskdata->CorrBufLoQdom[k] = 0;
maskdata->PreStateLoGQ15[k] = 0;
}
for (k = 0; k < ORDERHI+1; k++) {
maskdata->CorrBufHiQQ[k] = (int32_t) 0;
maskdata->CorrBufHiQdom[k] = 0;
maskdata->PreStateHiGQ15[k] = 0;
}
maskdata->OldEnergy = 10;
return;
}
void WebRtcIsacfix_InitMaskingDec(MaskFiltstr_dec *maskdata) {
int k;
for (k = 0; k < ORDERLO+1; k++)
{
maskdata->PostStateLoGQ0[k] = 0;
}
for (k = 0; k < ORDERHI+1; k++)
{
maskdata->PostStateHiGQ0[k] = 0;
}
maskdata->OldEnergy = 10;
return;
}
void WebRtcIsacfix_InitPreFilterbank(PreFiltBankstr *prefiltdata)
{
int k;
for (k = 0; k < QLOOKAHEAD; k++) {
prefiltdata->INLABUF1_fix[k] = 0;
prefiltdata->INLABUF2_fix[k] = 0;
}
for (k = 0; k < 2 * (QORDER - 1); k++) {
prefiltdata->INSTAT1_fix[k] = 0;
prefiltdata->INSTAT2_fix[k] = 0;
}
/* High pass filter states */
prefiltdata->HPstates_fix[0] = 0;
prefiltdata->HPstates_fix[1] = 0;
return;
}
void WebRtcIsacfix_InitPostFilterbank(PostFiltBankstr *postfiltdata)
{
int k;
for (k = 0; k < 2 * POSTQORDER; k++) {
postfiltdata->STATE_0_LOWER_fix[k] = 0;
postfiltdata->STATE_0_UPPER_fix[k] = 0;
}
/* High pass filter states */
postfiltdata->HPstates1_fix[0] = 0;
postfiltdata->HPstates1_fix[1] = 0;
postfiltdata->HPstates2_fix[0] = 0;
postfiltdata->HPstates2_fix[1] = 0;
return;
}
void WebRtcIsacfix_InitPitchFilter(PitchFiltstr *pitchfiltdata)
{
int k;
for (k = 0; k < PITCH_BUFFSIZE; k++)
pitchfiltdata->ubufQQ[k] = 0;
for (k = 0; k < (PITCH_DAMPORDER); k++)
pitchfiltdata->ystateQQ[k] = 0;
pitchfiltdata->oldlagQ7 = 6400; /* 50.0 in Q7 */
pitchfiltdata->oldgainQ12 = 0;
}
void WebRtcIsacfix_InitPitchAnalysis(PitchAnalysisStruct *State)
{
int k;
for (k = 0; k < PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2; k++) {
State->dec_buffer16[k] = 0;
}
for (k = 0; k < 2 * ALLPASSSECTIONS + 1; k++) {
State->decimator_state32[k] = 0;
}
for (k = 0; k < QLOOKAHEAD; k++)
State->inbuf[k] = 0;
WebRtcIsacfix_InitPitchFilter(&(State->PFstr_wght));
WebRtcIsacfix_InitPitchFilter(&(State->PFstr));
}
void WebRtcIsacfix_InitPlc( PLCstr *State )
{
State->decayCoeffPriodic = WEBRTC_SPL_WORD16_MAX;
State->decayCoeffNoise = WEBRTC_SPL_WORD16_MAX;
State->used = PLC_WAS_USED;
WebRtcSpl_ZerosArrayW16(State->overlapLP, RECOVERY_OVERLAP);
WebRtcSpl_ZerosArrayW16(State->lofilt_coefQ15, ORDERLO);
WebRtcSpl_ZerosArrayW16(State->hifilt_coefQ15, ORDERHI );
State->AvgPitchGain_Q12 = 0;
State->lastPitchGain_Q12 = 0;
State->lastPitchLag_Q7 = 0;
State->gain_lo_hiQ17[0]=State->gain_lo_hiQ17[1] = 0;
WebRtcSpl_ZerosArrayW16(State->prevPitchInvIn, FRAMESAMPLES/2);
WebRtcSpl_ZerosArrayW16(State->prevPitchInvOut, PITCH_MAX_LAG + 10 );
WebRtcSpl_ZerosArrayW32(State->prevHP, PITCH_MAX_LAG + 10 );
State->pitchCycles = 0;
State->A = 0;
State->B = 0;
State->pitchIndex = 0;
State->stretchLag = 240;
State->seed = 4447;
}

View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h"
#include "rtc_base/checks.h"
namespace webrtc {
class IsacFix {
public:
using instance_type = ISACFIX_MainStruct;
static const bool has_swb = false;
static inline int16_t Control(instance_type* inst,
int32_t rate,
int framesize) {
return WebRtcIsacfix_Control(inst, rate, framesize);
}
static inline int16_t ControlBwe(instance_type* inst,
int32_t rate_bps,
int frame_size_ms,
int16_t enforce_frame_size) {
return WebRtcIsacfix_ControlBwe(inst, rate_bps, frame_size_ms,
enforce_frame_size);
}
static inline int16_t Create(instance_type** inst) {
return WebRtcIsacfix_Create(inst);
}
static inline int DecodeInternal(instance_type* inst,
const uint8_t* encoded,
size_t len,
int16_t* decoded,
int16_t* speech_type) {
return WebRtcIsacfix_Decode(inst, encoded, len, decoded, speech_type);
}
static inline size_t DecodePlc(instance_type* inst,
int16_t* decoded,
size_t num_lost_frames) {
return WebRtcIsacfix_DecodePlc(inst, decoded, num_lost_frames);
}
static inline void DecoderInit(instance_type* inst) {
WebRtcIsacfix_DecoderInit(inst);
}
static inline int Encode(instance_type* inst,
const int16_t* speech_in,
uint8_t* encoded) {
return WebRtcIsacfix_Encode(inst, speech_in, encoded);
}
static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
return WebRtcIsacfix_EncoderInit(inst, coding_mode);
}
static inline uint16_t EncSampRate(instance_type* inst) {
return kFixSampleRate;
}
static inline int16_t Free(instance_type* inst) {
return WebRtcIsacfix_Free(inst);
}
static inline int16_t GetErrorCode(instance_type* inst) {
return WebRtcIsacfix_GetErrorCode(inst);
}
static inline int16_t GetNewFrameLen(instance_type* inst) {
return WebRtcIsacfix_GetNewFrameLen(inst);
}
static inline int16_t SetDecSampRate(instance_type* inst,
uint16_t sample_rate_hz) {
RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
return 0;
}
static inline int16_t SetEncSampRate(instance_type* inst,
uint16_t sample_rate_hz) {
RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
return 0;
}
static inline void SetEncSampRateInDecoder(instance_type* inst,
uint16_t sample_rate_hz) {
RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
}
static inline void SetInitialBweBottleneck(instance_type* inst,
int bottleneck_bits_per_second) {
WebRtcIsacfix_SetInitialBweBottleneck(inst, bottleneck_bits_per_second);
}
static inline int16_t SetMaxPayloadSize(instance_type* inst,
int16_t max_payload_size_bytes) {
return WebRtcIsacfix_SetMaxPayloadSize(inst, max_payload_size_bytes);
}
static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
return WebRtcIsacfix_SetMaxRate(inst, max_bit_rate);
}
private:
enum { kFixSampleRate = 16000 };
};
} // namespace webrtc
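// Illustrative usage sketch (not part of the original header). It exercises
// only the wrappers declared above; the rate (32000 bps), frame size (30 ms)
// and coding mode (1, assumed to select instantaneous rate control in the
// iSAC API) are example values, and error handling is omitted.
//
// void IsacFixSmokeTest() {
//   webrtc::IsacFix::instance_type* inst = nullptr;
//   webrtc::IsacFix::Create(&inst);             // Allocate codec state.
//   webrtc::IsacFix::EncoderInit(inst, 1);      // Instantaneous coding mode.
//   webrtc::IsacFix::Control(inst, 32000, 30);  // 32 kbps, 30 ms frames.
//   webrtc::IsacFix::DecoderInit(inst);         // Prepare the decoder side.
//   webrtc::IsacFix::Free(inst);                // Release the codec state.
// }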
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_

File diff suppressed because it is too large

View File

@ -0,0 +1,321 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lattice.c
*
* Contains the normalized lattice filter routines (MA and AR) for the iSAC codec
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "rtc_base/sanitizer.h"
#define LATTICE_MUL_32_32_RSFT16(a32a, a32b, b32) \
((int32_t)(WEBRTC_SPL_MUL(a32a, b32) + (WEBRTC_SPL_MUL_16_32_RSFT16(a32b, b32))))
/* This macro is FORBIDDEN to use anywhere other than in a function in this
file and its corresponding Neon version. It might give unpredictable results,
since a general int32_t*int32_t multiplication results in a 64-bit value.
The result is then shifted just 16 steps to the right, so up to 48 bits are
needed, i.e. in the general case it will NOT fit in an int32_t. In the
cases used here, an int32_t is enough, since (for a good
reason) the involved multiplicands aren't big enough to overflow an
int32_t after shifting right 16 bits. I have compared the result of a
multiplication between tmp32 and tmp32b, done in two ways:
1) Using (int32_t) (((float)(tmp32))*((float)(tmp32b))/65536.0);
2) Using LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
By running 25 files, I haven't found any bigger diff than 64 - this was in the
case when method 1) gave 650235648 and 2) gave 650235712.
*/
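/* Reference for the comment above (illustrative sketch, not in the original
file; hypothetical helper name): the exact 64-bit version of the product that
the macro approximates, assuming the 32-bit operand was split beforehand as
a32 == ((int32_t)a32a << 16) + a32b, with the sign fix-up used below. */
static inline int32_t LatticeMul32x32Rsft16Ref(int32_t a32, int32_t b32) {
// Full-precision 64-bit product, then drop the 16 least significant bits.
return (int32_t)(((int64_t)a32 * b32) >> 16);
}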
/* Function prototype: filtering ar_g_Q0[] and ar_f_Q0[] through an AR filter
with coefficients cth_Q15[] and sth_Q15[].
Implemented for both generic and ARMv7 platforms.
*/
void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0,
int16_t* ar_f_Q0,
int16_t* cth_Q15,
int16_t* sth_Q15,
size_t order_coef);
/* Inner loop used for function WebRtcIsacfix_NormLatticeFilterMa(). It does:
for 0 <= n < HALF_SUBFRAMELEN - 1:
*ptr2 = input2 * (*ptr2 + input0 * (*ptr0));
*ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
Note: the functions WebRtcIsacfix_FilterMaLoopNeon and
WebRtcIsacfix_FilterMaLoopC are not bit-exact. The accuracy of the ARM Neon
function is the same or better.
*/
void WebRtcIsacfix_FilterMaLoopC(int16_t input0, // Filter coefficient
int16_t input1, // Filter coefficient
int32_t input2, // Inverse coeff. (1/input1)
int32_t* ptr0, // Sample buffer
int32_t* ptr1, // Sample buffer
int32_t* ptr2) { // Sample buffer
int n = 0;
// Separate the 32-bit variable input2 into two 16-bit integers (high 16 and
// low 16 bits), for using LATTICE_MUL_32_32_RSFT16 in the loop.
int16_t t16a = (int16_t)(input2 >> 16);
int16_t t16b = (int16_t)input2;
if (t16b < 0) t16a++;
// The loop filtering the samples *ptr0, *ptr1, *ptr2 with filter coefficients
// input0, input1, and input2.
for(n = 0; n < HALF_SUBFRAMELEN - 1; n++, ptr0++, ptr1++, ptr2++) {
int32_t tmp32a = 0;
int32_t tmp32b = 0;
// Calculate *ptr2 = input2 * (*ptr2 + input0 * (*ptr0));
tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr0); // Q15 * Q15 >> 15 = Q15
tmp32b = *ptr2 + tmp32a; // Q15 + Q15 = Q15
*ptr2 = LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
// Calculate *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input1, *ptr0); // Q15*Q15>>15 = Q15
tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr2); // Q15*Q15>>15 = Q15
*ptr1 = tmp32a + tmp32b; // Q15 + Q15 = Q15
}
}
/* filter the signal using normalized lattice filter */
/* MA filter */
void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
int32_t *stateGQ15,
int16_t *lat_inQ0,
int16_t *filt_coefQ15,
int32_t *gain_lo_hiQ17,
int16_t lo_hi,
int16_t *lat_outQ9)
{
int16_t sthQ15[MAX_AR_MODEL_ORDER];
int16_t cthQ15[MAX_AR_MODEL_ORDER];
int u, n;
size_t i, k;
int16_t temp2,temp3;
size_t ord_1 = orderCoef+1;
int32_t inv_cthQ16[MAX_AR_MODEL_ORDER];
int32_t gain32, fQtmp;
int16_t gain16;
int16_t gain_sh;
int32_t tmp32, tmp32b;
int32_t fQ15vec[HALF_SUBFRAMELEN];
int32_t gQ15[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
int16_t sh;
int16_t t16a;
int16_t t16b;
for (u=0;u<SUBFRAMES;u++)
{
int32_t temp1 = u * HALF_SUBFRAMELEN;
/* set the Direct Form coefficients */
temp2 = (int16_t)(u * orderCoef);
temp3 = (int16_t)(2 * u + lo_hi);
/* compute lattice filter coefficients */
memcpy(sthQ15, &filt_coefQ15[temp2], orderCoef * sizeof(int16_t));
WebRtcSpl_SqrtOfOneMinusXSquared(sthQ15, orderCoef, cthQ15);
/* compute the gain */
gain32 = gain_lo_hiQ17[temp3];
gain_sh = WebRtcSpl_NormW32(gain32);
gain32 <<= gain_sh; // Q(17+gain_sh)
for (k=0;k<orderCoef;k++)
{
gain32 = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[k], gain32); //Q15*Q(17+gain_sh)>>15 = Q(17+gain_sh)
inv_cthQ16[k] = WebRtcSpl_DivW32W16((int32_t)2147483647, cthQ15[k]); // 1/cth[k] in Q31/Q15 = Q16
}
gain16 = (int16_t)(gain32 >> 16); // Q(1+gain_sh).
/* normalized lattice filter */
/*****************************/
/* initial conditions */
for (i=0;i<HALF_SUBFRAMELEN;i++)
{
fQ15vec[i] = lat_inQ0[i + temp1] << 15; // Q15
gQ15[0][i] = lat_inQ0[i + temp1] << 15; // Q15
}
fQtmp = fQ15vec[0];
/* get the state of f&g for the first input, for all orders */
for (i=1;i<ord_1;i++)
{
// Calculate f[i][0] = inv_cth[i-1]*(f[i-1][0] + sth[i-1]*stateG[i-1]);
tmp32 = WEBRTC_SPL_MUL_16_32_RSFT15(sthQ15[i-1], stateGQ15[i-1]);//Q15*Q15>>15 = Q15
tmp32b= fQtmp + tmp32; //Q15+Q15=Q15
tmp32 = inv_cthQ16[i-1]; //Q16
t16a = (int16_t)(tmp32 >> 16);
t16b = (int16_t)(tmp32 - (t16a << 16));
if (t16b<0) t16a++;
tmp32 = LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
fQtmp = tmp32; // Q15
// Calculate g[i][0] = cth[i-1]*stateG[i-1] + sth[i-1]* f[i][0];
tmp32 = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[i-1], stateGQ15[i-1]); //Q15*Q15>>15 = Q15
tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(sthQ15[i-1], fQtmp); //Q15*Q15>>15 = Q15
tmp32 = tmp32 + tmp32b;//Q15+Q15 = Q15
gQ15[i][0] = tmp32; // Q15
}
/* filtering */
/* save the states */
for(k=0;k<orderCoef;k++)
{
// for 0 <= n < HALF_SUBFRAMELEN - 1:
// f[k+1][n+1] = inv_cth[k]*(f[k][n+1] + sth[k]*g[k][n]);
// g[k+1][n+1] = cth[k]*g[k][n] + sth[k]* f[k+1][n+1];
WebRtcIsacfix_FilterMaLoopFix(sthQ15[k], cthQ15[k], inv_cthQ16[k],
&gQ15[k][0], &gQ15[k+1][1], &fQ15vec[1]);
}
fQ15vec[0] = fQtmp;
for(n=0;n<HALF_SUBFRAMELEN;n++)
{
//gain32 >>= gain_sh; // Q(17+gain_sh) -> Q17
tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(gain16, fQ15vec[n]); //Q(1+gain_sh)*Q15>>16 = Q(gain_sh)
sh = 9-gain_sh; //number of needed shifts to reach Q9
t16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32, sh);
lat_outQ9[n + temp1] = t16a;
}
/* save the states */
for (i=0;i<ord_1;i++)
{
stateGQ15[i] = gQ15[i][HALF_SUBFRAMELEN-1];
}
//process next frame
}
return;
}
// Left shift of an int32_t that's allowed to overflow. (It's still undefined
// behavior, so not a good idea; this just makes UBSan ignore the violation, so
// that our old code can continue to do what it's always been doing.)
static inline int32_t RTC_NO_SANITIZE("shift")
OverflowingLShiftS32(int32_t x, int shift) {
return x << shift;
}
/* ----------------AR filter-------------------------*/
/* filter the signal using normalized lattice filter */
void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
int16_t *stateGQ0,
int32_t *lat_inQ25,
int16_t *filt_coefQ15,
int32_t *gain_lo_hiQ17,
int16_t lo_hi,
int16_t *lat_outQ0)
{
size_t ii, k, i;
int n, u;
int16_t sthQ15[MAX_AR_MODEL_ORDER];
int16_t cthQ15[MAX_AR_MODEL_ORDER];
int32_t tmp32;
int16_t tmpAR;
int16_t ARfQ0vec[HALF_SUBFRAMELEN];
int16_t ARgQ0vec[MAX_AR_MODEL_ORDER+1];
int32_t inv_gain32;
int16_t inv_gain16;
int16_t den16;
int16_t sh;
int16_t temp2,temp3;
size_t ord_1 = orderCoef+1;
for (u=0;u<SUBFRAMES;u++)
{
int32_t temp1 = u * HALF_SUBFRAMELEN;
//set the denominator and numerator of the Direct Form
temp2 = (int16_t)(u * orderCoef);
temp3 = (int16_t)(2 * u + lo_hi);
for (ii=0; ii<orderCoef; ii++) {
sthQ15[ii] = filt_coefQ15[temp2+ii];
}
WebRtcSpl_SqrtOfOneMinusXSquared(sthQ15, orderCoef, cthQ15);
// Originally, this line was assumed to never overflow, since "[s]imulation
// of the 25 files shows that maximum value in the vector gain_lo_hiQ17[]
// is 441344, which means that it is log2((2^31)/441344) = 12.2 shifting
// bits from saturation. Therefore, it should be safe to use Q27 instead of
// Q17." However, a fuzzer test succeeded in provoking an overflow here,
// which we ignore on the theory that only "abnormal" inputs cause
// overflow.
tmp32 = OverflowingLShiftS32(gain_lo_hiQ17[temp3], 10); // Q27
for (k=0;k<orderCoef;k++) {
tmp32 = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[k], tmp32); // Q15*Q27>>15 = Q27
}
sh = WebRtcSpl_NormW32(tmp32); // tmp32 is the gain
den16 = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32, sh-16); //Q(27+sh-16) = Q(sh+11) (all 16 bits are value bits)
inv_gain32 = WebRtcSpl_DivW32W16((int32_t)2147483647, den16); // 1/gain in Q31/Q(sh+11) = Q(20-sh)
//initial conditions
inv_gain16 = (int16_t)(inv_gain32 >> 2); // 1/gain in Q(20-sh-2) = Q(18-sh)
for (i=0;i<HALF_SUBFRAMELEN;i++)
{
tmp32 = OverflowingLShiftS32(lat_inQ25[i + temp1], 1); // Q25->Q26
tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(inv_gain16, tmp32); //lat_in[]*inv_gain in (Q(18-sh)*Q26)>>16 = Q(28-sh)
tmp32 = WEBRTC_SPL_SHIFT_W32(tmp32, -(28-sh)); // lat_in[]*inv_gain in Q0
ARfQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
}
// Get the state of f & g for the first input, for all orders.
for (i = orderCoef; i > 0; i--)
{
tmp32 = (cthQ15[i - 1] * ARfQ0vec[0] - sthQ15[i - 1] * stateGQ0[i - 1] +
16384) >> 15;
tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
tmp32 = (sthQ15[i - 1] * ARfQ0vec[0] + cthQ15[i - 1] * stateGQ0[i - 1] +
16384) >> 15;
ARgQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
ARfQ0vec[0] = tmpAR;
}
ARgQ0vec[0] = ARfQ0vec[0];
// Filter ARgQ0vec[] and ARfQ0vec[] through coefficients cthQ15[] and sthQ15[].
WebRtcIsacfix_FilterArLoop(ARgQ0vec, ARfQ0vec, cthQ15, sthQ15, orderCoef);
for(n=0;n<HALF_SUBFRAMELEN;n++)
{
lat_outQ0[n + temp1] = ARfQ0vec[n];
}
/* cannot use memcpy in the following */
for (i=0;i<ord_1;i++)
{
stateGQ0[i] = ARgQ0vec[i];
}
}
return;
}

View File

@ -0,0 +1,77 @@
@
@ Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
@
@ Use of this source code is governed by a BSD-style license
@ that can be found in the LICENSE file in the root of the source
@ tree. An additional intellectual property rights grant can be found
@ in the file PATENTS. All contributing project authors may
@ be found in the AUTHORS file in the root of the source tree.
@
@ Contains a function for the core loop in the normalized lattice AR
@ filter routine for the iSAC codec, optimized for ARMv7 platforms.
@
@ Output is bit-exact with the reference C code in lattice_c.c
@
@ Register usage:
@
@ r0: &ar_g_Q0
@ r1: &ar_f_Q0
@ r2: &cth_Q15
@ r3: &sth_Q15
@ r4: out loop counter
@ r5: tmpAR
@ r9: inner loop counter
@ r12: constant #16384
@ r6, r7, r8, r10, r11: scratch
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "rtc_base/system/asm_defines.h"
GLOBAL_FUNCTION WebRtcIsacfix_FilterArLoop
.align 2
DEFINE_FUNCTION WebRtcIsacfix_FilterArLoop
push {r4-r11}
add r1, #2 @ &ar_f_Q0[1]
mov r12, #16384
mov r4, #HALF_SUBFRAMELEN
sub r4, #1 @ Outer loop counter = HALF_SUBFRAMELEN - 1
HALF_SUBFRAME_LOOP: @ for (n = 0; n < HALF_SUBFRAMELEN - 1; n++)
ldr r9, [sp, #32] @ Restore the inner loop counter to order_coef
ldrh r5, [r1] @ tmpAR = ar_f_Q0[n+1]
add r0, r9, asl #1 @ Restore r0 to &ar_g_Q0[order_coef]
add r2, r9, asl #1 @ Restore r2 to &cth_Q15[order_coef]
add r3, r9, asl #1 @ Restore r3 to &sth_Q15[order_coef]
ORDER_COEF_LOOP: @ for (k = order_coef; k > 0; k--)
ldrh r7, [r3, #-2]! @ sth_Q15[k - 1]
ldrh r6, [r2, #-2]! @ cth_Q15[k - 1]
ldrh r8, [r0, #-2] @ ar_g_Q0[k - 1]
smlabb r11, r7, r5, r12 @ sth_Q15[k - 1] * tmpAR + 16384
smlabb r10, r6, r5, r12 @ cth_Q15[k - 1] * tmpAR + 16384
smulbb r7, r7, r8 @ sth_Q15[k - 1] * ar_g_Q0[k - 1]
smlabb r11, r6, r8, r11 @ cth_Q15[k - 1] * ar_g_Q0[k - 1] +
@ (sth_Q15[k - 1] * tmpAR + 16384)
sub r10, r10, r7 @ cth_Q15[k - 1] * tmpAR + 16384 -
@ (sth_Q15[k - 1] * ar_g_Q0[k - 1])
ssat r11, #16, r11, asr #15
ssat r5, #16, r10, asr #15
strh r11, [r0], #-2 @ Output: ar_g_Q0[k]
subs r9, #1
bgt ORDER_COEF_LOOP
strh r5, [r0] @ Output: ar_g_Q0[0] = tmpAR;
strh r5, [r1], #2 @ Output: ar_f_Q0[n+1] = tmpAR;
subs r4, #1
bne HALF_SUBFRAME_LOOP
pop {r4-r11}
bx lr

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* Contains the core loop function for the lattice filter AR routine
* for the iSAC codec.
*
*/
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
/* Filter ar_g_Q0[] and ar_f_Q0[] through an AR filter with coefficients
* cth_Q15[] and sth_Q15[].
*/
void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0, // Input samples
int16_t* ar_f_Q0, // Input samples
int16_t* cth_Q15, // Filter coefficients
int16_t* sth_Q15, // Filter coefficients
size_t order_coef) { // order of the filter
int n = 0;
for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
size_t k = 0;
int16_t tmpAR = 0;
int32_t tmp32 = 0;
int32_t tmp32_2 = 0;
tmpAR = ar_f_Q0[n + 1];
for (k = order_coef; k > 0; k--) {
tmp32 = (cth_Q15[k - 1] * tmpAR - sth_Q15[k - 1] * ar_g_Q0[k - 1] +
16384) >> 15;
tmp32_2 = (sth_Q15[k - 1] * tmpAR + cth_Q15[k - 1] * ar_g_Q0[k - 1] +
16384) >> 15;
tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32);
ar_g_Q0[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32_2);
}
ar_f_Q0[n + 1] = tmpAR;
ar_g_Q0[0] = tmpAR;
}
}

View File

@ -0,0 +1,329 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stddef.h>
#include <stdint.h>
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
// Filter ar_g_Q0[] and ar_f_Q0[] through an AR filter with coefficients
// cth_Q15[] and sth_Q15[].
void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0, // Input samples
int16_t* ar_f_Q0, // Input samples
int16_t* cth_Q15, // Filter coefficients
int16_t* sth_Q15, // Filter coefficients
size_t order_coef) { // order of the filter
int n = 0;
for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
int count = (int)(order_coef - 1);
int offset;
#if !defined(MIPS_DSP_R1_LE)
int16_t* tmp_cth;
int16_t* tmp_sth;
int16_t* tmp_arg;
int32_t max_q16 = 0x7fff;
int32_t min_q16 = 0xffff8000;
#endif
// Declare variables used as temporary registers.
int32_t r0, r1, r2, t0, t1, t2, t_ar;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"bltz %[count], 2f \n\t"
" lh %[t_ar], 0(%[tmp]) \n\t"
// Inner loop
"1: \n\t"
"sll %[offset], %[count], 1 \n\t"
#if defined(MIPS_DSP_R1_LE)
"lhx %[r0], %[offset](%[cth_Q15]) \n\t"
"lhx %[r1], %[offset](%[sth_Q15]) \n\t"
"lhx %[r2], %[offset](%[ar_g_Q0]) \n\t"
#else
"addu %[tmp_cth], %[cth_Q15], %[offset] \n\t"
"addu %[tmp_sth], %[sth_Q15], %[offset] \n\t"
"addu %[tmp_arg], %[ar_g_Q0], %[offset] \n\t"
"lh %[r0], 0(%[tmp_cth]) \n\t"
"lh %[r1], 0(%[tmp_sth]) \n\t"
"lh %[r2], 0(%[tmp_arg]) \n\t"
#endif
"mul %[t0], %[r0], %[t_ar] \n\t"
"mul %[t1], %[r1], %[t_ar] \n\t"
"mul %[t2], %[r1], %[r2] \n\t"
"mul %[r0], %[r0], %[r2] \n\t"
"subu %[t0], %[t0], %[t2] \n\t"
"addu %[t1], %[t1], %[r0] \n\t"
#if defined(MIPS_DSP_R1_LE)
"shra_r.w %[t1], %[t1], 15 \n\t"
"shra_r.w %[t0], %[t0], 15 \n\t"
#else
"addiu %[t1], %[t1], 0x4000 \n\t"
"sra %[t1], %[t1], 15 \n\t"
"addiu %[t0], %[t0], 0x4000 \n\t"
"sra %[t0], %[t0], 15 \n\t"
#endif
"addiu %[offset], %[offset], 2 \n\t"
#if defined(MIPS_DSP_R1_LE)
"shll_s.w %[t1], %[t1], 16 \n\t"
"shll_s.w %[t_ar], %[t0], 16 \n\t"
#else
"slt %[r0], %[t1], %[max_q16] \n\t"
"slt %[r1], %[t0], %[max_q16] \n\t"
"movz %[t1], %[max_q16], %[r0] \n\t"
"movz %[t0], %[max_q16], %[r1] \n\t"
#endif
"addu %[offset], %[offset], %[ar_g_Q0] \n\t"
#if defined(MIPS_DSP_R1_LE)
"sra %[t1], %[t1], 16 \n\t"
"sra %[t_ar], %[t_ar], 16 \n\t"
#else
"slt %[r0], %[t1], %[min_q16] \n\t"
"slt %[r1], %[t0], %[min_q16] \n\t"
"movn %[t1], %[min_q16], %[r0] \n\t"
"movn %[t0], %[min_q16], %[r1] \n\t"
"addu %[t_ar], $zero, %[t0] \n\t"
#endif
"sh %[t1], 0(%[offset]) \n\t"
"bgtz %[count], 1b \n\t"
" addiu %[count], %[count], -1 \n\t"
"2: \n\t"
"sh %[t_ar], 0(%[tmp]) \n\t"
"sh %[t_ar], 0(%[ar_g_Q0]) \n\t"
".set pop \n\t"
: [t_ar] "=&r" (t_ar), [count] "+r" (count), [offset] "=&r" (offset),
[r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [t0] "=&r" (t0),
#if !defined(MIPS_DSP_R1_LE)
[tmp_cth] "=&r" (tmp_cth), [tmp_sth] "=&r" (tmp_sth),
[tmp_arg] "=&r" (tmp_arg),
#endif
[t1] "=&r" (t1), [t2] "=&r" (t2)
: [tmp] "r" (&ar_f_Q0[n+1]), [cth_Q15] "r" (cth_Q15),
#if !defined(MIPS_DSP_R1_LE)
[max_q16] "r" (max_q16), [min_q16] "r" (min_q16),
#endif
[sth_Q15] "r" (sth_Q15), [ar_g_Q0] "r" (ar_g_Q0)
: "memory", "hi", "lo"
);
}
}
// MIPS optimization of the inner loop used for function
// WebRtcIsacfix_NormLatticeFilterMa(). It does:
//
// for 0 <= n < HALF_SUBFRAMELEN - 1:
// *ptr2 = input2 * ((*ptr2) + input0 * (*ptr0));
// *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
//
// Note: the functions WebRtcIsacfix_FilterMaLoopMIPS and WebRtcIsacfix_FilterMaLoopC
// are not bit-exact. The accuracy of the MIPS function is the same or better.
void WebRtcIsacfix_FilterMaLoopMIPS(int16_t input0, // Filter coefficient
int16_t input1, // Filter coefficient
int32_t input2, // Inverse coeff (1/input1)
int32_t* ptr0, // Sample buffer
int32_t* ptr1, // Sample buffer
int32_t* ptr2) { // Sample buffer
#if defined(MIPS_DSP_R2_LE)
// MIPS DSPR2 version. 4 available accumulators allows loop unrolling 4 times.
// This variant is not bit-exact with WebRtcIsacfix_FilterMaLoopC, since we
// are exploiting 64-bit accumulators. The accuracy of the MIPS DSPR2 function
// is the same or better.
int n = (HALF_SUBFRAMELEN - 1) >> 2;
int m = (HALF_SUBFRAMELEN - 1) & 3;
int r0, r1, r2, r3;
int t0, t1, t2, t3;
int s0, s1, s2, s3;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"1: \n\t"
"lw %[r0], 0(%[ptr0]) \n\t"
"lw %[r1], 4(%[ptr0]) \n\t"
"lw %[r2], 8(%[ptr0]) \n\t"
"lw %[r3], 12(%[ptr0]) \n\t"
"mult $ac0, %[r0], %[input0] \n\t"
"mult $ac1, %[r1], %[input0] \n\t"
"mult $ac2, %[r2], %[input0] \n\t"
"mult $ac3, %[r3], %[input0] \n\t"
"lw %[t0], 0(%[ptr2]) \n\t"
"extr_rs.w %[s0], $ac0, 15 \n\t"
"extr_rs.w %[s1], $ac1, 15 \n\t"
"extr_rs.w %[s2], $ac2, 15 \n\t"
"extr_rs.w %[s3], $ac3, 15 \n\t"
"lw %[t1], 4(%[ptr2]) \n\t"
"lw %[t2], 8(%[ptr2]) \n\t"
"lw %[t3], 12(%[ptr2]) \n\t"
"addu %[t0], %[t0], %[s0] \n\t"
"addu %[t1], %[t1], %[s1] \n\t"
"addu %[t2], %[t2], %[s2] \n\t"
"addu %[t3], %[t3], %[s3] \n\t"
"mult $ac0, %[t0], %[input2] \n\t"
"mult $ac1, %[t1], %[input2] \n\t"
"mult $ac2, %[t2], %[input2] \n\t"
"mult $ac3, %[t3], %[input2] \n\t"
"addiu %[ptr0], %[ptr0], 16 \n\t"
"extr_rs.w %[t0], $ac0, 16 \n\t"
"extr_rs.w %[t1], $ac1, 16 \n\t"
"extr_rs.w %[t2], $ac2, 16 \n\t"
"extr_rs.w %[t3], $ac3, 16 \n\t"
"addiu %[n], %[n], -1 \n\t"
"mult $ac0, %[r0], %[input1] \n\t"
"mult $ac1, %[r1], %[input1] \n\t"
"mult $ac2, %[r2], %[input1] \n\t"
"mult $ac3, %[r3], %[input1] \n\t"
"sw %[t0], 0(%[ptr2]) \n\t"
"extr_rs.w %[s0], $ac0, 15 \n\t"
"extr_rs.w %[s1], $ac1, 15 \n\t"
"extr_rs.w %[s2], $ac2, 15 \n\t"
"extr_rs.w %[s3], $ac3, 15 \n\t"
"sw %[t1], 4(%[ptr2]) \n\t"
"sw %[t2], 8(%[ptr2]) \n\t"
"sw %[t3], 12(%[ptr2]) \n\t"
"mult $ac0, %[t0], %[input0] \n\t"
"mult $ac1, %[t1], %[input0] \n\t"
"mult $ac2, %[t2], %[input0] \n\t"
"mult $ac3, %[t3], %[input0] \n\t"
"addiu %[ptr2], %[ptr2], 16 \n\t"
"extr_rs.w %[t0], $ac0, 15 \n\t"
"extr_rs.w %[t1], $ac1, 15 \n\t"
"extr_rs.w %[t2], $ac2, 15 \n\t"
"extr_rs.w %[t3], $ac3, 15 \n\t"
"addu %[t0], %[t0], %[s0] \n\t"
"addu %[t1], %[t1], %[s1] \n\t"
"addu %[t2], %[t2], %[s2] \n\t"
"addu %[t3], %[t3], %[s3] \n\t"
"sw %[t0], 0(%[ptr1]) \n\t"
"sw %[t1], 4(%[ptr1]) \n\t"
"sw %[t2], 8(%[ptr1]) \n\t"
"sw %[t3], 12(%[ptr1]) \n\t"
"bgtz %[n], 1b \n\t"
" addiu %[ptr1], %[ptr1], 16 \n\t"
"beq %[m], %0, 3f \n\t"
" nop \n\t"
"2: \n\t"
"lw %[r0], 0(%[ptr0]) \n\t"
"lw %[t0], 0(%[ptr2]) \n\t"
"addiu %[ptr0], %[ptr0], 4 \n\t"
"mult $ac0, %[r0], %[input0] \n\t"
"mult $ac1, %[r0], %[input1] \n\t"
"extr_rs.w %[r1], $ac0, 15 \n\t"
"extr_rs.w %[t1], $ac1, 15 \n\t"
"addu %[t0], %[t0], %[r1] \n\t"
"mult $ac0, %[t0], %[input2] \n\t"
"extr_rs.w %[t0], $ac0, 16 \n\t"
"sw %[t0], 0(%[ptr2]) \n\t"
"mult $ac0, %[t0], %[input0] \n\t"
"addiu %[ptr2], %[ptr2], 4 \n\t"
"addiu %[m], %[m], -1 \n\t"
"extr_rs.w %[t0], $ac0, 15 \n\t"
"addu %[t0], %[t0], %[t1] \n\t"
"sw %[t0], 0(%[ptr1]) \n\t"
"bgtz %[m], 2b \n\t"
" addiu %[ptr1], %[ptr1], 4 \n\t"
"3: \n\t"
".set pop \n\t"
: [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
[r3] "=&r" (r3), [t0] "=&r" (t0), [t1] "=&r" (t1),
[t2] "=&r" (t2), [t3] "=&r" (t3), [s0] "=&r" (s0),
[s1] "=&r" (s1), [s2] "=&r" (s2), [s3] "=&r" (s3),
[ptr0] "+r" (ptr0), [ptr1] "+r" (ptr1), [m] "+r" (m),
[ptr2] "+r" (ptr2), [n] "+r" (n)
: [input0] "r" (input0), [input1] "r" (input1),
[input2] "r" (input2)
: "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi",
"$ac2lo", "$ac3hi", "$ac3lo"
);
#else
// Non-DSPR2 version of the function. Avoiding the accumulator usage due to
// large latencies. This variant is bit-exact with C code.
int n = HALF_SUBFRAMELEN - 1;
int32_t t16a, t16b;
int32_t r0, r1, r2, r3, r4;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"sra %[t16a], %[input2], 16 \n\t"
"andi %[t16b], %[input2], 0xFFFF \n\t"
#if defined(MIPS32R2_LE)
"seh %[t16b], %[t16b] \n\t"
"seh %[input0], %[input0] \n\t"
"seh %[input1], %[input1] \n\t"
#else
"sll %[t16b], %[t16b], 16 \n\t"
"sra %[t16b], %[t16b], 16 \n\t"
"sll %[input0], %[input0], 16 \n\t"
"sra %[input0], %[input0], 16 \n\t"
"sll %[input1], %[input1], 16 \n\t"
"sra %[input1], %[input1], 16 \n\t"
#endif
"addiu %[r0], %[t16a], 1 \n\t"
"slt %[r1], %[t16b], $zero \n\t"
"movn %[t16a], %[r0], %[r1] \n\t"
"1: \n\t"
"lw %[r0], 0(%[ptr0]) \n\t"
"lw %[r1], 0(%[ptr2]) \n\t"
"addiu %[ptr0], %[ptr0], 4 \n\t"
"sra %[r2], %[r0], 16 \n\t"
"andi %[r0], %[r0], 0xFFFF \n\t"
"mul %[r3], %[r2], %[input0] \n\t"
"mul %[r4], %[r0], %[input0] \n\t"
"mul %[r2], %[r2], %[input1] \n\t"
"mul %[r0], %[r0], %[input1] \n\t"
"addiu %[ptr2], %[ptr2], 4 \n\t"
"sll %[r3], %[r3], 1 \n\t"
"sra %[r4], %[r4], 1 \n\t"
"addiu %[r4], %[r4], 0x2000 \n\t"
"sra %[r4], %[r4], 14 \n\t"
"addu %[r3], %[r3], %[r4] \n\t"
"addu %[r1], %[r1], %[r3] \n\t"
"sra %[r3], %[r1], 16 \n\t"
"andi %[r4], %[r1], 0xFFFF \n\t"
"sra %[r4], %[r4], 1 \n\t"
"mul %[r1], %[r1], %[t16a] \n\t"
"mul %[r3], %[r3], %[t16b] \n\t"
"mul %[r4], %[r4], %[t16b] \n\t"
"sll %[r2], %[r2], 1 \n\t"
"sra %[r0], %[r0], 1 \n\t"
"addiu %[r0], %[r0], 0x2000 \n\t"
"sra %[r0], %[r0], 14 \n\t"
"addu %[r0], %[r0], %[r2] \n\t"
"addiu %[n], %[n], -1 \n\t"
"addu %[r1], %[r1], %[r3] \n\t"
"addiu %[r4], %[r4], 0x4000 \n\t"
"sra %[r4], %[r4], 15 \n\t"
"addu %[r1], %[r1], %[r4] \n\t"
"sra %[r2], %[r1], 16 \n\t"
"andi %[r3], %[r1], 0xFFFF \n\t"
"mul %[r3], %[r3], %[input0] \n\t"
"mul %[r2], %[r2], %[input0] \n\t"
"sw %[r1], -4(%[ptr2]) \n\t"
"sra %[r3], %[r3], 1 \n\t"
"addiu %[r3], %[r3], 0x2000 \n\t"
"sra %[r3], %[r3], 14 \n\t"
"addu %[r0], %[r0], %[r3] \n\t"
"sll %[r2], %[r2], 1 \n\t"
"addu %[r0], %[r0], %[r2] \n\t"
"sw %[r0], 0(%[ptr1]) \n\t"
"bgtz %[n], 1b \n\t"
" addiu %[ptr1], %[ptr1], 4 \n\t"
".set pop \n\t"
: [t16a] "=&r" (t16a), [t16b] "=&r" (t16b), [r0] "=&r" (r0),
[r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
[r4] "=&r" (r4), [ptr0] "+r" (ptr0), [ptr1] "+r" (ptr1),
[ptr2] "+r" (ptr2), [n] "+r" (n)
: [input0] "r" (input0), [input1] "r" (input1),
[input2] "r" (input2)
: "hi", "lo", "memory"
);
#endif
}
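
For readers without a MIPS background, a plain-C sketch of the MA loop that the two inline-assembly variants above optimize is given here. The rounding of the real WEBRTC_SPL_MUL_16_32_RSFT15 / LATTICE_MUL_32_32_RSFT16 macros is only approximated, and the helper name and explicit length parameter are illustrative.

#include <stdint.h>

/* Plain-C sketch of the normalized-lattice MA inner loop: for each sample,
 *   *ptr2 = input2 * ((*ptr2) + input0 * (*ptr0));
 *   *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
 * with input0/input1 in Q15, input2 (an inverse coefficient, treated here as
 * Q16) and the sample buffers in Q0. Not bit-exact with the WebRTC macros. */
static void FilterMaLoopSketch(int16_t input0, int16_t input1, int32_t input2,
                               int32_t* ptr0, int32_t* ptr1, int32_t* ptr2,
                               int len /* HALF_SUBFRAMELEN - 1 in the codec */) {
  for (int n = 0; n < len; n++) {
    int64_t tmp = ptr2[n] + (((int64_t)input0 * ptr0[n]) >> 15);
    ptr2[n] = (int32_t)(((int64_t)input2 * tmp) >> 16);
    ptr1[n] = (int32_t)((((int64_t)input1 * ptr0[n]) >> 15) +
                        (((int64_t)input0 * ptr2[n]) >> 15));
  }
}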

View File

@ -0,0 +1,195 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
// Contains a function for the core loop in the normalized lattice MA
// filter routine for iSAC codec, optimized for ARM Neon platform.
// It does:
// for 0 <= n < HALF_SUBFRAMELEN - 1:
// *ptr2 = input2 * ((*ptr2) + input0 * (*ptr0));
// *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
// Output is not bit-exact with the reference C code, due to the replacement
// of WEBRTC_SPL_MUL_16_32_RSFT15 and LATTICE_MUL_32_32_RSFT16 with Neon
// instructions. The difference should not be bigger than 1.
void WebRtcIsacfix_FilterMaLoopNeon(int16_t input0, // Filter coefficient
int16_t input1, // Filter coefficient
int32_t input2, // Inverse coefficient
int32_t* ptr0, // Sample buffer
int32_t* ptr1, // Sample buffer
int32_t* ptr2) // Sample buffer
{
int n = 0;
int loop = (HALF_SUBFRAMELEN - 1) >> 3;
int loop_tail = (HALF_SUBFRAMELEN - 1) & 0x7;
int32x4_t input0_v = vdupq_n_s32((int32_t)input0 << 16);
int32x4_t input1_v = vdupq_n_s32((int32_t)input1 << 16);
int32x4_t input2_v = vdupq_n_s32(input2);
int32x4_t tmp0a, tmp1a, tmp2a, tmp3a;
int32x4_t tmp0b, tmp1b, tmp2b, tmp3b;
int32x4_t ptr0va, ptr1va, ptr2va;
int32x4_t ptr0vb, ptr1vb, ptr2vb;
int64x2_t tmp2al_low, tmp2al_high, tmp2bl_low, tmp2bl_high;
// Unroll to process 8 samples at once.
for (n = 0; n < loop; n++) {
ptr0va = vld1q_s32(ptr0);
ptr0vb = vld1q_s32(ptr0 + 4);
ptr0 += 8;
ptr2va = vld1q_s32(ptr2);
ptr2vb = vld1q_s32(ptr2 + 4);
// Calculate tmp0 = (*ptr0) * input0.
tmp0a = vqrdmulhq_s32(ptr0va, input0_v);
tmp0b = vqrdmulhq_s32(ptr0vb, input0_v);
// Calculate tmp1 = (*ptr0) * input1.
tmp1a = vqrdmulhq_s32(ptr0va, input1_v);
tmp1b = vqrdmulhq_s32(ptr0vb, input1_v);
// Calculate tmp2 = tmp0 + *(ptr2).
tmp2a = vaddq_s32(tmp0a, ptr2va);
tmp2b = vaddq_s32(tmp0b, ptr2vb);
// Calculate *ptr2 = input2 * tmp2.
tmp2al_low = vmull_s32(vget_low_s32(tmp2a), vget_low_s32(input2_v));
#if defined(WEBRTC_ARCH_ARM64)
tmp2al_high = vmull_high_s32(tmp2a, input2_v);
#else
tmp2al_high = vmull_s32(vget_high_s32(tmp2a), vget_high_s32(input2_v));
#endif
ptr2va = vcombine_s32(vrshrn_n_s64(tmp2al_low, 16),
vrshrn_n_s64(tmp2al_high, 16));
tmp2bl_low = vmull_s32(vget_low_s32(tmp2b), vget_low_s32(input2_v));
#if defined(WEBRTC_ARCH_ARM64)
tmp2bl_high = vmull_high_s32(tmp2b, input2_v);
#else
tmp2bl_high = vmull_s32(vget_high_s32(tmp2b), vget_high_s32(input2_v));
#endif
ptr2vb = vcombine_s32(vrshrn_n_s64(tmp2bl_low, 16),
vrshrn_n_s64(tmp2bl_high, 16));
vst1q_s32(ptr2, ptr2va);
vst1q_s32(ptr2 + 4, ptr2vb);
ptr2 += 8;
// Calculate tmp3 = ptr2v * input0.
tmp3a = vqrdmulhq_s32(ptr2va, input0_v);
tmp3b = vqrdmulhq_s32(ptr2vb, input0_v);
// Calculate *ptr1 = tmp1 + tmp3.
ptr1va = vaddq_s32(tmp1a, tmp3a);
ptr1vb = vaddq_s32(tmp1b, tmp3b);
vst1q_s32(ptr1, ptr1va);
vst1q_s32(ptr1 + 4, ptr1vb);
ptr1 += 8;
}
// Process four more samples.
if (loop_tail & 0x4) {
ptr0va = vld1q_s32(ptr0);
ptr2va = vld1q_s32(ptr2);
ptr0 += 4;
// Calculate tmp0 = (*ptr0) * input0.
tmp0a = vqrdmulhq_s32(ptr0va, input0_v);
// Calculate tmp1 = (*ptr0) * input1.
tmp1a = vqrdmulhq_s32(ptr0va, input1_v);
// Calculate tmp2 = tmp0 + *(ptr2).
tmp2a = vaddq_s32(tmp0a, ptr2va);
// Calculate *ptr2 = input2 * tmp2.
tmp2al_low = vmull_s32(vget_low_s32(tmp2a), vget_low_s32(input2_v));
#if defined(WEBRTC_ARCH_ARM64)
tmp2al_high = vmull_high_s32(tmp2a, input2_v);
#else
tmp2al_high = vmull_s32(vget_high_s32(tmp2a), vget_high_s32(input2_v));
#endif
ptr2va = vcombine_s32(vrshrn_n_s64(tmp2al_low, 16),
vrshrn_n_s64(tmp2al_high, 16));
vst1q_s32(ptr2, ptr2va);
ptr2 += 4;
// Calculate tmp3 = *(ptr2) * input0.
tmp3a = vqrdmulhq_s32(ptr2va, input0_v);
// Calculate *ptr1 = tmp1 + tmp3.
ptr1va = vaddq_s32(tmp1a, tmp3a);
vst1q_s32(ptr1, ptr1va);
ptr1 += 4;
}
// Process two more samples.
if (loop_tail & 0x2) {
int32x2_t ptr0v_tail, ptr2v_tail, ptr1v_tail;
int32x2_t tmp0_tail, tmp1_tail, tmp2_tail, tmp3_tail;
int64x2_t tmp2l_tail;
ptr0v_tail = vld1_s32(ptr0);
ptr2v_tail = vld1_s32(ptr2);
ptr0 += 2;
// Calculate tmp0 = (*ptr0) * input0.
tmp0_tail = vqrdmulh_s32(ptr0v_tail, vget_low_s32(input0_v));
// Calculate tmp1 = (*ptr0) * input1.
tmp1_tail = vqrdmulh_s32(ptr0v_tail, vget_low_s32(input1_v));
// Calculate tmp2 = tmp0 + *(ptr2).
tmp2_tail = vadd_s32(tmp0_tail, ptr2v_tail);
// Calculate *ptr2 = input2 * tmp2.
tmp2l_tail = vmull_s32(tmp2_tail, vget_low_s32(input2_v));
ptr2v_tail = vrshrn_n_s64(tmp2l_tail, 16);
vst1_s32(ptr2, ptr2v_tail);
ptr2 += 2;
// Calculate tmp3 = *(ptr2) * input0.
tmp3_tail = vqrdmulh_s32(ptr2v_tail, vget_low_s32(input0_v));
// Calculate *ptr1 = tmp1 + tmp3.
ptr1v_tail = vadd_s32(tmp1_tail, tmp3_tail);
vst1_s32(ptr1, ptr1v_tail);
ptr1 += 2;
}
// Process one more sample.
if (loop_tail & 0x1) {
int16_t t16a = (int16_t)(input2 >> 16);
int16_t t16b = (int16_t)input2;
if (t16b < 0) t16a++;
int32_t tmp32a;
int32_t tmp32b;
// Calculate *ptr2 = input2 * (*ptr2 + input0 * (*ptr0)).
tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr0);
tmp32b = *ptr2 + tmp32a;
*ptr2 = (int32_t)(WEBRTC_SPL_MUL(t16a, tmp32b) +
(WEBRTC_SPL_MUL_16_32_RSFT16(t16b, tmp32b)));
// Calculate *ptr1 = input1 * (*ptr0) + input0 * (*ptr2).
tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input1, *ptr0);
tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr2);
*ptr1 = tmp32a + tmp32b;
}
}
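
The substitution that makes the Neon version possible is worth spelling out: vqrdmulhq_s32 computes a rounding-doubling high-half product, so placing a Q15 coefficient in the upper half of a 32-bit lane yields approximately (sample * coef) >> 15, which is why the output can differ from the C reference by at most 1. A scalar model of that identity (names are illustrative):

#include <stdint.h>

/* Scalar model of vqrdmulh on 32-bit lanes: saturate((2*a*b + 2^31) >> 32). */
static int32_t RoundingDoublingHighMul(int32_t a, int32_t b) {
  int64_t p = 2 * (int64_t)a * (int64_t)b + (1LL << 31);
  int64_t r = p >> 32;
  if (r > INT32_MAX) return INT32_MAX;
  if (r < INT32_MIN) return INT32_MIN;
  return (int32_t)r;
}

/* With the Q15 coefficient pre-shifted into the high half (coef << 16), the
 * result is (sample * coef + 2^14) >> 15, i.e. the Q15 multiply with rounding. */
static int32_t MulQ15Approx(int32_t sample, int16_t coef_q15) {
  return RoundingDoublingHighMul(sample, (int32_t)coef_q15 << 16);
}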

View File

@ -0,0 +1,949 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_masking_model.c
*
* LPC analysis and filtering functions
*
*/
#include "lpc_masking_model.h"
#include <limits.h> /* For LLONG_MAX and LLONG_MIN. */
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
/* The conversion is implemented by the step-down algorithm */
void WebRtcSpl_AToK_JSK(
int16_t *a16, /* Q11 */
int16_t useOrder,
int16_t *k16 /* Q15 */
)
{
int m, k;
int32_t tmp32[MAX_AR_MODEL_ORDER];
int32_t tmp32b;
int32_t tmp_inv_denum32;
int16_t tmp_inv_denum16;
k16[useOrder-1] = a16[useOrder] << 4; // Q11<<4 => Q15
for (m=useOrder-1; m>0; m--) {
// (1 - k^2) in Q30
tmp_inv_denum32 = 1073741823 - k16[m] * k16[m];
tmp_inv_denum16 = (int16_t)(tmp_inv_denum32 >> 15); // (1 - k^2) in Q15.
for (k=1; k<=m; k++) {
tmp32b = (a16[k] << 16) - ((k16[m] * a16[m - k + 1]) << 1);
tmp32[k] = WebRtcSpl_DivW32W16(tmp32b, tmp_inv_denum16); //Q27/Q15 = Q12
}
for (k=1; k<m; k++) {
a16[k] = (int16_t)(tmp32[k] >> 1); // Q12>>1 => Q11
}
tmp32[m] = WEBRTC_SPL_SAT(4092, tmp32[m], -4092);
k16[m - 1] = (int16_t)(tmp32[m] << 3); // Q12<<3 => Q15
}
return;
}
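/* The loop above is the standard step-down (backward Levinson) recursion: with
 * k_m = a_m taken as the m-th reflection coefficient, the order-(m-1)
 * coefficients are obtained as
 *
 *   a'_k = (a_k - k_m * a_{m-k}) / (1 - k_m^2),   k = 1 .. m-1,
 *
 * which the fixed-point code evaluates as Q27 / Q15 = Q12 before shifting back
 * to Q11 (predictor coefficients) and Q15 (reflection coefficients). */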
int16_t WebRtcSpl_LevinsonW32_JSK(
int32_t *R, /* (i) Autocorrelation of length >= order+1 */
int16_t *A, /* (o) A[0..order] LPC coefficients (Q11) */
int16_t *K, /* (o) K[0...order-1] Reflection coefficients (Q15) */
int16_t order /* (i) filter order */
) {
int16_t i, j;
int16_t R_hi[LEVINSON_MAX_ORDER+1], R_low[LEVINSON_MAX_ORDER+1];
/* Autocorrelation coefficients in high precision */
int16_t A_hi[LEVINSON_MAX_ORDER+1], A_low[LEVINSON_MAX_ORDER+1];
/* LPC coefficients in high precision */
int16_t A_upd_hi[LEVINSON_MAX_ORDER+1], A_upd_low[LEVINSON_MAX_ORDER+1];
/* LPC coefficients for next iteration */
int16_t K_hi, K_low; /* reflection coefficient in high precision */
int16_t Alpha_hi, Alpha_low, Alpha_exp; /* Prediction gain Alpha in high precision
and with scale factor */
int16_t tmp_hi, tmp_low;
int32_t temp1W32, temp2W32, temp3W32;
int16_t norm;
/* Normalize the autocorrelation R[0]...R[order+1] */
norm = WebRtcSpl_NormW32(R[0]);
for (i=order;i>=0;i--) {
temp1W32 = R[i] << norm;
/* Put R in hi and low format */
R_hi[i] = (int16_t)(temp1W32 >> 16);
R_low[i] = (int16_t)((temp1W32 - ((int32_t)R_hi[i] << 16)) >> 1);
}
/* K = A[1] = -R[1] / R[0] */
temp2W32 = (R_hi[1] << 16) + (R_low[1] << 1); /* R[1] in Q31 */
temp3W32 = WEBRTC_SPL_ABS_W32(temp2W32); /* abs R[1] */
temp1W32 = WebRtcSpl_DivW32HiLow(temp3W32, R_hi[0], R_low[0]); /* abs(R[1])/R[0] in Q31 */
/* Put back the sign on R[1] */
if (temp2W32 > 0) {
temp1W32 = -temp1W32;
}
/* Put K in hi and low format */
K_hi = (int16_t)(temp1W32 >> 16);
K_low = (int16_t)((temp1W32 - ((int32_t)K_hi << 16)) >> 1);
/* Store first reflection coefficient */
K[0] = K_hi;
temp1W32 >>= 4; /* A[1] in Q27. */
/* Put A[1] in hi and low format */
A_hi[1] = (int16_t)(temp1W32 >> 16);
A_low[1] = (int16_t)((temp1W32 - ((int32_t)A_hi[1] << 16)) >> 1);
/* Alpha = R[0] * (1-K^2) */
temp1W32 = (((K_hi * K_low) >> 14) + K_hi * K_hi) << 1; /* = k^2 in Q31 */
temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32); /* Guard against <0 */
temp1W32 = (int32_t)0x7fffffffL - temp1W32; /* temp1W32 = (1 - K[0]*K[0]) in Q31 */
/* Store temp1W32 = 1 - K[0]*K[0] on hi and low format */
tmp_hi = (int16_t)(temp1W32 >> 16);
tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1);
/* Calculate Alpha in Q31 */
temp1W32 = (R_hi[0] * tmp_hi + ((R_hi[0] * tmp_low) >> 15) +
((R_low[0] * tmp_hi) >> 15)) << 1;
/* Normalize Alpha and put it in hi and low format */
Alpha_exp = WebRtcSpl_NormW32(temp1W32);
temp1W32 <<= Alpha_exp;
Alpha_hi = (int16_t)(temp1W32 >> 16);
Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi<< 16)) >> 1);
/* Perform the iterative calculations in the
Levinson Durbin algorithm */
for (i=2; i<=order; i++)
{
/* temp1W32 = R[i] + sum over j = 1..i-1 of R[j] * A[i-j] */
temp1W32 = 0;
for(j=1; j<i; j++) {
/* temp1W32 is in Q31 */
temp1W32 += ((R_hi[j] * A_hi[i - j]) << 1) +
((((R_hi[j] * A_low[i - j]) >> 15) +
((R_low[j] * A_hi[i - j]) >> 15)) << 1);
}
temp1W32 <<= 4;
temp1W32 += (R_hi[i] << 16) + (R_low[i] << 1);
/* K = -temp1W32 / Alpha */
temp2W32 = WEBRTC_SPL_ABS_W32(temp1W32); /* abs(temp1W32) */
temp3W32 = WebRtcSpl_DivW32HiLow(temp2W32, Alpha_hi, Alpha_low); /* abs(temp1W32)/Alpha */
/* Put the sign of temp1W32 back again */
if (temp1W32 > 0) {
temp3W32 = -temp3W32;
}
/* Use the Alpha shifts from earlier to denormalize */
norm = WebRtcSpl_NormW32(temp3W32);
if ((Alpha_exp <= norm)||(temp3W32==0)) {
temp3W32 <<= Alpha_exp;
} else {
if (temp3W32 > 0)
{
temp3W32 = (int32_t)0x7fffffffL;
} else
{
temp3W32 = (int32_t)0x80000000L;
}
}
/* Put K on hi and low format */
K_hi = (int16_t)(temp3W32 >> 16);
K_low = (int16_t)((temp3W32 - ((int32_t)K_hi << 16)) >> 1);
/* Store Reflection coefficient in Q15 */
K[i-1] = K_hi;
/* Test for unstable filter. If unstable, return a negative value and let
the user decide what to do in that case
*/
if ((int32_t)WEBRTC_SPL_ABS_W16(K_hi) > (int32_t)32740) {
return(-i); /* Unstable filter */
}
/*
Compute updated LPC coefficient: Anew[i]
Anew[j]= A[j] + K*A[i-j] for j=1..i-1
Anew[i]= K
*/
for(j=1; j<i; j++)
{
temp1W32 = (A_hi[j] << 16) + (A_low[j] << 1); // temp1W32 = A[j] in Q27
temp1W32 += (K_hi * A_hi[i - j] + ((K_hi * A_low[i - j]) >> 15) +
((K_low * A_hi[i - j]) >> 15)) << 1; // temp1W32 += K*A[i-j] in Q27.
/* Put Anew in hi and low format */
A_upd_hi[j] = (int16_t)(temp1W32 >> 16);
A_upd_low[j] = (int16_t)((temp1W32 - ((int32_t)A_upd_hi[j] << 16)) >> 1);
}
temp3W32 >>= 4; /* temp3W32 = K in Q27 (Convert from Q31 to Q27) */
/* Store Anew in hi and low format */
A_upd_hi[i] = (int16_t)(temp3W32 >> 16);
A_upd_low[i] = (int16_t)((temp3W32 - ((int32_t)A_upd_hi[i] << 16)) >> 1);
/* Alpha = Alpha * (1-K^2) */
temp1W32 = (((K_hi * K_low) >> 14) + K_hi * K_hi) << 1; /* K*K in Q31 */
temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32); /* Guard against <0 */
temp1W32 = (int32_t)0x7fffffffL - temp1W32; /* 1 - K*K in Q31 */
/* Convert 1- K^2 in hi and low format */
tmp_hi = (int16_t)(temp1W32 >> 16);
tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1);
/* Calculate Alpha = Alpha * (1-K^2) in Q31 */
temp1W32 = (Alpha_hi * tmp_hi + ((Alpha_hi * tmp_low) >> 15) +
((Alpha_low * tmp_hi) >> 15)) << 1;
/* Normalize Alpha and store it on hi and low format */
norm = WebRtcSpl_NormW32(temp1W32);
temp1W32 <<= norm;
Alpha_hi = (int16_t)(temp1W32 >> 16);
Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi << 16)) >> 1);
/* Update the total normalization of Alpha */
Alpha_exp = Alpha_exp + norm;
/* Update A[] */
for(j=1; j<=i; j++)
{
A_hi[j] =A_upd_hi[j];
A_low[j] =A_upd_low[j];
}
}
/*
Set A[0] to 1.0 and store the A[i] i=1...order in Q12
(Convert from Q27 and use rounding)
*/
A[0] = 2048;
for(i=1; i<=order; i++) {
/* temp1W32 in Q27 */
temp1W32 = (A_hi[i] << 16) + (A_low[i] << 1);
/* Round and store upper word */
A[i] = (int16_t)((temp1W32 + 32768) >> 16);
}
return(1); /* Stable filters */
}
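/* For orientation, the recursion implemented above with 32-bit hi/low words is
 * the ordinary Levinson-Durbin algorithm. A floating-point sketch, illustrative
 * only: scaling, sign handling and the stability threshold differ from the
 * Q-format code, and the scratch size assumes order <= 31. */
static int LevinsonDurbinSketch(const double* R, double* A, double* K, int order) {
  double tmp[32];
  double alpha = R[0];                       /* prediction error energy */
  A[0] = 1.0;
  for (int i = 1; i <= order; i++) {
    double acc = R[i];
    for (int j = 1; j < i; j++) acc += A[j] * R[i - j];
    double k = -acc / alpha;                 /* reflection coefficient */
    if (k <= -1.0 || k >= 1.0) return -i;    /* unstable filter */
    K[i - 1] = k;
    for (int j = 1; j < i; j++) tmp[j] = A[j] + k * A[i - j];
    for (int j = 1; j < i; j++) A[j] = tmp[j];
    A[i] = k;
    alpha *= 1.0 - k * k;                    /* shrink the error energy */
  }
  return 1;                                  /* stable */
}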
/* window */
/* Matlab generation of floating point code:
* t = (1:256)/257; r = 1-(1-t).^.45; w = sin(r*pi).^3; w = w/sum(w); plot((1:256)/8, w); grid;
* for k=1:16, fprintf(1, '%.8f, ', w(k*16 + (-15:0))); fprintf(1, '\n'); end
* All values are multiplied by 2^21 in the fixed point code.
*/
static const int16_t kWindowAutocorr[WINLEN] = {
0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 5, 6,
8, 10, 12, 14, 17, 20, 24, 28, 33, 38, 43, 49,
56, 63, 71, 79, 88, 98, 108, 119, 131, 143, 157, 171,
186, 202, 219, 237, 256, 275, 296, 318, 341, 365, 390, 416,
444, 472, 502, 533, 566, 600, 635, 671, 709, 748, 789, 831,
875, 920, 967, 1015, 1065, 1116, 1170, 1224, 1281, 1339, 1399, 1461,
1525, 1590, 1657, 1726, 1797, 1870, 1945, 2021, 2100, 2181, 2263, 2348,
2434, 2523, 2614, 2706, 2801, 2898, 2997, 3099, 3202, 3307, 3415, 3525,
3637, 3751, 3867, 3986, 4106, 4229, 4354, 4481, 4611, 4742, 4876, 5012,
5150, 5291, 5433, 5578, 5725, 5874, 6025, 6178, 6333, 6490, 6650, 6811,
6974, 7140, 7307, 7476, 7647, 7820, 7995, 8171, 8349, 8529, 8711, 8894,
9079, 9265, 9453, 9642, 9833, 10024, 10217, 10412, 10607, 10803, 11000, 11199,
11398, 11597, 11797, 11998, 12200, 12401, 12603, 12805, 13008, 13210, 13412, 13614,
13815, 14016, 14216, 14416, 14615, 14813, 15009, 15205, 15399, 15591, 15782, 15971,
16157, 16342, 16524, 16704, 16881, 17056, 17227, 17395, 17559, 17720, 17877, 18030,
18179, 18323, 18462, 18597, 18727, 18851, 18970, 19082, 19189, 19290, 19384, 19471,
19551, 19623, 19689, 19746, 19795, 19835, 19867, 19890, 19904, 19908, 19902, 19886,
19860, 19823, 19775, 19715, 19644, 19561, 19465, 19357, 19237, 19102, 18955, 18793,
18618, 18428, 18223, 18004, 17769, 17518, 17252, 16970, 16672, 16357, 16025, 15677,
15311, 14929, 14529, 14111, 13677, 13225, 12755, 12268, 11764, 11243, 10706, 10152,
9583, 8998, 8399, 7787, 7162, 6527, 5883, 5231, 4576, 3919, 3265, 2620,
1990, 1386, 825, 333
};
/* By using a hearing threshold level in dB of -28 dB (higher value gives more noise),
the H_T_H (in float) can be calculated as:
H_T_H = pow(10.0, 0.05 * (-28.0)) = 0.039810717055350
In Q19, H_T_H becomes round(0.039810717055350*2^19) ~= 20872, i.e.
H_T_H = 20872/524288.0, and H_T_HQ19 = 20872;
*/
/* The bandwidth expansion vectors are created from:
kPolyVecLo=[0.900000,0.810000,0.729000,0.656100,0.590490,0.531441,0.478297,0.430467,0.387420,0.348678,0.313811,0.282430];
kPolyVecHi=[0.800000,0.640000,0.512000,0.409600,0.327680,0.262144];
round(kPolyVecLo*32768)
round(kPolyVecHi*32768)
*/
static const int16_t kPolyVecLo[12] = {
29491, 26542, 23888, 21499, 19349, 17414, 15673, 14106, 12695, 11425, 10283, 9255
};
static const int16_t kPolyVecHi[6] = {
26214, 20972, 16777, 13422, 10737, 8590
};
static __inline int32_t log2_Q8_LPC( uint32_t x ) {
int32_t zeros;
int16_t frac;
zeros=WebRtcSpl_NormU32(x);
frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
/* log2(x) */
return ((31 - zeros) << 8) + frac;
}
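/* log2_Q8_LPC() approximates 256*log2(x) as (exponent << 8) + the top 8
 * mantissa bits, i.e. log2(x) ~= floor(log2(x)) + (mantissa - 1). Worked
 * examples:
 *   log2_Q8_LPC(1) = 0      (exact, log2(1) = 0)
 *   log2_Q8_LPC(2) = 256    (exact, log2(2) = 1)
 *   log2_Q8_LPC(3) = 384    (1.5 in Q8, vs. the true value 1.585)
 * The same piecewise-linear approximation is used by WebRtcIsacfix_Log2Q8 in
 * the pitch estimator. */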
static const int16_t kMulPitchGain = -25; /* 200/256 in Q5 */
static const int16_t kChngFactor = 3523; /* log10(2)*10/4*0.4/1.4=log10(2)/1.4= 0.2150 in Q14 */
static const int16_t kExp2 = 11819; /* 1/log(2) */
const int kShiftLowerBand = 11; /* Shift value for lower band in Q domain. */
const int kShiftHigherBand = 12; /* Shift value for higher band in Q domain. */
void WebRtcIsacfix_GetVars(const int16_t *input, const int16_t *pitchGains_Q12,
uint32_t *oldEnergy, int16_t *varscale)
{
int k;
uint32_t nrgQ[4];
int16_t nrgQlog[4];
int16_t tmp16, chng1, chng2, chng3, chng4, tmp, chngQ, oldNrgQlog, pgQ, pg3;
int32_t expPg32;
int16_t expPg, divVal;
int16_t tmp16_1, tmp16_2;
/* Calculate the energies of the four frame quarters */
nrgQ[0]=0;
for (k = QLOOKAHEAD/2; k < (FRAMESAMPLES/4 + QLOOKAHEAD) / 2; k++) {
nrgQ[0] += (uint32_t)(input[k] * input[k]);
}
nrgQ[1]=0;
for ( ; k < (FRAMESAMPLES/2 + QLOOKAHEAD) / 2; k++) {
nrgQ[1] += (uint32_t)(input[k] * input[k]);
}
nrgQ[2]=0;
for ( ; k < (FRAMESAMPLES * 3 / 4 + QLOOKAHEAD) / 2; k++) {
nrgQ[2] += (uint32_t)(input[k] * input[k]);
}
nrgQ[3]=0;
for ( ; k < (FRAMESAMPLES + QLOOKAHEAD) / 2; k++) {
nrgQ[3] += (uint32_t)(input[k] * input[k]);
}
for ( k=0; k<4; k++) {
nrgQlog[k] = (int16_t)log2_Q8_LPC(nrgQ[k]); /* log2(nrgQ) */
}
oldNrgQlog = (int16_t)log2_Q8_LPC(*oldEnergy);
/* Calculate average level change */
chng1 = WEBRTC_SPL_ABS_W16(nrgQlog[3]-nrgQlog[2]);
chng2 = WEBRTC_SPL_ABS_W16(nrgQlog[2]-nrgQlog[1]);
chng3 = WEBRTC_SPL_ABS_W16(nrgQlog[1]-nrgQlog[0]);
chng4 = WEBRTC_SPL_ABS_W16(nrgQlog[0]-oldNrgQlog);
tmp = chng1+chng2+chng3+chng4;
chngQ = (int16_t)(tmp * kChngFactor >> 10); /* Q12 */
chngQ += 2926; /* + 1.0/1.4 in Q12 */
/* Find average pitch gain */
pgQ = 0;
for (k=0; k<4; k++)
{
pgQ += pitchGains_Q12[k];
}
pg3 = (int16_t)(pgQ * pgQ >> 11); // pgQ in Q(12+2)=Q14. Q14*Q14>>11 => Q17
pg3 = (int16_t)(pgQ * pg3 >> 13); /* Q14*Q17>>13 =>Q18 */
/* kMulPitchGain = -25 = -200 in Q-3. */
pg3 = (int16_t)(pg3 * kMulPitchGain >> 5); // Q10
tmp16=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2,pg3,13);/* Q13*Q10>>13 => Q10*/
if (tmp16<0) {
tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
tmp16_1 = ((uint16_t)(tmp16 ^ 0xFFFF) >> 10) - 3; /* Gives result in Q14 */
if (tmp16_1<0)
expPg = -(tmp16_2 << -tmp16_1);
else
expPg = -(tmp16_2 >> tmp16_1);
} else
expPg = (int16_t) -16384; /* 1 in Q14, since 2^0=1 */
expPg32 = (int32_t)expPg << 8; /* Q22 */
divVal = WebRtcSpl_DivW32W16ResW16(expPg32, chngQ); /* Q22/Q12=Q10 */
tmp16=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2,divVal,13);/* Q13*Q10>>13 => Q10*/
if (tmp16<0) {
tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
tmp16_1 = ((uint16_t)(tmp16 ^ 0xFFFF) >> 10) - 3; /* Gives result in Q14 */
if (tmp16_1<0)
expPg = tmp16_2 << -tmp16_1;
else
expPg = tmp16_2 >> tmp16_1;
} else
expPg = (int16_t) 16384; /* 1 in Q14, since 2^0=1 */
*varscale = expPg-1;
*oldEnergy = nrgQ[3];
}
static __inline int16_t exp2_Q10_T(int16_t x) { // Both in and out in Q10
int16_t tmp16_1, tmp16_2;
tmp16_2=(int16_t)(0x0400|(x&0x03FF));
tmp16_1 = -(x >> 10);
if(tmp16_1>0)
return tmp16_2 >> tmp16_1;
else
return tmp16_2 << -tmp16_1;
}
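/* exp2_Q10_T() is the matching approximation in the other direction: it
 * returns 2^floor(x) * (1 + frac(x)) with both argument and result in Q10, so
 *   exp2_Q10_T(0)     = 1024  (1.0)
 *   exp2_Q10_T(1024)  = 2048  (2.0)
 *   exp2_Q10_T(-1024) = 512   (0.5)
 *   exp2_Q10_T(512)   = 1536  (1.5, vs. the true sqrt(2) ~= 1.414)
 * Combined with log2_Q8_LPC() above, this gives cheap table-free fixed-point
 * exponentials. */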
// Declare function pointers.
AutocorrFix WebRtcIsacfix_AutocorrFix;
CalculateResidualEnergy WebRtcIsacfix_CalculateResidualEnergy;
/* This routine calculates the residual energy for LPC.
* Formula as shown in comments inside.
*/
int32_t WebRtcIsacfix_CalculateResidualEnergyC(int lpc_order,
int32_t q_val_corr,
int q_val_polynomial,
int16_t* a_polynomial,
int32_t* corr_coeffs,
int* q_val_residual_energy) {
int i = 0, j = 0;
int shift_internal = 0, shift_norm = 0;
int32_t tmp32 = 0, word32_high = 0, word32_low = 0, residual_energy = 0;
int64_t sum64 = 0, sum64_tmp = 0;
for (i = 0; i <= lpc_order; i++) {
for (j = i; j <= lpc_order; j++) {
/* For the case of i == 0: residual_energy +=
* a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i];
* For the case of i != 0: residual_energy +=
* a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i] * 2;
*/
tmp32 = a_polynomial[j] * a_polynomial[j - i];
/* tmp32 in Q(q_val_polynomial * 2). */
if (i != 0) {
tmp32 <<= 1;
}
sum64_tmp = (int64_t)tmp32 * (int64_t)corr_coeffs[i];
sum64_tmp >>= shift_internal;
/* Test overflow and sum the result. */
if(((sum64_tmp > 0 && sum64 > 0) && (LLONG_MAX - sum64 < sum64_tmp)) ||
((sum64_tmp < 0 && sum64 < 0) && (LLONG_MIN - sum64 > sum64_tmp))) {
/* Shift right for overflow. */
shift_internal += 1;
sum64 >>= 1;
sum64 += sum64_tmp >> 1;
} else {
sum64 += sum64_tmp;
}
}
}
word32_high = (int32_t)(sum64 >> 32);
word32_low = (int32_t)sum64;
// Calculate the value of shifting (shift_norm) for the 64-bit sum.
if(word32_high != 0) {
shift_norm = 32 - WebRtcSpl_NormW32(word32_high);
residual_energy = (int32_t)(sum64 >> shift_norm);
} else {
if((word32_low & 0x80000000) != 0) {
shift_norm = 1;
residual_energy = (uint32_t)word32_low >> 1;
} else {
shift_norm = WebRtcSpl_NormW32(word32_low);
residual_energy = word32_low << shift_norm;
shift_norm = -shift_norm;
}
}
/* Q(q_val_polynomial * 2) * Q(q_val_corr) >> shift_internal >> shift_norm
* = Q(q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2)
*/
*q_val_residual_energy = q_val_corr - shift_internal - shift_norm
+ q_val_polynomial * 2;
return residual_energy;
}
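/* In closed form, the double loop above accumulates
 *
 *   E = sum_{i=0..order} sum_{j=i..order} c_i * a[j] * a[j-i] * r[i],
 *       with c_0 = 1 and c_i = 2 for i > 0,
 *
 * which equals a^T R a for the symmetric Toeplitz matrix R built from
 * corr_coeffs, i.e. the LPC residual energy, evaluated without forming R.
 * shift_internal guards the 64-bit accumulator against overflow, and the
 * resulting Q-domain is reported through *q_val_residual_energy. */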
void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
int16_t *inHiQ0,
MaskFiltstr_enc *maskdata,
int16_t snrQ10,
const int16_t *pitchGains_Q12,
int32_t *gain_lo_hiQ17,
int16_t *lo_coeffQ15,
int16_t *hi_coeffQ15)
{
int k, n, ii;
int pos1, pos2;
int sh_lo, sh_hi, sh, ssh, shMem;
int16_t varscaleQ14;
int16_t tmpQQlo, tmpQQhi;
int32_t tmp32;
int16_t tmp16,tmp16b;
int16_t polyHI[ORDERHI+1];
int16_t rcQ15_lo[ORDERLO], rcQ15_hi[ORDERHI];
int16_t DataLoQ6[WINLEN], DataHiQ6[WINLEN];
int32_t corrloQQ[ORDERLO+2];
int32_t corrhiQQ[ORDERHI+1];
int32_t corrlo2QQ[ORDERLO+1];
int16_t scale;
int16_t QdomLO, QdomHI, newQdomHI, newQdomLO;
int32_t res_nrgQQ;
int32_t sqrt_nrg;
/* less-noise-at-low-frequencies factor */
int16_t aaQ14;
/* Multiplication with 1/sqrt(12) ~= 0.28901734104046 can be done by conversion to
Q15, i.e. round(0.28901734104046*32768) = 9471, and use 9471/32768.0 ~= 0.289032
*/
int16_t snrq;
int shft;
int16_t tmp16a;
int32_t tmp32a, tmp32b, tmp32c;
int16_t a_LOQ11[ORDERLO+1];
int16_t k_vecloQ15[ORDERLO];
int16_t a_HIQ12[ORDERHI+1];
int16_t k_vechiQ15[ORDERHI];
int16_t stab;
snrq=snrQ10;
/* SNR= C * 2 ^ (D * snrq) ; C=0.289, D=0.05*log2(10)=0.166 (~=172 in Q10)*/
tmp16 = (int16_t)(snrq * 172 >> 10); // Q10
tmp16b = exp2_Q10_T(tmp16); // Q10
snrq = (int16_t)(tmp16b * 285 >> 10); // Q10
/* change the quality level depending on pitch gains and level fluctuations */
WebRtcIsacfix_GetVars(inLoQ0, pitchGains_Q12, &(maskdata->OldEnergy), &varscaleQ14);
/* less-noise-at-low-frequencies factor */
/* Calculation of 0.35 * (0.5 + 0.5 * varscale) in fixpoint:
With 0.35 in Q16 (0.35 ~= 22938/65536.0 = 0.3500061) and varscaleQ14 in Q14,
we get Q16*Q14>>16 = Q14
*/
aaQ14 = (int16_t)((22938 * (8192 + (varscaleQ14 >> 1)) + 32768) >> 16);
/* Calculate tmp = (1.0 + aa*aa); in Q12 */
tmp16 = (int16_t)(aaQ14 * aaQ14 >> 15); // Q14*Q14>>15 = Q13
tmpQQlo = 4096 + (tmp16 >> 1); // Q12 + Q13>>1 = Q12.
/* Calculate tmp = (1.0+aa) * (1.0+aa); */
tmp16 = 8192 + (aaQ14 >> 1); // 1+a in Q13.
tmpQQhi = (int16_t)(tmp16 * tmp16 >> 14); // Q13*Q13>>14 = Q12
/* replace data in buffer by new look-ahead data */
for (pos1 = 0; pos1 < QLOOKAHEAD; pos1++) {
maskdata->DataBufferLoQ0[pos1 + WINLEN - QLOOKAHEAD] = inLoQ0[pos1];
}
for (k = 0; k < SUBFRAMES; k++) {
/* Update input buffer and multiply signal with window */
for (pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++) {
maskdata->DataBufferLoQ0[pos1] = maskdata->DataBufferLoQ0[pos1 + UPDATE/2];
maskdata->DataBufferHiQ0[pos1] = maskdata->DataBufferHiQ0[pos1 + UPDATE/2];
DataLoQ6[pos1] = (int16_t)(maskdata->DataBufferLoQ0[pos1] *
kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
DataHiQ6[pos1] = (int16_t)(maskdata->DataBufferHiQ0[pos1] *
kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
}
pos2 = (int16_t)(k * UPDATE / 2);
for (n = 0; n < UPDATE/2; n++, pos1++) {
maskdata->DataBufferLoQ0[pos1] = inLoQ0[QLOOKAHEAD + pos2];
maskdata->DataBufferHiQ0[pos1] = inHiQ0[pos2++];
DataLoQ6[pos1] = (int16_t)(maskdata->DataBufferLoQ0[pos1] *
kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
DataHiQ6[pos1] = (int16_t)(maskdata->DataBufferHiQ0[pos1] *
kWindowAutocorr[pos1] >> 15); // Q0*Q21>>15 = Q6
}
/* Get correlation coefficients */
/* The highest absolute value measured inside DataLo in the test set was 307.
For DataHi, the corresponding value was 160.
This means that it should be possible to represent the input values
to WebRtcSpl_AutoCorrelation() as Q6 values (since 307*2^6 =
19648). Of course, Q0 will also work, but due to the low energy in
DataLo and DataHi, the output autocorrelation will be more accurate
and mimic the floating point code better by being in as high a
Q-domain as possible.
*/
WebRtcIsacfix_AutocorrFix(corrloQQ,DataLoQ6,WINLEN, ORDERLO+1, &scale);
QdomLO = 12-scale; // QdomLO is the Q-domain of corrloQQ
sh_lo = WebRtcSpl_NormW32(corrloQQ[0]);
QdomLO += sh_lo;
for (ii=0; ii<ORDERLO+2; ii++) {
corrloQQ[ii] <<= sh_lo;
}
/* It was investigated whether it would be possible to use 16 bits for the
32-bit vector corrloQQ, but it did not work. */
WebRtcIsacfix_AutocorrFix(corrhiQQ,DataHiQ6,WINLEN, ORDERHI, &scale);
QdomHI = 12-scale; // QdomHI is the Q-domain of corrhiQQ
sh_hi = WebRtcSpl_NormW32(corrhiQQ[0]);
QdomHI += sh_hi;
for (ii=0; ii<ORDERHI+1; ii++) {
corrhiQQ[ii] <<= sh_hi;
}
/* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
/* Calculate corrlo2[0] = tmpQQlo * corrlo[0] - 2.0 * aa * corrlo[1]; */
// `corrlo2QQ` in Q(QdomLO-5).
corrlo2QQ[0] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[0]) >> 1) -
(WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, corrloQQ[1]) >> 2);
/* Calculate corrlo2[n] = tmpQQlo * corrlo[n] - aa * (corrlo[n-1] + corrlo[n+1]); */
for (n = 1; n <= ORDERLO; n++) {
tmp32 = (corrloQQ[n - 1] >> 1) + (corrloQQ[n + 1] >> 1); // Q(QdomLO-1).
corrlo2QQ[n] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[n]) >> 1) -
(WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, tmp32) >> 2);
}
QdomLO -= 5;
/* Calculate corrhi[n] = tmpQQhi * corrhi[n]; */
for (n = 0; n <= ORDERHI; n++) {
corrhiQQ[n] = WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQhi, corrhiQQ[n]); // Q(12+QdomHI-16) = Q(QdomHI-4)
}
QdomHI -= 4;
/* add white noise floor */
/* corrlo2QQ is in Q(QdomLO) and corrhiQQ is in Q(QdomHI) */
/* Calculate corrlo2[0] += 9.5367431640625e-7; and
corrhi[0] += 9.5367431640625e-7, where the constant is 1/2^20 */
tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t) 1, QdomLO-20);
corrlo2QQ[0] += tmp32;
tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t) 1, QdomHI-20);
corrhiQQ[0] += tmp32;
/* corrlo2QQ is in Q(QdomLO) and corrhiQQ is in Q(QdomHI) before the following
code segment, where we want to make sure we get a 1-bit margin */
for (n = 0; n <= ORDERLO; n++) {
corrlo2QQ[n] >>= 1; // Make sure we have a 1-bit margin.
}
QdomLO -= 1; // Now, corrlo2QQ is in Q(QdomLO), with a 1-bit margin
for (n = 0; n <= ORDERHI; n++) {
corrhiQQ[n] >>= 1; // Make sure we have a 1-bit margin.
}
QdomHI -= 1; // Now, corrhiQQ is in Q(QdomHI), with a 1-bit margin
newQdomLO = QdomLO;
for (n = 0; n <= ORDERLO; n++) {
int32_t tmp, tmpB, tmpCorr;
int16_t alpha=328; //0.01 in Q15
int16_t beta=324; //(1-0.01)*0.01=0.0099 in Q15
int16_t gamma=32440; //(1-0.01)=0.99 in Q15
if (maskdata->CorrBufLoQQ[n] != 0) {
shMem=WebRtcSpl_NormW32(maskdata->CorrBufLoQQ[n]);
sh = QdomLO - maskdata->CorrBufLoQdom[n];
if (sh<=shMem) {
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], sh); // Get CorrBufLoQQ to same domain as corrlo2
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
} else if ((sh-shMem)<7){
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible
// Shift `alpha` the number of times required to get `tmp` in QdomLO.
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
} else {
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible
// Shift `alpha` up as much as possible without overflow (here by 6), rather
// than by the full number of shifts needed to get `tmp` into QdomLO; the
// remainder is compensated by shifting the correlation term down below.
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
tmpCorr = corrloQQ[n] >> (sh - shMem - 6);
tmp = tmp + tmpCorr;
maskdata->CorrBufLoQQ[n] = tmp;
newQdomLO = QdomLO-(sh-shMem-6);
maskdata->CorrBufLoQdom[n] = newQdomLO;
}
} else
tmp = 0;
tmp = tmp + corrlo2QQ[n];
maskdata->CorrBufLoQQ[n] = tmp;
maskdata->CorrBufLoQdom[n] = QdomLO;
tmp=WEBRTC_SPL_MUL_16_32_RSFT15(beta, tmp);
tmpB=WEBRTC_SPL_MUL_16_32_RSFT15(gamma, corrlo2QQ[n]);
corrlo2QQ[n] = tmp + tmpB;
}
if( newQdomLO!=QdomLO) {
for (n = 0; n <= ORDERLO; n++) {
if (maskdata->CorrBufLoQdom[n] != newQdomLO)
corrloQQ[n] >>= maskdata->CorrBufLoQdom[n] - newQdomLO;
}
QdomLO = newQdomLO;
}
newQdomHI = QdomHI;
for (n = 0; n <= ORDERHI; n++) {
int32_t tmp, tmpB, tmpCorr;
int16_t alpha=328; //0.01 in Q15
int16_t beta=324; //(1-0.01)*0.01=0.0099 in Q15
int16_t gamma=32440; //(1-0.01)=0.99 in Q15
if (maskdata->CorrBufHiQQ[n] != 0) {
shMem=WebRtcSpl_NormW32(maskdata->CorrBufHiQQ[n]);
sh = QdomHI - maskdata->CorrBufHiQdom[n];
if (sh<=shMem) {
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], sh); // Get CorrBufHiQQ to same domain as corrhi
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
tmpCorr = corrhiQQ[n];
tmp = tmp + tmpCorr;
maskdata->CorrBufHiQQ[n] = tmp;
maskdata->CorrBufHiQdom[n] = QdomHI;
} else if ((sh-shMem)<7) {
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
// Shift `alpha` the number of times required to get `tmp` in QdomHI.
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
tmpCorr = corrhiQQ[n];
tmp = tmp + tmpCorr;
maskdata->CorrBufHiQQ[n] = tmp;
maskdata->CorrBufHiQdom[n] = QdomHI;
} else {
tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
// Shift `alpha` up as much as possible without overflow (here by 6), rather
// than by the full number of shifts needed to get `tmp` into QdomHI; the
// remainder is compensated by shifting the correlation term down below.
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
tmpCorr = corrhiQQ[n] >> (sh - shMem - 6);
tmp = tmp + tmpCorr;
maskdata->CorrBufHiQQ[n] = tmp;
newQdomHI = QdomHI-(sh-shMem-6);
maskdata->CorrBufHiQdom[n] = newQdomHI;
}
} else {
tmp = corrhiQQ[n];
tmpCorr = tmp;
maskdata->CorrBufHiQQ[n] = tmp;
maskdata->CorrBufHiQdom[n] = QdomHI;
}
tmp=WEBRTC_SPL_MUL_16_32_RSFT15(beta, tmp);
tmpB=WEBRTC_SPL_MUL_16_32_RSFT15(gamma, tmpCorr);
corrhiQQ[n] = tmp + tmpB;
}
if( newQdomHI!=QdomHI) {
for (n = 0; n <= ORDERHI; n++) {
if (maskdata->CorrBufHiQdom[n] != newQdomHI)
corrhiQQ[n] >>= maskdata->CorrBufHiQdom[n] - newQdomHI;
}
QdomHI = newQdomHI;
}
stab=WebRtcSpl_LevinsonW32_JSK(corrlo2QQ, a_LOQ11, k_vecloQ15, ORDERLO);
if (stab<0) { // If unstable use lower order
a_LOQ11[0]=2048;
for (n = 1; n <= ORDERLO; n++) {
a_LOQ11[n]=0;
}
stab=WebRtcSpl_LevinsonW32_JSK(corrlo2QQ, a_LOQ11, k_vecloQ15, 8);
}
WebRtcSpl_LevinsonDurbin(corrhiQQ, a_HIQ12, k_vechiQ15, ORDERHI);
/* bandwidth expansion */
for (n = 1; n <= ORDERLO; n++) {
a_LOQ11[n] = (int16_t)((kPolyVecLo[n - 1] * a_LOQ11[n] + (1 << 14)) >>
15);
}
polyHI[0] = a_HIQ12[0];
for (n = 1; n <= ORDERHI; n++) {
a_HIQ12[n] = (int16_t)(((int32_t)(kPolyVecHi[n - 1] * a_HIQ12[n]) +
(1 << 14)) >> 15);
polyHI[n] = a_HIQ12[n];
}
/* Normalize the corrlo2 vector */
sh = WebRtcSpl_NormW32(corrlo2QQ[0]);
for (n = 0; n <= ORDERLO; n++) {
corrlo2QQ[n] <<= sh;
}
QdomLO += sh; /* Now, corrlo2QQ is still in Q(QdomLO) */
/* residual energy */
sh_lo = 31;
res_nrgQQ = WebRtcIsacfix_CalculateResidualEnergy(ORDERLO, QdomLO,
kShiftLowerBand, a_LOQ11, corrlo2QQ, &sh_lo);
/* Convert to reflection coefficients */
WebRtcSpl_AToK_JSK(a_LOQ11, ORDERLO, rcQ15_lo);
if (sh_lo & 0x0001) {
res_nrgQQ >>= 1;
sh_lo-=1;
}
if( res_nrgQQ > 0 )
{
sqrt_nrg=WebRtcSpl_Sqrt(res_nrgQQ);
/* add hearing threshold and compute the gain */
/* lo_coeff = varscale * S_N_R / (sqrt_nrg + varscale * H_T_H); */
tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1)
ssh = sh_lo >> 1; // sqrt_nrg is in Qssh.
sh = ssh - 14;
tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator)
tmp32a = varscaleQ14 * snrq; // Q24 (numerator)
sh = WebRtcSpl_NormW32(tmp32c);
shft = 16 - sh;
tmp16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32c, -shft); // Q(ssh-shft) (denominator)
tmp32b = WebRtcSpl_DivW32W16(tmp32a, tmp16a); // Q(24-ssh+shft)
sh = ssh-shft-7;
*gain_lo_hiQ17 = WEBRTC_SPL_SHIFT_W32(tmp32b, sh); // Gains in Q17
}
else
{
*gain_lo_hiQ17 = 100; // Gains in Q17
}
gain_lo_hiQ17++;
/* copy coefficients to output array */
for (n = 0; n < ORDERLO; n++) {
*lo_coeffQ15 = (int16_t) (rcQ15_lo[n]);
lo_coeffQ15++;
}
/* residual energy */
sh_hi = 31;
res_nrgQQ = WebRtcIsacfix_CalculateResidualEnergy(ORDERHI, QdomHI,
kShiftHigherBand, a_HIQ12, corrhiQQ, &sh_hi);
/* Convert to reflection coefficients */
WebRtcSpl_LpcToReflCoef(polyHI, ORDERHI, rcQ15_hi);
if (sh_hi & 0x0001) {
res_nrgQQ >>= 1;
sh_hi-=1;
}
if( res_nrgQQ > 0 )
{
sqrt_nrg=WebRtcSpl_Sqrt(res_nrgQQ);
/* add hearing threshold and compute the gain */
/* hi_coeff = varscale * S_N_R / (sqrt_nrg + varscale * H_T_H); */
tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1)
ssh = sh_hi >> 1; // `sqrt_nrg` is in Qssh.
sh = ssh - 14;
tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator)
tmp32a = varscaleQ14 * snrq; // Q24 (numerator)
sh = WebRtcSpl_NormW32(tmp32c);
shft = 16 - sh;
tmp16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32c, -shft); // Q(ssh-shft) (denominator)
tmp32b = WebRtcSpl_DivW32W16(tmp32a, tmp16a); // Q(24-ssh+shft)
sh = ssh-shft-7;
*gain_lo_hiQ17 = WEBRTC_SPL_SHIFT_W32(tmp32b, sh); // Gains in Q17
}
else
{
*gain_lo_hiQ17 = 100; // Gains in Q17
}
gain_lo_hiQ17++;
/* copy coefficients to output array */
for (n = 0; n < ORDERHI; n++) {
*hi_coeffQ15 = rcQ15_hi[n];
hi_coeffQ15++;
}
}
}
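
To summarize what WebRtcIsacfix_GetLpcCoef produces per call (the sizes follow directly from the SUBFRAMES loop above, and the gain expression restates the inline comments):

/* Per frame, the SUBFRAMES loop writes
 *   gain_lo_hiQ17 : 2 * SUBFRAMES gains in Q17 (lower band, then upper band);
 *   lo_coeffQ15   : ORDERLO * SUBFRAMES reflection coefficients in Q15;
 *   hi_coeffQ15   : ORDERHI * SUBFRAMES reflection coefficients in Q15;
 * with each gain following
 *   gain = varscale * SNR / (sqrt(res_nrg) + varscale * H_T_H),
 * where H_T_H is the -28 dB hearing-threshold constant defined earlier. */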

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_masking_model.h
*
* LPC functions
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
void WebRtcIsacfix_GetVars(const int16_t* input,
const int16_t* pitchGains_Q12,
uint32_t* oldEnergy,
int16_t* varscale);
void WebRtcIsacfix_GetLpcCoef(int16_t* inLoQ0,
int16_t* inHiQ0,
MaskFiltstr_enc* maskdata,
int16_t snrQ10,
const int16_t* pitchGains_Q12,
int32_t* gain_lo_hiQ17,
int16_t* lo_coeffQ15,
int16_t* hi_coeffQ15);
typedef int32_t (*CalculateResidualEnergy)(int lpc_order,
int32_t q_val_corr,
int q_val_polynomial,
int16_t* a_polynomial,
int32_t* corr_coeffs,
int* q_val_residual_energy);
extern CalculateResidualEnergy WebRtcIsacfix_CalculateResidualEnergy;
int32_t WebRtcIsacfix_CalculateResidualEnergyC(int lpc_order,
int32_t q_val_corr,
int q_val_polynomial,
int16_t* a_polynomial,
int32_t* corr_coeffs,
int* q_val_residual_energy);
#if defined(MIPS_DSP_R2_LE)
int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
int32_t q_val_corr,
int q_val_polynomial,
int16_t* a_polynomial,
int32_t* corr_coeffs,
int* q_val_residual_energy);
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_ */
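
The CalculateResidualEnergy declaration above is a function pointer so the plain C and MIPS DSPR2 builds can share call sites. The binding itself happens elsewhere in the codec and is not part of this diff; a sketch of the intended pattern (the init function name is illustrative):

#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"

/* Illustrative init hook: bind the function pointer once at codec start-up. */
static void InitResidualEnergyDispatch(void) {
  WebRtcIsacfix_CalculateResidualEnergy = WebRtcIsacfix_CalculateResidualEnergyC;
#if defined(MIPS_DSP_R2_LE)
  WebRtcIsacfix_CalculateResidualEnergy = WebRtcIsacfix_CalculateResidualEnergyMIPS;
#endif
}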

View File

@ -0,0 +1,237 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
// MIPS DSPR2 optimization for function WebRtcIsacfix_CalculateResidualEnergy
// Bit-exact with WebRtcIsacfix_CalculateResidualEnergyC from file
// lpc_masking_model.c
int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
int32_t q_val_corr,
int q_val_polynomial,
int16_t* a_polynomial,
int32_t* corr_coeffs,
int* q_val_residual_energy) {
int i = 0, j = 0;
int shift_internal = 0, shift_norm = 0;
int32_t tmp32 = 0, word32_high = 0, word32_low = 0, residual_energy = 0;
int32_t tmp_corr_c = corr_coeffs[0];
int16_t* tmp_a_poly = &a_polynomial[0];
int32_t sum64_hi = 0;
int32_t sum64_lo = 0;
for (j = 0; j <= lpc_order; j++) {
// For the case of i == 0:
// residual_energy +=
// a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i];
int32_t tmp2, tmp3;
int16_t sign_1;
int16_t sign_2;
int16_t sign_3;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"lh %[tmp2], 0(%[tmp_a_poly]) \n\t"
"mul %[tmp32], %[tmp2], %[tmp2] \n\t"
"addiu %[tmp_a_poly], %[tmp_a_poly], 2 \n\t"
"sra %[sign_2], %[sum64_hi], 31 \n\t"
"mult $ac0, %[tmp32], %[tmp_corr_c] \n\t"
"shilov $ac0, %[shift_internal] \n\t"
"mfhi %[tmp2], $ac0 \n\t"
"mflo %[tmp3], $ac0 \n\t"
"sra %[sign_1], %[tmp2], 31 \n\t"
"xor %[sign_3], %[sign_1], %[sign_2] \n\t"
".set pop \n\t"
: [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
[tmp_a_poly] "+r" (tmp_a_poly), [sign_1] "=&r" (sign_1),
[sign_3] "=&r" (sign_3), [sign_2] "=&r" (sign_2),
[sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
: [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
: "hi", "lo", "memory"
);
if (sign_3 != 0) {
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
"addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
".set pop \n\t"
: [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
: [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
: "hi", "lo", "memory"
);
} else {
if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
// Shift right for overflow.
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"addiu %[shift_internal], %[shift_internal], 1 \n\t"
"prepend %[sum64_lo], %[sum64_hi], 1 \n\t"
"sra %[sum64_hi], %[sum64_hi], 1 \n\t"
"prepend %[tmp3], %[tmp2], 1 \n\t"
"sra %[tmp2], %[tmp2], 1 \n\t"
"addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
"addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
".set pop \n\t"
: [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
[shift_internal] "+r" (shift_internal),
[sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
:
: "hi", "lo", "memory"
);
} else {
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
"addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
".set pop \n\t"
: [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
: [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
: "hi", "lo", "memory"
);
}
}
}
for (i = 1; i <= lpc_order; i++) {
tmp_corr_c = corr_coeffs[i];
int16_t* tmp_a_poly_j = &a_polynomial[i];
int16_t* tmp_a_poly_j_i = &a_polynomial[0];
for (j = i; j <= lpc_order; j++) {
// For the case of i = 1 .. lpc_order:
// residual_energy +=
// a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i] * 2;
int32_t tmp2, tmp3;
int16_t sign_1;
int16_t sign_2;
int16_t sign_3;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"lh %[tmp3], 0(%[tmp_a_poly_j]) \n\t"
"lh %[tmp2], 0(%[tmp_a_poly_j_i]) \n\t"
"addiu %[tmp_a_poly_j], %[tmp_a_poly_j], 2 \n\t"
"addiu %[tmp_a_poly_j_i], %[tmp_a_poly_j_i], 2 \n\t"
"mul %[tmp32], %[tmp3], %[tmp2] \n\t"
"sll %[tmp32], %[tmp32], 1 \n\t"
"mult $ac0, %[tmp32], %[tmp_corr_c] \n\t"
"shilov $ac0, %[shift_internal] \n\t"
"mfhi %[tmp2], $ac0 \n\t"
"mflo %[tmp3], $ac0 \n\t"
"sra %[sign_1], %[tmp2], 31 \n\t"
"sra %[sign_2], %[sum64_hi], 31 \n\t"
"xor %[sign_3], %[sign_1], %[sign_2] \n\t"
".set pop \n\t"
: [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
[tmp_a_poly_j] "+r" (tmp_a_poly_j), [sign_1] "=&r" (sign_1),
[tmp_a_poly_j_i] "+r" (tmp_a_poly_j_i), [sign_2] "=&r" (sign_2),
[sign_3] "=&r" (sign_3), [sum64_hi] "+r" (sum64_hi),
[sum64_lo] "+r" (sum64_lo)
: [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
: "hi", "lo", "memory"
);
if (sign_3 != 0) {
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
"addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
".set pop \n\t"
: [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3), [sum64_hi] "+r" (sum64_hi),
[sum64_lo] "+r" (sum64_lo)
:
:"memory"
);
} else {
// Test overflow and sum the result.
if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
// Shift right for overflow.
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"addiu %[shift_internal], %[shift_internal], 1 \n\t"
"prepend %[sum64_lo], %[sum64_hi], 1 \n\t"
"sra %[sum64_hi], %[sum64_hi], 1 \n\t"
"prepend %[tmp3], %[tmp2], 1 \n\t"
"sra %[tmp2], %[tmp2], 1 \n\t"
"addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
"addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
".set pop \n\t"
: [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
[shift_internal] "+r" (shift_internal),
[sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
:
: "hi", "lo", "memory"
);
} else {
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
"addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
".set pop \n\t"
: [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
[sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
:
: "hi", "lo", "memory"
);
}
}
}
}
word32_high = sum64_hi;
word32_low = sum64_lo;
// Calculate the value of shifting (shift_norm) for the 64-bit sum.
if (word32_high != 0) {
shift_norm = 32 - WebRtcSpl_NormW32(word32_high);
int tmp1;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"srl %[residual_energy], %[sum64_lo], %[shift_norm] \n\t"
"li %[tmp1], 32 \n\t"
"subu %[tmp1], %[tmp1], %[shift_norm] \n\t"
"sll %[tmp1], %[sum64_hi], %[tmp1] \n\t"
"or %[residual_energy], %[residual_energy], %[tmp1] \n\t"
".set pop \n\t"
: [residual_energy] "=&r" (residual_energy), [tmp1]"=&r"(tmp1),
[sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
: [shift_norm] "r" (shift_norm)
: "memory"
);
} else {
if ((word32_low & 0x80000000) != 0) {
shift_norm = 1;
residual_energy = (uint32_t)word32_low >> 1;
} else {
shift_norm = WebRtcSpl_NormW32(word32_low);
residual_energy = word32_low << shift_norm;
shift_norm = -shift_norm;
}
}
// Q(q_val_polynomial * 2) * Q(q_val_corr) >> shift_internal >> shift_norm
// = Q(q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2)
*q_val_residual_energy =
q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2;
return residual_energy;
}

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
#include "system_wrappers/include/cpu_features_wrapper.h"
#include "test/gtest.h"
class LpcMaskingModelTest : public ::testing::Test {
protected:
// Pass a function pointer to the Tester function.
void CalculateResidualEnergyTester(
CalculateResidualEnergy CalculateResidualEnergyFunction) {
const int kIntOrder = 10;
const int32_t kInt32QDomain = 5;
const int kIntShift = 11;
int16_t a[kIntOrder + 1] = {32760, 122, 7, 0, -32760, -3958,
-48, 18745, 498, 9, 23456};
int32_t corr[kIntOrder + 1] = {11443647, -27495, 0, 98745, -11443600, 1,
1, 498, 9, 888, 23456};
int q_shift_residual = 0;
int32_t residual_energy = 0;
// Test the code path where (residual_energy >= 0x10000).
residual_energy = CalculateResidualEnergyFunction(
kIntOrder, kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
EXPECT_EQ(1789023310, residual_energy);
EXPECT_EQ(2, q_shift_residual);
// Test the code path where (residual_energy < 0x10000)
// and ((energy & 0x8000) != 0).
for (int i = 0; i < kIntOrder + 1; i++) {
a[i] = 24575 >> i;
corr[i] = i;
}
residual_energy = CalculateResidualEnergyFunction(
kIntOrder, kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
EXPECT_EQ(1595279092, residual_energy);
EXPECT_EQ(26, q_shift_residual);
// Test the code path where (residual_energy <= 0x7fff).
for (int i = 0; i < kIntOrder + 1; i++) {
a[i] = 2457 >> i;
}
residual_energy = CalculateResidualEnergyFunction(
kIntOrder, kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
EXPECT_EQ(2029266944, residual_energy);
EXPECT_EQ(33, q_shift_residual);
}
};
TEST_F(LpcMaskingModelTest, CalculateResidualEnergyTest) {
CalculateResidualEnergyTester(WebRtcIsacfix_CalculateResidualEnergyC);
}
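
Because the tester takes the implementation as a function pointer, covering the DSPR2 variant on MIPS follows the same pattern. Whether the restored file already contains such a test beyond the lines shown here is not visible in this view, so the guarded case below is a sketch:

#if defined(MIPS_DSP_R2_LE)
TEST_F(LpcMaskingModelTest, CalculateResidualEnergyMipsTest) {
  CalculateResidualEnergyTester(WebRtcIsacfix_CalculateResidualEnergyMIPS);
}
#endif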

File diff suppressed because it is too large

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* lpc_tables.h
*
* header file for coding tables for the LPC coefficients
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_
#include <stdint.h>
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
/* indices of KLT coefficients used */
extern const uint16_t WebRtcIsacfix_kSelIndGain[12];
extern const uint16_t WebRtcIsacfix_kSelIndShape[108];
/* cdf array for model indicator */
extern const uint16_t WebRtcIsacfix_kModelCdf[KLT_NUM_MODELS + 1];
/* pointer to cdf array for model indicator */
extern const uint16_t* WebRtcIsacfix_kModelCdfPtr[1];
/* initial cdf index for decoder of model indicator */
extern const uint16_t WebRtcIsacfix_kModelInitIndex[1];
/* offset to go from rounded value to quantization index */
extern const int16_t WebRtcIsacfix_kQuantMinGain[12];
extern const int16_t WebRtcIsacfix_kQuantMinShape[108];
/* maximum quantization index */
extern const uint16_t WebRtcIsacfix_kMaxIndGain[12];
extern const uint16_t WebRtcIsacfix_kMaxIndShape[108];
/* index offset */
extern const uint16_t WebRtcIsacfix_kOffsetGain[KLT_NUM_MODELS][12];
extern const uint16_t WebRtcIsacfix_kOffsetShape[KLT_NUM_MODELS][108];
/* initial cdf index for KLT coefficients */
extern const uint16_t WebRtcIsacfix_kInitIndexGain[KLT_NUM_MODELS][12];
extern const uint16_t WebRtcIsacfix_kInitIndexShape[KLT_NUM_MODELS][108];
/* offsets for quantizer representation levels */
extern const uint16_t WebRtcIsacfix_kOfLevelsGain[3];
extern const uint16_t WebRtcIsacfix_kOfLevelsShape[3];
/* quantizer representation levels */
extern const int32_t WebRtcIsacfix_kLevelsGainQ17[1176];
extern const int16_t WebRtcIsacfix_kLevelsShapeQ10[1735];
/* cdf tables for quantizer indices */
extern const uint16_t WebRtcIsacfix_kCdfGain[1212];
extern const uint16_t WebRtcIsacfix_kCdfShape[2059];
/* pointers to cdf tables for quantizer indices */
extern const uint16_t* WebRtcIsacfix_kCdfGainPtr[KLT_NUM_MODELS][12];
extern const uint16_t* WebRtcIsacfix_kCdfShapePtr[KLT_NUM_MODELS][108];
/* code length for all coefficients using different models */
extern const int16_t WebRtcIsacfix_kCodeLenGainQ11[392];
extern const int16_t WebRtcIsacfix_kCodeLenShapeQ11[578];
/* left KLT transforms */
extern const int16_t WebRtcIsacfix_kT1GainQ15[KLT_NUM_MODELS][4];
extern const int16_t WebRtcIsacfix_kT1ShapeQ15[KLT_NUM_MODELS][324];
/* right KLT transforms */
extern const int16_t WebRtcIsacfix_kT2GainQ15[KLT_NUM_MODELS][36];
extern const int16_t WebRtcIsacfix_kT2ShapeQ15[KLT_NUM_MODELS][36];
/* means of log gains and LAR coefficients */
extern const int16_t WebRtcIsacfix_kMeansGainQ8[KLT_NUM_MODELS][12];
extern const int32_t WebRtcIsacfix_kMeansShapeQ17[3][108];
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_ */


@ -0,0 +1,435 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "rtc_base/compile_assert_c.h"
/* log2[0.2, 0.5, 0.98] in Q8 */
static const int16_t kLogLagWinQ8[3] = {
-594, -256, -7
};
/* [1 -0.75 0.25] in Q12 */
static const int16_t kACoefQ12[3] = {
4096, -3072, 1024
};
int32_t WebRtcIsacfix_Log2Q8(uint32_t x) {
int32_t zeros;
int16_t frac;
zeros=WebRtcSpl_NormU32(x);
frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
/* log2(magn(i)) */
return ((31 - zeros) << 8) + frac;
}
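/* Editor's note: illustrative self-check, not part of the original iSAC
 * sources (the helper name below is hypothetical). WebRtcIsacfix_Log2Q8()
 * returns log2(x) in Q8, i.e. roughly log2(x) * 256: the position of the
 * leading bit gives the integer part and a truncated linear mantissa the
 * fraction, so the result is exact for powers of two. */
static __inline int Log2Q8SanityCheck(void) {
  return WebRtcIsacfix_Log2Q8(1) == 0 &&             /* log2(1)    = 0.0  -> 0    */
         WebRtcIsacfix_Log2Q8(256) == 8 * 256 &&     /* log2(256)  = 8.0  -> 2048 */
         WebRtcIsacfix_Log2Q8(1u << 20) == 20 * 256; /* log2(2^20) = 20.0 -> 5120 */
}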
static __inline int16_t Exp2Q10(int16_t x) { // Both in and out in Q10
int16_t tmp16_1, tmp16_2;
tmp16_2=(int16_t)(0x0400|(x&0x03FF));
tmp16_1 = -(x >> 10);
if(tmp16_1>0)
return tmp16_2 >> tmp16_1;
else
return tmp16_2 << -tmp16_1;
}
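/* Editor's note: illustrative self-check, not part of the original iSAC
 * sources (the helper name below is hypothetical). Exp2Q10() approximates
 * 2^(x/1024) in Q10, using the high bits of x as the exponent and the low
 * 10 bits as a linear mantissa, so it is exact for whole powers of two. */
static __inline int Exp2Q10SanityCheck(void) {
  return Exp2Q10(0) == 1024 &&      /* 2^0.0  = 1.0 -> 1024 in Q10 */
         Exp2Q10(1024) == 2048 &&   /* 2^1.0  = 2.0 -> 2048 in Q10 */
         Exp2Q10(-1024) == 512;     /* 2^-1.0 = 0.5 ->  512 in Q10 */
}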
/* 1D parabolic interpolation. All input and output values are in Q8. */
static __inline void Intrp1DQ8(int32_t *x, int32_t *fx, int32_t *y, int32_t *fy) {
int16_t sign1=1, sign2=1;
int32_t r32, q32, t32, nom32, den32;
int16_t t16, tmp16, tmp16_1;
if ((fx[0]>0) && (fx[2]>0)) {
r32=fx[1]-fx[2];
q32=fx[0]-fx[1];
nom32=q32+r32;
den32 = (q32 - r32) * 2;
if (nom32<0)
sign1=-1;
if (den32<0)
sign2=-1;
/* t = (q32+r32)/(2*(q32-r32)) = (fx[0]-fx[1] + fx[1]-fx[2])/(2 * fx[0]-fx[1] - (fx[1]-fx[2]))*/
/* (Signs are removed because WebRtcSpl_DivResultInQ31 can't handle negative numbers) */
/* t in Q31, without signs */
t32 = WebRtcSpl_DivResultInQ31(nom32 * sign1, den32 * sign2);
t16 = (int16_t)(t32 >> 23); /* Q8 */
t16=t16*sign1*sign2; /* t in Q8 with signs */
*y = x[0]+t16; /* Q8 */
// *y = x[1]+t16; /* Q8 */
/* The following code calculates fy in three steps */
/* fy = 0.5 * t * (t-1) * fx[0] + (1-t*t) * fx[1] + 0.5 * t * (t+1) * fx[2]; */
/* Part I: 0.5 * t * (t-1) * fx[0] */
tmp16_1 = (int16_t)(t16 * t16); /* Q8*Q8=Q16 */
tmp16_1 >>= 2; /* Q16>>2 = Q14 */
t16 <<= 6; /* Q8<<6 = Q14 */
tmp16 = tmp16_1-t16;
*fy = WEBRTC_SPL_MUL_16_32_RSFT15(tmp16, fx[0]); /* (Q14 * Q8 >>15)/2 = Q8 */
/* Part II: (1-t*t) * fx[1] */
tmp16 = 16384-tmp16_1; /* 1 in Q14 - Q14 */
*fy += WEBRTC_SPL_MUL_16_32_RSFT14(tmp16, fx[1]);/* Q14 * Q8 >> 14 = Q8 */
/* Part III: 0.5 * t * (t+1) * fx[2] */
tmp16 = tmp16_1+t16;
*fy += WEBRTC_SPL_MUL_16_32_RSFT15(tmp16, fx[2]);/* (Q14 * Q8 >>15)/2 = Q8 */
} else {
*y = x[0];
*fy= fx[1];
}
}
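/* Editor's note: a hedged floating-point restatement of the fixed-point
 * interpolation above, for illustration only (not part of the original
 * sources; the helper name is hypothetical). It mirrors the commented
 * formulas: with q = fx[0]-fx[1] and r = fx[1]-fx[2], the parabola through
 * the three points peaks at offset t = (q+r) / (2*(q-r)). */
static __inline void Intrp1DReference(const double* x, const double* fx,
                                      double* y, double* fy) {
  double q = fx[0] - fx[1];
  double r = fx[1] - fx[2];
  if (fx[0] > 0.0 && fx[2] > 0.0 && q != r) {
    double t = (q + r) / (2.0 * (q - r));
    *y = x[0] + t;
    *fy = 0.5 * t * (t - 1.0) * fx[0] + (1.0 - t * t) * fx[1] +
          0.5 * t * (t + 1.0) * fx[2];
  } else {
    *y = x[0];
    *fy = fx[1];
  }
}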
static void FindFour32(int32_t *in, int16_t length, int16_t *bestind)
{
int32_t best[4]= {-100, -100, -100, -100};
int16_t k;
for (k=0; k<length; k++) {
if (in[k] > best[3]) {
if (in[k] > best[2]) {
if (in[k] > best[1]) {
if (in[k] > best[0]) { // The Best
best[3] = best[2];
bestind[3] = bestind[2];
best[2] = best[1];
bestind[2] = bestind[1];
best[1] = best[0];
bestind[1] = bestind[0];
best[0] = in[k];
bestind[0] = k;
} else { // 2nd best
best[3] = best[2];
bestind[3] = bestind[2];
best[2] = best[1];
bestind[2] = bestind[1];
best[1] = in[k];
bestind[1] = k;
}
} else { // 3rd best
best[3] = best[2];
bestind[3] = bestind[2];
best[2] = in[k];
bestind[2] = k;
}
} else { // 4th best
best[3] = in[k];
bestind[3] = k;
}
}
}
}
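/* Editor's note: worked example, not part of the original sources (the
 * helper name is hypothetical). FindFour32() keeps the four largest values
 * of `in` in descending order together with their indices. */
static __inline int FindFour32Example(void) {
  int32_t in[5] = {3, 9, 1, 7, 5};
  int16_t bestind[4] = {0, 0, 0, 0};
  FindFour32(in, 5, bestind);
  /* The indices of 9, 7, 5 and 3, in descending value order. */
  return bestind[0] == 1 && bestind[1] == 3 &&
         bestind[2] == 4 && bestind[3] == 0;
}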
extern void WebRtcIsacfix_PCorr2Q32(const int16_t *in, int32_t *logcorQ8);
void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
PitchAnalysisStruct *State,
int16_t *lagsQ7 /* Q7 */
)
{
int16_t buf_dec16[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2];
int32_t *crrvecQ8_1,*crrvecQ8_2;
int32_t cv1q[PITCH_LAG_SPAN2+2],cv2q[PITCH_LAG_SPAN2+2], peakvq[PITCH_LAG_SPAN2+2];
int k;
int16_t peaks_indq;
int16_t peakiq[PITCH_LAG_SPAN2];
int32_t corr;
int32_t corr32, corr_max32, corr_max_o32;
int16_t npkq;
int16_t best4q[4]={0,0,0,0};
int32_t xq[3],yq[1],fyq[1];
int32_t *fxq;
int32_t best_lag1q, best_lag2q;
int32_t tmp32a,tmp32b,lag32,ratq;
int16_t start;
int16_t oldgQ12, tmp16a, tmp16b, gain_bias16,tmp16c, tmp16d, bias16;
int32_t tmp32c,tmp32d, tmp32e;
int16_t old_lagQ;
int32_t old_lagQ8;
int32_t lagsQ8[4];
old_lagQ = State->PFstr_wght.oldlagQ7; // Q7
old_lagQ8 = old_lagQ << 1; // Q8
oldgQ12= State->PFstr_wght.oldgainQ12;
crrvecQ8_1=&cv1q[1];
crrvecQ8_2=&cv2q[1];
/* copy old values from state buffer */
memcpy(buf_dec16, State->dec_buffer16, sizeof(State->dec_buffer16));
/* decimation; put result after the old values */
WebRtcIsacfix_DecimateAllpass32(in, State->decimator_state32, PITCH_FRAME_LEN,
&buf_dec16[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2]);
/* low-pass filtering */
start= PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2;
WebRtcSpl_FilterARFastQ12(&buf_dec16[start],&buf_dec16[start],(int16_t*)kACoefQ12,3, PITCH_FRAME_LEN/2);
/* copy end part back into state buffer */
for (k = 0; k < (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2); k++)
State->dec_buffer16[k] = buf_dec16[k+PITCH_FRAME_LEN/2];
/* compute correlation for first and second half of the frame */
WebRtcIsacfix_PCorr2Q32(buf_dec16, crrvecQ8_1);
WebRtcIsacfix_PCorr2Q32(buf_dec16 + PITCH_CORR_STEP2, crrvecQ8_2);
/* bias towards pitch lag of previous frame */
tmp32a = WebRtcIsacfix_Log2Q8((uint32_t) old_lagQ8) - 2304;
// log2(0.5*oldlag) in Q8
tmp32b = oldgQ12 * oldgQ12 >> 10; // Q12 & * 4.0;
gain_bias16 = (int16_t) tmp32b; //Q12
if (gain_bias16 > 3276) gain_bias16 = 3276; // 0.8 in Q12
for (k = 0; k < PITCH_LAG_SPAN2; k++)
{
if (crrvecQ8_1[k]>0) {
tmp32b = WebRtcIsacfix_Log2Q8((uint32_t) (k + (PITCH_MIN_LAG/2-2)));
tmp16a = (int16_t) (tmp32b - tmp32a); // Q8 & fabs(ratio)<4
tmp32c = tmp16a * tmp16a >> 6; // Q10
tmp16b = (int16_t) tmp32c; // Q10 & <8
tmp32d = tmp16b * 177 >> 8; // mult with ln2 in Q8
tmp16c = (int16_t) tmp32d; // Q10 & <4
tmp16d = Exp2Q10((int16_t) -tmp16c); //Q10
tmp32c = gain_bias16 * tmp16d >> 13; // Q10 & * 0.5
bias16 = (int16_t) (1024 + tmp32c); // Q10
tmp32b = WebRtcIsacfix_Log2Q8((uint32_t)bias16) - 2560;
// Q10 in -> Q8 out with 10*2^8 offset
crrvecQ8_1[k] += tmp32b ; // -10*2^8 offset
}
}
/* taper correlation functions */
for (k = 0; k < 3; k++) {
crrvecQ8_1[k] += kLogLagWinQ8[k];
crrvecQ8_2[k] += kLogLagWinQ8[k];
crrvecQ8_1[PITCH_LAG_SPAN2-1-k] += kLogLagWinQ8[k];
crrvecQ8_2[PITCH_LAG_SPAN2-1-k] += kLogLagWinQ8[k];
}
/* Make zeropadded corr vectors */
cv1q[0]=0;
cv2q[0]=0;
cv1q[PITCH_LAG_SPAN2+1]=0;
cv2q[PITCH_LAG_SPAN2+1]=0;
corr_max32 = 0;
for (k = 1; k <= PITCH_LAG_SPAN2; k++)
{
corr32=crrvecQ8_1[k-1];
if (corr32 > corr_max32)
corr_max32 = corr32;
corr32=crrvecQ8_2[k-1];
corr32 += -4; // Compensate for later (log2(0.99))
if (corr32 > corr_max32)
corr_max32 = corr32;
}
/* threshold value to qualify as a peak */
// corr_max32 += -726; // log(0.14)/log(2.0) in Q8
corr_max32 += -1000; // Approx. log2(0.067) in Q8 (stricter than log2(0.14) = -726 above)
corr_max_o32 = corr_max32;
/* find peaks in corr1 */
peaks_indq = 0;
for (k = 1; k <= PITCH_LAG_SPAN2; k++)
{
corr32=cv1q[k];
if (corr32>corr_max32) { // Disregard small peaks
if ((corr32>=cv1q[k-1]) && (corr32>cv1q[k+1])) { // Peak?
peakvq[peaks_indq] = corr32;
peakiq[peaks_indq++] = k;
}
}
}
/* find highest interpolated peak */
corr_max32=0;
best_lag1q =0;
if (peaks_indq > 0) {
FindFour32(peakvq, (int16_t) peaks_indq, best4q);
npkq = WEBRTC_SPL_MIN(peaks_indq, 4);
for (k=0;k<npkq;k++) {
lag32 = peakiq[best4q[k]];
fxq = &cv1q[peakiq[best4q[k]]-1];
xq[0]= lag32;
xq[0] <<= 8;
Intrp1DQ8(xq, fxq, yq, fyq);
tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
/* Bias towards short lags */
/* log(pow(0.8, log(2.0 * *y )))/log(2.0) */
tmp32b = (int16_t)tmp32a * -42 >> 8;
tmp32c= tmp32b + 256;
*fyq += tmp32c;
if (*fyq > corr_max32) {
corr_max32 = *fyq;
best_lag1q = *yq;
}
}
tmp32b = (best_lag1q - OFFSET_Q8) * 2;
lagsQ8[0] = tmp32b + PITCH_MIN_LAG_Q8;
lagsQ8[1] = lagsQ8[0];
} else {
lagsQ8[0] = old_lagQ8;
lagsQ8[1] = lagsQ8[0];
}
/* Bias towards constant pitch */
tmp32a = lagsQ8[0] - PITCH_MIN_LAG_Q8;
ratq = (tmp32a >> 1) + OFFSET_Q8;
for (k = 1; k <= PITCH_LAG_SPAN2; k++)
{
tmp32a = k << 7; // 0.5*k Q8
tmp32b = tmp32a * 2 - ratq; // Q8
tmp32c = (int16_t)tmp32b * (int16_t)tmp32b >> 8; // Q8
tmp32b = tmp32c + (ratq >> 1);
// (k-r)^2 + 0.5 * r Q8
tmp32c = WebRtcIsacfix_Log2Q8((uint32_t)tmp32a) - 2048;
// offset 8*2^8 , log2(0.5*k) Q8
tmp32d = WebRtcIsacfix_Log2Q8((uint32_t)tmp32b) - 2048;
// offset 8*2^8 , log2(0.5*k) Q8
tmp32e = tmp32c - tmp32d;
cv2q[k] += tmp32e >> 1;
}
/* find peaks in corr2 */
corr_max32 = corr_max_o32;
peaks_indq = 0;
for (k = 1; k <= PITCH_LAG_SPAN2; k++)
{
corr=cv2q[k];
if (corr>corr_max32) { // Disregard small peaks
if ((corr>=cv2q[k-1]) && (corr>cv2q[k+1])) { // Peak?
peakvq[peaks_indq] = corr;
peakiq[peaks_indq++] = k;
}
}
}
/* find highest interpolated peak */
corr_max32 = 0;
best_lag2q =0;
if (peaks_indq > 0) {
FindFour32(peakvq, (int16_t) peaks_indq, best4q);
npkq = WEBRTC_SPL_MIN(peaks_indq, 4);
for (k=0;k<npkq;k++) {
lag32 = peakiq[best4q[k]];
fxq = &cv2q[peakiq[best4q[k]]-1];
xq[0]= lag32;
xq[0] <<= 8;
Intrp1DQ8(xq, fxq, yq, fyq);
/* Bias towards short lags */
/* log(pow(0.8, log(2.0f * *y )))/log(2.0f) */
tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
tmp32b = (int16_t)tmp32a * -82 >> 8;
tmp32c= tmp32b + 256;
*fyq += tmp32c;
if (*fyq > corr_max32) {
corr_max32 = *fyq;
best_lag2q = *yq;
}
}
tmp32b = (best_lag2q - OFFSET_Q8) * 2;
lagsQ8[2] = tmp32b + PITCH_MIN_LAG_Q8;
lagsQ8[3] = lagsQ8[2];
} else {
lagsQ8[2] = lagsQ8[0];
lagsQ8[3] = lagsQ8[0];
}
lagsQ7[0] = (int16_t)(lagsQ8[0] >> 1);
lagsQ7[1] = (int16_t)(lagsQ8[1] >> 1);
lagsQ7[2] = (int16_t)(lagsQ8[2] >> 1);
lagsQ7[3] = (int16_t)(lagsQ8[3] >> 1);
}
void WebRtcIsacfix_PitchAnalysis(const int16_t *inn, /* PITCH_FRAME_LEN samples */
int16_t *outQ0, /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
PitchAnalysisStruct *State,
int16_t *PitchLags_Q7,
int16_t *PitchGains_Q12)
{
int16_t inbufQ0[PITCH_FRAME_LEN + QLOOKAHEAD];
int16_t k;
/* initial pitch estimate */
WebRtcIsacfix_InitialPitch(inn, State, PitchLags_Q7);
/* Calculate gain */
WebRtcIsacfix_PitchFilterGains(inn, &(State->PFstr_wght), PitchLags_Q7, PitchGains_Q12);
/* concatenate previous input's end and current input */
for (k = 0; k < QLOOKAHEAD; k++) {
inbufQ0[k] = State->inbuf[k];
}
for (k = 0; k < PITCH_FRAME_LEN; k++) {
inbufQ0[k+QLOOKAHEAD] = (int16_t) inn[k];
}
/* lookahead pitch filtering for masking analysis */
WebRtcIsacfix_PitchFilter(inbufQ0, outQ0, &(State->PFstr), PitchLags_Q7,PitchGains_Q12, 2);
/* store last part of input */
for (k = 0; k < QLOOKAHEAD; k++) {
State->inbuf[k] = inbufQ0[k + PITCH_FRAME_LEN];
}
}


@ -0,0 +1,67 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* pitch_estimator.h
*
* Pitch functions
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
void WebRtcIsacfix_PitchAnalysis(
const int16_t* in, /* PITCH_FRAME_LEN samples */
int16_t* outQ0, /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
PitchAnalysisStruct* State,
int16_t* lagsQ7,
int16_t* PitchGains_Q12);
void WebRtcIsacfix_InitialPitch(const int16_t* in,
PitchAnalysisStruct* State,
int16_t* qlags);
void WebRtcIsacfix_PitchFilter(int16_t* indatFix,
int16_t* outdatQQ,
PitchFiltstr* pfp,
int16_t* lagsQ7,
int16_t* gainsQ12,
int16_t type);
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuff2,
const int16_t* coefficient,
int16_t* inputBuf,
int16_t* outputBuf,
int* index2);
void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
PitchFiltstr* pfp,
int16_t* lagsQ7,
int16_t* gainsQ12);
void WebRtcIsacfix_DecimateAllpass32(
const int16_t* in,
int32_t* state_in, /* array of size: 2*ALLPASSSECTIONS+1 */
int16_t N, /* number of input samples */
int16_t* out); /* array of size N/2 */
int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8);
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_ */


@ -0,0 +1,114 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
#ifdef WEBRTC_HAS_NEON
#include <arm_neon.h>
#endif
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "rtc_base/compile_assert_c.h"
extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
int16_t scaling,n,k;
int32_t csum32, lys, lcs;
int64_t ysum64;
const int32_t oneQ8 = 1 << 8; // 1.00 in Q8
const int16_t* x;
const int16_t* inptr;
x = in + PITCH_MAX_LAG / 2 + 2;
scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
PITCH_CORR_LEN2,
PITCH_CORR_LEN2);
ysum64 = 1;
csum32 = 0;
x = in + PITCH_MAX_LAG / 2 + 2;
for (n = 0; n < PITCH_CORR_LEN2; n++) {
ysum64 += in[n] * in[n] >> scaling; // Q0
csum32 += x[n] * in[n] >> scaling; // Q0
}
logcorQ8 += PITCH_LAG_SPAN2 - 1;
lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum64) >> 1; // Q8, sqrt(ysum)
if (csum32 > 0) {
lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2 in Q8
*logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
} else {
*logcorQ8 = oneQ8; // 1.00
}
} else {
*logcorQ8 = 0;
}
for (k = 1; k < PITCH_LAG_SPAN2; k++) {
inptr = &in[k];
ysum64 -= in[k - 1] * in[k - 1] >> scaling;
ysum64 += (int32_t)(in[PITCH_CORR_LEN2 + k - 1])
* in[PITCH_CORR_LEN2 + k - 1] >> scaling;
#ifdef WEBRTC_HAS_NEON
{
int32_t vbuff[4];
int32x4_t int_32x4_sum = vmovq_n_s32(0);
// Can't shift a Neon register to right with a non-constant shift value.
int32x4_t int_32x4_scale = vdupq_n_s32(-scaling);
// Assert a codition used in loop unrolling at compile-time.
RTC_COMPILE_ASSERT(PITCH_CORR_LEN2 %4 == 0);
for (n = 0; n < PITCH_CORR_LEN2; n += 4) {
int16x4_t int_16x4_x = vld1_s16(&x[n]);
int16x4_t int_16x4_in = vld1_s16(&inptr[n]);
int32x4_t int_32x4 = vmull_s16(int_16x4_x, int_16x4_in);
int_32x4 = vshlq_s32(int_32x4, int_32x4_scale);
int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
}
// Use a vector store to avoid the long stall from transferring data
// from vector to general registers.
vst1q_s32(vbuff, int_32x4_sum);
csum32 = vbuff[0] + vbuff[1];
csum32 += vbuff[2];
csum32 += vbuff[3];
}
#else
int64_t csum64_tmp = 0;
if(scaling == 0) {
for (n = 0; n < PITCH_CORR_LEN2; n++) {
csum64_tmp += (int32_t)(x[n]) * inptr[n];
}
} else {
for (n = 0; n < PITCH_CORR_LEN2; n++) {
csum64_tmp += ((int32_t)(x[n]) * inptr[n]) >> scaling;
}
}
csum32 = csum64_tmp;
#endif
logcorQ8--;
lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum64) >> 1; // Q8, sqrt(ysum)
if (csum32 > 0) {
lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
*logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
} else {
*logcorQ8 = oneQ8; // 1.00
}
} else {
*logcorQ8 = 0;
}
}
}
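/* Editor's note, summarizing the routine above (not part of the original
 * sources): for each lag the stored value is log2(csum / sqrt(ysum)) in Q8,
 * where csum is the cross-correlation at that lag and ysum the running
 * signal energy; it is floored at 1.00 (256 in Q8) when the ratio does not
 * exceed 2, and set to 0 when csum <= 0. */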


@ -0,0 +1,193 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "rtc_base/compile_assert_c.h"
extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
int16_t scaling,n,k;
int32_t ysum32,csum32, lys, lcs;
const int32_t oneQ8 = 1 << 8; // 1.00 in Q8
const int16_t* x;
const int16_t* inptr;
x = in + PITCH_MAX_LAG / 2 + 2;
scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
PITCH_CORR_LEN2,
PITCH_CORR_LEN2);
ysum32 = 1;
csum32 = 0;
x = in + PITCH_MAX_LAG / 2 + 2;
{
const int16_t* tmp_x = x;
const int16_t* tmp_in = in;
int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
n = PITCH_CORR_LEN2;
RTC_COMPILE_ASSERT(PITCH_CORR_LEN2 % 4 == 0);
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"1: \n\t"
"lh %[tmp1], 0(%[tmp_in]) \n\t"
"lh %[tmp2], 2(%[tmp_in]) \n\t"
"lh %[tmp3], 4(%[tmp_in]) \n\t"
"lh %[tmp4], 6(%[tmp_in]) \n\t"
"lh %[tmp5], 0(%[tmp_x]) \n\t"
"lh %[tmp6], 2(%[tmp_x]) \n\t"
"lh %[tmp7], 4(%[tmp_x]) \n\t"
"lh %[tmp8], 6(%[tmp_x]) \n\t"
"mul %[tmp5], %[tmp1], %[tmp5] \n\t"
"mul %[tmp1], %[tmp1], %[tmp1] \n\t"
"mul %[tmp6], %[tmp2], %[tmp6] \n\t"
"mul %[tmp2], %[tmp2], %[tmp2] \n\t"
"mul %[tmp7], %[tmp3], %[tmp7] \n\t"
"mul %[tmp3], %[tmp3], %[tmp3] \n\t"
"mul %[tmp8], %[tmp4], %[tmp8] \n\t"
"mul %[tmp4], %[tmp4], %[tmp4] \n\t"
"addiu %[n], %[n], -4 \n\t"
"srav %[tmp5], %[tmp5], %[scaling] \n\t"
"srav %[tmp1], %[tmp1], %[scaling] \n\t"
"srav %[tmp6], %[tmp6], %[scaling] \n\t"
"srav %[tmp2], %[tmp2], %[scaling] \n\t"
"srav %[tmp7], %[tmp7], %[scaling] \n\t"
"srav %[tmp3], %[tmp3], %[scaling] \n\t"
"srav %[tmp8], %[tmp8], %[scaling] \n\t"
"srav %[tmp4], %[tmp4], %[scaling] \n\t"
"addu %[ysum32], %[ysum32], %[tmp1] \n\t"
"addu %[csum32], %[csum32], %[tmp5] \n\t"
"addu %[ysum32], %[ysum32], %[tmp2] \n\t"
"addu %[csum32], %[csum32], %[tmp6] \n\t"
"addu %[ysum32], %[ysum32], %[tmp3] \n\t"
"addu %[csum32], %[csum32], %[tmp7] \n\t"
"addu %[ysum32], %[ysum32], %[tmp4] \n\t"
"addu %[csum32], %[csum32], %[tmp8] \n\t"
"addiu %[tmp_in], %[tmp_in], 8 \n\t"
"bgtz %[n], 1b \n\t"
" addiu %[tmp_x], %[tmp_x], 8 \n\t"
".set pop \n\t"
: [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
[tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
[tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [tmp_in] "+r" (tmp_in),
[ysum32] "+r" (ysum32), [tmp_x] "+r" (tmp_x), [csum32] "+r" (csum32),
[n] "+r" (n)
: [scaling] "r" (scaling)
: "memory", "hi", "lo"
);
}
logcorQ8 += PITCH_LAG_SPAN2 - 1;
lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
if (csum32 > 0) {
lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2 in Q8
*logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
} else {
*logcorQ8 = oneQ8; // 1.00
}
} else {
*logcorQ8 = 0;
}
for (k = 1; k < PITCH_LAG_SPAN2; k++) {
inptr = &in[k];
const int16_t* tmp_in1 = &in[k - 1];
const int16_t* tmp_in2 = &in[PITCH_CORR_LEN2 + k - 1];
const int16_t* tmp_x = x;
int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
n = PITCH_CORR_LEN2;
csum32 = 0;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"lh %[tmp1], 0(%[tmp_in1]) \n\t"
"lh %[tmp2], 0(%[tmp_in2]) \n\t"
"mul %[tmp1], %[tmp1], %[tmp1] \n\t"
"mul %[tmp2], %[tmp2], %[tmp2] \n\t"
"srav %[tmp1], %[tmp1], %[scaling] \n\t"
"srav %[tmp2], %[tmp2], %[scaling] \n\t"
"subu %[ysum32], %[ysum32], %[tmp1] \n\t"
"bnez %[scaling], 2f \n\t"
" addu %[ysum32], %[ysum32], %[tmp2] \n\t"
"1: \n\t"
"lh %[tmp1], 0(%[inptr]) \n\t"
"lh %[tmp2], 0(%[tmp_x]) \n\t"
"lh %[tmp3], 2(%[inptr]) \n\t"
"lh %[tmp4], 2(%[tmp_x]) \n\t"
"lh %[tmp5], 4(%[inptr]) \n\t"
"lh %[tmp6], 4(%[tmp_x]) \n\t"
"lh %[tmp7], 6(%[inptr]) \n\t"
"lh %[tmp8], 6(%[tmp_x]) \n\t"
"mul %[tmp1], %[tmp1], %[tmp2] \n\t"
"mul %[tmp2], %[tmp3], %[tmp4] \n\t"
"mul %[tmp3], %[tmp5], %[tmp6] \n\t"
"mul %[tmp4], %[tmp7], %[tmp8] \n\t"
"addiu %[n], %[n], -4 \n\t"
"addiu %[inptr], %[inptr], 8 \n\t"
"addiu %[tmp_x], %[tmp_x], 8 \n\t"
"addu %[csum32], %[csum32], %[tmp1] \n\t"
"addu %[csum32], %[csum32], %[tmp2] \n\t"
"addu %[csum32], %[csum32], %[tmp3] \n\t"
"bgtz %[n], 1b \n\t"
" addu %[csum32], %[csum32], %[tmp4] \n\t"
"b 3f \n\t"
" nop \n\t"
"2: \n\t"
"lh %[tmp1], 0(%[inptr]) \n\t"
"lh %[tmp2], 0(%[tmp_x]) \n\t"
"lh %[tmp3], 2(%[inptr]) \n\t"
"lh %[tmp4], 2(%[tmp_x]) \n\t"
"lh %[tmp5], 4(%[inptr]) \n\t"
"lh %[tmp6], 4(%[tmp_x]) \n\t"
"lh %[tmp7], 6(%[inptr]) \n\t"
"lh %[tmp8], 6(%[tmp_x]) \n\t"
"mul %[tmp1], %[tmp1], %[tmp2] \n\t"
"mul %[tmp2], %[tmp3], %[tmp4] \n\t"
"mul %[tmp3], %[tmp5], %[tmp6] \n\t"
"mul %[tmp4], %[tmp7], %[tmp8] \n\t"
"addiu %[n], %[n], -4 \n\t"
"addiu %[inptr], %[inptr], 8 \n\t"
"addiu %[tmp_x], %[tmp_x], 8 \n\t"
"srav %[tmp1], %[tmp1], %[scaling] \n\t"
"srav %[tmp2], %[tmp2], %[scaling] \n\t"
"srav %[tmp3], %[tmp3], %[scaling] \n\t"
"srav %[tmp4], %[tmp4], %[scaling] \n\t"
"addu %[csum32], %[csum32], %[tmp1] \n\t"
"addu %[csum32], %[csum32], %[tmp2] \n\t"
"addu %[csum32], %[csum32], %[tmp3] \n\t"
"bgtz %[n], 2b \n\t"
" addu %[csum32], %[csum32], %[tmp4] \n\t"
"3: \n\t"
".set pop \n\t"
: [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
[tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
[tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [inptr] "+r" (inptr),
[csum32] "+r" (csum32), [tmp_x] "+r" (tmp_x), [ysum32] "+r" (ysum32),
[n] "+r" (n)
: [tmp_in1] "r" (tmp_in1), [tmp_in2] "r" (tmp_in2),
[scaling] "r" (scaling)
: "memory", "hi", "lo"
);
logcorQ8--;
lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
if (csum32 > 0) {
lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
*logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
} else {
*logcorQ8 = oneQ8; // 1.00
}
} else {
*logcorQ8 = 0;
}
}
}


@ -0,0 +1,248 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
#include "rtc_base/compile_assert_c.h"
// Number of segments in a pitch subframe.
static const int kSegments = 5;
// A division factor of 1/5 in Q15.
static const int16_t kDivFactor = 6553;
// Interpolation coefficients; generated by design_pitch_filter.m.
// Coefficients are stored in Q14.
static const int16_t kIntrpCoef[PITCH_FRACS][PITCH_FRACORDER] = {
{-367, 1090, -2706, 9945, 10596, -3318, 1626, -781, 287},
{-325, 953, -2292, 7301, 12963, -3320, 1570, -743, 271},
{-240, 693, -1622, 4634, 14809, -2782, 1262, -587, 212},
{-125, 358, -817, 2144, 15982, -1668, 721, -329, 118},
{ 0, 0, -1, 1, 16380, 1, -1, 0, 0},
{ 118, -329, 721, -1668, 15982, 2144, -817, 358, -125},
{ 212, -587, 1262, -2782, 14809, 4634, -1622, 693, -240},
{ 271, -743, 1570, -3320, 12963, 7301, -2292, 953, -325}
};
static __inline size_t CalcLrIntQ(int16_t fixVal,
int16_t qDomain) {
int32_t roundVal = 1 << (qDomain - 1);
return (fixVal + roundVal) >> qDomain;
}
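/* Editor's note: worked example, not part of the original sources. With
 * qDomain == 7, a lag of 72.5 corresponds to fixVal == 9280 in Q7, and
 * CalcLrIntQ(9280, 7) == (9280 + 64) >> 7 == 73, i.e. the fixed-point value
 * is rounded to the nearest integer (halves round up). */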
void WebRtcIsacfix_PitchFilter(int16_t* indatQQ, // Q10 if type is 1 or 4,
// Q0 if type is 2.
int16_t* outdatQQ,
PitchFiltstr* pfp,
int16_t* lagsQ7,
int16_t* gainsQ12,
int16_t type) {
int k, ind, cnt;
int16_t sign = 1;
int16_t inystateQQ[PITCH_DAMPORDER];
int16_t ubufQQ[PITCH_INTBUFFSIZE + QLOOKAHEAD];
const int16_t Gain = 21299; // 1.3 in Q14
int16_t oldLagQ7;
int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12;
size_t frcQQ = 0;
int32_t indW32 = 0;
const int16_t* fracoeffQQ = NULL;
// Assumptions in ARM assembly for WebRtcIsacfix_PitchFilterCoreARM().
RTC_COMPILE_ASSERT(PITCH_FRACORDER == 9);
RTC_COMPILE_ASSERT(PITCH_DAMPORDER == 5);
// Set up buffer and states.
memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
memcpy(inystateQQ, pfp->ystateQQ, sizeof(inystateQQ));
// Get old lag and gain value from memory.
oldLagQ7 = pfp->oldlagQ7;
oldGainQ12 = pfp->oldgainQ12;
if (type == 4) {
sign = -1;
// Make output more periodic.
for (k = 0; k < PITCH_SUBFRAMES; k++) {
gainsQ12[k] = (int16_t)(gainsQ12[k] * Gain >> 14);
}
}
// No interpolation if pitch lag step is big.
if (((lagsQ7[0] * 3 >> 1) < oldLagQ7) || (lagsQ7[0] > (oldLagQ7 * 3 >> 1))) {
oldLagQ7 = lagsQ7[0];
oldGainQ12 = gainsQ12[0];
}
ind = 0;
for (k = 0; k < PITCH_SUBFRAMES; k++) {
// Calculate interpolation steps.
lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
lagdeltaQ7, kDivFactor, 15);
curLagQ7 = oldLagQ7;
gaindeltaQ12 = gainsQ12[k] - oldGainQ12;
gaindeltaQ12 = (int16_t)(gaindeltaQ12 * kDivFactor >> 15);
curGainQ12 = oldGainQ12;
oldLagQ7 = lagsQ7[k];
oldGainQ12 = gainsQ12[k];
// Each frame has 4 60-sample pitch subframes, and each subframe has 5
// 12-sample segments. Each segment needs to be processed with
// newly-updated parameters, so we break the pitch filtering into
// two for-loops (5 x 12) below. It's also why kDivFactor = 0.2 (in Q15).
for (cnt = 0; cnt < kSegments; cnt++) {
// Update parameters for each segment.
curGainQ12 += gaindeltaQ12;
curLagQ7 += lagdeltaQ7;
indW32 = CalcLrIntQ(curLagQ7, 7);
if (indW32 < PITCH_FRACORDER - 2) {
// WebRtcIsacfix_PitchFilterCore requires indW32 >= PITCH_FRACORDER -
// 2; otherwise, it will read from entries of ubufQQ that haven't been
// written yet. (This problem has only been seen in fuzzer tests, not
// in real life.) See Chromium bug 581901.
indW32 = PITCH_FRACORDER - 2;
}
frcQQ = ((indW32 << 7) + 64 - curLagQ7) >> 4;
if (frcQQ >= PITCH_FRACS) {
frcQQ = 0;
}
fracoeffQQ = kIntrpCoef[frcQQ];
// Pitch filtering.
WebRtcIsacfix_PitchFilterCore(PITCH_SUBFRAME_LEN / kSegments, curGainQ12,
indW32, sign, inystateQQ, ubufQQ, fracoeffQQ, indatQQ, outdatQQ, &ind);
}
}
// Export buffer and states.
memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
memcpy(pfp->ystateQQ, inystateQQ, sizeof(pfp->ystateQQ));
pfp->oldlagQ7 = oldLagQ7;
pfp->oldgainQ12 = oldGainQ12;
if (type == 2) {
// Filter look-ahead segment.
WebRtcIsacfix_PitchFilterCore(QLOOKAHEAD, curGainQ12, indW32, 1, inystateQQ,
ubufQQ, fracoeffQQ, indatQQ, outdatQQ, &ind);
}
}
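/* Editor's note, a worked check of the constants used above (not part of
 * the original sources): with 60-sample subframes, as the comment in the
 * loop states, PITCH_SUBFRAME_LEN / kSegments == 60 / 5 == 12 samples per
 * segment, and kDivFactor == 6553 ~= 0.2 * 32768, i.e. 1/kSegments in Q15,
 * so the lag and gain deltas are spread evenly across the five segments. */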
void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
PitchFiltstr* pfp,
int16_t* lagsQ7,
int16_t* gainsQ12) {
int k, n, m;
size_t ind, pos, pos3QQ;
int16_t ubufQQ[PITCH_INTBUFFSIZE];
int16_t oldLagQ7, lagdeltaQ7, curLagQ7;
const int16_t* fracoeffQQ = NULL;
int16_t scale;
int16_t cnt = 0, tmpW16;
size_t frcQQ, indW16 = 0;
int32_t tmpW32, tmp2W32, csum1QQ, esumxQQ;
// Set up buffer and states.
memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
oldLagQ7 = pfp->oldlagQ7;
// No interpolation if pitch lag step is big.
if (((lagsQ7[0] * 3 >> 1) < oldLagQ7) || (lagsQ7[0] > (oldLagQ7 * 3 >> 1))) {
oldLagQ7 = lagsQ7[0];
}
ind = 0;
pos = ind + PITCH_BUFFSIZE;
scale = 0;
for (k = 0; k < PITCH_SUBFRAMES; k++) {
// Calculate interpolation steps.
lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
lagdeltaQ7, kDivFactor, 15);
curLagQ7 = oldLagQ7;
oldLagQ7 = lagsQ7[k];
csum1QQ = 1;
esumxQQ = 1;
// Same as function WebRtcIsacfix_PitchFilter(), we break the pitch
// filtering into two for-loops (5 x 12) below.
for (cnt = 0; cnt < kSegments; cnt++) {
// Update parameters for each segment.
curLagQ7 += lagdeltaQ7;
indW16 = CalcLrIntQ(curLagQ7, 7);
frcQQ = ((indW16 << 7) + 64 - curLagQ7) >> 4;
if (frcQQ >= PITCH_FRACS) {
frcQQ = 0;
}
fracoeffQQ = kIntrpCoef[frcQQ];
pos3QQ = pos - (indW16 + 4);
for (n = 0; n < PITCH_SUBFRAME_LEN / kSegments; n++) {
// Filter to get fractional pitch.
tmpW32 = 0;
for (m = 0; m < PITCH_FRACORDER; m++) {
tmpW32 += ubufQQ[pos3QQ + m] * fracoeffQQ[m];
}
// Subtract from input and update buffer.
ubufQQ[pos] = indatQ0[ind];
tmp2W32 = WEBRTC_SPL_MUL_16_32_RSFT14(indatQ0[ind], tmpW32);
tmpW32 += 8192;
tmpW16 = tmpW32 >> 14;
tmpW32 = tmpW16 * tmpW16;
if ((tmp2W32 > 1073700000) || (csum1QQ > 1073700000) ||
(tmpW32 > 1073700000) || (esumxQQ > 1073700000)) { // 2^30
scale++;
csum1QQ >>= 1;
esumxQQ >>= 1;
}
csum1QQ += tmp2W32 >> scale;
esumxQQ += tmpW32 >> scale;
ind++;
pos++;
pos3QQ++;
}
}
if (csum1QQ < esumxQQ) {
tmp2W32 = WebRtcSpl_DivResultInQ31(csum1QQ, esumxQQ);
// Gain should be half the correlation.
tmpW32 = tmp2W32 >> 20;
} else {
tmpW32 = 4096;
}
gainsQ12[k] = (int16_t)WEBRTC_SPL_SAT(PITCH_MAX_GAIN_Q12, tmpW32, 0);
}
// Export buffer and states.
memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
pfp->oldlagQ7 = lagsQ7[PITCH_SUBFRAMES - 1];
pfp->oldgainQ12 = gainsQ12[PITCH_SUBFRAMES - 1];
}
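/* Editor's note (not part of the original sources): the gain computation
 * above follows the comment "Gain should be half the correlation".
 * WebRtcSpl_DivResultInQ31(csum1QQ, esumxQQ) returns csum/esum in Q31; a
 * plain Q31 -> Q12 conversion would shift right by 19, so the extra bit in
 * `tmp2W32 >> 20` halves the ratio before it is clamped to
 * [0, PITCH_MAX_GAIN_Q12]. */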


@ -0,0 +1,143 @@
@
@ Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
@
@ Use of this source code is governed by a BSD-style license
@ that can be found in the LICENSE file in the root of the source
@ tree. An additional intellectual property rights grant can be found
@ in the file PATENTS. All contributing project authors may
@ be found in the AUTHORS file in the root of the source tree.
@
@ Contains the core loop routine for the pitch filter function in iSAC,
@ optimized for ARMv7 platforms.
@
@ Output is bit-exact with the reference C code in pitch_filter.c.
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "rtc_base/system/asm_defines.h"
GLOBAL_FUNCTION WebRtcIsacfix_PitchFilterCore
.align 2
@ void WebRtcIsacfix_PitchFilterCore(int loopNumber,
@ int16_t gain,
@ size_t index,
@ int16_t sign,
@ int16_t* inputState,
@ int16_t* outputBuf2,
@ const int16_t* coefficient,
@ int16_t* inputBuf,
@ int16_t* outputBuf,
@ int* index2) {
DEFINE_FUNCTION WebRtcIsacfix_PitchFilterCore
push {r4-r11}
sub sp, #8
str r0, [sp] @ loopNumber
str r3, [sp, #4] @ sign
ldr r3, [sp, #44] @ outputBuf2
ldr r6, [sp, #60] @ index2
ldr r7, [r6] @ *index2
ldr r8, [sp, #52] @ inputBuf
ldr r12, [sp, #56] @ outputBuf
add r4, r7, r0
str r4, [r6] @ Store return value to index2.
mov r10, r7, asl #1
add r12, r10 @ &outputBuf[*index2]
add r8, r10 @ &inputBuf[*index2]
add r4, r7, #PITCH_BUFFSIZE @ *index2 + PITCH_BUFFSIZE
add r6, r3, r4, lsl #1 @ &outputBuf2[*index2 + PITCH_BUFFSIZE]
sub r4, r2 @ r2: index
sub r4, #2 @ *index2 + PITCH_BUFFSIZE - index - 2
add r3, r4, lsl #1 @ &ubufQQpos2[*index2]
ldr r9, [sp, #48] @ coefficient
LOOP:
@ Usage of registers in the loop:
@ r0: loop counter
@ r1: gain
@ r2: tmpW32
@ r3: &ubufQQpos2[]
@ r6: &outputBuf2[]
@ r8: &inputBuf[]
@ r9: &coefficient[]
@ r12: &outputBuf[]
@ r4, r5, r7, r10, r11: scratch
@ Filter to get fractional pitch.
@ The pitch filter loop here is unrolled with 9 multiplications.
pld [r3]
ldr r10, [r3], #4 @ ubufQQpos2[*index2 + 0, *index2 + 1]
ldr r4, [r9], #4 @ coefficient[0, 1]
ldr r11, [r3], #4
ldr r5, [r9], #4
smuad r2, r10, r4
smlad r2, r11, r5, r2
ldr r10, [r3], #4
ldr r4, [r9], #4
ldr r11, [r3], #4
ldr r5, [r9], #4
smlad r2, r10, r4, r2
ldrh r10, [r3], #-14 @ r3 back to &ubufQQpos2[*index2].
ldrh r4, [r9], #-16 @ r9 back to &coefficient[0].
smlad r2, r11, r5, r2
smlabb r2, r10, r4, r2
@ Saturate to avoid overflow in tmpW16.
asr r2, #1
add r4, r2, #0x1000
ssat r7, #16, r4, asr #13
@ Shift low pass filter state, and execute the low pass filter.
@ The memmove() and the low pass filter loop are unrolled and mixed.
smulbb r5, r1, r7
add r7, r5, #0x800
asr r7, #12 @ Get the value for inputState[0].
ldr r11, [sp, #40] @ inputState
pld [r11]
adr r10, kDampFilter
ldrsh r4, [r10], #2 @ kDampFilter[0]
mul r2, r7, r4
ldr r4, [r11] @ inputState[0, 1], before shift.
strh r7, [r11] @ inputState[0], after shift.
ldr r5, [r11, #4] @ inputState[2, 3], before shift.
ldr r7, [r10], #4 @ kDampFilter[1, 2]
ldr r10, [r10] @ kDampFilter[3, 4]
str r4, [r11, #2] @ inputState[1, 2], after shift.
str r5, [r11, #6] @ inputState[3, 4], after shift.
smlad r2, r4, r7, r2
smlad r2, r5, r10, r2
@ Saturate to avoid overflow.
@ First shift the sample to the range of [0xC0000000, 0x3FFFFFFF],
@ to avoid overflow in the next saturation step.
asr r2, #1
add r10, r2, #0x2000
ssat r10, #16, r10, asr #14
@ Subtract from input and update buffer.
ldr r11, [sp, #4] @ sign
ldrsh r4, [r8]
ldrsh r7, [r8], #2 @ inputBuf[*index2]
smulbb r5, r11, r10
subs r0, #1
sub r4, r5
ssat r2, #16, r4
strh r2, [r12], #2 @ outputBuf[*index2]
add r2, r7
ssat r2, #16, r2
strh r2, [r6], #2 @ outputBuf2[*index2 + PITCH_BUFFSIZE]
bgt LOOP
add sp, #8
pop {r4-r11}
bx lr
.align 2
kDampFilter:
.short -2294, 8192, 20972, 8192, -2294


@ -0,0 +1,73 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
/* Filter coefficients in Q15. */
static const int16_t kDampFilter[PITCH_DAMPORDER] = {
-2294, 8192, 20972, 8192, -2294
};
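/* Editor's note (not part of the original sources): the taps are symmetric
 * and sum to -2294 + 8192 + 20972 + 8192 - 2294 == 32768, so the damping
 * low-pass filter has unity (0 dB) gain at DC in Q15. */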
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuf2,
const int16_t* coefficient,
int16_t* inputBuf,
int16_t* outputBuf,
int* index2) {
int i = 0, j = 0; /* Loop counters. */
int16_t* ubufQQpos2 = &outputBuf2[PITCH_BUFFSIZE - (index + 2)];
int16_t tmpW16 = 0;
for (i = 0; i < loopNumber; i++) {
int32_t tmpW32 = 0;
/* Filter to get fractional pitch. */
for (j = 0; j < PITCH_FRACORDER; j++) {
tmpW32 += ubufQQpos2[*index2 + j] * coefficient[j];
}
/* Saturate to avoid overflow in tmpW16. */
tmpW32 = WEBRTC_SPL_SAT(536862719, tmpW32, -536879104);
tmpW32 += 8192;
tmpW16 = (int16_t)(tmpW32 >> 14);
/* Shift low pass filter state. */
memmove(&inputState[1], &inputState[0],
(PITCH_DAMPORDER - 1) * sizeof(int16_t));
inputState[0] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
gain, tmpW16, 12);
/* Low pass filter. */
tmpW32 = 0;
/* TODO(kma): Define a static inline function WebRtcSpl_DotProduct()
in spl_inl.h to replace this and other similar loops. */
for (j = 0; j < PITCH_DAMPORDER; j++) {
tmpW32 += inputState[j] * kDampFilter[j];
}
/* Saturate to avoid overflow in tmpW16. */
tmpW32 = WEBRTC_SPL_SAT(1073725439, tmpW32, -1073758208);
tmpW32 += 16384;
tmpW16 = (int16_t)(tmpW32 >> 15);
/* Subtract from input and update buffer. */
tmpW32 = inputBuf[*index2] - sign * tmpW16;
outputBuf[*index2] = WebRtcSpl_SatW32ToW16(tmpW32);
tmpW32 = inputBuf[*index2] + outputBuf[*index2];
outputBuf2[*index2 + PITCH_BUFFSIZE] = WebRtcSpl_SatW32ToW16(tmpW32);
(*index2)++;
}
}


@ -0,0 +1,133 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuf2,
const int16_t* coefficient,
int16_t* inputBuf,
int16_t* outputBuf,
int* index2) {
int ind2t = *index2;
int i = 0;
int16_t* out2_pos2 = &outputBuf2[PITCH_BUFFSIZE - (index + 2)] + ind2t;
int32_t w1, w2, w3, w4, w5, gain32, sign32;
int32_t coef1, coef2, coef3, coef4, coef5 = 0;
// Define damp factors as int32_t (pair of int16_t)
int32_t kDampF0 = 0x0000F70A;
int32_t kDampF1 = 0x51EC2000;
int32_t kDampF2 = 0xF70A2000;
int16_t* input1 = inputBuf + ind2t;
int16_t* output1 = outputBuf + ind2t;
int16_t* output2 = outputBuf2 + ind2t + PITCH_BUFFSIZE;
// Load coefficients outside the loop and sign-extend gain and sign
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
"lwl %[coef1], 3(%[coefficient]) \n\t"
"lwl %[coef2], 7(%[coefficient]) \n\t"
"lwl %[coef3], 11(%[coefficient]) \n\t"
"lwl %[coef4], 15(%[coefficient]) \n\t"
"lwr %[coef1], 0(%[coefficient]) \n\t"
"lwr %[coef2], 4(%[coefficient]) \n\t"
"lwr %[coef3], 8(%[coefficient]) \n\t"
"lwr %[coef4], 12(%[coefficient]) \n\t"
"lhu %[coef5], 16(%[coefficient]) \n\t"
"seh %[gain32], %[gain] \n\t"
"seh %[sign32], %[sign] \n\t"
".set pop \n\t"
: [coef1] "=&r" (coef1), [coef2] "=&r" (coef2), [coef3] "=&r" (coef3),
[coef4] "=&r" (coef4), [coef5] "=&r" (coef5), [gain32] "=&r" (gain32),
[sign32] "=&r" (sign32)
: [coefficient] "r" (coefficient), [gain] "r" (gain),
[sign] "r" (sign)
: "memory"
);
for (i = 0; i < loopNumber; i++) {
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
// Filter to get fractional pitch
"li %[w1], 8192 \n\t"
"mtlo %[w1] \n\t"
"mthi $0 \n\t"
"lwl %[w1], 3(%[out2_pos2]) \n\t"
"lwl %[w2], 7(%[out2_pos2]) \n\t"
"lwl %[w3], 11(%[out2_pos2]) \n\t"
"lwl %[w4], 15(%[out2_pos2]) \n\t"
"lwr %[w1], 0(%[out2_pos2]) \n\t"
"lwr %[w2], 4(%[out2_pos2]) \n\t"
"lwr %[w3], 8(%[out2_pos2]) \n\t"
"lwr %[w4], 12(%[out2_pos2]) \n\t"
"lhu %[w5], 16(%[out2_pos2]) \n\t"
"dpa.w.ph $ac0, %[w1], %[coef1] \n\t"
"dpa.w.ph $ac0, %[w2], %[coef2] \n\t"
"dpa.w.ph $ac0, %[w3], %[coef3] \n\t"
"dpa.w.ph $ac0, %[w4], %[coef4] \n\t"
"dpa.w.ph $ac0, %[w5], %[coef5] \n\t"
"addiu %[out2_pos2], %[out2_pos2], 2 \n\t"
"mthi $0, $ac1 \n\t"
"lwl %[w2], 3(%[inputState]) \n\t"
"lwl %[w3], 7(%[inputState]) \n\t"
// Fractional pitch shift & saturation
"extr_s.h %[w1], $ac0, 14 \n\t"
"li %[w4], 16384 \n\t"
"lwr %[w2], 0(%[inputState]) \n\t"
"lwr %[w3], 4(%[inputState]) \n\t"
"mtlo %[w4], $ac1 \n\t"
// Shift low pass filter state
"swl %[w2], 5(%[inputState]) \n\t"
"swl %[w3], 9(%[inputState]) \n\t"
"mul %[w1], %[gain32], %[w1] \n\t"
"swr %[w2], 2(%[inputState]) \n\t"
"swr %[w3], 6(%[inputState]) \n\t"
// Low pass filter accumulation
"dpa.w.ph $ac1, %[kDampF1], %[w2] \n\t"
"dpa.w.ph $ac1, %[kDampF2], %[w3] \n\t"
"lh %[w4], 0(%[input1]) \n\t"
"addiu %[input1], %[input1], 2 \n\t"
"shra_r.w %[w1], %[w1], 12 \n\t"
"sh %[w1], 0(%[inputState]) \n\t"
"dpa.w.ph $ac1, %[kDampF0], %[w1] \n\t"
// Low pass filter shift & saturation
"extr_s.h %[w2], $ac1, 15 \n\t"
"mul %[w2], %[w2], %[sign32] \n\t"
// Buffer update
"subu %[w2], %[w4], %[w2] \n\t"
"shll_s.w %[w2], %[w2], 16 \n\t"
"sra %[w2], %[w2], 16 \n\t"
"sh %[w2], 0(%[output1]) \n\t"
"addu %[w2], %[w2], %[w4] \n\t"
"shll_s.w %[w2], %[w2], 16 \n\t"
"addiu %[output1], %[output1], 2 \n\t"
"sra %[w2], %[w2], 16 \n\t"
"sh %[w2], 0(%[output2]) \n\t"
"addiu %[output2], %[output2], 2 \n\t"
".set pop \n\t"
: [w1] "=&r" (w1), [w2] "=&r" (w2), [w3] "=&r" (w3), [w4] "=&r" (w4),
[w5] "=&r" (w5), [input1] "+r" (input1), [out2_pos2] "+r" (out2_pos2),
[output1] "+r" (output1), [output2] "+r" (output2)
: [coefficient] "r" (coefficient), [inputState] "r" (inputState),
[gain32] "r" (gain32), [sign32] "r" (sign32), [kDampF0] "r" (kDampF0),
[kDampF1] "r" (kDampF1), [kDampF2] "r" (kDampF2),
[coef1] "r" (coef1), [coef2] "r" (coef2), [coef3] "r" (coef3),
[coef4] "r" (coef4), [coef5] "r" (coef5)
: "hi", "lo", "$ac1hi", "$ac1lo", "memory"
);
}
(*index2) += loopNumber;
}


@ -0,0 +1,149 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* pitch_gain_tables.c
*
* This file contains tables for the pitch filter side-info in the entropy coder.
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h"
/********************* Pitch Filter Gain Coefficient Tables ************************/
/* cdf for quantized pitch filter gains */
const uint16_t WebRtcIsacfix_kPitchGainCdf[255] = {
0, 2, 4, 6, 64, 901, 903, 905, 16954, 16956,
16961, 17360, 17362, 17364, 17366, 17368, 17370, 17372, 17374, 17411,
17514, 17516, 17583, 18790, 18796, 18802, 20760, 20777, 20782, 21722,
21724, 21728, 21738, 21740, 21742, 21744, 21746, 21748, 22224, 22227,
22230, 23214, 23229, 23239, 25086, 25108, 25120, 26088, 26094, 26098,
26175, 26177, 26179, 26181, 26183, 26185, 26484, 26507, 26522, 27705,
27731, 27750, 29767, 29799, 29817, 30866, 30883, 30885, 31025, 31029,
31031, 31033, 31035, 31037, 31114, 31126, 31134, 32687, 32722, 32767,
35718, 35742, 35757, 36943, 36952, 36954, 37115, 37128, 37130, 37132,
37134, 37136, 37143, 37145, 37152, 38843, 38863, 38897, 47458, 47467,
47474, 49040, 49061, 49063, 49145, 49157, 49159, 49161, 49163, 49165,
49167, 49169, 49171, 49757, 49770, 49782, 61333, 61344, 61346, 62860,
62883, 62885, 62887, 62889, 62891, 62893, 62895, 62897, 62899, 62901,
62903, 62905, 62907, 62909, 65496, 65498, 65500, 65521, 65523, 65525,
65527, 65529, 65531, 65533, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535
};
/* index limits and ranges */
const int16_t WebRtcIsacfix_kLowerlimiGain[3] = {
-7, -2, -1
};
const int16_t WebRtcIsacfix_kUpperlimitGain[3] = {
0, 3, 1
};
const uint16_t WebRtcIsacfix_kMultsGain[2] = {
18, 3
};
/* size of cdf table */
const uint16_t WebRtcIsacfix_kCdfTableSizeGain[1] = {
256
};
/* mean values of pitch filter gains in FIXED point Q12 */
const int16_t WebRtcIsacfix_kPitchGain1[144] = {
843, 1092, 1336, 1222, 1405, 1656, 1500, 1815, 1843, 1838, 1839,
1843, 1843, 1843, 1843, 1843, 1843, 1843, 814, 846, 1092, 1013,
1174, 1383, 1391, 1511, 1584, 1734, 1753, 1843, 1843, 1843, 1843,
1843, 1843, 1843, 524, 689, 777, 845, 947, 1069, 1090, 1263,
1380, 1447, 1559, 1676, 1645, 1749, 1843, 1843, 1843, 1843, 81,
477, 563, 611, 706, 806, 849, 1012, 1192, 1128, 1330, 1489,
1425, 1576, 1826, 1741, 1843, 1843, 0, 290, 305, 356, 488,
575, 602, 741, 890, 835, 1079, 1196, 1182, 1376, 1519, 1506,
1680, 1843, 0, 47, 97, 69, 289, 381, 385, 474, 617,
664, 803, 1079, 935, 1160, 1269, 1265, 1506, 1741, 0, 0,
0, 0, 112, 120, 190, 283, 442, 343, 526, 809, 684,
935, 1134, 1020, 1265, 1506, 0, 0, 0, 0, 0, 0,
0, 111, 256, 87, 373, 597, 430, 684, 935, 770, 1020,
1265
};
const int16_t WebRtcIsacfix_kPitchGain2[144] = {
1760, 1525, 1285, 1747, 1671, 1393, 1843, 1826, 1555, 1843, 1784,
1606, 1843, 1843, 1711, 1843, 1843, 1814, 1389, 1275, 1040, 1564,
1414, 1252, 1610, 1495, 1343, 1753, 1592, 1405, 1804, 1720, 1475,
1843, 1814, 1581, 1208, 1061, 856, 1349, 1148, 994, 1390, 1253,
1111, 1495, 1343, 1178, 1770, 1465, 1234, 1814, 1581, 1342, 1040,
793, 713, 1053, 895, 737, 1128, 1003, 861, 1277, 1094, 981,
1475, 1192, 1019, 1581, 1342, 1098, 855, 570, 483, 833, 648,
540, 948, 744, 572, 1009, 844, 636, 1234, 934, 685, 1342,
1217, 984, 537, 318, 124, 603, 423, 350, 687, 479, 322,
791, 581, 430, 987, 671, 488, 1098, 849, 597, 283, 27,
0, 397, 222, 38, 513, 271, 124, 624, 325, 157, 737,
484, 233, 849, 597, 343, 27, 0, 0, 141, 0, 0,
256, 69, 0, 370, 87, 0, 484, 229, 0, 597, 343,
87
};
const int16_t WebRtcIsacfix_kPitchGain3[144] = {
1843, 1843, 1711, 1843, 1818, 1606, 1843, 1827, 1511, 1814, 1639,
1393, 1760, 1525, 1285, 1656, 1419, 1176, 1835, 1718, 1475, 1841,
1650, 1387, 1648, 1498, 1287, 1600, 1411, 1176, 1522, 1299, 1040,
1419, 1176, 928, 1773, 1461, 1128, 1532, 1355, 1202, 1429, 1260,
1115, 1398, 1151, 1025, 1172, 1080, 790, 1176, 928, 677, 1475,
1147, 1019, 1276, 1096, 922, 1214, 1010, 901, 1057, 893, 800,
1040, 796, 734, 928, 677, 424, 1137, 897, 753, 1120, 830,
710, 875, 751, 601, 795, 642, 583, 790, 544, 475, 677,
474, 140, 987, 750, 482, 697, 573, 450, 691, 487, 303,
661, 394, 332, 537, 303, 220, 424, 168, 0, 737, 484,
229, 624, 348, 153, 441, 261, 136, 397, 166, 51, 283,
27, 0, 168, 0, 0, 484, 229, 0, 370, 57, 0,
256, 43, 0, 141, 0, 0, 27, 0, 0, 0, 0,
0
};
const int16_t WebRtcIsacfix_kPitchGain4[144] = {
1843, 1843, 1843, 1843, 1841, 1843, 1500, 1821, 1843, 1222, 1434,
1656, 843, 1092, 1336, 504, 757, 1007, 1843, 1843, 1843, 1838,
1791, 1843, 1265, 1505, 1599, 965, 1219, 1425, 730, 821, 1092,
249, 504, 757, 1783, 1819, 1843, 1351, 1567, 1727, 1096, 1268,
1409, 805, 961, 1131, 444, 670, 843, 0, 249, 504, 1425,
1655, 1743, 1096, 1324, 1448, 822, 1019, 1199, 490, 704, 867,
81, 450, 555, 0, 0, 249, 1247, 1428, 1530, 881, 1073,
1283, 610, 759, 939, 278, 464, 645, 0, 200, 270, 0,
0, 0, 935, 1163, 1410, 528, 790, 1068, 377, 499, 717,
173, 240, 274, 0, 43, 62, 0, 0, 0, 684, 935,
1182, 343, 551, 735, 161, 262, 423, 0, 55, 27, 0,
0, 0, 0, 0, 0, 430, 684, 935, 87, 377, 597,
0, 46, 256, 0, 0, 0, 0, 0, 0, 0, 0,
0
};
/* transform matrix in Q12*/
const int16_t WebRtcIsacfix_kTransform[4][4] = {
{ -2048, -2048, -2048, -2048 },
{ 2748, 916, -916, -2748 },
{ 2048, -2048, -2048, 2048 },
{ 916, -2748, 2748, -916 }
};


@ -0,0 +1,46 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* pitch_gain_tables.h
*
* This file contains tables for the pitch filter side-info in the entropy
* coder.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_
#include <stdint.h>
/********************* Pitch Filter Gain Coefficient Tables
* ************************/
/* cdf for quantized pitch filter gains */
extern const uint16_t WebRtcIsacfix_kPitchGainCdf[255];
/* index limits and ranges */
extern const int16_t WebRtcIsacfix_kLowerlimiGain[3];
extern const int16_t WebRtcIsacfix_kUpperlimitGain[3];
extern const uint16_t WebRtcIsacfix_kMultsGain[2];
/* mean values of pitch filter gains in Q12*/
extern const int16_t WebRtcIsacfix_kPitchGain1[144];
extern const int16_t WebRtcIsacfix_kPitchGain2[144];
extern const int16_t WebRtcIsacfix_kPitchGain3[144];
extern const int16_t WebRtcIsacfix_kPitchGain4[144];
/* size of cdf table */
extern const uint16_t WebRtcIsacfix_kCdfTableSizeGain[1];
/* transform matrix */
extern const int16_t WebRtcIsacfix_kTransform[4][4];
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_ */


@ -0,0 +1,306 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* pitch_lag_tables.c
*
* This file contains tables for the pitch filter side-info in the entropy coder.
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h"
/********************* Pitch Filter Lag Coefficient Tables ************************/
/* tables for use with small pitch gain */
/* cdf for quantized pitch filter lags */
const uint16_t WebRtcIsacfix_kPitchLagCdf1Lo[127] = {
0, 134, 336, 549, 778, 998, 1264, 1512, 1777, 2070,
2423, 2794, 3051, 3361, 3708, 3979, 4315, 4610, 4933, 5269,
5575, 5896, 6155, 6480, 6816, 7129, 7477, 7764, 8061, 8358,
8718, 9020, 9390, 9783, 10177, 10543, 10885, 11342, 11795, 12213,
12680, 13096, 13524, 13919, 14436, 14903, 15349, 15795, 16267, 16734,
17266, 17697, 18130, 18632, 19080, 19447, 19884, 20315, 20735, 21288,
21764, 22264, 22723, 23193, 23680, 24111, 24557, 25022, 25537, 26082,
26543, 27090, 27620, 28139, 28652, 29149, 29634, 30175, 30692, 31273,
31866, 32506, 33059, 33650, 34296, 34955, 35629, 36295, 36967, 37726,
38559, 39458, 40364, 41293, 42256, 43215, 44231, 45253, 46274, 47359,
48482, 49678, 50810, 51853, 53016, 54148, 55235, 56263, 57282, 58363,
59288, 60179, 61076, 61806, 62474, 63129, 63656, 64160, 64533, 64856,
65152, 65535, 65535, 65535, 65535, 65535, 65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf2Lo[20] = {
0, 429, 3558, 5861, 8558, 11639, 15210, 19502, 24773, 31983,
42602, 48567, 52601, 55676, 58160, 60172, 61889, 63235, 65383, 65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf3Lo[2] = {
0, 65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf4Lo[10] = {
0, 2966, 6368, 11182, 19431, 37793, 48532, 55353, 60626, 65535
};
const uint16_t *WebRtcIsacfix_kPitchLagPtrLo[4] = {
WebRtcIsacfix_kPitchLagCdf1Lo,
WebRtcIsacfix_kPitchLagCdf2Lo,
WebRtcIsacfix_kPitchLagCdf3Lo,
WebRtcIsacfix_kPitchLagCdf4Lo
};
/* size of first cdf table */
const uint16_t WebRtcIsacfix_kPitchLagSizeLo[1] = {
128
};
/* index limits and ranges */
const int16_t WebRtcIsacfix_kLowerLimitLo[4] = {
-140, -9, 0, -4
};
const int16_t WebRtcIsacfix_kUpperLimitLo[4] = {
-20, 9, 0, 4
};
/* initial index for arithmetic decoder */
const uint16_t WebRtcIsacfix_kInitIndLo[3] = {
10, 1, 5
};
/* mean values of pitch filter lags in Q10 */
const int16_t WebRtcIsacfix_kMeanLag2Lo[19] = {
-17627, -16207, -14409, -12319, -10253, -8200, -6054, -3986, -1948, -19,
1937, 3974, 6064, 8155, 10229, 12270, 14296, 16127, 17520
};
const int16_t WebRtcIsacfix_kMeanLag4Lo[9] = {
-7949, -6063, -4036, -1941, 38, 1977, 4060, 6059
};
/* tables for use with medium pitch gain */
/* cdf for quantized pitch filter lags */
const uint16_t WebRtcIsacfix_kPitchLagCdf1Mid[255] = {
0, 28, 61, 88, 121, 149, 233, 331, 475, 559,
624, 661, 689, 712, 745, 791, 815, 843, 866, 922,
959, 1024, 1061, 1117, 1178, 1238, 1280, 1350, 1453, 1513,
1564, 1625, 1671, 1741, 1788, 1904, 2072, 2421, 2626, 2770,
2840, 2900, 2942, 3012, 3068, 3115, 3147, 3194, 3254, 3319,
3366, 3520, 3678, 3780, 3850, 3911, 3957, 4032, 4106, 4185,
4292, 4474, 4683, 4842, 5019, 5191, 5321, 5428, 5540, 5675,
5763, 5847, 5959, 6127, 6304, 6564, 6839, 7090, 7263, 7421,
7556, 7728, 7872, 7984, 8142, 8361, 8580, 8743, 8938, 9227,
9409, 9539, 9674, 9795, 9930, 10060, 10177, 10382, 10614, 10861,
11038, 11271, 11415, 11629, 11792, 12044, 12193, 12416, 12574, 12821,
13007, 13235, 13445, 13654, 13901, 14134, 14488, 15000, 15703, 16285,
16504, 16797, 17086, 17328, 17579, 17807, 17998, 18268, 18538, 18836,
19087, 19274, 19474, 19716, 19935, 20270, 20833, 21303, 21532, 21741,
21978, 22207, 22523, 22770, 23054, 23613, 23943, 24204, 24399, 24651,
24832, 25074, 25270, 25549, 25759, 26015, 26150, 26424, 26713, 27048,
27342, 27504, 27681, 27854, 28021, 28207, 28412, 28664, 28859, 29064,
29278, 29548, 29748, 30107, 30377, 30656, 30856, 31164, 31452, 31755,
32011, 32328, 32626, 32919, 33319, 33789, 34329, 34925, 35396, 35973,
36443, 36964, 37551, 38156, 38724, 39357, 40023, 40908, 41587, 42602,
43924, 45037, 45810, 46597, 47421, 48291, 49092, 50051, 51448, 52719,
53440, 54241, 54944, 55977, 56676, 57299, 57872, 58389, 59059, 59688,
60237, 60782, 61094, 61573, 61890, 62290, 62658, 63030, 63217, 63454,
63622, 63882, 64003, 64273, 64427, 64529, 64581, 64697, 64758, 64902,
65414, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf2Mid[36] = {
0, 71, 335, 581, 836, 1039, 1323, 1795, 2258, 2608,
3005, 3591, 4243, 5344, 7163, 10583, 16848, 28078, 49448, 57007,
60357, 61850, 62837, 63437, 63872, 64188, 64377, 64614, 64774, 64949,
65039, 65115, 65223, 65360, 65474, 65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf3Mid[2] = {
0, 65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf4Mid[20] = {
0, 28, 246, 459, 667, 1045, 1523, 2337, 4337, 11347,
44231, 56709, 60781, 62243, 63161, 63969, 64608, 65062, 65502, 65535
};
const uint16_t *WebRtcIsacfix_kPitchLagPtrMid[4] = {
WebRtcIsacfix_kPitchLagCdf1Mid,
WebRtcIsacfix_kPitchLagCdf2Mid,
WebRtcIsacfix_kPitchLagCdf3Mid,
WebRtcIsacfix_kPitchLagCdf4Mid
};
/* size of first cdf table */
const uint16_t WebRtcIsacfix_kPitchLagSizeMid[1] = {
256
};
/* index limits and ranges */
const int16_t WebRtcIsacfix_kLowerLimitMid[4] = {
-280, -17, 0, -9
};
const int16_t WebRtcIsacfix_kUpperLimitMid[4] = {
-40, 17, 0, 9
};
/* initial index for arithmetic decoder */
const uint16_t WebRtcIsacfix_kInitIndMid[3] = {
18, 1, 10
};
/* mean values of pitch filter lags in Q10 */
const int16_t WebRtcIsacfix_kMeanLag2Mid[35] = {
-17297, -16250, -15416, -14343, -13341, -12363, -11270,
-10355, -9122, -8217, -7172, -6083, -5102, -4004, -3060,
-1982, -952, -18, 935, 1976, 3040, 4032,
5082, 6065, 7257, 8202, 9264, 10225, 11242,
12234, 13337, 14336, 15374, 16187, 17347
};
const int16_t WebRtcIsacfix_kMeanLag4Mid[19] = {
-8811, -8081, -7203, -6003, -5057, -4025, -2983, -1964,
-891, 29, 921, 1920, 2988, 4064, 5187, 6079, 7173, 8074, 8849
};
/* tables for use with large pitch gain */
/* cdf for quantized pitch filter lags */
const uint16_t WebRtcIsacfix_kPitchLagCdf1Hi[511] = {
0, 7, 18, 33, 69, 105, 156, 228, 315, 612,
680, 691, 709, 724, 735, 738, 742, 746, 749, 753,
756, 760, 764, 774, 782, 785, 789, 796, 800, 803,
807, 814, 818, 822, 829, 832, 847, 854, 858, 869,
876, 883, 898, 908, 934, 977, 1010, 1050, 1060, 1064,
1075, 1078, 1086, 1089, 1093, 1104, 1111, 1122, 1133, 1136,
1151, 1162, 1183, 1209, 1252, 1281, 1339, 1364, 1386, 1401,
1411, 1415, 1426, 1430, 1433, 1440, 1448, 1455, 1462, 1477,
1487, 1495, 1502, 1506, 1509, 1516, 1524, 1531, 1535, 1542,
1553, 1556, 1578, 1589, 1611, 1625, 1639, 1643, 1654, 1665,
1672, 1687, 1694, 1705, 1708, 1719, 1730, 1744, 1752, 1759,
1791, 1795, 1820, 1867, 1886, 1915, 1936, 1943, 1965, 1987,
2041, 2099, 2161, 2175, 2200, 2211, 2226, 2233, 2244, 2251,
2266, 2280, 2287, 2298, 2309, 2316, 2331, 2342, 2356, 2378,
2403, 2418, 2447, 2497, 2544, 2602, 2863, 2895, 2903, 2935,
2950, 2971, 3004, 3011, 3018, 3029, 3040, 3062, 3087, 3127,
3152, 3170, 3199, 3243, 3293, 3322, 3340, 3377, 3402, 3427,
3474, 3518, 3543, 3579, 3601, 3637, 3659, 3706, 3731, 3760,
3818, 3847, 3869, 3901, 3920, 3952, 4068, 4169, 4220, 4271,
4524, 4571, 4604, 4632, 4672, 4730, 4777, 4806, 4857, 4904,
4951, 5002, 5031, 5060, 5107, 5150, 5212, 5266, 5331, 5382,
5432, 5490, 5544, 5610, 5700, 5762, 5812, 5874, 5972, 6022,
6091, 6163, 6232, 6305, 6402, 6540, 6685, 6880, 7090, 7271,
7379, 7452, 7542, 7625, 7687, 7770, 7843, 7911, 7966, 8024,
8096, 8190, 8252, 8320, 8411, 8501, 8585, 8639, 8751, 8842,
8918, 8986, 9066, 9127, 9203, 9269, 9345, 9406, 9464, 9536,
9612, 9667, 9735, 9844, 9931, 10036, 10119, 10199, 10260, 10358,
10441, 10514, 10666, 10734, 10872, 10951, 11053, 11125, 11223, 11324,
11516, 11664, 11737, 11816, 11892, 12008, 12120, 12200, 12280, 12392,
12490, 12576, 12685, 12812, 12917, 13003, 13108, 13210, 13300, 13384,
13470, 13579, 13673, 13771, 13879, 13999, 14136, 14201, 14368, 14614,
14759, 14867, 14958, 15030, 15121, 15189, 15280, 15385, 15461, 15555,
15653, 15768, 15884, 15971, 16069, 16145, 16210, 16279, 16380, 16463,
16539, 16615, 16688, 16818, 16919, 17017, 18041, 18338, 18523, 18649,
18790, 18917, 19047, 19167, 19315, 19460, 19601, 19731, 19858, 20068,
20173, 20318, 20466, 20625, 20741, 20911, 21045, 21201, 21396, 21588,
21816, 22022, 22305, 22547, 22786, 23072, 23322, 23600, 23879, 24168,
24433, 24769, 25120, 25511, 25895, 26289, 26792, 27219, 27683, 28077,
28566, 29094, 29546, 29977, 30491, 30991, 31573, 32105, 32594, 33173,
33788, 34497, 35181, 35833, 36488, 37255, 37921, 38645, 39275, 39894,
40505, 41167, 41790, 42431, 43096, 43723, 44385, 45134, 45858, 46607,
47349, 48091, 48768, 49405, 49955, 50555, 51167, 51985, 52611, 53078,
53494, 53965, 54435, 54996, 55601, 56125, 56563, 56838, 57244, 57566,
57967, 58297, 58771, 59093, 59419, 59647, 59886, 60143, 60461, 60693,
60917, 61170, 61416, 61634, 61891, 62122, 62310, 62455, 62632, 62839,
63103, 63436, 63639, 63805, 63906, 64015, 64192, 64355, 64475, 64558,
64663, 64742, 64811, 64865, 64916, 64956, 64981, 65025, 65068, 65115,
65195, 65314, 65419, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535,
65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf2Hi[68] = {
0, 7, 11, 22, 37, 52, 56, 59, 81, 85,
89, 96, 115, 130, 137, 152, 170, 181, 193, 200,
207, 233, 237, 259, 289, 318, 363, 433, 592, 992,
1607, 3062, 6149, 12206, 25522, 48368, 58223, 61918, 63640, 64584,
64943, 65098, 65206, 65268, 65294, 65335, 65350, 65372, 65387, 65402,
65413, 65420, 65428, 65435, 65439, 65450, 65454, 65468, 65472, 65476,
65483, 65491, 65498, 65505, 65516, 65520, 65528, 65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf3Hi[2] = {
0, 65535
};
const uint16_t WebRtcIsacfix_kPitchLagCdf4Hi[35] = {
0, 7, 19, 30, 41, 48, 63, 74, 82, 96,
122, 152, 215, 330, 701, 2611, 10931, 48106, 61177, 64341,
65112, 65238, 65309, 65338, 65364, 65379, 65401, 65427, 65453,
65465, 65476, 65490, 65509, 65528, 65535
};
const uint16_t *WebRtcIsacfix_kPitchLagPtrHi[4] = {
WebRtcIsacfix_kPitchLagCdf1Hi,
WebRtcIsacfix_kPitchLagCdf2Hi,
WebRtcIsacfix_kPitchLagCdf3Hi,
WebRtcIsacfix_kPitchLagCdf4Hi
};
/* size of first cdf table */
const uint16_t WebRtcIsacfix_kPitchLagSizeHi[1] = {
512
};
/* index limits and ranges */
const int16_t WebRtcIsacfix_kLowerLimitHi[4] = {
-552, -34, 0, -16
};
const int16_t WebRtcIsacfix_kUpperLimitHi[4] = {
-80, 32, 0, 17
};
/* initial index for arithmetic decoder */
const uint16_t WebRtcIsacfix_kInitIndHi[3] = {
34, 1, 18
};
/* mean values of pitch filter lags */
const int16_t WebRtcIsacfix_kMeanLag2Hi[67] = {
-17482, -16896, -16220, -15929, -15329, -14848, -14336, -13807, -13312, -12800, -12218, -11720,
-11307, -10649, -10396, -9742, -9148, -8668, -8297, -7718, -7155, -6656, -6231, -5600, -5129,
-4610, -4110, -3521, -3040, -2525, -2016, -1506, -995, -477, -5, 469, 991, 1510, 2025, 2526, 3079,
3555, 4124, 4601, 5131, 5613, 6194, 6671, 7140, 7645, 8207, 8601, 9132, 9728, 10359, 10752, 11302,
11776, 12288, 12687, 13204, 13759, 14295, 14810, 15360, 15764, 16350
};
const int16_t WebRtcIsacfix_kMeanLag4Hi[34] = {
-8175, -7659, -7205, -6684, -6215, -5651, -5180, -4566, -4087, -3536, -3096,
-2532, -1990, -1482, -959, -440, 11, 451, 954, 1492, 2020, 2562, 3059,
3577, 4113, 4618, 5134, 5724, 6060, 6758, 7015, 7716, 8066, 8741
};
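/*
 * Illustrative sketch only, added for readability (not part of the codec):
 * tables of this shape are typically consumed by locating the CDF interval
 * that contains a scaled value from the arithmetic decoder and offsetting the
 * resulting symbol by the corresponding lower limit. The helper below is a
 * hypothetical example of that lookup; the actual iSAC decode path lives in
 * the entropy-coding sources and may differ in detail.
 */
#if 0 /* example only, not compiled */
static int ExampleFindCdfInterval(const uint16_t* cdf, int num_entries,
                                  uint16_t scaled_val) {
  int k = 0;
  /* Find k such that cdf[k] <= scaled_val < cdf[k + 1], clamped at the top. */
  while (k + 2 < num_entries && cdf[k + 1] <= scaled_val) {
    k++;
  }
  return k;
}
/* A symbol decoded from WebRtcIsacfix_kPitchLagCdf2Hi (68 entries, 67 symbols)
 * could then be offset by WebRtcIsacfix_kLowerLimitHi[1] = -34, which spans
 * exactly the -34..32 range implied by WebRtcIsacfix_kUpperLimitHi[1]. */
#endif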

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* pitch_lag_tables.h
*
* This file contains tables for the pitch filter side-info in the entropy
* coder.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_
#include <stdint.h>
/********************* Pitch Filter Lag Coefficient Tables
* ************************/
/* tables for use with small pitch gain */
/* cdfs for quantized pitch lags */
extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Lo[127];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Lo[20];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Lo[2];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Lo[10];
extern const uint16_t* WebRtcIsacfix_kPitchLagPtrLo[4];
/* size of first cdf table */
extern const uint16_t WebRtcIsacfix_kPitchLagSizeLo[1];
/* index limits and ranges */
extern const int16_t WebRtcIsacfix_kLowerLimitLo[4];
extern const int16_t WebRtcIsacfix_kUpperLimitLo[4];
/* initial index for arithmetic decoder */
extern const uint16_t WebRtcIsacfix_kInitIndLo[3];
/* mean values of pitch filter lags */
extern const int16_t WebRtcIsacfix_kMeanLag2Lo[19];
extern const int16_t WebRtcIsacfix_kMeanLag4Lo[9];
/* tables for use with medium pitch gain */
/* cdfs for quantized pitch lags */
extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Mid[255];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Mid[36];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Mid[2];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Mid[20];
extern const uint16_t* WebRtcIsacfix_kPitchLagPtrMid[4];
/* size of first cdf table */
extern const uint16_t WebRtcIsacfix_kPitchLagSizeMid[1];
/* index limits and ranges */
extern const int16_t WebRtcIsacfix_kLowerLimitMid[4];
extern const int16_t WebRtcIsacfix_kUpperLimitMid[4];
/* initial index for arithmetic decoder */
extern const uint16_t WebRtcIsacfix_kInitIndMid[3];
/* mean values of pitch filter lags */
extern const int16_t WebRtcIsacfix_kMeanLag2Mid[35];
extern const int16_t WebRtcIsacfix_kMeanLag4Mid[19];
/* tables for use with large pitch gain */
/* cdfs for quantized pitch lags */
extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Hi[511];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Hi[68];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Hi[2];
extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Hi[35];
extern const uint16_t* WebRtcIsacfix_kPitchLagPtrHi[4];
/* size of first cdf table */
extern const uint16_t WebRtcIsacfix_kPitchLagSizeHi[1];
/* index limits and ranges */
extern const int16_t WebRtcIsacfix_kLowerLimitHi[4];
extern const int16_t WebRtcIsacfix_kUpperLimitHi[4];
/* initial index for arithmetic decoder */
extern const uint16_t WebRtcIsacfix_kInitIndHi[3];
/* mean values of pitch filter lags */
extern const int16_t WebRtcIsacfix_kMeanLag2Hi[67];
extern const int16_t WebRtcIsacfix_kMeanLag4Hi[34];
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_ */

View File

@ -0,0 +1,211 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* settings.h
*
* Declaration of #defines used in the iSAC codec
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_
/* sampling frequency (Hz) */
#define FS 16000
/* 1.5 times Sampling frequency */
#define FS_1_HALF (uint32_t)24000
/* Three times Sampling frequency */
#define FS3 (uint32_t)48000
/* Eight times Sampling frequency */
#define FS8 (uint32_t)128000
/* number of samples per frame (either 480 (30ms) or 960 (60ms)) */
#define INITIAL_FRAMESAMPLES 960
/* milliseconds */
#define FRAMESIZE 30
/* number of samples per frame processed in the encoder (30ms) */
#define FRAMESAMPLES 480 /* ((FRAMESIZE*FS)/1000) */
#define FRAMESAMPLES_HALF 240
/* max number of samples per frame (= 60 ms frame) */
#define MAX_FRAMESAMPLES 960
/* number of samples per 10ms frame */
#define FRAMESAMPLES_10ms 160 /* ((10*FS)/1000) */
/* Number of samples per 1 ms */
#define SAMPLES_PER_MSEC 16
/* number of subframes */
#define SUBFRAMES 6
/* length of a subframe */
#define UPDATE 80
/* length of half a subframe (low/high band) */
#define HALF_SUBFRAMELEN 40 /* (UPDATE/2) */
/* samples of look ahead (in a half-band, so actually half the samples of look
* ahead @ FS) */
#define QLOOKAHEAD 24 /* 3 ms */
/* order of AR model in spectral entropy coder */
#define AR_ORDER 6
#define MAX_ORDER 13
#define LEVINSON_MAX_ORDER 12
/* window length (masking analysis) */
#define WINLEN 256
/* order of low-band pole filter used to approximate masking curve */
#define ORDERLO 12
/* order of hi-band pole filter used to approximate masking curve */
#define ORDERHI 6
#define KLT_NUM_AVG_GAIN 0
#define KLT_NUM_AVG_SHAPE 0
#define KLT_NUM_MODELS 3
#define LPC_SHAPE_ORDER 18 /* (ORDERLO + ORDERHI) */
#define KLT_ORDER_GAIN 12 /* (2 * SUBFRAMES) */
#define KLT_ORDER_SHAPE 108 /* (LPC_SHAPE_ORDER * SUBFRAMES) */
/* order for post_filter_bank */
#define POSTQORDER 3
/* order for pre-filterbank */
#define QORDER 3
/* for decimator */
#define ALLPASSSECTIONS 2
/* The number of composite all-pass filter factors */
#define NUMBEROFCOMPOSITEAPSECTIONS 4
/* The number of all-pass filter factors in an upper or lower channel*/
#define NUMBEROFCHANNELAPSECTIONS 2
#define DPMIN_Q10 -10240 /* -10.00 in Q10 */
#define DPMAX_Q10 10240 /* 10.00 in Q10 */
#define MINBITS_Q10 10240 /* 10.0 in Q10 */
/* array size for byte stream in number of Word16. */
#define STREAM_MAXW16 \
300 /* The old maximum size still needed for the decoding */
#define STREAM_MAXW16_30MS \
100 /* 100 Word16 = 200 bytes = 53.4 kbit/s @ 30 ms frame length */
#define STREAM_MAXW16_60MS \
200 /* 200 Word16 = 400 bytes = 53.4 kbit/s @ 60 ms frame length */
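/* For reference: 100 Word16 = 200 bytes per 30 ms frame, i.e.
   200 * 8 bits / 30 ms ~ 53.3 kbit/s, which is the maximum rate quoted above. */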
/* This is used only for the decoder bitstream struct.
* - The encoder and decoder bitstream containers are of different sizes
*   because old iSAC limited the encoded bitstream to 600 bytes, but newer
*   versions restrict it to a shorter bitstream.
* - We add 10 bytes of guard to the internal bitstream container. The reason
*   is that the entropy decoder might read a few bytes (3 according to our
*   observations) more than the actual size of the bitstream. To avoid reading
*   outside memory in the rare occasion of a full-size bitstream, we add 10
*   bytes of guard. */
#define INTERNAL_STREAM_SIZE_W16 (STREAM_MAXW16 + 5)
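/* Note: the "+ 5" Word16 elements correspond exactly to the 10 guard bytes
   described in the comment above. */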
/* storage size for bit counts */
//#define BIT_COUNTER_SIZE 30
/* maximum order of any AR model or filter */
#define MAX_AR_MODEL_ORDER 12
/* Maximum number of iterations allowed to limit payload size */
#define MAX_PAYLOAD_LIMIT_ITERATION 1
/* Bandwidth estimator */
#define MIN_ISAC_BW 10000 /* Minimum bandwidth in bits per sec */
#define MAX_ISAC_BW 32000 /* Maximum bandwidth in bits per sec */
#define MIN_ISAC_MD 5 /* Minimum Max Delay in ?? */
#define MAX_ISAC_MD 25 /* Maximum Max Delay in ?? */
#define DELAY_CORRECTION_MAX 717
#define DELAY_CORRECTION_MED 819
#define Thld_30_60 18000
#define Thld_60_30 27000
/* assumed header size; we don't know the exact number (header compression may
* be used) */
#define HEADER_SIZE 35 /* bytes */
#define INIT_FRAME_LEN 60
#define INIT_BN_EST 20000
#define INIT_BN_EST_Q7 2560000 /* 20 kbps in Q7 */
#define INIT_REC_BN_EST_Q5 789312 /* INIT_BN_EST + INIT_HDR_RATE in Q5 */
/* 8738 in Q18 is ~ 1/30 */
/* #define INIT_HDR_RATE (((HEADER_SIZE * 8 * 1000) * 8738) >> NUM_BITS_TO_SHIFT
* (INIT_FRAME_LEN)) */
#define INIT_HDR_RATE 4666
/* number of packets in a row for a high rate burst */
#define BURST_LEN 3
/* ms, max time between two full bursts */
#define BURST_INTERVAL 800
/* number of packets in a row for initial high rate burst */
#define INIT_BURST_LEN 5
/* bits/s, rate for the first BURST_LEN packets */
#define INIT_RATE 10240000 /* INIT_BN_EST in Q9 */
/* For pitch analysis */
#define PITCH_FRAME_LEN 240 /* (FRAMESAMPLES/2) 30 ms */
#define PITCH_MAX_LAG 140 /* 57 Hz */
#define PITCH_MIN_LAG 20 /* 400 Hz */
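/* Note: these lag limits appear to be in samples at the lower-band rate of
   FS / 2 = 8000 Hz (PITCH_FRAME_LEN = 240 samples for 30 ms is also at 8 kHz):
   8000 / 140 ~ 57 Hz and 8000 / 20 = 400 Hz, matching the comments above. */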
#define PITCH_MIN_LAG_Q8 5120 /* 256 * PITCH_MIN_LAG */
#define OFFSET_Q8 768 /* 256 * 3 */
#define PITCH_MAX_GAIN_Q12 1843 /* 0.45 */
#define PITCH_LAG_SPAN2 65 /* (PITCH_MAX_LAG/2-PITCH_MIN_LAG/2+5) */
#define PITCH_CORR_LEN2 60 /* 15 ms */
#define PITCH_CORR_STEP2 60 /* (PITCH_FRAME_LEN/4) */
#define PITCH_SUBFRAMES 4
#define PITCH_SUBFRAME_LEN 60 /* (PITCH_FRAME_LEN/PITCH_SUBFRAMES) */
/* For pitch filter */
#define PITCH_BUFFSIZE \
190 /* (PITCH_MAX_LAG + 50) Extra 50 for fraction and LP filters */
#define PITCH_INTBUFFSIZE 430 /* (PITCH_FRAME_LEN+PITCH_BUFFSIZE) */
#define PITCH_FRACS 8
#define PITCH_FRACORDER 9
#define PITCH_DAMPORDER 5
/* Order of high pass filter */
#define HPORDER 2
/* PLC */
#define DECAY_RATE \
10 /* Q15, 20% of decay every lost frame applied linearly sample by sample */
#define PLC_WAS_USED 1
#define PLC_NOT_USED 3
#define RECOVERY_OVERLAP 80
#define RESAMP_RES 256
#define RESAMP_RES_BIT 8
/* Define Error codes */
/* 6000 General */
#define ISAC_MEMORY_ALLOCATION_FAILED 6010
#define ISAC_MODE_MISMATCH 6020
#define ISAC_DISALLOWED_BOTTLENECK 6030
#define ISAC_DISALLOWED_FRAME_LENGTH 6040
/* 6200 Bandwidth estimator */
#define ISAC_RANGE_ERROR_BW_ESTIMATOR 6240
/* 6400 Encoder */
#define ISAC_ENCODER_NOT_INITIATED 6410
#define ISAC_DISALLOWED_CODING_MODE 6420
#define ISAC_DISALLOWED_FRAME_MODE_ENCODER 6430
#define ISAC_DISALLOWED_BITSTREAM_LENGTH 6440
#define ISAC_PAYLOAD_LARGER_THAN_LIMIT 6450
/* 6600 Decoder */
#define ISAC_DECODER_NOT_INITIATED 6610
#define ISAC_EMPTY_PACKET 6620
#define ISAC_PACKET_TOO_SHORT 6625
#define ISAC_DISALLOWED_FRAME_MODE_DECODER 6630
#define ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH 6640
#define ISAC_RANGE_ERROR_DECODE_BANDWIDTH 6650
#define ISAC_RANGE_ERROR_DECODE_PITCH_GAIN 6660
#define ISAC_RANGE_ERROR_DECODE_PITCH_LAG 6670
#define ISAC_RANGE_ERROR_DECODE_LPC 6680
#define ISAC_RANGE_ERROR_DECODE_SPECTRUM 6690
#define ISAC_LENGTH_MISMATCH 6730
/* 6800 Call setup formats */
#define ISAC_INCOMPATIBLE_FORMATS 6810
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_ */

View File

@ -0,0 +1,193 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* spectrum_ar_model_tables.c
*
* This file contains tables with AR coefficients, Gain coefficients
* and cosine tables.
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
/********************* AR Coefficient Tables ************************/
/* cdf for quantized reflection coefficient 1 */
const uint16_t WebRtcIsacfix_kRc1Cdf[12] = {
0, 2, 4, 129, 7707, 57485, 65495, 65527, 65529, 65531,
65533, 65535
};
/* cdf for quantized reflection coefficient 2 */
const uint16_t WebRtcIsacfix_kRc2Cdf[12] = {
0, 2, 4, 7, 531, 25298, 64525, 65526, 65529, 65531,
65533, 65535
};
/* cdf for quantized reflection coefficient 3 */
const uint16_t WebRtcIsacfix_kRc3Cdf[12] = {
0, 2, 4, 6, 620, 22898, 64843, 65527, 65529, 65531,
65533, 65535
};
/* cdf for quantized reflection coefficient 4 */
const uint16_t WebRtcIsacfix_kRc4Cdf[12] = {
0, 2, 4, 6, 35, 10034, 60733, 65506, 65529, 65531,
65533, 65535
};
/* cdf for quantized reflection coefficient 5 */
const uint16_t WebRtcIsacfix_kRc5Cdf[12] = {
0, 2, 4, 6, 36, 7567, 56727, 65385, 65529, 65531,
65533, 65535
};
/* cdf for quantized reflection coefficient 6 */
const uint16_t WebRtcIsacfix_kRc6Cdf[12] = {
0, 2, 4, 6, 14, 6579, 57360, 65409, 65529, 65531,
65533, 65535
};
/* representation levels for quantized reflection coefficient 1 */
const int16_t WebRtcIsacfix_kRc1Levels[11] = {
-32104, -29007, -23202, -15496, -9279, -2577, 5934, 17535, 24512, 29503, 32104
};
/* representation levels for quantized reflection coefficient 2 */
const int16_t WebRtcIsacfix_kRc2Levels[11] = {
-32104, -29503, -23494, -15261, -7309, -1399, 6158, 16381, 24512, 29503, 32104
};
/* representation levels for quantized reflection coefficient 3 */
const int16_t WebRtcIsacfix_kRc3Levels[11] = {
-32104, -29503, -23157, -15186, -7347, -1359, 5829, 17535, 24512, 29503, 32104
};
/* representation levels for quantized reflection coefficient 4 */
const int16_t WebRtcIsacfix_kRc4Levels[11] = {
-32104, -29503, -24512, -15362, -6665, -342, 6596, 14585, 24512, 29503, 32104
};
/* representation levels for quantized reflection coefficient 5 */
const int16_t WebRtcIsacfix_kRc5Levels[11] = {
-32104, -29503, -24512, -15005, -6564, -106, 7123, 14920, 24512, 29503, 32104
};
/* representation levels for quantized reflection coefficient 6 */
const int16_t WebRtcIsacfix_kRc6Levels[11] = {
-32104, -29503, -24512, -15096, -6656, -37, 7036, 14847, 24512, 29503, 32104
};
/* quantization boundary levels for reflection coefficients */
const int16_t WebRtcIsacfix_kRcBound[12] = {
-32768, -31441, -27566, -21458, -13612, -4663,
4663, 13612, 21458, 27566, 31441, 32767
};
/* initial index for AR reflection coefficient quantizer and cdf table search */
const uint16_t WebRtcIsacfix_kRcInitInd[6] = {
5, 5, 5, 5, 5, 5
};
/* pointers to AR cdf tables */
const uint16_t *WebRtcIsacfix_kRcCdfPtr[AR_ORDER] = {
WebRtcIsacfix_kRc1Cdf,
WebRtcIsacfix_kRc2Cdf,
WebRtcIsacfix_kRc3Cdf,
WebRtcIsacfix_kRc4Cdf,
WebRtcIsacfix_kRc5Cdf,
WebRtcIsacfix_kRc6Cdf
};
/* pointers to AR representation levels tables */
const int16_t *WebRtcIsacfix_kRcLevPtr[AR_ORDER] = {
WebRtcIsacfix_kRc1Levels,
WebRtcIsacfix_kRc2Levels,
WebRtcIsacfix_kRc3Levels,
WebRtcIsacfix_kRc4Levels,
WebRtcIsacfix_kRc5Levels,
WebRtcIsacfix_kRc6Levels
};
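/*
 * Illustrative sketch only, added for readability (not part of the codec):
 * the tables above have the shape of a classic scalar quantizer -- 12
 * decision boundaries enclosing 11 representation levels, apparently in Q15.
 * A hypothetical quantize/reconstruct pair using them could look like the
 * code below; the actual iSAC quantization is performed elsewhere and may
 * differ in detail.
 */
#if 0 /* example only, not compiled */
static int ExampleQuantizeRc(int16_t rc_q15) {
  int i = 0;
  /* Find the interval [kRcBound[i], kRcBound[i + 1]) containing the input,
   * clamped to the top interval. */
  while (i < 10 && rc_q15 >= WebRtcIsacfix_kRcBound[i + 1]) {
    i++;
  }
  return i; /* quantization index, 0..10 */
}
static int16_t ExampleReconstructRc1(int index) {
  return WebRtcIsacfix_kRc1Levels[index]; /* representation level (Q15) */
}
#endif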
/******************** GAIN Coefficient Tables ***********************/
/* cdf for Gain coefficient */
const uint16_t WebRtcIsacfix_kGainCdf[19] = {
0, 2, 4, 6, 8, 10, 12, 14, 16, 1172,
11119, 29411, 51699, 64445, 65527, 65529, 65531, 65533, 65535
};
/* representation levels for quantized squared Gain coefficient */
const int32_t WebRtcIsacfix_kGain2Lev[18] = {
128, 128, 128, 128, 128, 215, 364, 709, 1268,
1960, 3405, 6078, 11286, 17827, 51918, 134498, 487432, 2048000
};
/* quantization boundary levels for squared Gain coefficient */
const int32_t WebRtcIsacfix_kGain2Bound[19] = {
0, 21, 35, 59, 99, 166, 280, 475, 815, 1414,
2495, 4505, 8397, 16405, 34431, 81359, 240497, 921600, 0x7FFFFFFF
};
/* pointers to Gain cdf table */
const uint16_t *WebRtcIsacfix_kGainPtr[1] = {
WebRtcIsacfix_kGainCdf
};
/* gain initial index for gain quantizer and cdf table search */
const uint16_t WebRtcIsacfix_kGainInitInd[1] = {
11
};
/************************* Cosine Tables ****************************/
/* cosine table */
const int16_t WebRtcIsacfix_kCos[6][60] = {
{ 512, 512, 511, 510, 508, 507, 505, 502, 499, 496,
493, 489, 485, 480, 476, 470, 465, 459, 453, 447,
440, 433, 426, 418, 410, 402, 394, 385, 376, 367,
357, 348, 338, 327, 317, 306, 295, 284, 273, 262,
250, 238, 226, 214, 202, 190, 177, 165, 152, 139,
126, 113, 100, 87, 73, 60, 47, 33, 20, 7 },
{ 512, 510, 508, 503, 498, 491, 483, 473, 462, 450,
437, 422, 406, 389, 371, 352, 333, 312, 290, 268,
244, 220, 196, 171, 145, 120, 93, 67, 40, 13,
-13, -40, -67, -93, -120, -145, -171, -196, -220, -244,
-268, -290, -312, -333, -352, -371, -389, -406, -422, -437,
-450, -462, -473, -483, -491, -498, -503, -508, -510, -512 },
{ 512, 508, 502, 493, 480, 465, 447, 426, 402, 376,
348, 317, 284, 250, 214, 177, 139, 100, 60, 20,
-20, -60, -100, -139, -177, -214, -250, -284, -317, -348,
-376, -402, -426, -447, -465, -480, -493, -502, -508, -512,
-512, -508, -502, -493, -480, -465, -447, -426, -402, -376,
-348, -317, -284, -250, -214, -177, -139, -100, -60, -20 },
{ 511, 506, 495, 478, 456, 429, 398, 362, 322, 279,
232, 183, 133, 80, 27, -27, -80, -133, -183, -232,
-279, -322, -362, -398, -429, -456, -478, -495, -506, -511,
-511, -506, -495, -478, -456, -429, -398, -362, -322, -279,
-232, -183, -133, -80, -27, 27, 80, 133, 183, 232,
279, 322, 362, 398, 429, 456, 478, 495, 506, 511 },
{ 511, 502, 485, 459, 426, 385, 338, 284, 226, 165,
100, 33, -33, -100, -165, -226, -284, -338, -385, -426,
-459, -485, -502, -511, -511, -502, -485, -459, -426, -385,
-338, -284, -226, -165, -100, -33, 33, 100, 165, 226,
284, 338, 385, 426, 459, 485, 502, 511, 511, 502,
485, 459, 426, 385, 338, 284, 226, 165, 100, 33 },
{ 510, 498, 473, 437, 389, 333, 268, 196, 120, 40,
-40, -120, -196, -268, -333, -389, -437, -473, -498, -510,
-510, -498, -473, -437, -389, -333, -268, -196, -120, -40,
40, 120, 196, 268, 333, 389, 437, 473, 498, 510,
510, 498, 473, 437, 389, 333, 268, 196, 120, 40,
-40, -120, -196, -268, -333, -389, -437, -473, -498, -510 }
};
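/* Note: the entries above peak at 512 = 2^9, so the cosine values appear to be
   stored in Q9. */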

View File

@ -0,0 +1,97 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* spectrum_ar_model_tables.h
*
* This file contains definitions of tables with AR coefficients,
* Gain coefficients and cosine tables.
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
#include <stdint.h>
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
/********************* AR Coefficient Tables ************************/
/* cdf for quantized reflection coefficient 1 */
extern const uint16_t WebRtcIsacfix_kRc1Cdf[12];
/* cdf for quantized reflection coefficient 2 */
extern const uint16_t WebRtcIsacfix_kRc2Cdf[12];
/* cdf for quantized reflection coefficient 3 */
extern const uint16_t WebRtcIsacfix_kRc3Cdf[12];
/* cdf for quantized reflection coefficient 4 */
extern const uint16_t WebRtcIsacfix_kRc4Cdf[12];
/* cdf for quantized reflection coefficient 5 */
extern const uint16_t WebRtcIsacfix_kRc5Cdf[12];
/* cdf for quantized reflection coefficient 6 */
extern const uint16_t WebRtcIsacfix_kRc6Cdf[12];
/* representation levels for quantized reflection coefficient 1 */
extern const int16_t WebRtcIsacfix_kRc1Levels[11];
/* representation levels for quantized reflection coefficient 2 */
extern const int16_t WebRtcIsacfix_kRc2Levels[11];
/* representation levels for quantized reflection coefficient 3 */
extern const int16_t WebRtcIsacfix_kRc3Levels[11];
/* representation levels for quantized reflection coefficient 4 */
extern const int16_t WebRtcIsacfix_kRc4Levels[11];
/* representation levels for quantized reflection coefficient 5 */
extern const int16_t WebRtcIsacfix_kRc5Levels[11];
/* representation levels for quantized reflection coefficient 6 */
extern const int16_t WebRtcIsacfix_kRc6Levels[11];
/* quantization boundary levels for reflection coefficients */
extern const int16_t WebRtcIsacfix_kRcBound[12];
/* initial indices for AR reflection coefficient quantizer and cdf table search
*/
extern const uint16_t WebRtcIsacfix_kRcInitInd[AR_ORDER];
/* pointers to AR cdf tables */
extern const uint16_t* WebRtcIsacfix_kRcCdfPtr[AR_ORDER];
/* pointers to AR representation levels tables */
extern const int16_t* WebRtcIsacfix_kRcLevPtr[AR_ORDER];
/******************** GAIN Coefficient Tables ***********************/
/* cdf for Gain coefficient */
extern const uint16_t WebRtcIsacfix_kGainCdf[19];
/* representation levels for quantized Gain coefficient */
extern const int32_t WebRtcIsacfix_kGain2Lev[18];
/* squared quantization boundary levels for Gain coefficient */
extern const int32_t WebRtcIsacfix_kGain2Bound[19];
/* pointer to Gain cdf table */
extern const uint16_t* WebRtcIsacfix_kGainPtr[1];
/* Gain initial index for gain quantizer and cdf table search */
extern const uint16_t WebRtcIsacfix_kGainInitInd[1];
/************************* Cosine Tables ****************************/
/* Cosine table */
extern const int16_t WebRtcIsacfix_kCos[6][60];
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_ \
*/

View File

@ -0,0 +1,345 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* structs.h
*
* This header file contains all the structs used in the ISAC codec
*
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
/* Bitstream struct for decoder */
typedef struct Bitstreamstruct_dec {
uint16_t stream[INTERNAL_STREAM_SIZE_W16]; /* Array bytestream to decode */
uint32_t W_upper; /* Upper boundary of interval W */
uint32_t streamval;
uint16_t stream_index; /* Index to the current position in bytestream */
int16_t full; /* 0 - first byte in memory filled, second empty */
/* 1 - both bytes are empty (we just filled the previous memory) */
size_t stream_size; /* The size of stream in bytes. */
} Bitstr_dec;
/* Bitstream struct for encoder */
typedef struct Bitstreamstruct_enc {
uint16_t
stream[STREAM_MAXW16_60MS]; /* Vector for adding encoded bytestream */
uint32_t W_upper; /* Upper boundary of interval W */
uint32_t streamval;
uint16_t stream_index; /* Index to the current position in bytestream */
int16_t full; /* 0 - first byte in memory filled, second empty */
/* 1 - both bytes are empty (we just filled the previous memory) */
} Bitstr_enc;
typedef struct {
int16_t DataBufferLoQ0[WINLEN];
int16_t DataBufferHiQ0[WINLEN];
int32_t CorrBufLoQQ[ORDERLO + 1];
int32_t CorrBufHiQQ[ORDERHI + 1];
int16_t CorrBufLoQdom[ORDERLO + 1];
int16_t CorrBufHiQdom[ORDERHI + 1];
int32_t PreStateLoGQ15[ORDERLO + 1];
int32_t PreStateHiGQ15[ORDERHI + 1];
uint32_t OldEnergy;
} MaskFiltstr_enc;
typedef struct {
int16_t PostStateLoGQ0[ORDERLO + 1];
int16_t PostStateHiGQ0[ORDERHI + 1];
uint32_t OldEnergy;
} MaskFiltstr_dec;
typedef struct {
// state vectors for each of the two analysis filters
int32_t INSTAT1_fix[2 * (QORDER - 1)];
int32_t INSTAT2_fix[2 * (QORDER - 1)];
int16_t INLABUF1_fix[QLOOKAHEAD];
int16_t INLABUF2_fix[QLOOKAHEAD];
/* High pass filter */
int32_t HPstates_fix[HPORDER];
} PreFiltBankstr;
typedef struct {
// state vectors for each of the two analysis filters
int32_t STATE_0_LOWER_fix[2 * POSTQORDER];
int32_t STATE_0_UPPER_fix[2 * POSTQORDER];
/* High pass filter */
int32_t HPstates1_fix[HPORDER];
int32_t HPstates2_fix[HPORDER];
} PostFiltBankstr;
typedef struct {
/* data buffer for pitch filter */
int16_t ubufQQ[PITCH_BUFFSIZE];
/* low pass state vector */
int16_t ystateQQ[PITCH_DAMPORDER];
/* old lag and gain */
int16_t oldlagQ7;
int16_t oldgainQ12;
} PitchFiltstr;
typedef struct {
// for initial estimator
int16_t dec_buffer16[PITCH_CORR_LEN2 + PITCH_CORR_STEP2 + PITCH_MAX_LAG / 2 -
PITCH_FRAME_LEN / 2 + 2];
int32_t decimator_state32[2 * ALLPASSSECTIONS + 1];
int16_t inbuf[QLOOKAHEAD];
PitchFiltstr PFstr_wght;
PitchFiltstr PFstr;
} PitchAnalysisStruct;
typedef struct {
/* Parameters used in PLC to avoid re-computation */
/* --- residual signals --- */
int16_t prevPitchInvIn[FRAMESAMPLES / 2];
int16_t prevPitchInvOut[PITCH_MAX_LAG + 10]; // [FRAMESAMPLES/2]; save 90
int32_t prevHP[PITCH_MAX_LAG + 10]; // [FRAMESAMPLES/2]; save 90
int16_t decayCoeffPriodic; /* how much to suppress a sample */
int16_t decayCoeffNoise;
int16_t used; /* if PLC is used */
int16_t* lastPitchLP; // [FRAMESAMPLES/2]; saved 240;
/* --- LPC side info --- */
int16_t lofilt_coefQ15[ORDERLO];
int16_t hifilt_coefQ15[ORDERHI];
int32_t gain_lo_hiQ17[2];
/* --- LTP side info --- */
int16_t AvgPitchGain_Q12;
int16_t lastPitchGain_Q12;
int16_t lastPitchLag_Q7;
/* --- Add-overlap in recovery packet --- */
int16_t overlapLP[RECOVERY_OVERLAP]; // [FRAMESAMPLES/2]; saved 160
int16_t pitchCycles;
int16_t A;
int16_t B;
size_t pitchIndex;
size_t stretchLag;
int16_t* prevPitchLP; // [ FRAMESAMPLES/2 ]; saved 240
int16_t seed;
int16_t std;
} PLCstr;
/* An instance of this struct is kept together with the other iSAC structs */
typedef struct {
int16_t prevFrameSizeMs; /* Previous frame size (in ms) */
uint16_t prevRtpNumber; /* Previous RTP timestamp from received packet */
/* (in samples relative to the beginning) */
uint32_t prevSendTime; /* Send time for previous packet, from RTP header */
uint32_t prevArrivalTime; /* Arrival time for previous packet (in ms using
timeGetTime()) */
uint16_t prevRtpRate; /* rate of previous packet, derived from RTP timestamps
(in bits/s) */
uint32_t lastUpdate; /* Time since the last update of the Bottle Neck estimate
(in samples) */
uint32_t lastReduction; /* Time since the last reduction (in samples) */
int32_t countUpdates; /* How many times the estimate was updated in the
beginning */
/* The estimated bottle neck rate from there to here (in bits/s) */
uint32_t recBw;
uint32_t recBwInv;
uint32_t recBwAvg;
uint32_t recBwAvgQ;
uint32_t minBwInv;
uint32_t maxBwInv;
/* The estimated mean absolute jitter value, as seen on this side (in ms) */
int32_t recJitter;
int32_t recJitterShortTerm;
int32_t recJitterShortTermAbs;
int32_t recMaxDelay;
int32_t recMaxDelayAvgQ;
int16_t recHeaderRate; /* (assumed) bitrate for headers (bps) */
uint32_t sendBwAvg; /* The estimated bottle neck rate from here to there (in
bits/s) */
int32_t sendMaxDelayAvg; /* The estimated mean absolute jitter value, as seen
on the other side (in ms) */
int16_t countRecPkts; /* number of packets received since last update */
int16_t highSpeedRec; /* flag for marking that a high speed network has been
detected downstream */
/* number of consecutive pkts sent during which the bwe estimate has
remained at a value greater than the downstream threshold for determining
highspeed network */
int16_t countHighSpeedRec;
/* flag indicating bwe should not adjust down immediately for very late pckts
*/
int16_t inWaitPeriod;
/* variable holding the time of the start of a window of time when
bwe should not adjust down immediately for very late pckts */
uint32_t startWaitPeriod;
/* number of consecutive pkts sent during which the bwe estimate has
remained at a value greater than the upstream threshold for determining
highspeed network */
int16_t countHighSpeedSent;
/* flag indicated the desired number of packets over threshold rate have been
sent and bwe will assume the connection is over broadband network */
int16_t highSpeedSend;
IsacBandwidthInfo external_bw_info;
} BwEstimatorstr;
typedef struct {
/* boolean, flags if previous packet exceeded B.N. */
int16_t PrevExceed;
/* ms */
int16_t ExceedAgo;
/* packets left to send in current burst */
int16_t BurstCounter;
/* packets */
int16_t InitCounter;
/* ms remaining in buffer when next packet will be sent */
int16_t StillBuffered;
} RateModel;
/* The following struct is used to store data from encoding, to make it
fast and easy to construct a new bitstream with a different Bandwidth
estimate. All values (except framelength and minBytes) are double size to
handle 60 ms of data.
*/
typedef struct {
/* Used to keep track of if it is first or second part of 60 msec packet */
int startIdx;
/* Frame length in samples */
int16_t framelength;
/* Pitch Gain */
int16_t pitchGain_index[2];
/* Pitch Lag */
int32_t meanGain[2];
int16_t pitchIndex[PITCH_SUBFRAMES * 2];
/* LPC */
int32_t LPCcoeffs_g[12 * 2]; /* KLT_ORDER_GAIN = 12 */
int16_t LPCindex_s[108 * 2]; /* KLT_ORDER_SHAPE = 108 */
int16_t LPCindex_g[12 * 2]; /* KLT_ORDER_GAIN = 12 */
/* Encode Spec */
int16_t fre[FRAMESAMPLES];
int16_t fim[FRAMESAMPLES];
int16_t AvgPitchGain[2];
/* Used in adaptive mode only */
int minBytes;
} IsacSaveEncoderData;
typedef struct {
Bitstr_enc bitstr_obj;
MaskFiltstr_enc maskfiltstr_obj;
PreFiltBankstr prefiltbankstr_obj;
PitchFiltstr pitchfiltstr_obj;
PitchAnalysisStruct pitchanalysisstr_obj;
RateModel rate_data_obj;
int16_t buffer_index;
int16_t current_framesamples;
int16_t data_buffer_fix[FRAMESAMPLES]; // the size was MAX_FRAMESAMPLES
int16_t frame_nb;
int16_t BottleNeck;
int16_t MaxDelay;
int16_t new_framelength;
int16_t s2nr;
uint16_t MaxBits;
int16_t bitstr_seed;
IsacSaveEncoderData* SaveEnc_ptr;
int16_t payloadLimitBytes30; /* Maximum allowed number of bytes for a 30 msec
packet */
int16_t payloadLimitBytes60; /* Maximum allowed number of bytes for a 60 msec
packet */
int16_t maxPayloadBytes; /* Maximum allowed number of bytes for both 30 and 60
msec packets */
int16_t maxRateInBytes; /* Maximum allowed rate in bytes per 30 msec packet */
int16_t enforceFrameSize; /* If set iSAC will never change packet size */
} IsacFixEncoderInstance;
typedef struct {
Bitstr_dec bitstr_obj;
MaskFiltstr_dec maskfiltstr_obj;
PostFiltBankstr postfiltbankstr_obj;
PitchFiltstr pitchfiltstr_obj;
PLCstr plcstr_obj; /* TS; for packet loss concealment */
} IsacFixDecoderInstance;
typedef struct {
IsacFixEncoderInstance ISACenc_obj;
IsacFixDecoderInstance ISACdec_obj;
BwEstimatorstr bwestimator_obj;
int16_t CodingMode; /* 0 = adaptive; 1 = instantaneous */
int16_t errorcode;
int16_t initflag; /* 0 = nothing initiated; 1 = encoder or decoder */
/* not initiated; 2 = all initiated */
} ISACFIX_SubStruct;
typedef struct {
int32_t lpcGains[12]; /* 6 lower-band & 6 upper-band; we may need to double it
for 60 ms */
/* */
uint32_t W_upper; /* Upper boundary of interval W */
uint32_t streamval;
uint16_t stream_index; /* Index to the current position in bytestream */
int16_t full; /* 0 - first byte in memory filled, second empty */
/* 1 - both bytes are empty (we just filled the previous memory) */
uint16_t beforeLastWord;
uint16_t lastWord;
} transcode_obj;
// Bitstr_enc myBitStr;
#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_ */

View File

@ -0,0 +1,214 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* WebRtcIsacfix_kTransform.c
*
* Transform functions
*
*/
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "modules/third_party/fft/fft.h"
/* Tables are defined in transform_tables.c file or ARM assembly files. */
/* Cosine table 1 in Q14 */
extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
/* Sine table 1 in Q14 */
extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
/* Sine table 2 in Q14 */
extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
void WebRtcIsacfix_Time2SpecC(int16_t *inre1Q9,
int16_t *inre2Q9,
int16_t *outreQ7,
int16_t *outimQ7)
{
int k;
int32_t tmpreQ16[FRAMESAMPLES/2], tmpimQ16[FRAMESAMPLES/2];
int16_t tmp1rQ14, tmp1iQ14;
int32_t xrQ16, xiQ16, yrQ16, yiQ16;
int32_t v1Q16, v2Q16;
int16_t factQ19, sh;
/* Multiply with complex exponentials and combine into one complex vector */
factQ19 = 16921; // 0.5/sqrt(240) in Q19 is round(.5/sqrt(240)*(2^19)) = 16921
for (k = 0; k < FRAMESAMPLES/2; k++) {
tmp1rQ14 = WebRtcIsacfix_kCosTab1[k];
tmp1iQ14 = WebRtcIsacfix_kSinTab1[k];
xrQ16 = (tmp1rQ14 * inre1Q9[k] + tmp1iQ14 * inre2Q9[k]) >> 7;
xiQ16 = (tmp1rQ14 * inre2Q9[k] - tmp1iQ14 * inre1Q9[k]) >> 7;
// Q-domains below: (Q16*Q19>>16)>>3 = Q16
tmpreQ16[k] = (WEBRTC_SPL_MUL_16_32_RSFT16(factQ19, xrQ16) + 4) >> 3;
tmpimQ16[k] = (WEBRTC_SPL_MUL_16_32_RSFT16(factQ19, xiQ16) + 4) >> 3;
}
xrQ16 = WebRtcSpl_MaxAbsValueW32(tmpreQ16, FRAMESAMPLES/2);
yrQ16 = WebRtcSpl_MaxAbsValueW32(tmpimQ16, FRAMESAMPLES/2);
if (yrQ16>xrQ16) {
xrQ16 = yrQ16;
}
sh = WebRtcSpl_NormW32(xrQ16);
sh = sh-24; //if sh becomes >=0, then we should shift sh steps to the left, and the domain will become Q(16+sh)
//if sh becomes <0, then we should shift -sh steps to the right, and the domain will become Q(16+sh)
//"Fastest" vectors
if (sh>=0) {
for (k=0; k<FRAMESAMPLES/2; k++) {
inre1Q9[k] = (int16_t)(tmpreQ16[k] << sh); // Q(16+sh)
inre2Q9[k] = (int16_t)(tmpimQ16[k] << sh); // Q(16+sh)
}
} else {
int32_t round = 1 << (-sh - 1);
for (k=0; k<FRAMESAMPLES/2; k++) {
inre1Q9[k] = (int16_t)((tmpreQ16[k] + round) >> -sh); // Q(16+sh)
inre2Q9[k] = (int16_t)((tmpimQ16[k] + round) >> -sh); // Q(16+sh)
}
}
/* Get DFT */
WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1); // real call
//"Fastest" vectors
if (sh>=0) {
for (k=0; k<FRAMESAMPLES/2; k++) {
tmpreQ16[k] = inre1Q9[k] >> sh; // Q(16+sh) -> Q16
tmpimQ16[k] = inre2Q9[k] >> sh; // Q(16+sh) -> Q16
}
} else {
for (k=0; k<FRAMESAMPLES/2; k++) {
tmpreQ16[k] = inre1Q9[k] << -sh; // Q(16+sh) -> Q16
tmpimQ16[k] = inre2Q9[k] << -sh; // Q(16+sh) -> Q16
}
}
/* Use symmetry to separate into two complex vectors and center frames in time around zero */
for (k = 0; k < FRAMESAMPLES/4; k++) {
xrQ16 = tmpreQ16[k] + tmpreQ16[FRAMESAMPLES/2 - 1 - k];
yiQ16 = -tmpreQ16[k] + tmpreQ16[FRAMESAMPLES/2 - 1 - k];
xiQ16 = tmpimQ16[k] - tmpimQ16[FRAMESAMPLES/2 - 1 - k];
yrQ16 = tmpimQ16[k] + tmpimQ16[FRAMESAMPLES/2 - 1 - k];
tmp1rQ14 = -WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 1 - k];
tmp1iQ14 = WebRtcIsacfix_kSinTab2[k];
v1Q16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, xrQ16) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, xiQ16);
v2Q16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, xrQ16) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, xiQ16);
outreQ7[k] = (int16_t)(v1Q16 >> 9);
outimQ7[k] = (int16_t)(v2Q16 >> 9);
v1Q16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, yrQ16) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, yiQ16);
v2Q16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, yrQ16) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, yiQ16);
// CalcLrIntQ(v1Q16, 9);
outreQ7[FRAMESAMPLES / 2 - 1 - k] = (int16_t)(v1Q16 >> 9);
// CalcLrIntQ(v2Q16, 9);
outimQ7[FRAMESAMPLES / 2 - 1 - k] = (int16_t)(v2Q16 >> 9);
}
}
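/*
 * Note on the normalization above, added for readability: WebRtcSpl_NormW32()
 * returns the number of left shifts needed to place the most significant bit
 * of a 32-bit value at bit 30. Shifting by sh = NormW32(max) - 24 therefore
 * scales the data so that all magnitudes are below 2^7 before the int16_t
 * cast, which appears to leave headroom for bit growth inside the 240-point
 * fixed-point FFT (240 < 2^8), keeping intermediate results within 16 bits.
 */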
void WebRtcIsacfix_Spec2TimeC(int16_t *inreQ7, int16_t *inimQ7, int32_t *outre1Q16, int32_t *outre2Q16)
{
int k;
int16_t tmp1rQ14, tmp1iQ14;
int32_t xrQ16, xiQ16, yrQ16, yiQ16;
int32_t tmpInRe, tmpInIm, tmpInRe2, tmpInIm2;
int16_t factQ11;
int16_t sh;
for (k = 0; k < FRAMESAMPLES/4; k++) {
/* Move zero in time to beginning of frames */
tmp1rQ14 = -WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 1 - k];
tmp1iQ14 = WebRtcIsacfix_kSinTab2[k];
tmpInRe = inreQ7[k] * (1 << 9); // Q7 -> Q16
tmpInIm = inimQ7[k] * (1 << 9); // Q7 -> Q16
tmpInRe2 = inreQ7[FRAMESAMPLES / 2 - 1 - k] * (1 << 9); // Q7 -> Q16
tmpInIm2 = inimQ7[FRAMESAMPLES / 2 - 1 - k] * (1 << 9); // Q7 -> Q16
xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInRe) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInIm);
xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInIm) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInRe);
yrQ16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInIm2) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInRe2);
yiQ16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInRe2) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInIm2);
/* Combine into one vector, z = x + j * y */
outre1Q16[k] = xrQ16 - yiQ16;
outre1Q16[FRAMESAMPLES/2 - 1 - k] = xrQ16 + yiQ16;
outre2Q16[k] = xiQ16 + yrQ16;
outre2Q16[FRAMESAMPLES/2 - 1 - k] = -xiQ16 + yrQ16;
}
/* Get IDFT */
tmpInRe = WebRtcSpl_MaxAbsValueW32(outre1Q16, 240);
tmpInIm = WebRtcSpl_MaxAbsValueW32(outre2Q16, 240);
if (tmpInIm>tmpInRe) {
tmpInRe = tmpInIm;
}
sh = WebRtcSpl_NormW32(tmpInRe);
sh = sh-24; //if sh becomes >=0, then we should shift sh steps to the left, and the domain will become Q(16+sh)
//if sh becomes <0, then we should shift -sh steps to the right, and the domain will become Q(16+sh)
//"Fastest" vectors
if (sh>=0) {
for (k=0; k<240; k++) {
inreQ7[k] = (int16_t)(outre1Q16[k] << sh); // Q(16+sh)
inimQ7[k] = (int16_t)(outre2Q16[k] << sh); // Q(16+sh)
}
} else {
int32_t round = 1 << (-sh - 1);
for (k=0; k<240; k++) {
inreQ7[k] = (int16_t)((outre1Q16[k] + round) >> -sh); // Q(16+sh)
inimQ7[k] = (int16_t)((outre2Q16[k] + round) >> -sh); // Q(16+sh)
}
}
WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1); // real call
//"Fastest" vectors
if (sh>=0) {
for (k=0; k<240; k++) {
outre1Q16[k] = inreQ7[k] >> sh; // Q(16+sh) -> Q16
outre2Q16[k] = inimQ7[k] >> sh; // Q(16+sh) -> Q16
}
} else {
for (k=0; k<240; k++) {
outre1Q16[k] = inreQ7[k] * (1 << -sh); // Q(16+sh) -> Q16
outre2Q16[k] = inimQ7[k] * (1 << -sh); // Q(16+sh) -> Q16
}
}
/* Divide through by the normalizing constant: */
/* scale all values with 1/240, i.e. with 273 in Q16 */
/* 273/65536 ~= 0.0041656 */
/* 1/240 ~= 0.0041666 */
for (k=0; k<240; k++) {
outre1Q16[k] = WEBRTC_SPL_MUL_16_32_RSFT16(273, outre1Q16[k]);
outre2Q16[k] = WEBRTC_SPL_MUL_16_32_RSFT16(273, outre2Q16[k]);
}
/* Demodulate and separate */
factQ11 = 31727; // sqrt(240) in Q11 is round(15.49193338482967*2048) = 31727
for (k = 0; k < FRAMESAMPLES/2; k++) {
tmp1rQ14 = WebRtcIsacfix_kCosTab1[k];
tmp1iQ14 = WebRtcIsacfix_kSinTab1[k];
xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, outre1Q16[k]) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, outre2Q16[k]);
xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, outre2Q16[k]) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, outre1Q16[k]);
xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT11(factQ11, xrQ16);
xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT11(factQ11, xiQ16);
outre2Q16[k] = xiQ16;
outre1Q16[k] = xrQ16;
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,479 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
// Tables are defined in transform_tables.c file.
// Cosine table 1 in Q14.
extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
// Sine table 1 in Q14.
extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
// Sine table 2 in Q14.
extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
static inline int32_t ComplexMulAndFindMaxNeon(int16_t* inre1Q9,
int16_t* inre2Q9,
int32_t* outreQ16,
int32_t* outimQ16) {
int k;
const int16_t* kCosTab = &WebRtcIsacfix_kCosTab1[0];
const int16_t* kSinTab = &WebRtcIsacfix_kSinTab1[0];
// 0.5 / sqrt(240) in Q19 is round((.5 / sqrt(240)) * (2^19)) = 16921.
// Use "16921 << 5" and vqdmulh, instead of ">> 26" as in the C code.
int32_t fact = 16921 << 5;
int32x4_t factq = vdupq_n_s32(fact);
uint32x4_t max_r = vdupq_n_u32(0);
uint32x4_t max_i = vdupq_n_u32(0);
for (k = 0; k < FRAMESAMPLES/2; k += 8) {
int16x8_t tmpr = vld1q_s16(kCosTab);
int16x8_t tmpi = vld1q_s16(kSinTab);
int16x8_t inre1 = vld1q_s16(inre1Q9);
int16x8_t inre2 = vld1q_s16(inre2Q9);
kCosTab += 8;
kSinTab += 8;
inre1Q9 += 8;
inre2Q9 += 8;
// Use ">> 26", instead of ">> 7", ">> 16" and then ">> 3" as in the C code.
int32x4_t tmp0 = vmull_s16(vget_low_s16(tmpr), vget_low_s16(inre1));
int32x4_t tmp1 = vmull_s16(vget_low_s16(tmpr), vget_low_s16(inre2));
tmp0 = vmlal_s16(tmp0, vget_low_s16(tmpi), vget_low_s16(inre2));
tmp1 = vmlsl_s16(tmp1, vget_low_s16(tmpi), vget_low_s16(inre1));
#if defined(WEBRTC_ARCH_ARM64)
int32x4_t tmp2 = vmull_high_s16(tmpr, inre1);
int32x4_t tmp3 = vmull_high_s16(tmpr, inre2);
tmp2 = vmlal_high_s16(tmp2, tmpi, inre2);
tmp3 = vmlsl_high_s16(tmp3, tmpi, inre1);
#else
int32x4_t tmp2 = vmull_s16(vget_high_s16(tmpr), vget_high_s16(inre1));
int32x4_t tmp3 = vmull_s16(vget_high_s16(tmpr), vget_high_s16(inre2));
tmp2 = vmlal_s16(tmp2, vget_high_s16(tmpi), vget_high_s16(inre2));
tmp3 = vmlsl_s16(tmp3, vget_high_s16(tmpi), vget_high_s16(inre1));
#endif
int32x4_t outr_0 = vqdmulhq_s32(tmp0, factq);
int32x4_t outr_1 = vqdmulhq_s32(tmp2, factq);
int32x4_t outi_0 = vqdmulhq_s32(tmp1, factq);
int32x4_t outi_1 = vqdmulhq_s32(tmp3, factq);
vst1q_s32(outreQ16, outr_0);
outreQ16 += 4;
vst1q_s32(outreQ16, outr_1);
outreQ16 += 4;
vst1q_s32(outimQ16, outi_0);
outimQ16 += 4;
vst1q_s32(outimQ16, outi_1);
outimQ16 += 4;
// Find the absolute maximum in the vectors.
tmp0 = vabsq_s32(outr_0);
tmp1 = vabsq_s32(outr_1);
tmp2 = vabsq_s32(outi_0);
tmp3 = vabsq_s32(outi_1);
// vabs doesn't change the value of 0x80000000.
// Use u32 so we don't lose the value 0x80000000.
max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp0));
max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp2));
max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp1));
max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp3));
}
max_r = vmaxq_u32(max_r, max_i);
#if defined(WEBRTC_ARCH_ARM64)
uint32_t maximum = vmaxvq_u32(max_r);
#else
uint32x2_t max32x2_r = vmax_u32(vget_low_u32(max_r), vget_high_u32(max_r));
max32x2_r = vpmax_u32(max32x2_r, max32x2_r);
uint32_t maximum = vget_lane_u32(max32x2_r, 0);
#endif
return (int32_t)maximum;
}
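/*
 * Note on the vqdmulh trick above, added for readability: vqdmulhq_s32(a, b)
 * returns the high half of 2 * a * b, i.e. (a * b * 2) >> 32. With
 * b = 16921 << 5 this evaluates to (a * 16921 * 64) >> 32 = (a * 16921) >> 26,
 * the same net shift as the ">> 7", ">> 16" and ">> 3" steps (7 + 16 + 3 = 26)
 * of the generic C implementation.
 */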
static inline void PreShiftW32toW16Neon(int32_t* inre,
int32_t* inim,
int16_t* outre,
int16_t* outim,
int32_t sh) {
int k;
int32x4_t sh32x4 = vdupq_n_s32(sh);
for (k = 0; k < FRAMESAMPLES/2; k += 16) {
int32x4x4_t inre32x4x4 = vld4q_s32(inre);
int32x4x4_t inim32x4x4 = vld4q_s32(inim);
inre += 16;
inim += 16;
inre32x4x4.val[0] = vrshlq_s32(inre32x4x4.val[0], sh32x4);
inre32x4x4.val[1] = vrshlq_s32(inre32x4x4.val[1], sh32x4);
inre32x4x4.val[2] = vrshlq_s32(inre32x4x4.val[2], sh32x4);
inre32x4x4.val[3] = vrshlq_s32(inre32x4x4.val[3], sh32x4);
inim32x4x4.val[0] = vrshlq_s32(inim32x4x4.val[0], sh32x4);
inim32x4x4.val[1] = vrshlq_s32(inim32x4x4.val[1], sh32x4);
inim32x4x4.val[2] = vrshlq_s32(inim32x4x4.val[2], sh32x4);
inim32x4x4.val[3] = vrshlq_s32(inim32x4x4.val[3], sh32x4);
int16x4x4_t outre16x4x4;
int16x4x4_t outim16x4x4;
outre16x4x4.val[0] = vmovn_s32(inre32x4x4.val[0]);
outre16x4x4.val[1] = vmovn_s32(inre32x4x4.val[1]);
outre16x4x4.val[2] = vmovn_s32(inre32x4x4.val[2]);
outre16x4x4.val[3] = vmovn_s32(inre32x4x4.val[3]);
outim16x4x4.val[0] = vmovn_s32(inim32x4x4.val[0]);
outim16x4x4.val[1] = vmovn_s32(inim32x4x4.val[1]);
outim16x4x4.val[2] = vmovn_s32(inim32x4x4.val[2]);
outim16x4x4.val[3] = vmovn_s32(inim32x4x4.val[3]);
vst4_s16(outre, outre16x4x4);
vst4_s16(outim, outim16x4x4);
outre += 16;
outim += 16;
}
}
static inline void PostShiftAndSeparateNeon(int16_t* inre,
int16_t* inim,
int16_t* outre,
int16_t* outim,
int32_t sh) {
int k;
int16_t* inre1 = inre;
int16_t* inre2 = &inre[FRAMESAMPLES/2 - 4];
int16_t* inim1 = inim;
int16_t* inim2 = &inim[FRAMESAMPLES/2 - 4];
int16_t* outre1 = outre;
int16_t* outre2 = &outre[FRAMESAMPLES/2 - 4];
int16_t* outim1 = outim;
int16_t* outim2 = &outim[FRAMESAMPLES/2 - 4];
const int16_t* kSinTab1 = &WebRtcIsacfix_kSinTab2[0];
const int16_t* kSinTab2 = &WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 -4];
// By vshl, we effectively did "<< (-sh - 23)", instead of "<< (-sh)",
// ">> 14" and then ">> 9" as in the C code.
int32x4_t shift = vdupq_n_s32(-sh - 23);
for (k = 0; k < FRAMESAMPLES/4; k += 4) {
int16x4_t tmpi = vld1_s16(kSinTab1);
kSinTab1 += 4;
int16x4_t tmpr = vld1_s16(kSinTab2);
kSinTab2 -= 4;
int16x4_t inre_0 = vld1_s16(inre1);
inre1 += 4;
int16x4_t inre_1 = vld1_s16(inre2);
inre2 -= 4;
int16x4_t inim_0 = vld1_s16(inim1);
inim1 += 4;
int16x4_t inim_1 = vld1_s16(inim2);
inim2 -= 4;
tmpr = vneg_s16(tmpr);
inre_1 = vrev64_s16(inre_1);
inim_1 = vrev64_s16(inim_1);
tmpr = vrev64_s16(tmpr);
int16x4_t xr = vqadd_s16(inre_0, inre_1);
int16x4_t xi = vqsub_s16(inim_0, inim_1);
int16x4_t yr = vqadd_s16(inim_0, inim_1);
int16x4_t yi = vqsub_s16(inre_1, inre_0);
int32x4_t outr0 = vmull_s16(tmpr, xr);
int32x4_t outi0 = vmull_s16(tmpi, xr);
int32x4_t outr1 = vmull_s16(tmpi, yr);
int32x4_t outi1 = vmull_s16(tmpi, yi);
outr0 = vmlsl_s16(outr0, tmpi, xi);
outi0 = vmlal_s16(outi0, tmpr, xi);
outr1 = vmlal_s16(outr1, tmpr, yi);
outi1 = vmlsl_s16(outi1, tmpr, yr);
outr0 = vshlq_s32(outr0, shift);
outi0 = vshlq_s32(outi0, shift);
outr1 = vshlq_s32(outr1, shift);
outi1 = vshlq_s32(outi1, shift);
outr1 = vnegq_s32(outr1);
int16x4_t outre_0 = vmovn_s32(outr0);
int16x4_t outim_0 = vmovn_s32(outi0);
int16x4_t outre_1 = vmovn_s32(outr1);
int16x4_t outim_1 = vmovn_s32(outi1);
outre_1 = vrev64_s16(outre_1);
outim_1 = vrev64_s16(outim_1);
vst1_s16(outre1, outre_0);
outre1 += 4;
vst1_s16(outim1, outim_0);
outim1 += 4;
vst1_s16(outre2, outre_1);
outre2 -= 4;
vst1_s16(outim2, outim_1);
outim2 -= 4;
}
}
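/*
 * Note on the combined shift above, added for readability: the product of a
 * Q14 table value and a Q(16 + sh) FFT output is in Q(30 + sh), and the
 * outputs are written in Q7, so a net right shift by (sh + 23) is needed.
 * vshlq_s32 with a shift count of (-sh - 23) (negative counts shift right)
 * performs exactly that in a single instruction.
 */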
void WebRtcIsacfix_Time2SpecNeon(int16_t* inre1Q9,
int16_t* inre2Q9,
int16_t* outreQ7,
int16_t* outimQ7) {
int32_t tmpreQ16[FRAMESAMPLES/2], tmpimQ16[FRAMESAMPLES/2];
int32_t max;
int32_t sh;
// Multiply with complex exponentials and combine into one complex vector.
// And find the maximum.
max = ComplexMulAndFindMaxNeon(inre1Q9, inre2Q9, tmpreQ16, tmpimQ16);
sh = (int32_t)WebRtcSpl_NormW32(max);
sh = sh - 24;
// If sh becomes >= 0, then we should shift sh steps to the left,
// and the domain will become Q(16 + sh).
// If sh becomes < 0, then we should shift -sh steps to the right,
// and the domain will become Q(16 + sh).
PreShiftW32toW16Neon(tmpreQ16, tmpimQ16, inre1Q9, inre2Q9, sh);
// Get DFT.
WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1);
// If sh >= 0, shift sh steps to the right,
// If sh < 0, shift -sh steps to the left.
// Use symmetry to separate into two complex vectors
// and center frames in time around zero.
PostShiftAndSeparateNeon(inre1Q9, inre2Q9, outreQ7, outimQ7, sh);
}
static inline int32_t TransformAndFindMaxNeon(int16_t* inre,
int16_t* inim,
int32_t* outre,
int32_t* outim) {
int k;
int16_t* inre1 = inre;
int16_t* inre2 = &inre[FRAMESAMPLES/2 - 4];
int16_t* inim1 = inim;
int16_t* inim2 = &inim[FRAMESAMPLES/2 - 4];
int32_t* outre1 = outre;
int32_t* outre2 = &outre[FRAMESAMPLES/2 - 4];
int32_t* outim1 = outim;
int32_t* outim2 = &outim[FRAMESAMPLES/2 - 4];
const int16_t* kSinTab1 = &WebRtcIsacfix_kSinTab2[0];
const int16_t* kSinTab2 = &WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 4];
uint32x4_t max_r = vdupq_n_u32(0);
uint32x4_t max_i = vdupq_n_u32(0);
// Use ">> 5", instead of "<< 9" and then ">> 14" as in the C code.
for (k = 0; k < FRAMESAMPLES/4; k += 4) {
int16x4_t tmpi = vld1_s16(kSinTab1);
kSinTab1 += 4;
int16x4_t tmpr = vld1_s16(kSinTab2);
kSinTab2 -= 4;
int16x4_t inre_0 = vld1_s16(inre1);
inre1 += 4;
int16x4_t inre_1 = vld1_s16(inre2);
inre2 -= 4;
int16x4_t inim_0 = vld1_s16(inim1);
inim1 += 4;
int16x4_t inim_1 = vld1_s16(inim2);
inim2 -= 4;
tmpr = vneg_s16(tmpr);
inre_1 = vrev64_s16(inre_1);
inim_1 = vrev64_s16(inim_1);
tmpr = vrev64_s16(tmpr);
int32x4_t xr = vmull_s16(tmpr, inre_0);
int32x4_t xi = vmull_s16(tmpr, inim_0);
int32x4_t yr = vmull_s16(tmpr, inim_1);
int32x4_t yi = vmull_s16(tmpi, inim_1);
xr = vmlal_s16(xr, tmpi, inim_0);
xi = vmlsl_s16(xi, tmpi, inre_0);
yr = vmlal_s16(yr, tmpi, inre_1);
yi = vmlsl_s16(yi, tmpr, inre_1);
yr = vnegq_s32(yr);
xr = vshrq_n_s32(xr, 5);
xi = vshrq_n_s32(xi, 5);
yr = vshrq_n_s32(yr, 5);
yi = vshrq_n_s32(yi, 5);
int32x4_t outr0 = vsubq_s32(xr, yi);
int32x4_t outr1 = vaddq_s32(xr, yi);
int32x4_t outi0 = vaddq_s32(xi, yr);
int32x4_t outi1 = vsubq_s32(yr, xi);
// Find the absolute maximum in the vectors.
int32x4_t tmp0 = vabsq_s32(outr0);
int32x4_t tmp1 = vabsq_s32(outr1);
int32x4_t tmp2 = vabsq_s32(outi0);
int32x4_t tmp3 = vabsq_s32(outi1);
// vabs doesn't change the value of 0x80000000.
// Use u32 so we don't lose the value 0x80000000.
max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp0));
max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp2));
max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp1));
max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp3));
// Store the vectors.
outr1 = vrev64q_s32(outr1);
outi1 = vrev64q_s32(outi1);
int32x4_t outr_1 = vcombine_s32(vget_high_s32(outr1), vget_low_s32(outr1));
int32x4_t outi_1 = vcombine_s32(vget_high_s32(outi1), vget_low_s32(outi1));
vst1q_s32(outre1, outr0);
outre1 += 4;
vst1q_s32(outim1, outi0);
outim1 += 4;
vst1q_s32(outre2, outr_1);
outre2 -= 4;
vst1q_s32(outim2, outi_1);
outim2 -= 4;
}
max_r = vmaxq_u32(max_r, max_i);
#if defined(WEBRTC_ARCH_ARM64)
uint32_t maximum = vmaxvq_u32(max_r);
#else
uint32x2_t max32x2_r = vmax_u32(vget_low_u32(max_r), vget_high_u32(max_r));
max32x2_r = vpmax_u32(max32x2_r, max32x2_r);
uint32_t maximum = vget_lane_u32(max32x2_r, 0);
#endif
return (int32_t)maximum;
}
static inline void PostShiftAndDivideAndDemodulateNeon(int16_t* inre,
int16_t* inim,
int32_t* outre1,
int32_t* outre2,
int32_t sh) {
int k;
int16_t* p_inre = inre;
int16_t* p_inim = inim;
int32_t* p_outre1 = outre1;
int32_t* p_outre2 = outre2;
const int16_t* kCosTab = &WebRtcIsacfix_kCosTab1[0];
const int16_t* kSinTab = &WebRtcIsacfix_kSinTab1[0];
int32x4_t shift = vdupq_n_s32(-sh - 16);
// Divide through by the normalizing constant:
// scale all values with 1/240, i.e. with 273 in Q16.
// 273/65536 ~= 0.0041656
// 1/240 ~= 0.0041666
int16x8_t scale = vdupq_n_s16(273);
// Sqrt(240) in Q11 is round(15.49193338482967 * 2048) = 31727.
int factQ19 = 31727 << 16;
int32x4_t fact = vdupq_n_s32(factQ19);
for (k = 0; k < FRAMESAMPLES/2; k += 8) {
int16x8_t inre16x8 = vld1q_s16(p_inre);
int16x8_t inim16x8 = vld1q_s16(p_inim);
p_inre += 8;
p_inim += 8;
int16x8_t tmpr = vld1q_s16(kCosTab);
int16x8_t tmpi = vld1q_s16(kSinTab);
kCosTab += 8;
kSinTab += 8;
// By vshl and vmull, we effectively did "<< (-sh - 16)",
// instead of "<< (-sh)" and ">> 16" as in the C code.
int32x4_t outre1_0 = vmull_s16(vget_low_s16(inre16x8), vget_low_s16(scale));
int32x4_t outre2_0 = vmull_s16(vget_low_s16(inim16x8), vget_low_s16(scale));
#if defined(WEBRTC_ARCH_ARM64)
int32x4_t outre1_1 = vmull_high_s16(inre16x8, scale);
int32x4_t outre2_1 = vmull_high_s16(inim16x8, scale);
#else
int32x4_t outre1_1 = vmull_s16(vget_high_s16(inre16x8),
vget_high_s16(scale));
int32x4_t outre2_1 = vmull_s16(vget_high_s16(inim16x8),
vget_high_s16(scale));
#endif
outre1_0 = vshlq_s32(outre1_0, shift);
outre1_1 = vshlq_s32(outre1_1, shift);
outre2_0 = vshlq_s32(outre2_0, shift);
outre2_1 = vshlq_s32(outre2_1, shift);
// Demodulate and separate.
int32x4_t tmpr_0 = vmovl_s16(vget_low_s16(tmpr));
int32x4_t tmpi_0 = vmovl_s16(vget_low_s16(tmpi));
#if defined(WEBRTC_ARCH_ARM64)
int32x4_t tmpr_1 = vmovl_high_s16(tmpr);
int32x4_t tmpi_1 = vmovl_high_s16(tmpi);
#else
int32x4_t tmpr_1 = vmovl_s16(vget_high_s16(tmpr));
int32x4_t tmpi_1 = vmovl_s16(vget_high_s16(tmpi));
#endif
int64x2_t xr0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre1_0));
int64x2_t xi0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre2_0));
int64x2_t xr2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre1_1));
int64x2_t xi2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre2_1));
xr0 = vmlsl_s32(xr0, vget_low_s32(tmpi_0), vget_low_s32(outre2_0));
xi0 = vmlal_s32(xi0, vget_low_s32(tmpi_0), vget_low_s32(outre1_0));
xr2 = vmlsl_s32(xr2, vget_low_s32(tmpi_1), vget_low_s32(outre2_1));
xi2 = vmlal_s32(xi2, vget_low_s32(tmpi_1), vget_low_s32(outre1_1));
#if defined(WEBRTC_ARCH_ARM64)
int64x2_t xr1 = vmull_high_s32(tmpr_0, outre1_0);
int64x2_t xi1 = vmull_high_s32(tmpr_0, outre2_0);
int64x2_t xr3 = vmull_high_s32(tmpr_1, outre1_1);
int64x2_t xi3 = vmull_high_s32(tmpr_1, outre2_1);
xr1 = vmlsl_high_s32(xr1, tmpi_0, outre2_0);
xi1 = vmlal_high_s32(xi1, tmpi_0, outre1_0);
xr3 = vmlsl_high_s32(xr3, tmpi_1, outre2_1);
xi3 = vmlal_high_s32(xi3, tmpi_1, outre1_1);
#else
int64x2_t xr1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre1_0));
int64x2_t xi1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre2_0));
int64x2_t xr3 = vmull_s32(vget_high_s32(tmpr_1), vget_high_s32(outre1_1));
int64x2_t xi3 = vmull_s32(vget_high_s32(tmpr_1), vget_high_s32(outre2_1));
xr1 = vmlsl_s32(xr1, vget_high_s32(tmpi_0), vget_high_s32(outre2_0));
xi1 = vmlal_s32(xi1, vget_high_s32(tmpi_0), vget_high_s32(outre1_0));
xr3 = vmlsl_s32(xr3, vget_high_s32(tmpi_1), vget_high_s32(outre2_1));
xi3 = vmlal_s32(xi3, vget_high_s32(tmpi_1), vget_high_s32(outre1_1));
#endif
outre1_0 = vcombine_s32(vrshrn_n_s64(xr0, 10), vrshrn_n_s64(xr1, 10));
outre2_0 = vcombine_s32(vrshrn_n_s64(xi0, 10), vrshrn_n_s64(xi1, 10));
outre1_1 = vcombine_s32(vrshrn_n_s64(xr2, 10), vrshrn_n_s64(xr3, 10));
outre2_1 = vcombine_s32(vrshrn_n_s64(xi2, 10), vrshrn_n_s64(xi3, 10));
outre1_0 = vqdmulhq_s32(outre1_0, fact);
outre2_0 = vqdmulhq_s32(outre2_0, fact);
outre1_1 = vqdmulhq_s32(outre1_1, fact);
outre2_1 = vqdmulhq_s32(outre2_1, fact);
vst1q_s32(p_outre1, outre1_0);
p_outre1 += 4;
vst1q_s32(p_outre1, outre1_1);
p_outre1 += 4;
vst1q_s32(p_outre2, outre2_0);
p_outre2 += 4;
vst1q_s32(p_outre2, outre2_1);
p_outre2 += 4;
}
}
void WebRtcIsacfix_Spec2TimeNeon(int16_t* inreQ7,
int16_t* inimQ7,
int32_t* outre1Q16,
int32_t* outre2Q16) {
int32_t max;
int32_t sh;
max = TransformAndFindMaxNeon(inreQ7, inimQ7, outre1Q16, outre2Q16);
sh = (int32_t)WebRtcSpl_NormW32(max);
sh = sh - 24;
// If sh becomes >= 0, then we should shift sh steps to the left,
// and the domain will become Q(16 + sh).
// If sh becomes < 0, then we should shift -sh steps to the right,
// and the domain will become Q(16 + sh).
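// Hypothetical example (assuming WebRtcSpl_NormW32() returns the number of
// left shifts available before overflow): for max == 0x00012345 it returns
// 14, so sh == -10, the vectors are shifted 10 steps to the right, and the
// domain becomes Q6.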
// "Fastest" vectors.
PreShiftW32toW16Neon(outre1Q16, outre2Q16, inreQ7, inimQ7, sh);
// Get IDFT.
WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1);
PostShiftAndDivideAndDemodulateNeon(inreQ7, inimQ7, outre1Q16, outre2Q16, sh);
}

View File

@ -0,0 +1,110 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* This file contains trigonometric functions look-up tables used in
* transform functions WebRtcIsacfix_Time2Spec and WebRtcIsacfix_Spec2Time.
*/
#include <stdint.h>
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
/* Cosine table 1 in Q14. */
const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2] = {
16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214,
0, -214, -429, -643, -857, -1072, -1285, -1499, -1713, -1926,
-2139, -2351, -2563, -2775, -2986, -3196, -3406, -3616, -3825, -4033,
-4240, -4447, -4653, -4859, -5063, -5266, -5469, -5671, -5872, -6071,
-6270, -6467, -6664, -6859, -7053, -7246, -7438, -7629, -7818, -8006,
-8192, -8377, -8561, -8743, -8923, -9102, -9280, -9456, -9630, -9803,
-9974, -10143, -10311, -10477, -10641, -10803, -10963, -11121, -11278, -11433,
-11585, -11736, -11885, -12031, -12176, -12318, -12458, -12597, -12733,
-12867, -12998, -13128, -13255, -13380, -13502, -13623, -13741, -13856,
-13970, -14081, -14189, -14295, -14399, -14500, -14598, -14694, -14788,
-14879, -14968, -15053, -15137, -15218, -15296, -15371, -15444, -15515,
-15582, -15647, -15709, -15769, -15826, -15880, -15931, -15980, -16026,
-16069, -16110, -16147, -16182, -16214, -16244, -16270, -16294, -16315,
-16333, -16349, -16362, -16371, -16378, -16383
};
/* Sine table 1 in Q14. */
const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2] = {
0, 214, 429, 643, 857, 1072, 1285, 1499, 1713, 1926,
2139, 2351, 2563, 2775, 2986, 3196, 3406, 3616, 3825, 4033,
4240, 4447, 4653, 4859, 5063, 5266, 5469, 5671, 5872, 6071,
6270, 6467, 6664, 6859, 7053, 7246, 7438, 7629, 7818, 8006,
8192, 8377, 8561, 8743, 8923, 9102, 9280, 9456, 9630, 9803,
9974, 10143, 10311, 10477, 10641, 10803, 10963, 11121, 11278, 11433,
11585, 11736, 11885, 12031, 12176, 12318, 12458, 12597, 12733, 12867,
12998, 13128, 13255, 13380, 13502, 13623, 13741, 13856, 13970, 14081,
14189, 14295, 14399, 14500, 14598, 14694, 14788, 14879, 14968, 15053,
15137, 15218, 15296, 15371, 15444, 15515, 15582, 15647, 15709, 15769,
15826, 15880, 15931, 15980, 16026, 16069, 16110, 16147, 16182, 16214,
16244, 16270, 16294, 16315, 16333, 16349, 16362, 16371, 16378, 16383,
16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214
};
/* Sine table 2 in Q14. */
const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4] = {
16384, -16381, 16375, -16367, 16356, -16342, 16325, -16305, 16283, -16257,
16229, -16199, 16165, -16129, 16090, -16048, 16003, -15956, 15906, -15853,
15798, -15739, 15679, -15615, 15549, -15480, 15408, -15334, 15257, -15178,
15095, -15011, 14924, -14834, 14741, -14647, 14549, -14449, 14347, -14242,
14135, -14025, 13913, -13799, 13682, -13563, 13441, -13318, 13192, -13063,
12933, -12800, 12665, -12528, 12389, -12247, 12104, -11958, 11810, -11661,
11509, -11356, 11200, -11042, 10883, -10722, 10559, -10394, 10227, -10059,
9889, -9717, 9543, -9368, 9191, -9013, 8833, -8652, 8469, -8285,
8099, -7912, 7723, -7534, 7342, -7150, 6957, -6762, 6566, -6369,
6171, -5971, 5771, -5570, 5368, -5165, 4961, -4756, 4550, -4344,
4137, -3929, 3720, -3511, 3301, -3091, 2880, -2669, 2457, -2245,
2032, -1819, 1606, -1392, 1179, -965, 750, -536, 322, -107
};
#if defined(MIPS32_LE)
/* Cosine table 2 in Q14. Used only on MIPS platforms. */
const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4] = {
107, -322, 536, -750, 965, -1179, 1392, -1606, 1819, -2032,
2245, -2457, 2669, -2880, 3091, -3301, 3511, -3720, 3929, -4137,
4344, -4550, 4756, -4961, 5165, -5368, 5570, -5771, 5971, -6171,
6369, -6566, 6762, -6957, 7150, -7342, 7534, -7723, 7912, -8099,
8285, -8469, 8652, -8833, 9013, -9191, 9368, -9543, 9717, -9889,
10059, -10227, 10394, -10559, 10722, -10883, 11042, -11200, 11356, -11509,
11661, -11810, 11958, -12104, 12247, -12389, 12528, -12665, 12800, -12933,
13063, -13192, 13318, -13441, 13563, -13682, 13799, -13913, 14025, -14135,
14242, -14347, 14449, -14549, 14647, -14741, 14834, -14924, 15011, -15095,
15178, -15257, 15334, -15408, 15480, -15549, 15615, -15679, 15739, -15798,
15853, -15906, 15956, -16003, 16048, -16090, 16129, -16165, 16199, -16229,
16257, -16283, 16305, -16325, 16342, -16356, 16367, -16375, 16381, -16384
};
#endif

View File

@ -0,0 +1,199 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
#include "system_wrappers/include/cpu_features_wrapper.h"
#include "test/gtest.h"
static const int kSamples = FRAMESAMPLES / 2;
static const int32_t spec2time_out_expected_1[kSamples] = {
-3366470, -2285227, -3415765, -2310215, -3118030, -2222470, -3030254,
-2192091, -3423170, -2216041, -3305541, -2171936, -3195767, -2095779,
-3153304, -2157560, -3071167, -2032108, -3101190, -1972016, -3103824,
-2089118, -3139811, -1898337, -3102801, -2055082, -3029665, -1854140,
-2962586, -1966454, -3071167, -1894588, -2851743, -1917315, -2848087,
-1594932, -2799242, -1462184, -2845887, -1437599, -2691776, -1329637,
-2770659, -1268491, -2625161, -1578991, -2460299, -1186385, -2365613,
-1039354, -2322608, -958518, -2271749, -789860, -2254538, -850308,
-2384436, -850959, -2133734, -587678, -2093316, -495115, -1973364,
-475177, -1801282, -173507, -1848516, -158015, -1792018, -62648,
-1643313, 214746, -1500758, 267077, -1450193, 560521, -1521579,
675283, -1345408, 857559, -1300822, 1116332, -1294533, 1241117,
-1070027, 1263503, -983816, 1529821, -1019586, 1910421, -955420,
2073688, -836459, 2401105, -653905, 2690474, -731425, 2930131,
-935234, 3299500, -875978, 3523432, -878906, 3924822, -1081630,
4561267, -1203023, 5105274, -1510983, 6052762, -2294646, 7021597,
-3108053, 8826736, -4935222, 11678789, -8442713, 18725700, -21526692,
25420577, 19589811, -28108666, 12634054, -14483066, 6263217, -9979706,
3665661, -7909736, 2531530, -6434896, 1700772, -5525393, 1479473,
-4894262, 1231760, -4353044, 1032940, -3786590, 941152, -3331614,
665090, -2851619, 830696, -2762201, 958007, -2483118, 788233,
-2184965, 804825, -1967306, 1007255, -1862474, 920889, -1457506,
755406, -1405841, 890230, -1302124, 1161599, -701867, 1154163,
-1083366, 1204743, -513581, 1547264, -650636, 1493384, -285543,
1771863, -277906, 1841343, -9078, 1751863, 230222, 1819578,
207170, 1978972, 398137, 2106468, 552155, 1997624, 685213,
2129520, 601078, 2238736, 944591, 2441879, 1194178, 2355280,
986124, 2393328, 1049005, 2417944, 1208368, 2489516, 1352023,
2572118, 1445283, 2856081, 1532997, 2742279, 1615877, 2915274,
1808036, 2856871, 1806936, 3241747, 1622461, 2978558, 1841297,
3010378, 1923666, 3271367, 2126700, 3070935, 1956958, 3107588,
2128405, 3288872, 2114911, 3315952, 2406651, 3344038, 2370199,
3368980, 2144361, 3305030, 2183803, 3401450, 2523102, 3405463,
2452475, 3463355, 2421678, 3551968, 2431949, 3477251, 2148125,
3244489, 2174090};
static const int32_t spec2time_out_expected_2[kSamples] = {
1691694, -2499988, -2035547, 1060469, 988634, -2044502, -306271,
2041000, 201454, -2289456, 93694, 2129427, -369152, -1887834,
860796, 2089102, -929424, -1673956, 1395291, 1785651, -1619673,
-1380109, 1963449, 1093311, -2111007, -840456, 2372786, 578119,
-2242702, 89774, 2463304, -132717, -2121480, 643634, 2277636,
-1125999, -1995858, 1543748, 2227861, -1483779, -1495491, 2102642,
1833876, -1920568, -958378, 2485101, 772261, -2454257, -24942,
2918714, 136838, -2500453, 816118, 3039735, -746560, -2365815,
1586396, 2714951, -1511696, -1942334, 2571792, 2182827, -2325335,
-1311543, 3055970, 1367220, -2737182, -110626, 3889222, 631008,
-3280879, 853066, 4122279, -706638, -3334449, 2148311, 3993512,
-1846301, -3004894, 3426779, 3329522, -3165264, -2242423, 4756866,
2557711, -4131280, -805259, 5702711, 1120592, -4852821, 743664,
6476444, -621186, -5465828, 2815787, 6768835, -3017442, -5338409,
5658126, 6838454, -5492288, -4682382, 8874947, 6153814, -8832561,
-2649251, 12817398, 4237692, -13000247, 1190661, 18986363, -115738,
-19693978, 9908367, 30660381, -10632635, -37962068, 47022884, 89744622,
-42087632, 40279224, -88869341, -47542383, 38572364, 10441576, -30339718,
-9926740, 19896578, 28009, -18886612, -1124047, 13232498, -4150304,
-12770551, 2637074, 9051831, -6162211, -8713972, 4557937, 5489716,
-6862312, -5532349, 5415449, 2791310, -6999367, -2790102, 5375806,
546222, -6486452, -821261, 4994973, -1278840, -5645501, 1060484,
3996285, -2503954, -4653629, 2220549, 3036977, -3282133, -3318585,
2780636, 1789880, -4004589, -2041031, 3105373, 574819, -3992722,
-971004, 3001703, -676739, -3841508, 417284, 2897970, -1427018,
-3058480, 1189948, 2210960, -2268992, -2603272, 1949785, 1576172,
-2720404, -1891738, 2309456, 769178, -2975646, -707150, 2424652,
-88039, -2966660, -65452, 2320780, -957557, -2798978, 744640,
1879794, -1672081, -2365319, 1253309, 1366383, -2204082, -1544367,
1801452, 613828, -2531994, -983847, 2064842, 118326, -2613790,
-203220, 2219635, -730341, -2641861, 563557, 1765434, -1329916,
-2272927, 1037138, 1266725, -1939220, -1588643, 1754528, 816552,
-2376303, -1099167, 1864999, 122477, -2422762, -400027, 1889228,
-579916, -2490353, 287139, 2011318, -1176657, -2502978, 812896,
1116502, -1940211};
static const int16_t time2spec_out_expected_1[kSamples] = {
20342, 23889, -10063, -9419, 3242, 7280, -2012, -5029, 332, 4478,
-97, -3244, -891, 3117, 773, -2204, -1335, 2009, 1236, -1469,
-1562, 1277, 1366, -815, -1619, 599, 1449, -177, -1507, 116,
1294, 263, -1338, -244, 1059, 553, -1045, -549, 829, 826,
-731, -755, 516, 909, -427, -853, 189, 1004, -184, -828,
-108, 888, 72, -700, -280, 717, 342, -611, -534, 601,
534, -374, -646, 399, 567, -171, -720, 234, 645, -11,
-712, -26, 593, 215, -643, -172, 536, 361, -527, -403,
388, 550, -361, -480, 208, 623, -206, -585, 41, 578,
12, -504, -182, 583, 218, -437, -339, 499, 263, -354,
-450, 347, 456, -193, -524, 212, 475, -74, -566, 94,
511, 112, -577, -201, 408, 217, -546, -295, 338, 387,
-13, 4, -46, 2, -76, 103, -83, 108, -55, 100,
-150, 131, -156, 141, -171, 179, -190, 128, -227, 172,
-214, 215, -189, 265, -244, 322, -335, 337, -352, 358,
-368, 362, -355, 366, -381, 403, -395, 411, -392, 446,
-458, 504, -449, 507, -464, 452, -491, 481, -534, 486,
-516, 560, -535, 525, -537, 559, -554, 570, -616, 591,
-585, 627, -509, 588, -584, 547, -610, 580, -614, 635,
-620, 655, -554, 546, -591, 642, -590, 660, -656, 629,
-604, 620, -580, 617, -645, 648, -573, 612, -604, 584,
-571, 597, -562, 627, -550, 560, -606, 529, -584, 568,
-503, 532, -463, 512, -440, 399, -457, 437, -349, 278,
-317, 257, -220, 163, -8, -61, 18, -161, 367, -1306};
static const int16_t time2spec_out_expected_2[kSamples] = {
14283, -11552, -15335, 6626, 7554, -2150, -6309, 1307, 4523, -4,
-3908, -314, 3001, 914, -2715, -1042, 2094, 1272, -1715, -1399,
1263, 1508, -1021, -1534, 735, 1595, -439, -1447, 155, 1433,
22, -1325, -268, 1205, 424, -1030, -608, 950, 643, -733,
-787, 661, 861, -502, -888, 331, 852, -144, -849, 19,
833, 99, -826, -154, 771, 368, -735, -459, 645, 513,
-491, -604, 431, 630, -314, -598, 183, 622, -78, -612,
-48, 641, 154, -645, -257, 610, 281, -529, -444, 450,
441, -327, -506, 274, 476, -232, -570, 117, 554, -86,
-531, -21, 572, 151, -606, -221, 496, 322, -407, -388,
407, 394, -268, -428, 280, 505, -115, -588, 19, 513,
-29, -539, -109, 468, 173, -501, -242, 442, 278, -478,
-680, 656, -659, 656, -669, 602, -688, 612, -667, 612,
-642, 627, -648, 653, -676, 596, -680, 655, -649, 678,
-672, 587, -608, 637, -645, 637, -620, 556, -580, 553,
-635, 518, -599, 583, -501, 536, -544, 473, -552, 583,
-511, 541, -532, 563, -486, 461, -453, 486, -388, 424,
-416, 432, -374, 399, -462, 364, -346, 293, -329, 331,
-313, 281, -247, 309, -337, 241, -190, 207, -194, 179,
-163, 155, -156, 117, -135, 107, -126, 29, -22, 81,
-8, 17, -61, -10, 8, -37, 80, -44, 72, -88,
65, -89, 130, -114, 181, -215, 189, -245, 260, -288,
294, -339, 344, -396, 407, -429, 438, -439, 485, -556,
629, -612, 637, -645, 661, -737, 829, -830, 831, -1041};
class TransformTest : public ::testing::Test {
protected:
// Pass a function pointer to the Tester function.
void Time2SpecTester(Time2Spec Time2SpecFunction) {
// The WebRtcIsacfix_Time2Spec functions hard-code the buffer lengths. It's a
// large buffer, but we have to test it here.
int16_t data_in_1[kSamples] = {0};
int16_t data_in_2[kSamples] = {0};
int16_t data_out_1[kSamples] = {0};
int16_t data_out_2[kSamples] = {0};
for (int i = 0; i < kSamples; i++) {
data_in_1[i] = i * i + 1777;
data_in_2[i] = WEBRTC_SPL_WORD16_MAX / (i + 1) + 17;
}
Time2SpecFunction(data_in_1, data_in_2, data_out_1, data_out_2);
for (int i = 0; i < kSamples; i++) {
// We don't require bit-exact results for the ARM assembly code.
EXPECT_LE(abs(time2spec_out_expected_1[i] - data_out_1[i]), 1);
EXPECT_LE(abs(time2spec_out_expected_2[i] - data_out_2[i]), 1);
}
}
// Pass a function pointer to the Tester function.
void Spec2TimeTester(Spec2Time Spec2TimeFunction) {
// The WebRtcIsacfix_Spec2Time functions hard-code the buffer lengths. It's a
// large buffer, but we have to test it here.
int16_t data_in_1[kSamples] = {0};
int16_t data_in_2[kSamples] = {0};
int32_t data_out_1[kSamples] = {0};
int32_t data_out_2[kSamples] = {0};
for (int i = 0; i < kSamples; i++) {
data_in_1[i] = i * i + 1777;
data_in_2[i] = WEBRTC_SPL_WORD16_MAX / (i + 1) + 17;
}
Spec2TimeFunction(data_in_1, data_in_2, data_out_1, data_out_2);
for (int i = 0; i < kSamples; i++) {
// We don't require bit-exact results for the ARM assembly code.
EXPECT_LE(abs(spec2time_out_expected_1[i] - data_out_1[i]), 16);
EXPECT_LE(abs(spec2time_out_expected_2[i] - data_out_2[i]), 16);
}
}
};
TEST_F(TransformTest, Time2SpecTest) {
Time2SpecTester(WebRtcIsacfix_Time2SpecC);
#if defined(WEBRTC_HAS_NEON)
Time2SpecTester(WebRtcIsacfix_Time2SpecNeon);
#endif
}
TEST_F(TransformTest, Spec2TimeTest) {
Spec2TimeTester(WebRtcIsacfix_Spec2TimeC);
#if defined(WEBRTC_HAS_NEON)
Spec2TimeTester(WebRtcIsacfix_Spec2TimeNeon);
#endif
}

View File

@ -0,0 +1,120 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h"
#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
#include "rtc_base/checks.h"
using std::string;
namespace webrtc {
static const int kIsacBlockDurationMs = 30;
static const int kIsacInputSamplingKhz = 16;
static const int kIsacOutputSamplingKhz = 16;
class IsacSpeedTest : public AudioCodecSpeedTest {
protected:
IsacSpeedTest();
void SetUp() override;
void TearDown() override;
float EncodeABlock(int16_t* in_data,
uint8_t* bit_stream,
size_t max_bytes,
size_t* encoded_bytes) override;
float DecodeABlock(const uint8_t* bit_stream,
size_t encoded_bytes,
int16_t* out_data) override;
ISACFIX_MainStruct* ISACFIX_main_inst_;
};
IsacSpeedTest::IsacSpeedTest()
: AudioCodecSpeedTest(kIsacBlockDurationMs,
kIsacInputSamplingKhz,
kIsacOutputSamplingKhz),
ISACFIX_main_inst_(NULL) {}
void IsacSpeedTest::SetUp() {
AudioCodecSpeedTest::SetUp();
// Check whether the allocated buffer for the bit stream is large enough.
EXPECT_GE(max_bytes_, static_cast<size_t>(STREAM_MAXW16_60MS));
// Create encoder memory.
EXPECT_EQ(0, WebRtcIsacfix_Create(&ISACFIX_main_inst_));
EXPECT_EQ(0, WebRtcIsacfix_EncoderInit(ISACFIX_main_inst_, 1));
WebRtcIsacfix_DecoderInit(ISACFIX_main_inst_);
// Set bitrate and block length.
EXPECT_EQ(0, WebRtcIsacfix_Control(ISACFIX_main_inst_, bit_rate_,
block_duration_ms_));
}
void IsacSpeedTest::TearDown() {
AudioCodecSpeedTest::TearDown();
// Free memory.
EXPECT_EQ(0, WebRtcIsacfix_Free(ISACFIX_main_inst_));
}
float IsacSpeedTest::EncodeABlock(int16_t* in_data,
uint8_t* bit_stream,
size_t max_bytes,
size_t* encoded_bytes) {
// iSAC takes 10 ms of input every call.
const int subblocks = block_duration_ms_ / 10;
const int subblock_length = 10 * input_sampling_khz_;
int value = 0;
clock_t clocks = clock();
size_t pointer = 0;
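// With a 30 ms frame, WebRtcIsacfix_Encode() is expected to return 0 for the
// first two 10 ms sub-blocks and the payload size (in bytes) on the last one,
// which is what the checks inside the loop assert.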
for (int idx = 0; idx < subblocks; idx++, pointer += subblock_length) {
value =
WebRtcIsacfix_Encode(ISACFIX_main_inst_, &in_data[pointer], bit_stream);
if (idx == subblocks - 1)
EXPECT_GT(value, 0);
else
EXPECT_EQ(0, value);
}
clocks = clock() - clocks;
*encoded_bytes = static_cast<size_t>(value);
RTC_DCHECK_LE(*encoded_bytes, max_bytes);
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
float IsacSpeedTest::DecodeABlock(const uint8_t* bit_stream,
size_t encoded_bytes,
int16_t* out_data) {
int value;
int16_t audio_type;
clock_t clocks = clock();
value = WebRtcIsacfix_Decode(ISACFIX_main_inst_, bit_stream, encoded_bytes,
out_data, &audio_type);
clocks = clock() - clocks;
EXPECT_EQ(output_length_sample_, static_cast<size_t>(value));
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
TEST_P(IsacSpeedTest, IsacEncodeDecodeTest) {
size_t kDurationSec = 400;  // Test audio length in seconds.
EncodeDecode(kDurationSec);
}
const coding_param param_set[] = {
std::make_tuple(1,
32000,
string("audio_coding/speech_mono_16kHz"),
string("pcm"),
true)};
INSTANTIATE_TEST_SUITE_P(AllTest,
IsacSpeedTest,
::testing::ValuesIn(param_set));
} // namespace webrtc

View File

@ -0,0 +1,346 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <array>
#include <map>
#include <memory>
#include <vector>
#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "api/audio_codecs/isac/audio_decoder_isac_fix.h"
#include "api/audio_codecs/isac/audio_decoder_isac_float.h"
#include "api/audio_codecs/isac/audio_encoder_isac_fix.h"
#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
#include "modules/audio_coding/test/PCMFile.h"
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
namespace webrtc {
namespace {
constexpr int kPayloadType = 42;
enum class IsacImpl { kFixed, kFloat };
absl::string_view IsacImplToString(IsacImpl impl) {
switch (impl) {
case IsacImpl::kFixed:
return "fixed";
case IsacImpl::kFloat:
return "float";
}
}
std::unique_ptr<PCMFile> GetPcmTestFileReader(int sample_rate_hz) {
std::string filename;
switch (sample_rate_hz) {
case 16000:
filename = test::ResourcePath("audio_coding/testfile16kHz", "pcm");
break;
case 32000:
filename = test::ResourcePath("audio_coding/testfile32kHz", "pcm");
break;
default:
RTC_DCHECK_NOTREACHED()
<< "No test file available for " << sample_rate_hz << " Hz.";
}
auto pcm_file = std::make_unique<PCMFile>();
pcm_file->ReadStereo(false);
pcm_file->Open(filename, sample_rate_hz, "rb", /*auto_rewind=*/true);
pcm_file->FastForward(/*num_10ms_blocks=*/100); // Skip initial silence.
RTC_CHECK(!pcm_file->EndOfFile());
return pcm_file;
}
// Returns a view of the interleaved samples of an AudioFrame object.
rtc::ArrayView<const int16_t> AudioFrameToView(const AudioFrame& audio_frame) {
return {audio_frame.data(),
audio_frame.samples_per_channel() * audio_frame.num_channels()};
}
std::unique_ptr<AudioEncoder> CreateEncoder(IsacImpl impl,
int sample_rate_hz,
int frame_size_ms,
int bitrate_bps) {
RTC_CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000);
RTC_CHECK(frame_size_ms == 30 || frame_size_ms == 60);
RTC_CHECK_GT(bitrate_bps, 0);
switch (impl) {
case IsacImpl::kFixed: {
AudioEncoderIsacFix::Config config;
config.bit_rate = bitrate_bps;
config.frame_size_ms = frame_size_ms;
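// The fixed-point implementation only runs in wideband mode, hence the
// 16 kHz requirement below.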
RTC_CHECK_EQ(16000, sample_rate_hz);
return AudioEncoderIsacFix::MakeAudioEncoder(config, kPayloadType);
}
case IsacImpl::kFloat: {
AudioEncoderIsacFloat::Config config;
config.bit_rate = bitrate_bps;
config.frame_size_ms = frame_size_ms;
config.sample_rate_hz = sample_rate_hz;
return AudioEncoderIsacFloat::MakeAudioEncoder(config, kPayloadType);
}
}
}
std::unique_ptr<AudioDecoder> CreateDecoder(IsacImpl impl, int sample_rate_hz) {
RTC_CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000);
switch (impl) {
case IsacImpl::kFixed: {
webrtc::AudioDecoderIsacFix::Config config;
RTC_CHECK_EQ(16000, sample_rate_hz);
return webrtc::AudioDecoderIsacFix::MakeAudioDecoder(config);
}
case IsacImpl::kFloat: {
webrtc::AudioDecoderIsacFloat::Config config;
config.sample_rate_hz = sample_rate_hz;
return webrtc::AudioDecoderIsacFloat::MakeAudioDecoder(config);
}
}
}
struct EncoderTestParams {
IsacImpl impl;
int sample_rate_hz;
int frame_size_ms;
};
class EncoderTest : public testing::TestWithParam<EncoderTestParams> {
protected:
EncoderTest() = default;
IsacImpl GetIsacImpl() const { return GetParam().impl; }
int GetSampleRateHz() const { return GetParam().sample_rate_hz; }
int GetFrameSizeMs() const { return GetParam().frame_size_ms; }
};
TEST_P(EncoderTest, TestConfig) {
for (int bitrate_bps : {10000, 21000, 32000}) {
SCOPED_TRACE(bitrate_bps);
auto encoder = CreateEncoder(GetIsacImpl(), GetSampleRateHz(),
GetFrameSizeMs(), bitrate_bps);
EXPECT_EQ(GetSampleRateHz(), encoder->SampleRateHz());
EXPECT_EQ(size_t{1}, encoder->NumChannels());
EXPECT_EQ(bitrate_bps, encoder->GetTargetBitrate());
}
}
// Encodes an input audio sequence with a low and a high target bitrate and
// checks that the number of produced bytes in the first case is less than that
// of the second case.
TEST_P(EncoderTest, TestDifferentBitrates) {
auto pcm_file = GetPcmTestFileReader(GetSampleRateHz());
constexpr int kLowBps = 20000;
constexpr int kHighBps = 25000;
auto encoder_low = CreateEncoder(GetIsacImpl(), GetSampleRateHz(),
GetFrameSizeMs(), kLowBps);
auto encoder_high = CreateEncoder(GetIsacImpl(), GetSampleRateHz(),
GetFrameSizeMs(), kHighBps);
int num_bytes_low = 0;
int num_bytes_high = 0;
constexpr int kNumFrames = 12;
for (int i = 0; i < kNumFrames; ++i) {
AudioFrame in;
pcm_file->Read10MsData(in);
rtc::Buffer low, high;
encoder_low->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &low);
encoder_high->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &high);
num_bytes_low += low.size();
num_bytes_high += high.size();
}
EXPECT_LT(num_bytes_low, num_bytes_high);
}
// Encodes an input audio sequence first with a low, then with a high target
// bitrate *using the same encoder* and checks that the number of emitted bytes
// in the first case is less than in the second case.
TEST_P(EncoderTest, TestDynamicBitrateChange) {
constexpr int kLowBps = 20000;
constexpr int kHighBps = 25000;
constexpr int kStartBps = 30000;
auto encoder = CreateEncoder(GetIsacImpl(), GetSampleRateHz(),
GetFrameSizeMs(), kStartBps);
std::map<int, int> num_bytes;
constexpr int kNumFrames = 200; // 2 seconds.
for (int bitrate_bps : {kLowBps, kHighBps}) {
auto pcm_file = GetPcmTestFileReader(GetSampleRateHz());
encoder->OnReceivedTargetAudioBitrate(bitrate_bps);
for (int i = 0; i < kNumFrames; ++i) {
AudioFrame in;
pcm_file->Read10MsData(in);
rtc::Buffer buf;
encoder->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &buf);
num_bytes[bitrate_bps] += buf.size();
}
}
// kHighBps / kLowBps == 1.25, so require the high-bitrate run to produce at
// least 1.195 times the number of bytes.
EXPECT_LT(1.195 * num_bytes[kLowBps], num_bytes[kHighBps]);
}
// Checks that, given a target bitrate, the encoder does not overshoot too much.
TEST_P(EncoderTest, DoNotOvershootTargetBitrate) {
for (int bitrate_bps : {10000, 15000, 20000, 26000, 32000}) {
SCOPED_TRACE(bitrate_bps);
auto pcm_file = GetPcmTestFileReader(GetSampleRateHz());
auto e = CreateEncoder(GetIsacImpl(), GetSampleRateHz(), GetFrameSizeMs(),
bitrate_bps);
int num_bytes = 0;
constexpr int kNumFrames = 200; // 2 seconds.
for (int i = 0; i < kNumFrames; ++i) {
AudioFrame in;
pcm_file->Read10MsData(in);
rtc::Buffer encoded;
e->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &encoded);
num_bytes += encoded.size();
}
// Inverse of the duration of `kNumFrames` 10 ms frames (unit: seconds^-1).
constexpr float kAudioDurationInv = 100.f / kNumFrames;
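// With kNumFrames == 200 (2 s of audio), kAudioDurationInv == 0.5, so the
// expression below is simply the total number of bits divided by 2 seconds.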
const int measured_bitrate_bps = 8 * num_bytes * kAudioDurationInv;
EXPECT_LT(measured_bitrate_bps, bitrate_bps + 2250); // Max 2250 bps extra.
}
}
// Creates tests for different encoder configurations and implementations.
INSTANTIATE_TEST_SUITE_P(
IsacApiTest,
EncoderTest,
::testing::ValuesIn([] {
std::vector<EncoderTestParams> cases;
for (IsacImpl impl : {IsacImpl::kFloat, IsacImpl::kFixed}) {
for (int frame_size_ms : {30, 60}) {
cases.push_back({impl, 16000, frame_size_ms});
}
}
cases.push_back({IsacImpl::kFloat, 32000, 30});
return cases;
}()),
[](const ::testing::TestParamInfo<EncoderTestParams>& info) {
rtc::StringBuilder b;
const auto& p = info.param;
b << IsacImplToString(p.impl) << "_" << p.sample_rate_hz << "_"
<< p.frame_size_ms;
return b.Release();
});
struct DecoderTestParams {
IsacImpl impl;
int sample_rate_hz;
};
class DecoderTest : public testing::TestWithParam<DecoderTestParams> {
protected:
DecoderTest() = default;
IsacImpl GetIsacImpl() const { return GetParam().impl; }
int GetSampleRateHz() const { return GetParam().sample_rate_hz; }
};
TEST_P(DecoderTest, TestConfig) {
auto decoder = CreateDecoder(GetIsacImpl(), GetSampleRateHz());
EXPECT_EQ(GetSampleRateHz(), decoder->SampleRateHz());
EXPECT_EQ(size_t{1}, decoder->Channels());
}
// Creates tests for different decoder configurations and implementations.
INSTANTIATE_TEST_SUITE_P(
IsacApiTest,
DecoderTest,
::testing::ValuesIn({DecoderTestParams{IsacImpl::kFixed, 16000},
DecoderTestParams{IsacImpl::kFloat, 16000},
DecoderTestParams{IsacImpl::kFloat, 32000}}),
[](const ::testing::TestParamInfo<DecoderTestParams>& info) {
const auto& p = info.param;
return (rtc::StringBuilder()
<< IsacImplToString(p.impl) << "_" << p.sample_rate_hz)
.Release();
});
struct EncoderDecoderPairTestParams {
int sample_rate_hz;
int frame_size_ms;
IsacImpl encoder_impl;
IsacImpl decoder_impl;
};
class EncoderDecoderPairTest
: public testing::TestWithParam<EncoderDecoderPairTestParams> {
protected:
EncoderDecoderPairTest() = default;
int GetSampleRateHz() const { return GetParam().sample_rate_hz; }
int GetEncoderFrameSizeMs() const { return GetParam().frame_size_ms; }
IsacImpl GetEncoderIsacImpl() const { return GetParam().encoder_impl; }
IsacImpl GetDecoderIsacImpl() const { return GetParam().decoder_impl; }
int GetEncoderFrameSize() const {
return GetEncoderFrameSizeMs() * GetSampleRateHz() / 1000;
}
};
// Checks that the numbers of encoded and decoded samples match.
TEST_P(EncoderDecoderPairTest, EncodeDecode) {
auto pcm_file = GetPcmTestFileReader(GetSampleRateHz());
auto encoder = CreateEncoder(GetEncoderIsacImpl(), GetSampleRateHz(),
GetEncoderFrameSizeMs(), /*bitrate_bps=*/20000);
auto decoder = CreateDecoder(GetDecoderIsacImpl(), GetSampleRateHz());
const int encoder_frame_size = GetEncoderFrameSize();
std::vector<int16_t> out(encoder_frame_size);
size_t num_encoded_samples = 0;
size_t num_decoded_samples = 0;
constexpr int kNumFrames = 12;
for (int i = 0; i < kNumFrames; ++i) {
AudioFrame in;
pcm_file->Read10MsData(in);
rtc::Buffer encoded;
encoder->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &encoded);
num_encoded_samples += in.samples_per_channel();
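// The encoder buffers 10 ms chunks and emits a payload only once a full
// 30/60 ms frame has been accumulated; until then there is nothing to decode.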
if (encoded.empty()) {
continue;
}
// Decode.
const std::vector<AudioDecoder::ParseResult> parse_result =
decoder->ParsePayload(std::move(encoded), /*timestamp=*/0);
EXPECT_EQ(parse_result.size(), size_t{1});
auto decode_result = parse_result[0].frame->Decode(out);
EXPECT_TRUE(decode_result.has_value());
EXPECT_EQ(out.size(), decode_result->num_decoded_samples);
num_decoded_samples += decode_result->num_decoded_samples;
}
EXPECT_EQ(num_encoded_samples, num_decoded_samples);
}
// Creates tests for different encoder frame sizes and different
// encoder/decoder implementations.
INSTANTIATE_TEST_SUITE_P(
IsacApiTest,
EncoderDecoderPairTest,
::testing::ValuesIn([] {
std::vector<EncoderDecoderPairTestParams> cases;
for (int frame_size_ms : {30, 60}) {
for (IsacImpl enc : {IsacImpl::kFloat, IsacImpl::kFixed}) {
for (IsacImpl dec : {IsacImpl::kFloat, IsacImpl::kFixed}) {
cases.push_back({16000, frame_size_ms, enc, dec});
}
}
}
cases.push_back({32000, 30, IsacImpl::kFloat, IsacImpl::kFloat});
return cases;
}()),
[](const ::testing::TestParamInfo<EncoderDecoderPairTestParams>& info) {
rtc::StringBuilder b;
const auto& p = info.param;
b << p.sample_rate_hz << "_" << p.frame_size_ms << "_"
<< IsacImplToString(p.encoder_impl) << "_"
<< IsacImplToString(p.decoder_impl);
return b.Release();
});
} // namespace
} // namespace webrtc

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
#include "modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
namespace webrtc {
using AudioDecoderIsacFloatImpl = AudioDecoderIsacT<IsacFloat>;
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
#include "modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
namespace webrtc {
using AudioEncoderIsacFloatImpl = AudioEncoderIsacT<IsacFloat>;
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_

Some files were not shown because too many files have changed in this diff.