Add more refined control over dumping of data and the aecdump content
This CL adds the ability in audioproc_f and unpack_aecdump to:
- Clearly identify the Init events and when they occur.
- Optionally process only a specific Init section of an aecdump.
- Optionally turn on dumping of internal data selectively, for a specific Init section and a specific time interval.
- Optionally let unpack_aecdump produce file names based on Inits.

Bug: webrtc:5298
Change-Id: Id654b7175407a23ef634fca832994d87d1073239
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/196160
Reviewed-by: Gustaf Ullberg <gustaf@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Commit-Queue: Per Åhgren <peah@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#33181}
This commit is contained in: parent d4ad2ef732, commit 879d33b9f8
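For orientation, a rough usage sketch of how the new options could be combined. The flag names (--analyze, --init_to_process, --dump_start_seconds, --dump_end_seconds, --use_init_suffix) come from the flag definitions added in this CL; the binary paths, the --dump_input and --dump_data flags, and the positional aecdump argument to unpack_aecdump are assumptions about the existing tools, not part of this change:

    # List the Init events in an aecdump and their start times, without processing it
    # (assumed invocation of audioproc_f; --analyze is added by this CL).
    ./audioproc_f --dump_input recording.aecdump --analyze

    # Process only the second Init section, dumping internal data between 10 s and 20 s
    # after that init (assumes a build with WEBRTC_APM_DEBUG_DUMP=1).
    ./audioproc_f --dump_input recording.aecdump --dump_data \
        --init_to_process 2 --dump_start_seconds 10 --dump_end_seconds 20

    # Unpack an aecdump with wav-file names suffixed by init index instead of frame count.
    ./unpack_aecdump recording.aecdump --use_init_suffix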
@@ -14,6 +14,8 @@
#include <memory>

#include "modules/audio_processing/echo_control_mobile_impl.h"
#include "modules/audio_processing/logging/apm_data_dumper.h"
#include "modules/audio_processing/test/aec_dump_based_simulator.h"
#include "modules/audio_processing/test/protobuf_utils.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
@@ -62,6 +64,18 @@ bool VerifyFloatBitExactness(const webrtc::audioproc::Stream& msg,
  return true;
}

// Selectively reads the next proto-buf message from dump-file or string input.
// Returns a bool indicating whether a new message was available.
bool ReadNextMessage(bool use_dump_file,
                     FILE* dump_input_file,
                     std::stringstream& input,
                     webrtc::audioproc::Event& event_msg) {
  if (use_dump_file) {
    return ReadMessageFromFile(dump_input_file, &event_msg);
  }
  return ReadMessageFromString(&input, &event_msg);
}

}  // namespace

AecDumpBasedSimulator::AecDumpBasedSimulator(
@@ -226,36 +240,93 @@ void AecDumpBasedSimulator::Process() {
        rtc::CheckedDivExact(sample_rate_hz, kChunksPerSecond), 1));
  }

  webrtc::audioproc::Event event_msg;
  int num_forward_chunks_processed = 0;
  if (settings_.aec_dump_input_string.has_value()) {
    std::stringstream input;
    input << settings_.aec_dump_input_string.value();
    while (ReadMessageFromString(&input, &event_msg))
      HandleEvent(event_msg, &num_forward_chunks_processed);
  } else {
  const bool use_dump_file = !settings_.aec_dump_input_string.has_value();
  std::stringstream input;
  if (use_dump_file) {
    dump_input_file_ =
        OpenFile(settings_.aec_dump_input_filename->c_str(), "rb");
    while (ReadMessageFromFile(dump_input_file_, &event_msg))
      HandleEvent(event_msg, &num_forward_chunks_processed);
  } else {
    input << settings_.aec_dump_input_string.value();
  }

  webrtc::audioproc::Event event_msg;
  int capture_frames_since_init = 0;
  int init_index = 0;
  while (ReadNextMessage(use_dump_file, dump_input_file_, input, event_msg)) {
    SelectivelyToggleDataDumping(init_index, capture_frames_since_init);
    HandleEvent(event_msg, capture_frames_since_init, init_index);

    // Perform an early exit if the init block to process has been fully
    // processed.
    if (finished_processing_specified_init_block_) {
      break;
    }
    RTC_CHECK(!settings_.init_to_process ||
              *settings_.init_to_process >= init_index);
  }

  if (use_dump_file) {
    fclose(dump_input_file_);
  }

  DetachAecDump();
}

void AecDumpBasedSimulator::Analyze() {
  const bool use_dump_file = !settings_.aec_dump_input_string.has_value();
  std::stringstream input;
  if (use_dump_file) {
    dump_input_file_ =
        OpenFile(settings_.aec_dump_input_filename->c_str(), "rb");
  } else {
    input << settings_.aec_dump_input_string.value();
  }

  webrtc::audioproc::Event event_msg;
  int num_capture_frames = 0;
  int num_render_frames = 0;
  int init_index = 0;
  while (ReadNextMessage(use_dump_file, dump_input_file_, input, event_msg)) {
    if (event_msg.type() == webrtc::audioproc::Event::INIT) {
      ++init_index;
      constexpr float kNumFramesPerSecond = 100.f;
      float capture_time_seconds = num_capture_frames / kNumFramesPerSecond;
      float render_time_seconds = num_render_frames / kNumFramesPerSecond;

      std::cout << "Inits:" << std::endl;
      std::cout << init_index << ": -->" << std::endl;
      std::cout << " Time:" << std::endl;
      std::cout << " Capture: " << capture_time_seconds << " s ("
                << num_capture_frames << " frames) " << std::endl;
      std::cout << " Render: " << render_time_seconds << " s ("
                << num_render_frames << " frames) " << std::endl;
    } else if (event_msg.type() == webrtc::audioproc::Event::STREAM) {
      ++num_capture_frames;
    } else if (event_msg.type() == webrtc::audioproc::Event::REVERSE_STREAM) {
      ++num_render_frames;
    }
  }

  if (use_dump_file) {
    fclose(dump_input_file_);
  }
}

void AecDumpBasedSimulator::HandleEvent(
    const webrtc::audioproc::Event& event_msg,
    int* num_forward_chunks_processed) {
    int& capture_frames_since_init,
    int& init_index) {
  switch (event_msg.type()) {
    case webrtc::audioproc::Event::INIT:
      RTC_CHECK(event_msg.has_init());
      HandleMessage(event_msg.init());
      ++init_index;
      capture_frames_since_init = 0;
      HandleMessage(event_msg.init(), init_index);
      break;
    case webrtc::audioproc::Event::STREAM:
      RTC_CHECK(event_msg.has_stream());
      ++capture_frames_since_init;
      HandleMessage(event_msg.stream());
      ++num_forward_chunks_processed;
      break;
    case webrtc::audioproc::Event::REVERSE_STREAM:
      RTC_CHECK(event_msg.has_reverse_stream());
@@ -439,11 +510,18 @@ void AecDumpBasedSimulator::HandleMessage(
  }
}

void AecDumpBasedSimulator::HandleMessage(const webrtc::audioproc::Init& msg) {
void AecDumpBasedSimulator::HandleMessage(const webrtc::audioproc::Init& msg,
                                          int init_index) {
  RTC_CHECK(msg.has_sample_rate());
  RTC_CHECK(msg.has_num_input_channels());
  RTC_CHECK(msg.has_num_reverse_channels());
  RTC_CHECK(msg.has_reverse_sample_rate());

  // Do not perform the init if the init block to process is fully processed
  if (settings_.init_to_process && *settings_.init_to_process < init_index) {
    finished_processing_specified_init_block_ = true;
  }

  MaybeOpenCallOrderFile();

  if (settings_.use_verbose_logging) {

@@ -44,10 +44,14 @@ class AecDumpBasedSimulator final : public AudioProcessingSimulator {
  // Processes the messages in the aecdump file.
  void Process() override;

  // Analyzes the data in the aecdump file and reports the resulting statistics.
  void Analyze() override;

 private:
  void HandleEvent(const webrtc::audioproc::Event& event_msg,
                   int* num_forward_chunks_processed);
  void HandleMessage(const webrtc::audioproc::Init& msg);
                   int& num_forward_chunks_processed,
                   int& init_index);
  void HandleMessage(const webrtc::audioproc::Init& msg, int init_index);
  void HandleMessage(const webrtc::audioproc::Stream& msg);
  void HandleMessage(const webrtc::audioproc::ReverseStream& msg);
  void HandleMessage(const webrtc::audioproc::Config& msg);
@@ -69,6 +73,7 @@ class AecDumpBasedSimulator final : public AudioProcessingSimulator {
  bool artificial_nearend_eof_reported_ = false;
  InterfaceType interface_used_ = InterfaceType::kNotSpecified;
  std::unique_ptr<std::ofstream> call_order_output_file_;
  bool finished_processing_specified_init_block_ = false;
};

}  // namespace test

@@ -122,10 +122,16 @@ AudioProcessingSimulator::AudioProcessingSimulator(
          settings_.simulate_mic_gain ? *settings.simulated_mic_kind : 0),
      worker_queue_("file_writer_task_queue") {
  RTC_CHECK(!settings_.dump_internal_data || WEBRTC_APM_DEBUG_DUMP == 1);
  ApmDataDumper::SetActivated(settings_.dump_internal_data);
  if (settings_.dump_start_frame || settings_.dump_end_frame) {
    ApmDataDumper::SetActivated(!settings_.dump_start_frame);
  } else {
    ApmDataDumper::SetActivated(settings_.dump_internal_data);
  }

  if (settings_.dump_set_to_use) {
    ApmDataDumper::SetDumpSetToUse(*settings_.dump_set_to_use);
  }

  if (settings_.dump_internal_data_output_dir.has_value()) {
    ApmDataDumper::SetOutputDirectory(
        settings_.dump_internal_data_output_dir.value());
@@ -360,6 +366,28 @@ void AudioProcessingSimulator::SetupBuffersConfigsOutputs(
  SetupOutput();
}

void AudioProcessingSimulator::SelectivelyToggleDataDumping(
    int init_index,
    int capture_frames_since_init) const {
  if (!(settings_.dump_start_frame || settings_.dump_end_frame)) {
    return;
  }

  if (settings_.init_to_process && *settings_.init_to_process != init_index) {
    return;
  }

  if (settings_.dump_start_frame &&
      *settings_.dump_start_frame == capture_frames_since_init) {
    ApmDataDumper::SetActivated(true);
  }

  if (settings_.dump_end_frame &&
      *settings_.dump_end_frame == capture_frames_since_init) {
    ApmDataDumper::SetActivated(false);
  }
}

void AudioProcessingSimulator::SetupOutput() {
  if (settings_.output_filename) {
    std::string filename;

@@ -144,6 +144,10 @@ struct SimulationSettings {
  absl::optional<std::string> aec_settings_filename;
  absl::optional<absl::string_view> aec_dump_input_string;
  std::vector<float>* processed_capture_samples = nullptr;
  bool analysis_only = false;
  absl::optional<int> dump_start_frame;
  absl::optional<int> dump_end_frame;
  absl::optional<int> init_to_process;
};

// Provides common functionality for performing audioprocessing simulations.
@@ -167,6 +171,9 @@ class AudioProcessingSimulator {
    return api_call_statistics_;
  }

  // Analyzes the data in the input and reports the resulting statistics.
  virtual void Analyze() = 0;

  // Reports whether the processed recording was bitexact.
  bool OutputWasBitexact() { return bitexact_output_; }

@@ -188,6 +195,8 @@ class AudioProcessingSimulator {
                                  int output_num_channels,
                                  int reverse_input_num_channels,
                                  int reverse_output_num_channels);
  void SelectivelyToggleDataDumping(int init_index,
                                    int capture_frames_since_init) const;

  const SimulationSettings settings_;
  rtc::scoped_refptr<AudioProcessing> ap_;

@@ -252,6 +252,31 @@ ABSL_FLAG(int,
          kParameterNotSpecifiedValue,
          "Specifies the dump set to use (if not all the dump sets will "
          "be used");
ABSL_FLAG(bool,
          analyze,
          false,
          "Only analyze the call setup behavior (no processing)");
ABSL_FLAG(float,
          dump_start_seconds,
          kParameterNotSpecifiedValue,
          "Start of when to dump data (seconds).");
ABSL_FLAG(float,
          dump_end_seconds,
          kParameterNotSpecifiedValue,
          "End of when to dump data (seconds).");
ABSL_FLAG(int,
          dump_start_frame,
          kParameterNotSpecifiedValue,
          "Start of when to dump data (frames).");
ABSL_FLAG(int,
          dump_end_frame,
          kParameterNotSpecifiedValue,
          "End of when to dump data (frames).");
ABSL_FLAG(int,
          init_to_process,
          kParameterNotSpecifiedValue,
          "Init index to process.");

ABSL_FLAG(bool,
          float_wav_output,
          false,
@@ -396,6 +421,7 @@ SimulationSettings CreateSettings() {
                        &settings.agc_compression_gain);
  SetSettingIfFlagSet(absl::GetFlag(FLAGS_agc2_enable_adaptive_gain),
                      &settings.agc2_use_adaptive_gain);

  SetSettingIfSpecified(absl::GetFlag(FLAGS_agc2_fixed_gain_db),
                        &settings.agc2_fixed_gain_db);
  settings.agc2_adaptive_level_estimator = MapAgc2AdaptiveLevelEstimator(
@@ -447,6 +473,30 @@ SimulationSettings CreateSettings() {
          ? WavFile::SampleFormat::kFloat
          : WavFile::SampleFormat::kInt16;

  settings.analysis_only = absl::GetFlag(FLAGS_analyze);

  SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_start_frame),
                        &settings.dump_start_frame);
  SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_end_frame),
                        &settings.dump_end_frame);

  constexpr int kFramesPerSecond = 100;
  absl::optional<float> start_seconds;
  SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_start_seconds),
                        &start_seconds);
  if (start_seconds) {
    settings.dump_start_frame = *start_seconds * kFramesPerSecond;
  }

  absl::optional<float> end_seconds;
  SetSettingIfSpecified(absl::GetFlag(FLAGS_dump_end_seconds), &end_seconds);
  if (end_seconds) {
    settings.dump_end_frame = *end_seconds * kFramesPerSecond;
  }

  SetSettingIfSpecified(absl::GetFlag(FLAGS_init_to_process),
                        &settings.init_to_process);

  return settings;
}

@@ -612,6 +662,18 @@ void PerformBasicParameterSanityChecks(
      WEBRTC_APM_DEBUG_DUMP == 0 && settings.dump_internal_data,
      "Error: --dump_data cannot be set without proper build support.\n");

  ReportConditionalErrorAndExit(settings.init_to_process &&
                                    *settings.init_to_process != 1 &&
                                    !settings.aec_dump_input_filename,
                                "Error: --init_to_process must be set to 1 for "
                                "wav-file based simulations.\n");

  ReportConditionalErrorAndExit(
      !settings.init_to_process &&
          (settings.dump_start_frame || settings.dump_end_frame),
      "Error: --init_to_process must be set when specifying a start and/or end "
      "frame for when to dump internal data.\n");

  ReportConditionalErrorAndExit(
      !settings.dump_internal_data &&
          settings.dump_internal_data_output_dir.has_value(),
@@ -684,7 +746,11 @@ int RunSimulation(rtc::scoped_refptr<AudioProcessing> audio_processing,
                             std::move(ap_builder)));
  }

  processor->Process();
  if (settings.analysis_only) {
    processor->Analyze();
  } else {
    processor->Process();
  }

  if (settings.report_performance) {
    processor->GetApiCallStatistics().PrintReport();

@@ -14,6 +14,7 @@

#include <iostream>

#include "modules/audio_processing/logging/apm_data_dumper.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/file_wrapper.h"
@@ -106,12 +107,15 @@ void WavBasedSimulator::Process() {

  bool samples_left_to_process = true;
  int call_chain_index = 0;
  int num_forward_chunks_processed = 0;
  int capture_frames_since_init = 0;
  constexpr int kInitIndex = 1;
  while (samples_left_to_process) {
    switch (call_chain_[call_chain_index]) {
      case SimulationEventType::kProcessStream:
        SelectivelyToggleDataDumping(kInitIndex, capture_frames_since_init);

        samples_left_to_process = HandleProcessStreamCall();
        ++num_forward_chunks_processed;
        ++capture_frames_since_init;
        break;
      case SimulationEventType::kProcessReverseStream:
        if (settings_.reverse_input_filename) {
@@ -128,6 +132,14 @@ void WavBasedSimulator::Process() {
  DetachAecDump();
}

void WavBasedSimulator::Analyze() {
  std::cout << "Inits:" << std::endl;
  std::cout << "1: -->" << std::endl;
  std::cout << " Time:" << std::endl;
  std::cout << " Capture: 0 s (0 frames) " << std::endl;
  std::cout << " Render: 0 s (0 frames)" << std::endl;
}

bool WavBasedSimulator::HandleProcessStreamCall() {
  bool samples_left_to_process = buffer_reader_->Read(in_buf_.get());
  if (samples_left_to_process) {

@@ -34,6 +34,10 @@ class WavBasedSimulator final : public AudioProcessingSimulator {
  // Processes the WAV input.
  void Process() override;

  // Only analyzes the data for the simulation, instead of performing any
  // processing.
  void Analyze() override;

 private:
  enum SimulationEventType {
    kProcessStream,

@@ -81,6 +81,10 @@ ABSL_FLAG(bool,
          text,
          false,
          "Write non-audio files as text files instead of binary files.");
ABSL_FLAG(bool,
          use_init_suffix,
          false,
          "Use init index instead of capture frame count as file name suffix.");

#define PRINT_CONFIG(field_name) \
  if (msg.has_##field_name()) { \
@@ -224,6 +228,16 @@ std::vector<RuntimeSettingWriter> RuntimeSettingWriters() {
      })};
}

std::string GetWavFileIndex(int init_index, int frame_count) {
  rtc::StringBuilder suffix;
  if (absl::GetFlag(FLAGS_use_init_suffix)) {
    suffix << "_" << init_index;
  } else {
    suffix << frame_count;
  }
  return suffix.str();
}

}  // namespace

int do_main(int argc, char* argv[]) {
@@ -243,6 +257,7 @@ int do_main(int argc, char* argv[]) {

  Event event_msg;
  int frame_count = 0;
  int init_count = 0;
  size_t reverse_samples_per_channel = 0;
  size_t input_samples_per_channel = 0;
  size_t output_samples_per_channel = 0;
@@ -452,9 +467,11 @@ int do_main(int argc, char* argv[]) {
      return 1;
    }

    ++init_count;
    const Init msg = event_msg.init();
    // These should print out zeros if they're missing.
    fprintf(settings_file, "Init at frame: %d\n", frame_count);
    fprintf(settings_file, "Init #%d at frame: %d\n", init_count,
            frame_count);
    int input_sample_rate = msg.sample_rate();
    fprintf(settings_file, " Input sample rate: %d\n", input_sample_rate);
    int output_sample_rate = msg.output_sample_rate();
@@ -495,24 +512,24 @@ int do_main(int argc, char* argv[]) {
    if (!absl::GetFlag(FLAGS_raw)) {
      // The WAV files need to be reset every time, because they can't change
      // their sample rate or number of channels.

      std::string suffix = GetWavFileIndex(init_count, frame_count);
      rtc::StringBuilder reverse_name;
      reverse_name << absl::GetFlag(FLAGS_reverse_file) << frame_count
                   << ".wav";
      reverse_name << absl::GetFlag(FLAGS_reverse_file) << suffix << ".wav";
      reverse_wav_file.reset(new WavWriter(
          reverse_name.str(), reverse_sample_rate, num_reverse_channels));
      rtc::StringBuilder input_name;
      input_name << absl::GetFlag(FLAGS_input_file) << frame_count << ".wav";
      input_name << absl::GetFlag(FLAGS_input_file) << suffix << ".wav";
      input_wav_file.reset(new WavWriter(input_name.str(), input_sample_rate,
                                         num_input_channels));
      rtc::StringBuilder output_name;
      output_name << absl::GetFlag(FLAGS_output_file) << frame_count
                  << ".wav";
      output_name << absl::GetFlag(FLAGS_output_file) << suffix << ".wav";
      output_wav_file.reset(new WavWriter(
          output_name.str(), output_sample_rate, num_output_channels));

      if (WritingCallOrderFile()) {
        rtc::StringBuilder callorder_name;
        callorder_name << absl::GetFlag(FLAGS_callorder_file) << frame_count
        callorder_name << absl::GetFlag(FLAGS_callorder_file) << suffix
                       << ".char";
        callorder_char_file = OpenFile(callorder_name.str(), "wb");
      }
