Add AudioNetworkAdaptor (ANA) encoder runtime config plots to the event log visualiser

BUG=webrtc:7160

Review-Url: https://codereview.webrtc.org/2695613005
Cr-Commit-Position: refs/heads/master@{#16776}
This commit is contained in:
michaelt 2017-02-22 07:33:27 -08:00 committed by Commit bot
parent 0335e6c4bf
commit 6e5b2195d7
3 changed files with 166 additions and 0 deletions

View File

@ -451,6 +451,10 @@ EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log)
break;
}
case ParsedRtcEventLog::AUDIO_NETWORK_ADAPTATION_EVENT: {
// Record each audio network adaptation (ANA) event: the log timestamp plus
// the encoder runtime config it carried, for the CreateAudioEncoder*Graph
// plots below.
AudioNetworkAdaptationEvent ana_event;
ana_event.timestamp = parsed_log_.GetTimestamp(i);
parsed_log_.GetAudioNetworkAdaptation(i, &ana_event.config);
audio_network_adaptation_events_.push_back(ana_event);
break;
}
case ParsedRtcEventLog::UNKNOWN_EVENT: {
@ -532,6 +536,21 @@ std::string EventLogAnalyzer::GetStreamName(StreamId stream_id) const {
return name.str();
}
// Appends one new time series to |plot| with a point for every audio network
// adaptation event for which |get_y| yields a value. The x coordinate is the
// event time in seconds relative to |begin_time_|; the y coordinate is
// whatever |get_y| extracts from the event.
void EventLogAnalyzer::FillAudioEncoderTimeSeries(
    Plot* plot,
    rtc::FunctionView<rtc::Optional<float>(
        const AudioNetworkAdaptationEvent& ana_event)> get_y) const {
  // Event timestamps are in microseconds (see the 1e6 divisor); name the
  // conversion factor instead of using a bare magic number.
  constexpr float kMicrosecondsPerSecond = 1000000.0f;
  plot->series_list_.push_back(TimeSeries());
  TimeSeries& series = plot->series_list_.back();
  series.style = LINE_DOT_GRAPH;
  for (const auto& ana_event : audio_network_adaptation_events_) {
    rtc::Optional<float> y = get_y(ana_event);
    if (y) {
      float x = static_cast<float>(ana_event.timestamp - begin_time_) /
                kMicrosecondsPerSecond;
      series.points.emplace_back(x, *y);
    }
  }
}
void EventLogAnalyzer::CreatePacketGraph(PacketDirection desired_direction,
Plot* plot) {
for (auto& kv : rtp_packets_) {
@ -1275,5 +1294,92 @@ void EventLogAnalyzer::CreateTimestampGraph(Plot* plot) {
plot->SetSuggestedYAxis(0, 1, "Timestamp (90khz)", kBottomMargin, kTopMargin);
plot->SetTitle("Timestamps");
}
// Plots the target bitrate (bps) reported by the audio network adaptor, one
// point per ANA event that carried a bitrate value.
void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
  // Extractor: the event's target bitrate in bps, or empty if absent.
  auto get_bitrate = [](const AudioNetworkAdaptationEvent& ana_event) {
    if (!ana_event.config.bitrate_bps)
      return rtc::Optional<float>();
    return rtc::Optional<float>(
        static_cast<float>(*ana_event.config.bitrate_bps));
  };
  FillAudioEncoderTimeSeries(plot, get_bitrate);
  plot->series_list_.back().label = "Audio encoder target bitrate";
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder target bitrate");
}
// Plots the encoder frame length (ms) reported by the audio network adaptor,
// one point per ANA event that carried a frame-length value.
void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
  // Extractor: the event's frame length in ms, or empty if absent.
  auto get_frame_length = [](const AudioNetworkAdaptationEvent& ana_event)
      -> rtc::Optional<float> {
    if (!ana_event.config.frame_length_ms)
      return rtc::Optional<float>();
    return rtc::Optional<float>(
        static_cast<float>(*ana_event.config.frame_length_ms));
  };
  FillAudioEncoderTimeSeries(plot, get_frame_length);
  plot->series_list_.back().label = "Audio encoder frame length";
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder frame length");
}
// Plots the uplink packet loss fraction reported to the audio encoder, one
// point per ANA event that carried a loss-fraction value.
void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
    Plot* plot) {
  // Captureless lambda: nothing from the enclosing scope is used, matching
  // the other CreateAudioEncoder*Graph extractors.
  FillAudioEncoderTimeSeries(
      plot, [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.uplink_packet_loss_fraction)
          return rtc::Optional<float>(static_cast<float>(
              *ana_event.config.uplink_packet_loss_fraction));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  // NOTE(review): the plotted value is a fraction (presumably in [0, 1]) but
  // the axis label says "Percent" and the suggested range is 0-10 — confirm
  // the intended unit; either scale by 100 or relabel the axis.
  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
                          kTopMargin);
  plot->SetTitle("Reported audio encoder lost packets");
}
// Plots the FEC on/off state (as 0/1) reported to the audio encoder, one
// point per ANA event that carried an enable_fec value.
void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
  // Dropped the needless [&] capture: the lambda uses nothing from the
  // enclosing scope (consistent with the other extractors in this file).
  FillAudioEncoderTimeSeries(
      plot, [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.enable_fec)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.enable_fec));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder FEC";
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder FEC");
}
// Plots the DTX on/off state (as 0/1) reported to the audio encoder, one
// point per ANA event that carried an enable_dtx value.
void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
  // Dropped the needless [&] capture: the lambda uses nothing from the
  // enclosing scope (consistent with the other extractors in this file).
  FillAudioEncoderTimeSeries(
      plot, [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.enable_dtx)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.enable_dtx));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder DTX";
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder DTX");
}
// Plots the number of channels reported to the audio encoder, one point per
// ANA event that carried a num_channels value.
void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
  // Dropped the needless [&] capture: the lambda uses nothing from the
  // enclosing scope (consistent with the other extractors in this file).
  FillAudioEncoderTimeSeries(
      plot, [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.num_channels)
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.num_channels));
        return rtc::Optional<float>();
      });
  plot->series_list_.back().label = "Audio encoder number of channels";
  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
                          kBottomMargin, kTopMargin);
  plot->SetTitle("Reported audio encoder number of channels");
}
} // namespace plotting
} // namespace webrtc

View File

@ -18,6 +18,7 @@
#include <utility>
#include <vector>
#include "webrtc/base/function_view.h"
#include "webrtc/logging/rtc_event_log/rtc_event_log_parser.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
@ -52,6 +53,11 @@ struct LossBasedBweUpdate {
int32_t expected_packets;
};
// One parsed AUDIO_NETWORK_ADAPTATION_EVENT from the RTC event log: when the
// audio encoder runtime configuration was reported, and the configuration
// itself.
struct AudioNetworkAdaptationEvent {
// Log timestamp in microseconds (the plotting code divides differences by
// 1e6 to get seconds).
uint64_t timestamp;
// Encoder runtime config carried by the event (bitrate, frame length,
// loss fraction, FEC/DTX state, channel count — each field optional).
AudioNetworkAdaptor::EncoderRuntimeConfig config;
};
class EventLogAnalyzer {
public:
// The EventLogAnalyzer keeps a reference to the ParsedRtcEventLog for the
@ -87,6 +93,13 @@ class EventLogAnalyzer {
void CreateNetworkDelayFeedbackGraph(Plot* plot);
void CreateTimestampGraph(Plot* plot);
void CreateAudioEncoderTargetBitrateGraph(Plot* plot);
void CreateAudioEncoderFrameLengthGraph(Plot* plot);
void CreateAudioEncoderUplinkPacketLossFractionGraph(Plot* plot);
void CreateAudioEncoderEnableFecGraph(Plot* plot);
void CreateAudioEncoderEnableDtxGraph(Plot* plot);
void CreateAudioEncoderNumChannelsGraph(Plot* plot);
// Returns a vector of capture and arrival timestamps for the video frames
// of the stream with the most number of frames.
std::vector<std::pair<int64_t, int64_t>> GetFrameTimestamps() const;
@ -127,6 +140,11 @@ class EventLogAnalyzer {
std::string GetStreamName(StreamId) const;
void FillAudioEncoderTimeSeries(
Plot* plot,
rtc::FunctionView<rtc::Optional<float>(
const AudioNetworkAdaptationEvent& ana_event)> get_y) const;
const ParsedRtcEventLog& parsed_log_;
// A list of SSRCs we are interested in analysing.
@ -152,6 +170,8 @@ class EventLogAnalyzer {
// A list of all updates from the send-side loss-based bandwidth estimator.
std::vector<LossBasedBweUpdate> bwe_loss_updates_;
std::vector<AudioNetworkAdaptationEvent> audio_network_adaptation_events_;
// Window and step size used for calculating moving averages, e.g. bitrate.
// The generated data points will be |step_| microseconds apart.
// Only events occuring at most |window_duration_| microseconds before the

View File

@ -62,6 +62,21 @@ DEFINE_bool(plot_fraction_loss,
DEFINE_bool(plot_timestamps,
false,
"Plot the rtp timestamps of all rtp and rtcp packets over time.");
// Flags enabling the audio-encoder (ANA) plots; each flag turns on a single
// graph in the generated output (--plot_all enables all of them).
DEFINE_bool(audio_encoder_bitrate_bps,
            false,
            "Plot the audio encoder target bitrate.");
DEFINE_bool(audio_encoder_frame_length_ms,
            false,
            "Plot the audio encoder frame length.");
// Help-string grammar fixed: "is send" -> "is sent".
DEFINE_bool(
    audio_encoder_uplink_packet_loss_fraction,
    false,
    "Plot the uplink packet loss fraction which is sent to the audio encoder.");
DEFINE_bool(audio_encoder_fec, false, "Plot the audio encoder FEC.");
DEFINE_bool(audio_encoder_dtx, false, "Plot the audio encoder DTX.");
DEFINE_bool(audio_encoder_num_channels,
            false,
            "Plot the audio encoder number of channels.");
DEFINE_string(
force_fieldtrials,
"",
@ -187,6 +202,31 @@ int main(int argc, char* argv[]) {
analyzer.CreateTimestampGraph(collection->AppendNewPlot());
}
// Audio-encoder (ANA) graphs: each flag selects one plot; --plot_all
// enables every one of them.
if (FLAGS_plot_all || FLAGS_audio_encoder_bitrate_bps) {
analyzer.CreateAudioEncoderTargetBitrateGraph(collection->AppendNewPlot());
}
if (FLAGS_plot_all || FLAGS_audio_encoder_frame_length_ms) {
analyzer.CreateAudioEncoderFrameLengthGraph(collection->AppendNewPlot());
}
if (FLAGS_plot_all || FLAGS_audio_encoder_uplink_packet_loss_fraction) {
analyzer.CreateAudioEncoderUplinkPacketLossFractionGraph(
collection->AppendNewPlot());
}
if (FLAGS_plot_all || FLAGS_audio_encoder_fec) {
analyzer.CreateAudioEncoderEnableFecGraph(collection->AppendNewPlot());
}
if (FLAGS_plot_all || FLAGS_audio_encoder_dtx) {
analyzer.CreateAudioEncoderEnableDtxGraph(collection->AppendNewPlot());
}
if (FLAGS_plot_all || FLAGS_audio_encoder_num_channels) {
analyzer.CreateAudioEncoderNumChannelsGraph(collection->AppendNewPlot());
}
collection->Draw();
return 0;