Remove unused items in tools/

These things are not used anywhere.

BUG=webrtc:5006
TBR=charujain@webrtc.org

Review-Url: https://codereview.webrtc.org/2580763002 .
Cr-Commit-Position: refs/heads/master@{#15620}
Henrik Kjellander 2016-12-15 08:45:49 +01:00
parent c3765f941d
commit 3bc031beb9
15 changed files with 0 additions and 1211 deletions


@@ -1,468 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Given the output of -t commands from a ninja build for a gyp and GN generated
build, report on differences between the command lines.
When invoked from the command line, this script assumes that the GN and GYP
targets have been generated in the specified folders. It is meant to be used as
follows:
$ python tools/gyp_flag_compare.py gyp_dir gn_dir target
When the GN and GYP target names differ, it should be invoked as follows:
$ python tools/gyp_flag_compare.py gyp_dir gn_dir gyp_target gn_target
To compare all targets, it should be called without a target name, i.e.:
$ python tools/gyp_flag_compare.py gyp_dir gn_dir
This script can also be used interactively. Then ConfigureBuild can optionally
be used to generate ninja files with GYP and GN.
Here's an example setup. Note that the current working directory must be the
project root:
$ PYTHONPATH=tools python
>>> import sys
>>> import pprint
>>> sys.displayhook = pprint.pprint
>>> import gyp_flag_compare as fc
>>> fc.ConfigureBuild(['gyp_define=1', 'define=2'], ['gn_arg=1', 'arg=2'])
>>> modules_unittests = fc.Comparison('modules_unittests')
The above starts interactive Python, sets up the output to be pretty-printed
(useful for making lists, dicts, and sets readable), configures the build with
GN arguments and GYP defines, and then generates a comparison for that build
configuration for the "modules_unittests" target.
After that, the |modules_unittests| object can be used to investigate
differences in the build.
To configure an official build, use this configuration. Disabling NaCl produces
a more meaningful comparison, as certain files need to get compiled twice
for the IRT build, which uses different flags:
>>> fc.ConfigureBuild(
['disable_nacl=1', 'buildtype=Official', 'branding=Chrome'],
['enable_nacl=false', 'is_official_build=true',
'is_chrome_branded=true'])
"""
import os
import shlex
import subprocess
import sys
# Must be in src/.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(BASE_DIR)
_DEFAULT_GN_DIR = 'out/gn'
_DEFAULT_GYP_DIR = 'out/Release'
def FilterChromium(filename):
"""Replaces 'chromium/src/' by '' in the filename."""
return filename.replace('chromium/src/', '')
def ConfigureBuild(gyp_args=None, gn_args=None, gn_dir=_DEFAULT_GN_DIR):
"""Generates gn and gyp targets with the given arguments."""
gyp_args = gyp_args or []
gn_args = gn_args or []
print >> sys.stderr, 'Regenerating GN in %s...' % gn_dir
# Currently only Release, non-component.
Run('gn gen %s --args="is_debug=false is_component_build=false %s"' % \
(gn_dir, ' '.join(gn_args)))
os.environ.pop('GYP_DEFINES', None)
# Remove environment variables required by gn but conflicting with GYP.
# Relevant if Windows toolchain isn't provided by depot_tools.
os.environ.pop('GYP_MSVS_OVERRIDE_PATH', None)
os.environ.pop('WINDOWSSDKDIR', None)
gyp_defines = ''
if len(gyp_args) > 0:
gyp_defines = '-D' + ' -D'.join(gyp_args)
print >> sys.stderr, 'Regenerating GYP in %s...' % _DEFAULT_GYP_DIR
Run('python webrtc/build/gyp_webrtc.py -Gconfig=Release %s' % gyp_defines)
def Counts(dict_of_list):
"""Given a dictionary whose value are lists, returns a dictionary whose values
are the length of the list. This can be used to summarize a dictionary.
"""
return {k: len(v) for k, v in dict_of_list.iteritems()}
def CountsByDirname(dict_of_list):
"""Given a list of files, returns a dict of dirname to counts in that dir."""
r = {}
for path in dict_of_list:
dirname = os.path.dirname(path)
r.setdefault(dirname, 0)
r[dirname] += 1
return r
class Comparison(object):
"""A comparison of the currently-configured build for a target."""
def __init__(self, gyp_target="", gn_target=None, gyp_dir=_DEFAULT_GYP_DIR,
gn_dir=_DEFAULT_GN_DIR):
"""Creates a comparison of a GN and GYP target. If the target names differ
between the two build systems, then two names may be passed.
"""
if gn_target is None:
gn_target = gyp_target
self._gyp_target = gyp_target
self._gn_target = gn_target
self._gyp_dir = gyp_dir
self._gn_dir = gn_dir
self._skipped = []
self._total_diffs = 0
self._missing_gyp_flags = {}
self._missing_gn_flags = {}
self._missing_gyp_files = {}
self._missing_gn_files = {}
self._CompareFiles()
@property
def gyp_files(self):
"""Returns the set of files that are in the GYP target."""
return set(self._gyp_flags.keys())
@property
def gn_files(self):
"""Returns the set of files that are in the GN target."""
return set(self._gn_flags.keys())
@property
def skipped(self):
"""Returns the list of compiler commands that were not processed during the
comparison.
"""
return self._skipped
@property
def total_differences(self):
"""Returns the total number of differences detected."""
return self._total_diffs
@property
def missing_in_gyp(self):
"""Differences that are only in GYP build but not in GN, indexed by the
difference."""
return self._missing_gyp_flags
@property
def missing_in_gn(self):
"""Differences that are only in the GN build but not in GYP, indexed by
the difference."""
return self._missing_gn_flags
@property
def missing_in_gyp_by_file(self):
"""Differences that are only in the GYP build but not in GN, indexed by
file.
"""
return self._missing_gyp_files
@property
def missing_in_gn_by_file(self):
"""Differences that are only in the GYP build but not in GN, indexed by
file.
"""
return self._missing_gn_files
def _CompareFiles(self):
"""Performs the actual target comparison."""
if sys.platform == 'win32':
# On Windows flags are stored in .rsp files which are created by building.
print >> sys.stderr, 'Building in %s...' % self._gn_dir
Run('ninja -C %s -d keeprsp %s' % (self._gn_dir, self._gn_target))
print >> sys.stderr, 'Building in %s...' % self._gyp_dir
Run('ninja -C %s -d keeprsp %s' % (self._gyp_dir, self._gyp_target))
gn = Run('ninja -C %s -t commands %s' % (self._gn_dir, self._gn_target))
gyp = Run('ninja -C %s -t commands %s' % (self._gyp_dir, self._gyp_target))
self._gn_flags = self._GetFlags(gn.splitlines(),
os.path.join(os.getcwd(), self._gn_dir))
self._gyp_flags = self._GetFlags(gyp.splitlines(),
os.path.join(os.getcwd(), self._gyp_dir))
self._gn_flags = dict((FilterChromium(filename), value)
for filename, value in self._gn_flags.iteritems())
self._gyp_flags = dict((FilterChromium(filename), value)
for filename, value in self._gyp_flags.iteritems())
all_files = sorted(self.gn_files & self.gyp_files)
for filename in all_files:
gyp_flags = self._gyp_flags[filename]
gn_flags = self._gn_flags[filename]
self._CompareLists(filename, gyp_flags, gn_flags, 'dash_f')
self._CompareLists(filename, gyp_flags, gn_flags, 'defines',
# These defines are not used by WebRTC
dont_care_gyp=[
'-DENABLE_WEBVR',
'-DUSE_EXTERNAL_POPUP_MENU',
'-DUSE_LIBJPEG_TURBO=1',
'-DUSE_MINIKIN_HYPHENATION=1',
'-DV8_USE_EXTERNAL_STARTUP_DATA',
'-DCR_CLANG_REVISION=280106-1',
'-DUSE_LIBPCI=1'
],
dont_care_gn=[
'-DUSE_EXTERNAL_POPUP_MENU=1'
])
self._CompareLists(filename, gyp_flags, gn_flags, 'include_dirs')
self._CompareLists(filename, gyp_flags, gn_flags, 'warnings',
# More conservative warnings in GN we consider to be OK.
dont_care_gyp=[
'/wd4091', # 'keyword' : ignored on left of 'type' when no variable
# is declared.
'/wd4456', # Declaration hides previous local declaration.
'/wd4457', # Declaration hides function parameter.
'/wd4458', # Declaration hides class member.
'/wd4459', # Declaration hides global declaration.
'/wd4702', # Unreachable code.
'/wd4800', # Forcing value to bool 'true' or 'false'.
'/wd4838', # Conversion from 'type' to 'type' requires a narrowing
# conversion.
] if sys.platform == 'win32' else None,
dont_care_gn=[
'-Wendif-labels',
'-Wextra',
'-Wsign-compare',
] if not sys.platform == 'win32' else None)
self._CompareLists(filename, gyp_flags, gn_flags, 'other',
dont_care_gyp=['-g'], dont_care_gn=['-g2'])
def _CompareLists(self, filename, gyp, gn, name,
dont_care_gyp=None, dont_care_gn=None):
"""Return a report of any differences between gyp and gn lists, ignoring
anything in |dont_care_{gyp|gn}| respectively."""
if gyp[name] == gn[name]:
return
if not dont_care_gyp:
dont_care_gyp = []
if not dont_care_gn:
dont_care_gn = []
gyp_set = set(gyp[name])
gn_set = set(gn[name])
missing_in_gyp = gyp_set - gn_set
missing_in_gn = gn_set - gyp_set
missing_in_gyp -= set(dont_care_gyp)
missing_in_gn -= set(dont_care_gn)
for m in missing_in_gyp:
self._missing_gyp_flags.setdefault(name, {}) \
.setdefault(m, []).append(filename)
self._total_diffs += 1
self._missing_gyp_files.setdefault(filename, {}) \
.setdefault(name, set()).update(missing_in_gyp)
for m in missing_in_gn:
self._missing_gn_flags.setdefault(name, {}) \
.setdefault(m, []).append(filename)
self._total_diffs += 1
self._missing_gn_files.setdefault(filename, {}) \
.setdefault(name, set()).update(missing_in_gn)
def _GetFlags(self, lines, build_dir):
"""Turn a list of command lines into a semi-structured dict."""
is_win = sys.platform == 'win32'
flags_by_output = {}
for line in lines:
line = FilterChromium(line)
line = line.replace(os.getcwd(), '../../')
line = line.replace('//', '/')
command_line = shlex.split(line.strip(), posix=not is_win)[1:]
output_name = _FindAndRemoveArgWithValue(command_line, '-o')
dep_name = _FindAndRemoveArgWithValue(command_line, '-MF')
command_line = _MergeSpacedArgs(command_line, '-Xclang')
cc_file = [x for x in command_line if x.endswith('.cc') or
x.endswith('.c') or
x.endswith('.cpp') or
x.endswith('.mm') or
x.endswith('.m')]
if len(cc_file) != 1:
self._skipped.append(command_line)
continue
assert len(cc_file) == 1
if is_win:
rsp_file = [x for x in command_line if x.endswith('.rsp')]
assert len(rsp_file) <= 1
if rsp_file:
rsp_file = os.path.join(build_dir, rsp_file[0][1:])
with open(rsp_file, "r") as open_rsp_file:
command_line = shlex.split(open_rsp_file, posix=False)
defines = [x for x in command_line if x.startswith('-D')]
include_dirs = [x for x in command_line if x.startswith('-I')]
dash_f = [x for x in command_line if x.startswith('-f')]
warnings = \
[x for x in command_line if x.startswith('/wd' if is_win else '-W')]
others = [x for x in command_line if x not in defines and \
x not in include_dirs and \
x not in dash_f and \
x not in warnings and \
x not in cc_file]
for index, value in enumerate(include_dirs):
if value == '-Igen':
continue
path = value[2:]
if not os.path.isabs(path):
path = os.path.join(build_dir, path)
include_dirs[index] = '-I' + os.path.normpath(path)
# GYP supports paths above the source root like <(DEPTH)/../foo while such
# paths are unsupported by gn. But gn allows using system-absolute paths
# instead (paths that start with a single '/'). Normalize all paths.
cc_file = [os.path.normpath(os.path.join(build_dir, cc_file[0]))]
# Filter for libFindBadConstructs.so having a relative path in one and
# absolute path in the other.
others_filtered = []
for x in others:
if x.startswith('-Xclang ') and \
(x.endswith('libFindBadConstructs.so') or \
x.endswith('libFindBadConstructs.dylib')):
others_filtered.append(
'-Xclang ' +
os.path.join(os.getcwd(), os.path.normpath(
os.path.join('out/gn_flags', x.split(' ', 1)[1]))))
elif x.startswith('-B'):
others_filtered.append(
'-B' +
os.path.join(os.getcwd(), os.path.normpath(
os.path.join('out/gn_flags', x[2:]))))
else:
others_filtered.append(x)
others = others_filtered
flags_by_output[cc_file[0]] = {
'output': output_name,
'depname': dep_name,
'defines': sorted(defines),
'include_dirs': sorted(include_dirs), # TODO(scottmg): This is wrong.
'dash_f': sorted(dash_f),
'warnings': sorted(warnings),
'other': sorted(others),
}
return flags_by_output
def _FindAndRemoveArgWithValue(command_line, argname):
"""Given a command line as a list, remove and return the value of an option
that takes a value as a separate entry.
Modifies |command_line| in place.
"""
if argname not in command_line:
return ''
location = command_line.index(argname)
value = command_line[location + 1]
command_line[location:location + 2] = []
return value
def _MergeSpacedArgs(command_line, argname):
"""Combine all arguments |argname| with their values, separated by a space."""
i = 0
result = []
while i < len(command_line):
arg = command_line[i]
if arg == argname:
result.append(arg + ' ' + command_line[i + 1])
i += 1
else:
result.append(arg)
i += 1
return result
def Run(command_line):
"""Run |command_line| as a subprocess and return stdout. Raises on error."""
print >> sys.stderr, command_line
return subprocess.check_output(command_line, shell=True)
def main():
if len(sys.argv) < 3:
print 'usage: %s gyp_dir gn_dir' % __file__
print ' or: %s gyp_dir gn_dir target' % __file__
print ' or: %s gyp_dir gn_dir gyp_target gn_target' % __file__
return 1
gyp_dir = sys.argv[1]
gn_dir = sys.argv[2]
gyp_target = gn_target = ""
if len(sys.argv) > 3:
# Use the same name for both build systems unless a GN name is given below.
gyp_target = gn_target = sys.argv[3]
if len(sys.argv) > 4:
gn_target = sys.argv[4]
print 'GYP output directory is %s' % gyp_dir
print 'GN output directory is %s' % gn_dir
comparison = Comparison(gyp_target, gn_target, gyp_dir, gn_dir)
differing_files = set(comparison.missing_in_gn_by_file.keys()) & \
set(comparison.missing_in_gyp_by_file.keys())
files_with_given_differences = {}
for filename in differing_files:
output = ''
missing_in_gyp = comparison.missing_in_gyp_by_file.get(filename, {})
missing_in_gn = comparison.missing_in_gn_by_file.get(filename, {})
difference_types = sorted(set(missing_in_gyp.keys() + missing_in_gn.keys()))
for difference_type in difference_types:
if (len(missing_in_gyp.get(difference_type, [])) == 0 and
len(missing_in_gn.get(difference_type, [])) == 0):
continue
output += ' %s differ:\n' % difference_type
if (difference_type in missing_in_gyp and
len(missing_in_gyp[difference_type])):
output += ' In gyp, but not in GN:\n %s' % '\n '.join(
sorted(missing_in_gyp[difference_type])) + '\n'
if (difference_type in missing_in_gn and
len(missing_in_gn[difference_type])):
output += ' In GN, but not in gyp:\n %s' % '\n '.join(
sorted(missing_in_gn[difference_type])) + '\n'
if output:
files_with_given_differences.setdefault(output, []).append(filename)
for diff, files in files_with_given_differences.iteritems():
print '\n'.join(sorted(files))
print diff
print 'Total differences:', comparison.total_differences
# TODO(scottmg): Return failure on difference once we're closer to identical.
return 0
if __name__ == '__main__':
sys.exit(main())
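
For reference, a minimal sketch of how the Comparison class above can be used
interactively once ConfigureBuild has been run; the 'modules_unittests' target
name is just the example from the module docstring and output is elided:
$ PYTHONPATH=tools python
>>> import gyp_flag_compare as fc
>>> c = fc.Comparison('modules_unittests')
>>> c.total_differences                  # total number of differing flags
>>> c.missing_in_gn.get('defines', {})   # defines in the GYP build only, keyed by define
>>> len(c.skipped)                       # command lines that were not compiler invocations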



@@ -1 +0,0 @@
../../perf


@@ -1,37 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# Copied from /src/chrome/test/pyautolib/pyauto_utils.py in Chromium.
import sys
def PrintPerfResult(graph_name, series_name, data_point, units,
show_on_waterfall=False):
"""Prints a line to stdout that is specially formatted for the perf bots.
Args:
graph_name: String name for the graph on which to plot the data.
series_name: String name for the series (line on the graph) associated with
the data. This is also the string displayed on the waterfall
if |show_on_waterfall| is True.
data_point: Numeric data value to plot on the graph for the current build.
This can be a single value or an array of values. If an array,
the graph will plot the average of the values, along with error
bars.
units: The string unit of measurement for the given |data_point|.
show_on_waterfall: Whether or not to display this result directly on the
buildbot waterfall itself (in the buildbot step running
this test on the waterfall page, not the stdio page).
"""
waterfall_indicator = ['', '*'][show_on_waterfall]
print '%sRESULT %s: %s= %s %s' % (
waterfall_indicator, graph_name, series_name,
str(data_point).replace(' ', ''), units)
sys.stdout.flush()
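
For reference, a minimal usage sketch of the helper above; the graph and series
names are invented for illustration, and the printed line is the RESULT format
described in the docstring:
>>> PrintPerfResult('video_quality', 'psnr_foreman', [35.1, 35.2, 35.0], 'dB')
RESULT video_quality: psnr_foreman= [35.1,35.2,35.0] dB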


@@ -1 +0,0 @@
kjellander@webrtc.org


@@ -1,41 +0,0 @@
This file describes how to set up Eclipse and then the Python Charts project.
Setup Eclipse
-------------
These instructions were tested on Linux, but are very similar for Windows and
Mac.
1. Ensure you have Python 2.x installed
2. Download and install Google App Engine SDK for Python from 
http://code.google.com/appengine/downloads.html
3. Note the location where you installed App Engine, as it will be needed later on.
4. Download Eclipse from http://www.eclipse.org. Any distribution will probably
do, but if you're going to do mainly web development, you might pick Eclipse
IDE for JavaScript Web Developers
5. Install the PyDev plugin using the Eclipse update site mentioned at 
http://pydev.org/download.html
6. Install the Google Plugin for Eclipse: http://code.google.com/eclipse/
Setup the project
-----------------
Generic instructions are available at
http://code.google.com/appengine/docs/python/gettingstarted/ but the following
should be enough:
1. Launch Eclipse and create a workspace
2. Create a new PyDev Project
3. In the PyDev Project wizard, uncheck the "Use Default" checkbox for Project
contents and browse to your tools/python_charts directory.
4. Enter a project name. We'll assume PythonCharts in the examples below.
5. In the radio button group in the lower part of the window, select
"Add project directory to the PYTHONPATH"
6. Click Finish
7. Select the Run > Run Configuration… menu item
8. Create a new "Python Run" configuration
9. Select your Python Charts project as project
10. As Main Module, enter the path to your dev_appserver.py, which is a part
of your App Engine installation,
e.g. /usr/local/google_appengine/dev_appserver.py
11. At the Arguments tab, enter the location of your project root.
Using Eclipse variables if your project name is PythonCharts:
${workspace_loc:PythonCharts}
12. Launch the development app server by clicking the Run button.
13. Launch a browser and go to http://localhost:8080
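If you prefer to skip Eclipse, the development server can presumably also be
started straight from a shell in the source tree root (assuming the SDK location
used in step 10 above and that app.yaml lives in tools/python_charts):
$ /usr/local/google_appengine/dev_appserver.py tools/python_charts
then browse to http://localhost:8080 as in step 13.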


@@ -1,9 +0,0 @@
application: webrtc-python-charts
version: 1
runtime: python
api_version: 1
handlers:
- url: /*
script: webrtc/main.py


@@ -1,50 +0,0 @@
# Sample output from the video_quality_measurement program, included only for
# reference. Generate your own by running with the --python flag and then change
# the filenames in main.py
test_configuration = [{'name': 'name', 'value': 'VP8 hardware test'},
{'name': 'description', 'value': ''},
{'name': 'test_number', 'value': '0'},
{'name': 'input_filename', 'value': 'foreman_cif.yuv'},
{'name': 'output_filename', 'value': 'foreman_cif_out.yuv'},
{'name': 'output_dir', 'value': '.'},
{'name': 'packet_size_in_bytes', 'value': '1500'},
{'name': 'max_payload_size_in_bytes', 'value': '1440'},
{'name': 'packet_loss_mode', 'value': 'Uniform'},
{'name': 'packet_loss_probability', 'value': '0.000000'},
{'name': 'packet_loss_burst_length', 'value': '1'},
{'name': 'exclude_frame_types', 'value': 'ExcludeOnlyFirstKeyFrame'},
{'name': 'frame_length_in_bytes', 'value': '152064'},
{'name': 'use_single_core', 'value': 'False'},
{'name': 'keyframe_interval;', 'value': '0'},
{'name': 'video_codec_type', 'value': 'VP8'},
{'name': 'width', 'value': '352'},
{'name': 'height', 'value': '288'},
{'name': 'bit_rate_in_kbps', 'value': '500'},
]
frame_data_types = {'frame_number': ('number', 'Frame number'),
'encoding_successful': ('boolean', 'Encoding successful?'),
'decoding_successful': ('boolean', 'Decoding successful?'),
'encode_time': ('number', 'Encode time (us)'),
'decode_time': ('number', 'Decode time (us)'),
'encode_return_code': ('number', 'Encode return code'),
'decode_return_code': ('number', 'Decode return code'),
'bit_rate': ('number', 'Bit rate (kbps)'),
'encoded_frame_length': ('number', 'Encoded frame length (bytes)'),
'frame_type': ('string', 'Frame type'),
'packets_dropped': ('number', 'Packets dropped'),
'total_packets': ('number', 'Total packets'),
'ssim': ('number', 'SSIM'),
'psnr': ('number', 'PSNR (dB)'),
}
frame_data = [{'frame_number': 0, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 94676, 'decode_time': 37942, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 1098, 'encoded_frame_length': 4579, 'frame_type': 'Other', 'packets_dropped': 0, 'total_packets': 4, 'ssim': 0.910364, 'psnr': 35.067258},
{'frame_number': 1, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 244007, 'decode_time': 39421, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 306, 'encoded_frame_length': 1277, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.911859, 'psnr': 35.115193},
{'frame_number': 2, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 240508, 'decode_time': 38918, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 330, 'encoded_frame_length': 1379, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.913597, 'psnr': 35.181604},
{'frame_number': 3, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 243449, 'decode_time': 39664, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 298, 'encoded_frame_length': 1242, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.912378, 'psnr': 35.164710},
{'frame_number': 4, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 248024, 'decode_time': 39115, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 332, 'encoded_frame_length': 1385, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.911471, 'psnr': 35.109488},
{'frame_number': 5, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 246910, 'decode_time': 39146, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 416, 'encoded_frame_length': 1734, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.915231, 'psnr': 35.392300},
{'frame_number': 6, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 242953, 'decode_time': 38827, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 279, 'encoded_frame_length': 1165, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.916130, 'psnr': 35.452889},
{'frame_number': 7, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 247343, 'decode_time': 41429, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 393, 'encoded_frame_length': 1639, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.919356, 'psnr': 35.647128},
{'frame_number': 8, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 249529, 'decode_time': 40329, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 487, 'encoded_frame_length': 2033, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.924705, 'psnr': 36.179837},
{'frame_number': 9, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 249408, 'decode_time': 41716, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 583, 'encoded_frame_length': 2433, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.928433, 'psnr': 36.589875},
]


@@ -1,50 +0,0 @@
# Sample output from the video_quality_measurement program, included only for
# reference. Generate your own by running with the --python flag and then change
# the filenames in main.py
test_configuration = [{'name': 'name', 'value': 'VP8 software test'},
{'name': 'description', 'value': ''},
{'name': 'test_number', 'value': '0'},
{'name': 'input_filename', 'value': 'foreman_cif.yuv'},
{'name': 'output_filename', 'value': 'foreman_cif_out.yuv'},
{'name': 'output_dir', 'value': '.'},
{'name': 'packet_size_in_bytes', 'value': '1500'},
{'name': 'max_payload_size_in_bytes', 'value': '1440'},
{'name': 'packet_loss_mode', 'value': 'Uniform'},
{'name': 'packet_loss_probability', 'value': '0.000000'},
{'name': 'packet_loss_burst_length', 'value': '1'},
{'name': 'exclude_frame_types', 'value': 'ExcludeOnlyFirstKeyFrame'},
{'name': 'frame_length_in_bytes', 'value': '152064'},
{'name': 'use_single_core', 'value': 'False'},
{'name': 'keyframe_interval;', 'value': '0'},
{'name': 'video_codec_type', 'value': 'VP8'},
{'name': 'width', 'value': '352'},
{'name': 'height', 'value': '288'},
{'name': 'bit_rate_in_kbps', 'value': '500'},
]
frame_data_types = {'frame_number': ('number', 'Frame number'),
'encoding_successful': ('boolean', 'Encoding successful?'),
'decoding_successful': ('boolean', 'Decoding successful?'),
'encode_time': ('number', 'Encode time (us)'),
'decode_time': ('number', 'Decode time (us)'),
'encode_return_code': ('number', 'Encode return code'),
'decode_return_code': ('number', 'Decode return code'),
'bit_rate': ('number', 'Bit rate (kbps)'),
'encoded_frame_length': ('number', 'Encoded frame length (bytes)'),
'frame_type': ('string', 'Frame type'),
'packets_dropped': ('number', 'Packets dropped'),
'total_packets': ('number', 'Total packets'),
'ssim': ('number', 'SSIM'),
'psnr': ('number', 'PSNR (dB)'),
}
frame_data = [{'frame_number': 0, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 12427, 'decode_time': 4403, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 2270, 'encoded_frame_length': 9459, 'frame_type': 'Other', 'packets_dropped': 0, 'total_packets': 7, 'ssim': 0.947050, 'psnr': 38.332820},
{'frame_number': 1, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 3292, 'decode_time': 821, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 88, 'encoded_frame_length': 368, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.927272, 'psnr': 35.883510},
{'frame_number': 2, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4295, 'decode_time': 902, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 130, 'encoded_frame_length': 544, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.920539, 'psnr': 35.457107},
{'frame_number': 3, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 3880, 'decode_time': 767, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 171, 'encoded_frame_length': 714, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.917434, 'psnr': 35.389298},
{'frame_number': 4, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4471, 'decode_time': 909, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 248, 'encoded_frame_length': 1035, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.918892, 'psnr': 35.570229},
{'frame_number': 5, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4447, 'decode_time': 976, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 269, 'encoded_frame_length': 1123, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.920609, 'psnr': 35.769663},
{'frame_number': 6, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4432, 'decode_time': 891, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 271, 'encoded_frame_length': 1132, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.922672, 'psnr': 35.913519},
{'frame_number': 7, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 5026, 'decode_time': 1068, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 366, 'encoded_frame_length': 1529, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.925505, 'psnr': 36.246713},
{'frame_number': 8, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4877, 'decode_time': 1051, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 369, 'encoded_frame_length': 1538, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.926122, 'psnr': 36.305984},
{'frame_number': 9, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4712, 'decode_time': 1087, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 406, 'encoded_frame_length': 1692, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.927183, 'psnr': 36.379735},
]


@@ -1 +0,0 @@
../../third_party/google-visualization-python/gviz_api.py


@@ -1,90 +0,0 @@
<html>
<!--
Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
Use of this source code is governed by a BSD-style license
that can be found in the LICENSE file in the root of the source
tree. An additional intellectual property rights grant can be found
in the file PATENTS. All contributing project authors may
be found in the AUTHORS file in the root of the source tree.
Template file to be used to generate Charts for Video Quality Metrics.
-->
<head>
<link href="http://code.google.com/css/codesite.pack.04102009.css"
rel="stylesheet" type="text/css" />
</head>
<script src="https://www.google.com/jsapi" type="text/javascript"></script>
<script>
google.load('visualization', '1', {packages:['table', 'corechart']});
google.setOnLoadCallback(drawTable);
function drawTable() {
/* Build data tables and views */
var configurations_data_table =
new google.visualization.DataTable(%(json_configurations)s);
var ssim_data_table =
new google.visualization.DataTable(%(json_ssim_data)s);
var psnr_data_table =
new google.visualization.DataTable(%(json_psnr_data)s);
var packet_loss_data_table =
new google.visualization.DataTable(%(json_packet_loss_data)s);
var bit_rate_data_table =
new google.visualization.DataTable(%(json_bit_rate_data)s);
/* Display tables and charts */
var configurations_table = new google.visualization.Table(
document.getElementById('table_div_configurations'));
configurations_table.draw(configurations_data_table, {
height: 200
});
var ssim_chart = new google.visualization.LineChart(
document.getElementById('table_div_ssim'));
ssim_chart.draw(ssim_data_table, {
colors: ['blue', 'red', 'lightblue', 'pink'],
vAxis: {title: 'SSIM'},
hAxis: {title: 'Frame'},
width: 1200, height: 300,
});
var psnr_chart = new google.visualization.LineChart(
document.getElementById('table_div_psnr'));
psnr_chart.draw(psnr_data_table, {
colors: ['blue', 'red', 'lightblue', 'pink'],
vAxis: {title: 'PSNR (dB)'},
hAxis: {title: 'Frame'},
width: 1200, height: 300,
});
var packet_loss_chart = new google.visualization.LineChart(
document.getElementById('table_div_packet_loss'));
packet_loss_chart.draw(packet_loss_data_table, {
colors: ['blue', 'red', 'lightblue', 'pink'],
vAxis: {title: 'Packets dropped'},
hAxis: {title: 'Frame'},
width: 1200, height: 300,
});
var bit_rate_chart = new google.visualization.LineChart(
document.getElementById('table_div_bit_rate'));
bit_rate_chart.draw(bit_rate_data_table, {
colors: ['blue', 'red', 'lightblue', 'pink', 'green'],
vAxis: {title: 'Bit rate'},
hAxis: {title: 'Frame'},
width: 1200, height: 300,
});
}
</script>
<body>
<h3>Test Configurations:</h3>
<div id="table_div_configurations"></div>
<h3>Messages:</h3>
<pre>%(messages)s</pre>
<h3>Metrics measured per frame:</h3>
<div id="table_div_ssim"></div>
<div id="table_div_psnr"></div>
<div id="table_div_packet_loss"></div>
<div id="table_div_bit_rate"></div>
</body>
</html>
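
For reference, the %(json_configurations)s-style fields above are ordinary Python
%-formatting placeholders. A minimal sketch of how the page is rendered, assuming
it is run from the tools/python_charts directory and using literal JSON stand-ins
instead of real gviz_api output (main.py further down does the real thing via
'print page_template % vars()'):
page_template = open('templates/chart_page_template.html').read()
json_configurations = '{"cols": [], "rows": []}'  # stand-in; normally gviz_api.DataTable(...).ToJSon()
json_ssim_data = json_psnr_data = '{"cols": [], "rows": []}'
json_packet_loss_data = json_bit_rate_data = '{"cols": [], "rows": []}'
messages = 'no messages'
print page_template % vars()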


@@ -1,8 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.


@@ -1,183 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
__author__ = 'kjellander@webrtc.org (Henrik Kjellander)'
class DataHelper(object):
"""
Helper class for managing table data.
This class does not verify the consistency of the data tables sent into it.
"""
def __init__(self, data_list, table_description, names_list, messages):
""" Initializes the DataHelper with data.
Args:
data_list: List of one or more data lists in the format that the
Google Visualization Python API expects (list of dictionaries, one
per row of data). See the gviz_api.DataTable documentation for more
info.
table_description: dictionary describing the data types of all
columns in the data lists, as defined in the gviz_api.DataTable
documentation.
names_list: List of strings of what we're going to name the data
columns after. Usually different runs of data collection.
messages: List of strings we might append error messages to.
"""
self.data_list = data_list
self.table_description = table_description
self.names_list = names_list
self.messages = messages
self.number_of_datasets = len(data_list)
self.number_of_frames = len(data_list[0])
def CreateData(self, field_name, start_frame=0, end_frame=0):
""" Creates a data structure for a specified data field.
Creates a data structure (data type description dictionary and a list
of data dictionaries) to be used with the Google Visualization Python
API. The frame_number column is always present and one column per data
set is added and its field name is suffixed by _N where N is the number
of the data set (0, 1, 2...)
Args:
field_name: String name of the field, must be present in the data
structure this DataHelper was created with.
start_frame: Frame number to start at (zero indexed). Default: 0.
end_frame: Frame number to be the last frame. If zero all frames
will be included. Default: 0.
Returns:
A tuple containing:
- a dictionary describing the columns in the result_data_table below.
This description uses the name for each data set specified by
names_list.
Example with two data sets named 'Foreman' and 'Crew':
{
'frame_number': ('number', 'Frame number'),
'ssim_0': ('number', 'Foreman'),
'ssim_1': ('number', 'Crew'),
}
- a list containing dictionaries (one per row) with the frame_number
column and one column of the specified field_name column per data
set.
Example with two data sets named 'Foreman' and 'Crew':
[
{'frame_number': 0, 'ssim_0': 0.98, 'ssim_1': 0.77 },
{'frame_number': 1, 'ssim_0': 0.81, 'ssim_1': 0.53 },
]
"""
# Build dictionary that describes the data types
result_table_description = {'frame_number': ('string', 'Frame number')}
for dataset_index in range(self.number_of_datasets):
column_name = '%s_%s' % (field_name, dataset_index)
column_type = self.table_description[field_name][0]
column_description = self.names_list[dataset_index]
result_table_description[column_name] = (column_type, column_description)
# Build data table of all the data
result_data_table = []
# We're going to have one dictionary per row.
# Create that and copy frame_number values from the first data set
for source_row in self.data_list[0]:
row_dict = {'frame_number': source_row['frame_number']}
result_data_table.append(row_dict)
# Pick target field data points from the all data tables
if end_frame == 0: # Default to all frames
end_frame = self.number_of_frames
for dataset_index in range(self.number_of_datasets):
for row_number in range(start_frame, end_frame):
column_name = '%s_%s' % (field_name, dataset_index)
# Stop if any of the data sets are missing the frame
try:
result_data_table[row_number][column_name] = \
self.data_list[dataset_index][row_number][field_name]
except IndexError:
self.messages.append("Couldn't find frame data for row %d "
"for %s" % (row_number, self.names_list[dataset_index]))
break
return result_table_description, result_data_table
def GetOrdering(self, table_description): # pylint: disable=R0201
""" Creates a list of column names, ordered alphabetically except for the
frame_number column which always will be the first column.
Args:
table_description: A dictionary of column definitions as defined by the
gviz_api.DataTable documentation.
Returns:
A list of column names, where frame_number is the first and the
remaining columns are sorted alphabetically.
"""
# The JSON data representation generated from gviz_api.DataTable.ToJSon()
# must have frame_number as its first column in order for the chart to
# use it as its X-axis value series.
# gviz_api.DataTable orders the columns by name by default, which will
# be incorrect if we have column names that are sorted before frame_number
# in our data table.
columns_ordering = ['frame_number']
# add all other columns:
for column in sorted(table_description.keys()):
if column != 'frame_number':
columns_ordering.append(column)
return columns_ordering
def CreateConfigurationTable(self, configurations): # pylint: disable=R0201
""" Combines multiple test data configurations for display.
Args:
configurations: List of one or more configurations. Each configuration
is required to be a list of dictionaries with two keys: 'name' and
'value'.
Example of a single configuration:
[
{'name': 'name', 'value': 'VP8 software'},
{'name': 'test_number', 'value': '0'},
{'name': 'input_filename', 'value': 'foreman_cif.yuv'},
]
Returns:
A tuple containing:
- a dictionary describing the columns in the configuration table to be
displayed. All columns will have string as data type.
Example:
{
'name': 'string',
'test_number': 'string',
'input_filename': 'string',
}
- a list containing dictionaries (one per configuration) with the
configuration column names mapped to the value for each test run:
Example matching the columns above:
[
{'name': 'VP8 software',
'test_number': '12',
'input_filename': 'foreman_cif.yuv' },
{'name': 'VP8 hardware',
'test_number': '5',
'input_filename': 'foreman_cif.yuv' },
]
"""
result_description = {}
result_data = []
for configuration in configurations:
data = {}
result_data.append(data)
for values in configuration:
name = values['name']
value = values['value']
result_description[name] = 'string'
data[name] = value
return result_description, result_data
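
A minimal usage sketch of the class above; the frame values are toy numbers, and
it assumes it is run from the tools/python_charts directory so that the webrtc
package import resolves (mirroring the test file below):
import webrtc.data_helper

types = {'frame_number': ('number', 'Frame number'), 'ssim': ('number', 'SSIM')}
run_a = [{'frame_number': 0, 'ssim': 0.91}, {'frame_number': 1, 'ssim': 0.92}]
run_b = [{'frame_number': 0, 'ssim': 0.94}, {'frame_number': 1, 'ssim': 0.95}]
messages = []
helper = webrtc.data_helper.DataHelper([run_a, run_b], types,
['Run A', 'Run B'], messages)
description, data = helper.CreateData('ssim')
# description == {'frame_number': ('string', 'Frame number'),
#                 'ssim_0': ('number', 'Run A'), 'ssim_1': ('number', 'Run B')}
# data == [{'frame_number': 0, 'ssim_0': 0.91, 'ssim_1': 0.94},
#          {'frame_number': 1, 'ssim_0': 0.92, 'ssim_1': 0.95}]
print helper.GetOrdering(description)  # ['frame_number', 'ssim_0', 'ssim_1']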


@@ -1,113 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import unittest
import webrtc.data_helper
class Test(unittest.TestCase):
def setUp(self):
# Simulate frame data from two different test runs, with 2 frames each.
self.frame_data_0 = [{'frame_number': 0, 'ssim': 0.5, 'psnr': 30.5},
{'frame_number': 1, 'ssim': 0.55, 'psnr': 30.55}]
self.frame_data_1 = [{'frame_number': 0, 'ssim': 0.6, 'psnr': 30.6},
{'frame_number': 0, 'ssim': 0.66, 'psnr': 30.66}]
self.all_data = [self.frame_data_0, self.frame_data_1]
# Test with the frame_number column in a non-first position since we need to
# support reordering it to be able to use the gviz_api as we want.
self.type_description = {
'ssim': ('number', 'SSIM'),
'frame_number': ('number', 'Frame number'),
'psnr': ('number', 'PSNR'),
}
self.names = ["Test 0", "Test 1"]
self.configurations = [
[{'name': 'name', 'value': 'Test 0'},
{'name': 'test_number', 'value': '13'},
{'name': 'input_filename', 'value': 'foreman_cif.yuv'},
],
[{'name': 'name', 'value': 'Test 1'},
{'name': 'test_number', 'value': '5'},
{'name': 'input_filename', 'value': 'foreman_cif.yuv'},
],
]
def testCreateData(self):
messages = []
helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
self.names, messages)
description, data_table = helper.CreateData('ssim')
self.assertEqual(3, len(description))
self.assertTrue('frame_number' in description)
self.assertTrue('ssim_0' in description)
self.assertTrue('number' in description['ssim_0'][0])
self.assertTrue('Test 0' in description['ssim_0'][1])
self.assertTrue('ssim_1' in description)
self.assertTrue('number' in description['ssim_1'][0])
self.assertTrue('Test 1' in description['ssim_1'][1])
self.assertEqual(0, len(messages))
self.assertEquals(2, len(data_table))
row = data_table[0]
self.assertEquals(0, row['frame_number'])
self.assertEquals(0.5, row['ssim_0'])
self.assertEquals(0.6, row['ssim_1'])
row = data_table[1]
self.assertEquals(1, row['frame_number'])
self.assertEquals(0.55, row['ssim_0'])
self.assertEquals(0.66, row['ssim_1'])
description, data_table = helper.CreateData('psnr')
self.assertEqual(3, len(description))
self.assertTrue('frame_number' in description)
self.assertTrue('psnr_0' in description)
self.assertTrue('psnr_1' in description)
self.assertEqual(0, len(messages))
self.assertEquals(2, len(data_table))
row = data_table[0]
self.assertEquals(0, row['frame_number'])
self.assertEquals(30.5, row['psnr_0'])
self.assertEquals(30.6, row['psnr_1'])
row = data_table[1]
self.assertEquals(1, row['frame_number'])
self.assertEquals(30.55, row['psnr_0'])
self.assertEquals(30.66, row['psnr_1'])
def testGetOrdering(self):
""" Tests that the ordering help method returns a list with frame_number
first and the rest sorted alphabetically """
messages = []
helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
self.names, messages)
description, _ = helper.CreateData('ssim')
columns = helper.GetOrdering(description)
self.assertEqual(3, len(columns))
self.assertEqual(0, len(messages))
self.assertEqual('frame_number', columns[0])
self.assertEqual('ssim_0', columns[1])
self.assertEqual('ssim_1', columns[2])
def testCreateConfigurationTable(self):
messages = []
helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
self.names, messages)
description, data = helper.CreateConfigurationTable(self.configurations)
self.assertEqual(3, len(description)) # 3 columns
self.assertEqual(2, len(data)) # 2 data sets
self.assertTrue(description.has_key('name'))
self.assertTrue(description.has_key('test_number'))
self.assertTrue(description.has_key('input_filename'))
self.assertEquals('Test 0', data[0]['name'])
self.assertEquals('Test 1', data[1]['name'])
if __name__ == "__main__":
unittest.main()


@@ -1,159 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import os
import gviz_api
import webrtc.data_helper
def main():
"""
This Python script displays a web page with test data created with the
video_quality_measurement program, which is a tool in WebRTC.
The script depends on two external files and one Python library:
- A HTML template file with layout and references to the json variables
defined in this script
- A data file in Python format, containing the following:
- test_configuration - a dictionary of test configuration names and values.
- frame_data_types - a dictionary that maps the different metrics to their
data types.
- frame_data - a list of dictionaries where each dictionary maps a metric to
its value.
- The gviz_api.py of the Google Visualization Python API, available at
http://code.google.com/p/google-visualization-python/
The HTML file is shipped with the script, while the data file must be
generated by running video_quality_measurement with the --python flag
specified.
"""
print 'Content-type: text/html\n' # the newline is required!
page_template_filename = '../templates/chart_page_template.html'
# The data files must be located in the project tree for App Engine to be
# able to access them.
data_filenames = ['../data/vp8_sw.py', '../data/vp8_hw.py']
# Will contain info/error messages to be displayed on the resulting page.
messages = []
# Load the page HTML template.
try:
f = open(page_template_filename)
page_template = f.read()
f.close()
except IOError as e:
ShowErrorPage('Cannot open page template file: %s<br>Details: %s' %
(page_template_filename, e))
return
# Read data from external Python script files. First check that they exist.
# Iterate over a copy, since missing files are removed from the list.
for filename in list(data_filenames):
if not os.path.exists(filename):
messages.append('Cannot open data file: %s' % filename)
data_filenames.remove(filename)
# Read data from all existing input files.
data_list = []
test_configurations = []
names = []
for filename in data_filenames:
read_vars = {} # empty dictionary to load the data into.
execfile(filename, read_vars, read_vars)
test_configuration = read_vars['test_configuration']
table_description = read_vars['frame_data_types']
table_data = read_vars['frame_data']
# Verify the data in the file loaded properly.
if not table_description or not table_data:
messages.append('Invalid input file: %s. Missing description list or '
'data dictionary variables.' % filename)
continue
# Frame numbers appear as number type in the data, but Chart API requires
# values of the X-axis to be of string type.
# Change the frame_number column data type:
table_description['frame_number'] = ('string', 'Frame number')
# Convert all the values to string types:
for row in table_data:
row['frame_number'] = str(row['frame_number'])
# Store the unique data from this file in the high level lists.
test_configurations.append(test_configuration)
data_list.append(table_data)
# Name of the test run must be present.
test_name = FindConfiguration(test_configuration, 'name')
if not test_name:
messages.append('Invalid input file: %s. Missing configuration key '
'"name"' % filename)
continue
names.append(test_name)
# Create data helper and build data tables for each graph.
helper = webrtc.data_helper.DataHelper(data_list, table_description,
names, messages)
# Loading it into gviz_api.DataTable objects and create JSON strings.
description, data = helper.CreateConfigurationTable(test_configurations)
configurations = gviz_api.DataTable(description, data)
json_configurations = configurations.ToJSon() # pylint: disable=W0612
description, data = helper.CreateData('ssim')
ssim = gviz_api.DataTable(description, data)
# pylint: disable=W0612
json_ssim_data = ssim.ToJSon(helper.GetOrdering(description))
description, data = helper.CreateData('psnr')
psnr = gviz_api.DataTable(description, data)
# pylint: disable=W0612
json_psnr_data = psnr.ToJSon(helper.GetOrdering(description))
description, data = helper.CreateData('packets_dropped')
packet_loss = gviz_api.DataTable(description, data)
# pylint: disable=W0612
json_packet_loss_data = packet_loss.ToJSon(helper.GetOrdering(description))
description, data = helper.CreateData('bit_rate')
# Add a column of data points for the desired bit rate to be plotted.
# (uses test configuration from the last data set, assuming it is the same
# for all of them)
desired_bit_rate = FindConfiguration(test_configuration, 'bit_rate_in_kbps')
if not desired_bit_rate:
ShowErrorPage('Cannot find configuration field named "bit_rate_in_kbps"')
return
desired_bit_rate = int(desired_bit_rate)
# Add new column data type description.
description['desired_bit_rate'] = ('number', 'Desired bit rate (kbps)')
for row in data:
row['desired_bit_rate'] = desired_bit_rate
bit_rate = gviz_api.DataTable(description, data)
# pylint: disable=W0612
json_bit_rate_data = bit_rate.ToJSon(helper.GetOrdering(description))
# Format the messages list with newlines.
messages = '\n'.join(messages)
# Put the variables as JSon strings into the template.
print page_template % vars()
def FindConfiguration(configuration, name):
""" Finds a configuration value using it's name.
Returns the first configuration with a matching name. Returns None if no
matching configuration is found. """
return_value = None
for row in configuration:
if row['name'] == name:
return_value = row['value']
break
return return_value
def ShowErrorPage(error_message):
print '<html><body>%s</body></html>' % error_message
if __name__ == '__main__':
main()
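
For reference, FindConfiguration above is a simple linear scan over the
name/value rows; with toy input it behaves like this:
>>> FindConfiguration([{'name': 'width', 'value': '352'}], 'width')
'352'
>>> FindConfiguration([{'name': 'width', 'value': '352'}], 'height') is None
True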