Reduce test flakiness caused by upload issues

Sometimes an upload fails because one measurement out of hundreds
couldn't be uploaded. This happens because of issues on the Dashboard
side, such as https://crbug.com/1145904. Such failures are flaky and
happen rarely, but we don't want them to affect our tests.

No-Presubmit: True
Bug: webrtc:12162
Change-Id: Ieb56a67a7d3d18b86c715ba14578aac7b9f6ba94
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/194040
Commit-Queue: Andrey Logvin <landrey@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32636}
Andrey Logvin 2020-11-18 22:07:15 +00:00 committed by Commit Bot
parent 01719fbeb5
commit 844125cc48


@@ -108,6 +108,59 @@ def _WaitForUploadConfirmation(url, oauth_token, upload_token, wait_timeout,
   return response, resp_json


+# Because of an issue on the Dashboard side, a few measurements out of a large
+# set can fail to upload. That would lead to the whole upload being marked as
+# failed. Check for this case, so that it doesn't increase the flakiness of
+# our tests.
+# TODO(crbug.com/1145904): Remove this check once the bug is fixed.
+def _CheckFullUploadInfo(url, oauth_token, upload_token,
+                         min_measurements_amount=100,
+                         max_failed_measurements_amount=1):
+  """Makes an HTTP GET request to the Performance Dashboard to get the full
+  info about the upload (including measurements). Checks whether the upload is
+  correct despite not having the status "COMPLETED".
+
+  Args:
+    url: URL of the Performance Dashboard instance, e.g.
+        "https://chromeperf.appspot.com".
+    oauth_token: An OAuth token to use for authorization.
+    upload_token: String that identifies the upload to the Performance
+        Dashboard and can be used for the status check.
+    min_measurements_amount: Minimum number of measurements that the upload
+        must contain before failures of individual measurements are tolerated.
+    max_failed_measurements_amount: Maximum number of failed measurements to
+        tolerate.
+  """
+  headers = {'Authorization': 'Bearer %s' % oauth_token}
+  http = httplib2.Http()
+  response, content = http.request(url + '/uploads/' + upload_token +
+                                   '?additional_info=measurements',
+                                   method='GET', headers=headers)
+
+  print 'Full upload info: %r.' % content
+
+  if response.status != 200:
+    print 'Failed to reach the dashboard to get full upload info.'
+    return False
+
+  resp_json = json.loads(content)
+  if 'measurements' in resp_json:
+    measurements_cnt = len(resp_json['measurements'])
+    not_completed_state_cnt = len([
+        m for m in resp_json['measurements'] if m['state'] != 'COMPLETED'
+    ])
+
+    if (measurements_cnt >= min_measurements_amount and
+        not_completed_state_cnt <= max_failed_measurements_amount):
+      print('Not all measurements were uploaded. Measurements count: %d, '
+            'failed to upload: %d' %
+            (measurements_cnt, not_completed_state_cnt))
+      return True
+
+  return False
+
+
 # TODO(https://crbug.com/1029452): HACKHACK
 # Remove once we have doubles in the proto and handle -infinity correctly.
 def _ApplyHacks(dicts):
@@ -183,15 +236,16 @@ def UploadToDashboard(options):
       datetime.timedelta(seconds=options.wait_timeout_sec),
       datetime.timedelta(seconds=options.wait_polling_period_sec))

+  if ((resp_json and resp_json['state'] == 'COMPLETED') or
+      _CheckFullUploadInfo(options.dashboard_url, oauth_token, upload_token)):
+    print 'Upload completed.'
+    return 0
+
   if response.status != 200 or resp_json['state'] == 'FAILED':
     print('Upload failed with %d: %s\n\n%s' % (response.status,
                                                response.reason,
                                                str(resp_json)))
     return 1

-  if resp_json['state'] == 'COMPLETED':
-    print 'Upload completed.'
-    return 0
-
   print('Upload wasn\'t completed in a given time: %d seconds.' %
         options.wait_timeout_sec)
   return 1
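
For illustration only (not part of the commit): a minimal sketch of the kind of
Dashboard payload that _CheckFullUploadInfo tolerates. Only the 'measurements'
list and the per-measurement 'state' field are taken from the code above; the
overall 'state' value and the example counts are assumptions made up for this
sketch.

# Hypothetical body returned by
# GET <dashboard_url>/uploads/<token>?additional_info=measurements.
example_upload_info = {
    'state': 'FAILED',  # the Dashboard marks the whole upload as failed
    'measurements': [{'state': 'COMPLETED'} for _ in range(199)] +
                    [{'state': 'FAILED'}],
}

measurements = example_upload_info['measurements']
not_completed = [m for m in measurements if m['state'] != 'COMPLETED']
# 200 measurements >= min_measurements_amount (100) and 1 failure <=
# max_failed_measurements_amount (1), so _CheckFullUploadInfo would report
# this upload as good and UploadToDashboard would return 0 despite the
# overall FAILED state.
print('measurements: %d, not completed: %d' %
      (len(measurements), len(not_completed)))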