#!/usr/bin/env python3
#
# Examine build performance test results
#
# Copyright (c) 2017, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
import json
import logging
import os
import re
import sys
from collections import namedtuple, OrderedDict
from operator import attrgetter
from xml.etree import ElementTree as ET

# Import oe libs
scripts_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(scripts_path, 'lib'))
import scriptpath
from build_perf import print_table
from build_perf.report import (metadata_xml_to_json, results_xml_to_json,
                               aggregate_data, aggregate_metadata, measurement_stats,
                               AggregateTestData)
from build_perf import html
from buildstats import BuildStats, diff_buildstats, BSVerDiff

scriptpath.add_oe_lib_path()

from oeqa.utils.git import GitRepo, GitError
import oeqa.utils.gitarchive as gitarchive

# Setup logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')

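
# The results repository stores one git tag per test run; with the default
# --tag-name pattern the tags look like
#   {hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}
# Each tagged commit carries metadata and results files (xml or json) which
# are read, aggregated and rendered as a text or html report below.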
def list_test_revs(repo, tag_name, verbosity, **kwargs):
    """Get list of all tested revisions"""
    valid_kwargs = dict([(k, v) for k, v in kwargs.items() if v is not None])

    fields, revs = gitarchive.get_test_runs(log, repo, tag_name, **valid_kwargs)
    ignore_fields = ['tag_number']
    if verbosity < 2:
        extra_fields = ['COMMITS', 'TEST RUNS']
        ignore_fields.extend(['commit_number', 'commit'])
    else:
        extra_fields = ['TEST RUNS']

    print_fields = [i for i, f in enumerate(fields) if f not in ignore_fields]

    # Header row
    rows = [[fields[i].upper() for i in print_fields] + extra_fields]

    prev = [''] * len(print_fields)
    prev_commit = None
    commit_cnt = 0
    commit_field = fields.index('commit')
    for rev in revs:
        # Only use fields that we want to print
        cols = [rev[i] for i in print_fields]

        if cols != prev:
            commit_cnt = 1
            test_run_cnt = 1
            new_row = [''] * (len(print_fields) + len(extra_fields))

            # Only print field values that differ from the previous row
            for i in print_fields:
                if cols[i] != prev[i]:
                    break
            new_row[i:-len(extra_fields)] = cols[i:]
            rows.append(new_row)
        else:
            if rev[commit_field] != prev_commit:
                commit_cnt += 1
            test_run_cnt += 1

        if verbosity < 2:
            new_row[-2] = commit_cnt
        new_row[-1] = test_run_cnt

        prev = cols
        prev_commit = rev[commit_field]

    print_table(rows)

def is_xml_format(repo, commit):
    """Check if the commit contains xml (or json) data"""
    if repo.rev_parse(commit + ':results.xml'):
        log.debug("Detected report in xml format in %s", commit)
        return True
    else:
        log.debug("No xml report in %s, assuming json formatted results", commit)
        return False

def read_results(repo, tags, xml=True):
    """Read result files from repo"""

    def parse_xml_stream(data):
        """Parse multiple concatenated XML objects"""
        objs = []
        xml_d = ""
        for line in data.splitlines():
            if xml_d and line.startswith('<?xml version='):
                objs.append(ET.fromstring(xml_d))
                xml_d = line
            else:
                xml_d += line
        objs.append(ET.fromstring(xml_d))
        return objs

    def parse_json_stream(data):
        """Parse multiple concatenated JSON objects"""
        objs = []
        json_d = ""
        for line in data.splitlines():
            if line == '}{':
                # Boundary between two pretty-printed objects
                json_d += '}'
                objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
        return objs
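    # A single 'git show' of multiple objects concatenates their contents;
    # the helpers above split the stream back into one document per file.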
    num_revs = len(tags)

    # Optimize by reading all data with one git command
    log.debug("Loading raw result data from %d tags, %s...", num_revs, tags[0])
    if xml:
        git_objs = [tag + ':metadata.xml' for tag in tags] + [tag + ':results.xml' for tag in tags]
        data = parse_xml_stream(repo.run_cmd(['show'] + git_objs + ['--']))
        return ([metadata_xml_to_json(e) for e in data[0:num_revs]],
                [results_xml_to_json(e) for e in data[num_revs:]])
    else:
        git_objs = [tag + ':metadata.json' for tag in tags] + [tag + ':results.json' for tag in tags]
        data = parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--']))
        return data[0:num_revs], data[num_revs:]

def get_data_item(data, key):
    """Nested getitem lookup"""
    for k in key.split('.'):
        data = data[k]
    return data

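# Keys like 'layers.meta.commit' used throughout below are dotted paths into
# the nested metadata dicts, resolved one component at a time by
# get_data_item() above.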
def metadata_diff(metadata_l, metadata_r):
    """Prepare a metadata diff for printing"""
    keys = [('Hostname', 'hostname', 'hostname'),
            ('Branch', 'branch', 'layers.meta.branch'),
            ('Commit number', 'commit_num', 'layers.meta.commit_count'),
            ('Commit', 'commit', 'layers.meta.commit'),
            ('Number of test runs', 'testrun_count', 'testrun_count')
           ]

    def _metadata_diff(key):
        """Diff metadata from two test reports"""
        try:
            val1 = get_data_item(metadata_l, key)
        except KeyError:
            val1 = '(N/A)'
        try:
            val2 = get_data_item(metadata_r, key)
        except KeyError:
            val2 = '(N/A)'
        return val1, val2

    metadata = OrderedDict()
    for title, key, key_json in keys:
        value_l, value_r = _metadata_diff(key_json)
        metadata[key] = {'title': title,
                         'value_old': value_l,
                         'value': value_r}
    return metadata

def print_diff_report(metadata_l, data_l, metadata_r, data_r):
    """Print differences between two data sets"""
    # First, print general metadata
    print("\nTEST METADATA:\n==============")
    meta_diff = metadata_diff(metadata_l, metadata_r)
    row_fmt = ['{:{wid}} ', '{:<{wid}} ', '{:<{wid}}']
    rows = [['', 'CURRENT COMMIT', 'COMPARING WITH']]
    for key, val in meta_diff.items():
        # Shorten commit hashes
        if key == 'commit':
            rows.append([val['title'] + ':', val['value'][:20], val['value_old'][:20]])
        else:
            rows.append([val['title'] + ':', val['value'], val['value_old']])
    print_table(rows, row_fmt)

    # Print test results
    print("\nTEST RESULTS:\n=============")

    tests = list(data_l['tests'].keys())
    # Append tests that are only present in the 'right' set
    tests += [t for t in list(data_r['tests'].keys()) if t not in tests]

    # Prepare data to be printed
    rows = []
    row_fmt = ['{:8}', '{:{wid}}', '{:{wid}}', ' {:>{wid}}', ' {:{wid}} ', '{:{wid}}',
               ' {:>{wid}}', ' {:>{wid}}']
    num_cols = len(row_fmt)
    for test in tests:
        test_l = data_l['tests'][test] if test in data_l['tests'] else None
        test_r = data_r['tests'][test] if test in data_r['tests'] else None
        pref = ' '
        if test_l is None:
            pref = '+'
        elif test_r is None:
            pref = '-'
        descr = test_l['description'] if test_l else test_r['description']
        heading = "{} {}: {}".format(pref, test, descr)

        rows.append([heading])

        # Generate the list of measurements
        meas_l = test_l['measurements'] if test_l else {}
        meas_r = test_r['measurements'] if test_r else {}
        measurements = list(meas_l.keys())
        measurements += [m for m in list(meas_r.keys()) if m not in measurements]

        for meas in measurements:
            m_pref = ' '
            if meas in meas_l:
                stats_l = measurement_stats(meas_l[meas], 'l.')
            else:
                stats_l = measurement_stats(None, 'l.')
                m_pref = '+'
            if meas in meas_r:
                stats_r = measurement_stats(meas_r[meas], 'r.')
            else:
                stats_r = measurement_stats(None, 'r.')
                m_pref = '-'
            stats = stats_l.copy()
            stats.update(stats_r)

            absdiff = stats['val_cls'](stats['r.mean'] - stats['l.mean'])
            reldiff = "{:+.1f} %".format(absdiff * 100 / stats['l.mean'])
            if stats['r.mean'] > stats['l.mean']:
                absdiff = '+' + str(absdiff)
            else:
                absdiff = str(absdiff)
            rows.append(['', m_pref, stats['name'] + ' ' + stats['quantity'],
                         str(stats['l.mean']), '->', str(stats['r.mean']),
                         absdiff, reldiff])
        rows.append([''] * num_cols)

    print_table(rows, row_fmt)
    print()

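# Summary of the buildstats difference between two builds, as shown in the
# html report: change in task count, the top cpu-time consumers, the biggest
# increases and decreases in cpu time, and any recipe version changes.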
class BSSummary(object):
    def __init__(self, bs1, bs2):
        self.tasks = {'count': bs2.num_tasks,
                      'change': '{:+d}'.format(bs2.num_tasks - bs1.num_tasks)}
        self.top_consumer = None
        self.top_decrease = None
        self.top_increase = None
        self.ver_diff = OrderedDict()

        tasks_diff = diff_buildstats(bs1, bs2, 'cputime')

        # Get top consumers of resources
        tasks_diff = sorted(tasks_diff, key=attrgetter('value2'))
        self.top_consumer = tasks_diff[-5:]

        # Get biggest increase and decrease in resource usage
        tasks_diff = sorted(tasks_diff, key=attrgetter('absdiff'))
        self.top_decrease = tasks_diff[0:5]
        self.top_increase = tasks_diff[-5:]

        # Compare recipe versions and prepare data for display
        ver_diff = BSVerDiff(bs1, bs2)
        if ver_diff:
            if ver_diff.new:
                self.ver_diff['New recipes'] = [(n, r.evr) for n, r in ver_diff.new.items()]
            if ver_diff.dropped:
                self.ver_diff['Dropped recipes'] = [(n, r.evr) for n, r in ver_diff.dropped.items()]
            if ver_diff.echanged:
                self.ver_diff['Epoch changed'] = [(n, "{} &rarr; {}".format(r.left.evr, r.right.evr)) for n, r in ver_diff.echanged.items()]
            if ver_diff.vchanged:
                self.ver_diff['Version changed'] = [(n, "{} &rarr; {}".format(r.left.version, r.right.version)) for n, r in ver_diff.vchanged.items()]
            if ver_diff.rchanged:
                self.ver_diff['Revision changed'] = [(n, "{} &rarr; {}".format(r.left.evr, r.right.evr)) for n, r in ver_diff.rchanged.items()]

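# 'data' is a list of AggregateTestData(metadata, results) tuples, ordered
# from oldest to newest: data[-1] is the revision being reported on and
# data[id_comp] the revision it is compared against.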
def print_html_report(data, id_comp, buildstats):
    """Print report in html format"""
    # Handle metadata
    metadata = metadata_diff(data[id_comp].metadata, data[-1].metadata)

    # Generate list of tests
    tests = []
    for test in data[-1].results['tests'].keys():
        test_r = data[-1].results['tests'][test]
        new_test = {'name': test_r['name'],
                    'description': test_r['description'],
                    'status': test_r['status'],
                    'measurements': [],
                    'err_type': test_r.get('err_type'),
                   }
        # Limit the length of the error output shown
        if 'message' in test_r:
            lines = test_r['message'].splitlines()
            if len(lines) > 20:
                new_test['message'] = '...\n' + '\n'.join(lines[-20:])
            else:
                new_test['message'] = test_r['message']

        # Generate the list of measurements
        for meas in test_r['measurements'].keys():
            meas_r = test_r['measurements'][meas]
            meas_type = 'time' if meas_r['type'] == 'sysres' else 'size'
            new_meas = {'name': meas_r['name'],
                        'legend': meas_r['legend'],
                        'description': meas_r['name'] + ' ' + meas_type,
                       }
            samples = []

            # Run through all revisions in our data
            for meta, test_data in data:
                if (test not in test_data['tests'] or
                        meas not in test_data['tests'][test]['measurements']):
                    samples.append(measurement_stats(None))
                    continue
                test_i = test_data['tests'][test]
                meas_i = test_i['measurements'][meas]
                commit_num = get_data_item(meta, 'layers.meta.commit_count')
                commit = get_data_item(meta, 'layers.meta.commit')
                # Timestamp the sample for both measurement types (sysres and
                # disk usage): use commit_time if available, falling back to
                # the test's start_time
                try:
                    start_time = get_data_item(meta, 'layers.meta.commit_time')
                except KeyError:
                    start_time = test_i['start_time'][0]
                samples.append(measurement_stats(meas_i, '', start_time))
                samples[-1]['commit_num'] = commit_num
                samples[-1]['commit'] = commit

            absdiff = samples[-1]['val_cls'](samples[-1]['mean'] - samples[id_comp]['mean'])
            reldiff = absdiff * 100 / samples[id_comp]['mean']
            new_meas['absdiff'] = absdiff
            new_meas['absdiff_str'] = str(absdiff) if absdiff < 0 else '+' + str(absdiff)
            new_meas['reldiff'] = reldiff
            new_meas['reldiff_str'] = "{:+.1f} %".format(reldiff)
            new_meas['samples'] = samples
            new_meas['value'] = samples[-1]
            new_meas['value_type'] = samples[-1]['val_cls']

            # Compare buildstats
            bs_key = test + '.' + meas
            rev = str(metadata['commit_num']['value'])
            comp_rev = str(metadata['commit_num']['value_old'])
            if (buildstats and rev in buildstats and bs_key in buildstats[rev] and
                    comp_rev in buildstats and bs_key in buildstats[comp_rev]):
                new_meas['buildstats'] = BSSummary(buildstats[comp_rev][bs_key],
                                                   buildstats[rev][bs_key])

            new_test['measurements'].append(new_meas)
        tests.append(new_test)

    # Chart options
    chart_opts = {'haxis': {'min': get_data_item(data[0][0], 'layers.meta.commit_count'),
                            'max': get_data_item(data[-1][0], 'layers.meta.commit_count')}
                 }

    print(html.template.render(title="Build Perf Test Report",
                               metadata=metadata, test_data=tests,
                               chart_opts=chart_opts))

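# Buildstats are stored as git notes attached to the result tags, under
# refs/notes/buildstats/{hostname}/{branch}/{machine} (see main() below).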
def get_buildstats(repo, notes_ref, notes_ref2, revs, outdir=None):
    """Get the buildstats from git notes"""
    full_ref = 'refs/notes/' + notes_ref
    if not repo.rev_parse(full_ref):
        log.error("No buildstats found, please try running "
                  "'git fetch origin %s:%s' to fetch them from the remote",
                  full_ref, full_ref)
        return

    missing = False
    buildstats = {}
    log.info("Parsing buildstats from 'refs/notes/%s'", notes_ref)
    for rev in revs:
        buildstats[rev.commit_number] = {}
        log.debug('Dumping buildstats for %s (%s)', rev.commit_number,
                  rev.commit)
        for tag in rev.tags:
            log.debug('    %s', tag)
            try:
                try:
                    bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref, 'show', tag + '^0']))
                except GitError:
                    if notes_ref2:
                        bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref2, 'show', tag + '^0']))
                    else:
                        raise
            except GitError:
                log.warning("Buildstats not found for %s", tag)
                bs_all = {}
                missing = True

            for measurement, bs in bs_all.items():
                # Write out onto disk
                if outdir:
                    tag_base, run_id = tag.rsplit('/', 1)
                    tag_base = tag_base.replace('/', '_')
                    bs_dir = os.path.join(outdir, measurement, tag_base)
                    if not os.path.exists(bs_dir):
                        os.makedirs(bs_dir)
                    with open(os.path.join(bs_dir, run_id + '.json'), 'w') as f:
                        json.dump(bs, f, indent=2)

                # Read buildstats into a dict
                _bs = BuildStats.from_json(bs)
                if measurement not in buildstats[rev.commit_number]:
                    buildstats[rev.commit_number][measurement] = _bs
                else:
                    buildstats[rev.commit_number][measurement].aggregate(_bs)

    if missing:
        log.info("Buildstats were missing for some test runs, please "
                 "run 'git fetch origin %s:%s' and try again",
                 full_ref, full_ref)

    return buildstats

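# Result commits carry 'key: value' lines (e.g. 'hostname: ...') in their
# commit message body; auto_args() recovers --hostname and --branch from the
# latest such commit when they were not given on the command line.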
def auto_args(repo, args):
    """Guess arguments, if not defined by the user"""
    # Get the latest commit in the repo
    log.debug("Guessing arguments from the latest commit")
    msg = repo.run_cmd(['log', '-1', '--branches', '--remotes', '--format=%b'])
    for line in msg.splitlines():
        split = line.split(':', 1)
        if len(split) != 2:
            continue
        key = split[0]
        val = split[1].strip()
        if key == 'hostname' and not args.hostname:
            log.debug("Using hostname %s", val)
            args.hostname = val
        elif key == 'branch' and not args.branch:
            log.debug("Using branch %s", val)
            args.branch = val

def parse_args(argv):
    """Parse command line arguments"""
    description = """
Examine build performance test results from a Git repository"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=description)

    parser.add_argument('--debug', '-d', action='store_true',
                        help="Verbose logging")
    parser.add_argument('--repo', '-r', required=True,
                        help="Results repository (local git clone)")
    parser.add_argument('--list', '-l', action='count',
                        help="List available test runs")
    parser.add_argument('--html', action='store_true',
                        help="Generate report in html format")
    group = parser.add_argument_group('Tag and revision')
    group.add_argument('--tag-name', '-t',
                       default='{hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}',
                       help="Tag name (pattern) for finding results")
    group.add_argument('--hostname', '-H')
    group.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    group.add_argument('--branch2', help="Branch to find comparison revisions in")
    group.add_argument('--machine', default='qemux86')
    group.add_argument('--history-length', default=300, type=int,
                       help="Number of tested revisions to plot in html report")
    group.add_argument('--commit',
                       help="Revision to search for")
    group.add_argument('--commit-number',
                       help="Revision number to search for, redundant if "
                            "--commit is specified")
    group.add_argument('--commit2',
                       help="Revision to compare with")
    group.add_argument('--commit-number2',
                       help="Revision number to compare with, redundant if "
                            "--commit2 is specified")
    parser.add_argument('--dump-buildstats', nargs='?', const='.',
                        help="Dump buildstats of the tests")

    return parser.parse_args(argv)

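# Example invocations (the repository path is illustrative and <sha> stands
# for a commit hash present in the results repository):
#   oe-build-perf-report -r ~/build-perf-results --list
#   oe-build-perf-report -r ~/build-perf-results --html > report.html
#   oe-build-perf-report -r ~/build-perf-results --commit <sha> --commit2 <sha>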
def main(argv=None):
    """Script entry point"""
    args = parse_args(argv)
    if args.debug:
        log.setLevel(logging.DEBUG)

    repo = GitRepo(args.repo)

    if args.list:
        list_test_revs(repo, args.tag_name, args.list, hostname=args.hostname)
        return 0

    # Determine which hostname to use
    if not args.hostname:
        auto_args(repo, args)

    revs = gitarchive.get_test_revs(log, repo, args.tag_name, hostname=args.hostname,
                                    branch=args.branch, machine=args.machine)
    if args.branch2 and args.branch2 != args.branch:
        revs2 = gitarchive.get_test_revs(log, repo, args.tag_name, hostname=args.hostname,
                                         branch=args.branch2, machine=args.machine)
        if not len(revs2):
            log.error("No revisions found to compare against")
            return 1
        if not len(revs):
            log.error("No revisions found to report on")
            return 1
    else:
        if len(revs) < 2:
            log.error("Only %d tester revisions found, unable to generate report", len(revs))
            return 1

    # Pick revisions
    if args.commit:
        if args.commit_number:
            log.warning("Ignoring --commit-number as --commit was specified")
        index1 = gitarchive.rev_find(revs, 'commit', args.commit)
    elif args.commit_number:
        index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
    else:
        index1 = len(revs) - 1

    if args.branch2 and args.branch2 != args.branch:
        revs2.append(revs[index1])
        index1 = len(revs2) - 1
        revs = revs2

    if args.commit2:
        if args.commit_number2:
            log.warning("Ignoring --commit-number2 as --commit2 was specified")
        index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
    elif args.commit_number2:
        index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
    else:
        if index1 > 0:
            index2 = index1 - 1
            # Find the closest matching commit number for comparison.
            # In future we could check the commit is a common ancestor and
            # continue back if not, but this is good enough for now
            while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
                index2 = index2 - 1
        else:
            log.error("Unable to determine the other commit, use "
                      "--commit2 or --commit-number2 to specify it")
            return 1

    index_l = min(index1, index2)
    index_r = max(index1, index2)

    rev_l = revs[index_l]
    rev_r = revs[index_r]
    log.debug("Using 'left' revision %s (%s), %s test runs:\n    %s",
              rev_l.commit_number, rev_l.commit, len(rev_l.tags),
              '\n    '.join(rev_l.tags))
    log.debug("Using 'right' revision %s (%s), %s test runs:\n    %s",
              rev_r.commit_number, rev_r.commit, len(rev_r.tags),
              '\n    '.join(rev_r.tags))

    # Check report format used in the repo (assume all reports in the same fmt)
    xml = is_xml_format(repo, revs[index_r].tags[-1])

    if args.html:
        index_0 = max(0, min(index_l, index_r - args.history_length))
        rev_range = range(index_0, index_r + 1)
    else:
        # We do not need a range of commits for the text report (no graphs)
        index_0 = index_l
        rev_range = (index_l, index_r)

    # Read raw data
    log.debug("Reading %d revisions, starting from %s (%s)",
              len(rev_range), revs[index_0].commit_number, revs[index_0].commit)
    raw_data = [read_results(repo, revs[i].tags, xml) for i in rev_range]

    data = []
    for raw_m, raw_d in raw_data:
        data.append(AggregateTestData(aggregate_metadata(raw_m),
                                      aggregate_data(raw_d)))

    # Read buildstats only when needed
    buildstats = None
    if args.dump_buildstats or args.html:
        outdir = 'oe-build-perf-buildstats' if args.dump_buildstats else None
        notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
        notes_ref2 = None
        if args.branch2:
            notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch2, args.machine)
            notes_ref2 = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
        buildstats = get_buildstats(repo, notes_ref, notes_ref2, [rev_l, rev_r], outdir)

    # Print report
    if not args.html:
        print_diff_report(data[0].metadata, data[0].results,
                          data[1].metadata, data[1].results)
    else:
        # Re-map 'left' list index to the data table where index_0 maps to 0
        print_html_report(data, index_l - index_0, buildstats)

    return 0


if __name__ == "__main__":
    sys.exit(main())