# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import glob
import json
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive

class ResultsTextReport(object):
    def __init__(self):
        self.ptests = {}
        self.ltptests = {}
        self.ltpposixtests = {}
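        # Map each report column to the raw status strings that count toward it.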
        self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
                             'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
                             'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
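
    # Fold a single 'ptestresult.*' entry into the per-machine ptest summary.
    # Returns False for duplicate testcases so the caller can skip counting
    # them again, True otherwise.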
    def handle_ptest_result(self, k, status, result, machine):
        if machine not in self.ptests:
            self.ptests[machine] = {}
        if k == 'ptestresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ptestresult.sections']:
                if suite not in self.ptests[machine]:
                    self.ptests[machine][suite] = {
                        'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-',
                        'failed_testcases': [], "testcases": set(),
                    }
                if 'duration' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
                if 'timeout' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] += " T"
            return True
        # process test result
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return True
        # Handle 'glib-2.0'
        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ptestresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ptests[machine]:
            self.ptests[machine][suite] = {
                'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-',
                'failed_testcases': [], "testcases": set(),
            }
        # do not process duplicate results
        if test in self.ptests[machine][suite]["testcases"]:
            print("Warning duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
            return False
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ptests[machine][suite][tk] += 1
        self.ptests[machine][suite]["testcases"].add(test)
        return True
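
    # Fold a single 'ltpresult.*' entry into the per-machine LTP summary.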
    def handle_ltptest_result(self, k, status, result, machine):
        if machine not in self.ltptests:
            self.ltptests[machine] = {}
        if k == 'ltpresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpresult.sections']:
                if suite not in self.ltptests[machine]:
                    self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
                if 'duration' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
                if 'timeout' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] += " T"
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle 'glib-2.0'
        if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                print("split2: %s %s %s" % (suite, suite1, test))
                if suite + "." + suite1 in result['ltpresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ltptests[machine]:
            self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltptests[machine][suite][tk] += 1
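
    # Fold a single 'ltpposixresult.*' entry into the per-machine LTP POSIX summary.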
    def handle_ltpposixtest_result(self, k, status, result, machine):
        if machine not in self.ltpposixtests:
            self.ltpposixtests[machine] = {}
        if k == 'ltpposixresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpposixresult.sections']:
                if suite not in self.ltpposixtests[machine]:
                    self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
                if 'duration' in result['ltpposixresult.sections'][suite]:
                    self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle 'glib-2.0'
        if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpposixresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ltpposixtests[machine]:
            self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltpposixtests[machine][suite][tk] += 1
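
    # Aggregate overall pass/fail/skip counts for one result set, dispatching
    # ptest/ltp/ltp-posix entries to the dedicated handlers above.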
    def get_aggregated_test_result(self, logger, testresult, machine):
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', [])
        for k in result:
            test_status = result[k].get('status', [])
            if k.startswith("ptestresult."):
                if not self.handle_ptest_result(k, test_status, result, machine):
                    continue
            elif k.startswith("ltpresult."):
                self.handle_ltptest_result(k, test_status, result, machine)
            elif k.startswith("ltpposixresult."):
                self.handle_ltpposixtest_result(k, test_status, result, machine)
            # process result if it was not skipped by a handler
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
        return test_count_report
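
    # Render the aggregated counts through the Jinja2 template and print the
    # resulting text report to stdout.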
    def print_test_report(self, template_file_name, test_count_reports):
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        reportvalues = []
        machines = []
        cols = ['passed', 'failed', 'skipped']
        maxlen = {'passed': 0, 'failed': 0, 'skipped': 0, 'result_id': 0, 'testseries': 0, 'ptest': 0, 'ltptest': 0, 'ltpposixtest': 0}
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
            if line['machine'] not in machines:
                machines.append(line['machine'])
        reporttotalvalues = {}
        for k in cols:
            reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
        reporttotalvalues['count'] = '%s' % len(test_count_reports)
        for (machine, report) in self.ptests.items():
            for ptest in self.ptests[machine]:
                if len(ptest) > maxlen['ptest']:
                    maxlen['ptest'] = len(ptest)
        for (machine, report) in self.ltptests.items():
            for ltptest in self.ltptests[machine]:
                if len(ltptest) > maxlen['ltptest']:
                    maxlen['ltptest'] = len(ltptest)
        for (machine, report) in self.ltpposixtests.items():
            for ltpposixtest in self.ltpposixtests[machine]:
                if len(ltpposixtest) > maxlen['ltpposixtest']:
                    maxlen['ltpposixtest'] = len(ltpposixtest)
        output = template.render(reportvalues=reportvalues,
                                 reporttotalvalues=reporttotalvalues,
                                 havefailed=havefailed,
                                 machines=machines,
                                 ptests=self.ptests,
                                 ltptests=self.ltptests,
                                 ltpposixtests=self.ltpposixtests,
                                 maxlen=maxlen)
        print(output)
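
    # Load results (from a file/directory or from a git branch/tag/commit),
    # optionally dump a single raw result as JSON, and otherwise print the
    # aggregated text report.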
    def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test):
        test_count_reports = []
        configmap = resultutils.store_map
        if use_regression_map:
            configmap = resultutils.regression_map
        if commit:
            if tag:
                logger.warning("Ignoring --tag as --commit was specified")
            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
            repo = GitRepo(source_dir)
            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
            rev_index = gitarchive.rev_find(revs, 'commit', commit)
            testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
        elif tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
        else:
            testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
        if raw_test:
            raw_results = {}
            for testsuite in testresults:
                result = testresults[testsuite].get(raw_test, {})
                if result:
                    raw_results[testsuite] = result
            if raw_results:
                print(json.dumps(raw_results, sort_keys=True, indent=4))
            else:
                print('Could not find raw test result for %s' % raw_test)
            return 0
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                skip = False
                result = testresults[testsuite][resultid]
                machine = result['configuration']['MACHINE']
                # Check to see if there are already results for these kinds of tests for the machine
                for key in result['result'].keys():
                    testtype = str(key).split('.')[0]
                    if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
  235. print("Already have test results for %s on %s, skipping %s" %(str(key).split('.')[0], machine, resultid))
  236. skip = True
  237. break
  238. if skip:
  239. break
  240. test_count_report = self.get_aggregated_test_result(logger, result, machine)
  241. test_count_report['machine'] = machine
  242. test_count_report['testseries'] = result['configuration']['TESTSERIES']
  243. test_count_report['result_id'] = resultid
  244. test_count_reports.append(test_count_report)
  245. self.print_test_report('test_report_full_text.txt', test_count_reports)
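
# Entry point for the 'resulttool report' subcommand.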
def report(args, logger):
    report = ResultsTextReport()
    report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
                            args.raw_test_only)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('report', help='summarise test results',
                                         description='print a text-based summary of the test results',
                                         group='analysis')
    parser_build.set_defaults(func=report)
    parser_build.add_argument('source_dir',
                              help='source file/directory/URL that contains the test result files to summarise')
    parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    parser_build.add_argument('--commit', help="Revision to report")
    parser_build.add_argument('-t', '--tag', default='',
                              help='source_dir is a git repository, report on the tag specified from that repository')
    parser_build.add_argument('-m', '--use_regression_map', action='store_true',
                              help='instead of the default "store_map", use the "regression_map" for report')
    parser_build.add_argument('-r', '--raw_test_only', default='',
                              help='output raw test result only for the user provided test result id')
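
# Typical invocations via the OE-Core 'resulttool' wrapper (paths and ids are illustrative):
#   resulttool report /path/to/testresults
#   resulttool report --branch master --commit <sha> /path/to/results-git-repo
#   resulttool report --raw_test_only <result_id> /path/to/testresults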