
# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import glob
import json
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive

class ResultsTextReport(object):
    def __init__(self):
        self.ptests = {}
        self.ltptests = {}
        self.ltpposixtests = {}
        self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
                             'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
                             'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}

    def handle_ptest_result(self, k, status, result, machine):
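        """Record a single ptest result key under self.ptests[machine].

        'ptestresult.sections' entries seed the per-suite duration and
        timeout information; other keys update the per-suite
        passed/failed/skipped counters. Returns False for duplicate
        testcases so the caller can skip them, True otherwise.
        """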
        if machine not in self.ptests:
            self.ptests[machine] = {}

        if k == 'ptestresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ptestresult.sections']:
                if suite not in self.ptests[machine]:
                    self.ptests[machine][suite] = {
                        'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
                        'failed_testcases': [], "testcases": set(),
                    }
                if 'duration' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
                if 'timeout' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] += " T"
            return True

        # process test result
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return True

        # Handle suite names that contain a dot, e.g. 'glib-2.0'
        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ptestresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass

        if suite not in self.ptests[machine]:
            self.ptests[machine][suite] = {
                'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
                'failed_testcases': [], "testcases": set(),
            }

        # do not process duplicate results
        if test in self.ptests[machine][suite]["testcases"]:
            print("Warning: duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
            return False

        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ptests[machine][suite][tk] += 1
        self.ptests[machine][suite]["testcases"].add(test)
        return True

    def handle_ltptest_result(self, k, status, result, machine):
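        """Record a single LTP result key under self.ltptests[machine].

        'ltpresult.sections' entries seed the per-suite duration and timeout
        information; other keys update the per-suite passed/failed/skipped
        counters.
        """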
        if machine not in self.ltptests:
            self.ltptests[machine] = {}

        if k == 'ltpresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpresult.sections']:
                if suite not in self.ltptests[machine]:
                    self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
                if 'duration' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
                if 'timeout' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] += " T"
            return

        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return

        # Handle suite names that contain a dot, e.g. 'glib-2.0'
        if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass

        if suite not in self.ltptests[machine]:
            self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}

        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltptests[machine][suite][tk] += 1

    def handle_ltpposixtest_result(self, k, status, result, machine):
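        """Record a single LTP POSIX result key under self.ltpposixtests[machine].

        'ltpposixresult.sections' entries seed the per-suite duration
        information; other keys update the per-suite passed/failed/skipped
        counters.
        """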
        if machine not in self.ltpposixtests:
            self.ltpposixtests[machine] = {}

        if k == 'ltpposixresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpposixresult.sections']:
                if suite not in self.ltpposixtests[machine]:
                    self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
                if 'duration' in result['ltpposixresult.sections'][suite]:
                    self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
            return

        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return

        # Handle suite names that contain a dot, e.g. 'glib-2.0'
        if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpposixresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass

        if suite not in self.ltpposixtests[machine]:
            self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}

        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltpposixtests[machine][suite][tk] += 1

    def get_aggregated_test_result(self, logger, testresult, machine):
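        """Count passed/failed/skipped testcases in a single test result.

        ptest, LTP and LTP POSIX keys are also routed to their dedicated
        handlers so the per-suite tables are filled in as a side effect.
        Returns a dict with the counters and the list of failed testcases.
        """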
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', [])
        for k in result:
            test_status = result[k].get('status', [])
            if k.startswith("ptestresult."):
                if not self.handle_ptest_result(k, test_status, result, machine):
                    continue
            elif k.startswith("ltpresult."):
                self.handle_ltptest_result(k, test_status, result, machine)
            elif k.startswith("ltpposixresult."):
                self.handle_ltpposixtest_result(k, test_status, result, machine)
            # process result if it was not skipped by a handler
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
        return test_count_report

    def print_test_report(self, template_file_name, test_count_reports):
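        """Render the collected counters through a Jinja2 template and print the result.

        template_file_name is looked up in the 'template' directory next to
        this script; test_count_reports is the list of per-result counter
        dicts built in view_test_report.
        """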
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        reportvalues = []
        machines = []
        cols = ['passed', 'failed', 'skipped']
        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0, 'ltptest': 0, 'ltpposixtest': 0}
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                if total_tested:
                    vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
                else:
                    vals[k] = "0 (0%)"
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
            if line['machine'] not in machines:
                machines.append(line['machine'])
        reporttotalvalues = {}
        for k in cols:
            reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
        reporttotalvalues['count'] = '%s' % len(test_count_reports)
        for (machine, report) in self.ptests.items():
            for ptest in self.ptests[machine]:
                if len(ptest) > maxlen['ptest']:
                    maxlen['ptest'] = len(ptest)
        for (machine, report) in self.ltptests.items():
            for ltptest in self.ltptests[machine]:
                if len(ltptest) > maxlen['ltptest']:
                    maxlen['ltptest'] = len(ltptest)
        for (machine, report) in self.ltpposixtests.items():
            for ltpposixtest in self.ltpposixtests[machine]:
                if len(ltpposixtest) > maxlen['ltpposixtest']:
                    maxlen['ltpposixtest'] = len(ltpposixtest)
        output = template.render(reportvalues=reportvalues,
                                 reporttotalvalues=reporttotalvalues,
                                 havefailed=havefailed,
                                 machines=machines,
                                 ptests=self.ptests,
                                 ltptests=self.ltptests,
                                 ltpposixtests=self.ltpposixtests,
                                 maxlen=maxlen)
        print(output)

    def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
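        """Load test results and print the text report.

        Results come straight from source_dir, or from a git repository when
        a tag or commit is given. If raw_test or selected_test_case_only is
        set, the matching raw data is printed instead of the aggregated
        report.
        """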
        def print_selected_testcase_result(testresults, selected_test_case_only):
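            """Print the status (and log, if present) of one test case from every loaded result."""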
            for testsuite in testresults:
                for resultid in testresults[testsuite]:
                    result = testresults[testsuite][resultid]['result']
                    test_case_result = result.get(selected_test_case_only, {})
                    if test_case_result.get('status'):
                        print('Found selected test case result for %s from %s' % (selected_test_case_only,
                                                                                   resultid))
                        print(test_case_result['status'])
                    else:
                        print('Could not find selected test case result for %s from %s' % (selected_test_case_only,
                                                                                            resultid))
                    if test_case_result.get('log'):
                        print(test_case_result['log'])

        test_count_reports = []
        configmap = resultutils.store_map
        if use_regression_map:
            configmap = resultutils.regression_map
        if commit:
            if tag:
                logger.warning("Ignoring --tag as --commit was specified")
            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
            repo = GitRepo(source_dir)
            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
            rev_index = gitarchive.rev_find(revs, 'commit', commit)
            testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
        elif tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
        else:
            testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
        if raw_test:
            raw_results = {}
            for testsuite in testresults:
                result = testresults[testsuite].get(raw_test, {})
                if result:
                    raw_results[testsuite] = {raw_test: result}
            if raw_results:
                if selected_test_case_only:
                    print_selected_testcase_result(raw_results, selected_test_case_only)
                else:
                    print(json.dumps(raw_results, sort_keys=True, indent=1))
            else:
                print('Could not find raw test result for %s' % raw_test)
            return 0
        if selected_test_case_only:
            print_selected_testcase_result(testresults, selected_test_case_only)
            return 0
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                skip = False
                result = testresults[testsuite][resultid]
                machine = result['configuration']['MACHINE']
                # Check to see if there are already results for these kinds of tests for the machine
                for key in result['result'].keys():
                    testtype = str(key).split('.')[0]
                    if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                        print("Already have test results for %s on %s, skipping %s" % (testtype, machine, resultid))
                        skip = True
                        break
                if skip:
                    break
                test_count_report = self.get_aggregated_test_result(logger, result, machine)
                test_count_report['machine'] = machine
                test_count_report['testseries'] = result['configuration']['TESTSERIES']
                test_count_report['result_id'] = resultid
                test_count_reports.append(test_count_report)
        self.print_test_report('test_report_full_text.txt', test_count_reports)

def report(args, logger):
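    """Entry point for the 'report' subcommand registered below."""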
    report = ResultsTextReport()
    report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
                            args.raw_test_only, args.selected_test_case_only)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('report', help='summarise test results',
                                         description='print a text-based summary of the test results',
                                         group='analysis')
    parser_build.set_defaults(func=report)
    parser_build.add_argument('source_dir',
                              help='source file/directory/URL that contains the test result files to summarise')
    parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    parser_build.add_argument('--commit', help="Revision to report")
    parser_build.add_argument('-t', '--tag', default='',
                              help='source_dir is a git repository, report on the tag specified from that repository')
    parser_build.add_argument('-m', '--use_regression_map', action='store_true',
                              help='instead of the default "store_map", use the "regression_map" for report')
    parser_build.add_argument('-r', '--raw_test_only', default='',
                              help='output raw test result only for the user provided test result id')
    parser_build.add_argument('-s', '--selected_test_case_only', default='',
                              help='output selected test case result for the user provided test case id, if both test '
                                   'result id and test case id are provided then output the selected test case result '
                                   'from the provided test result id')