report.py

# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import os
import glob
import json
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive

class ResultsTextReport(object):
    def __init__(self):
        self.ptests = {}
        self.result_types = {'passed': ['PASSED', 'passed'],
                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
                             'skipped': ['SKIPPED', 'skipped']}
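
    # Ptest results arrive as flat keys of the form
    # 'ptestresult.<suite>.<testcase>' (or 'ptestresult.<suite>.<sub>.<testcase>'
    # for suites such as glib-2.0 whose names contain a dot), plus a special
    # 'ptestresult.sections' entry carrying per-suite metadata such as
    # duration and timeout.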
    def handle_ptest_result(self, k, status, result):
        if k == 'ptestresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ptestresult.sections']:
                if suite not in self.ptests:
                    self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
                if 'duration' in result['ptestresult.sections'][suite]:
                    self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
                if 'timeout' in result['ptestresult.sections'][suite]:
                    self.ptests[suite]['duration'] += " T"
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle suite names containing a dot (e.g. 'glib-2.0'): retry the
        # split and check whether the two-part name matches a known section
        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ptestresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ptests:
            self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ptests[suite][tk] += 1
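
    # A 'testresult' is expected to carry a 'result' mapping of test name to
    # per-test data, along the lines of (illustrative sketch only):
    #   {'result': {'ptestresult.zlib.some-test': {'status': 'PASSED'}, ...}}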
    def get_aggregated_test_result(self, logger, testresult):
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', [])
        for k in result:
            test_status = result[k].get('status', [])
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
            if k.startswith("ptestresult."):
                self.handle_ptest_result(k, test_status, result)
        return test_count_report
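
    # Rendering uses a Jinja2 template looked up in the 'template/' directory
    # next to this file; maxlen records the widest value seen per column so
    # the template can pad the plain-text table.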
    def print_test_report(self, template_file_name, test_count_reports):
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        haveptest = bool(self.ptests)
        reportvalues = []
        cols = ['passed', 'failed', 'skipped']
        maxlen = {'passed': 0, 'failed': 0, 'skipped': 0, 'result_id': 0, 'testseries': 0, 'ptest': 0}
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                # Guard against empty result sets to avoid a division by zero
                percent = format(line[k] / total_tested * 100, '.0f') if total_tested else "0"
                vals[k] = "%d (%s%%)" % (line[k], percent)
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
        for ptest in self.ptests:
            if len(ptest) > maxlen['ptest']:
                maxlen['ptest'] = len(ptest)
        output = template.render(reportvalues=reportvalues,
                                 havefailed=havefailed,
                                 haveptest=haveptest,
                                 ptests=self.ptests,
                                 maxlen=maxlen)
        print(output)
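
    # Results can be read from three kinds of source: a git repository queried
    # by commit (--commit, optionally --branch), a git repository queried by
    # tag (--tag), or a plain file/directory/URL of result files.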
    def view_test_report(self, logger, source_dir, branch, commit, tag):
        test_count_reports = []
        if commit:
            if tag:
                logger.warning("Ignoring --tag as --commit was specified")
            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
            repo = GitRepo(source_dir)
            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
            rev_index = gitarchive.rev_find(revs, 'commit', commit)
            testresults = resultutils.git_get_result(repo, revs[rev_index][2])
        elif tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag])
        else:
            testresults = resultutils.load_resultsdata(source_dir)
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                result = testresults[testsuite][resultid]
                test_count_report = self.get_aggregated_test_result(logger, result)
                test_count_report['testseries'] = result['configuration']['TESTSERIES']
                test_count_report['result_id'] = resultid
                test_count_reports.append(test_count_report)
        self.print_test_report('test_report_full_text.txt', test_count_reports)
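
# Handler for the 'report' subcommand; wired up in register_commands() below.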
def report(args, logger):
    report = ResultsTextReport()
    report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('report', help='summarise test results',
                                         description='print a text-based summary of the test results',
                                         group='analysis')
    parser_build.set_defaults(func=report)
    parser_build.add_argument('source_dir',
                              help='source file/directory/URL that contains the test result files to summarise')
    parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    parser_build.add_argument('--commit', help="Revision to report")
    parser_build.add_argument('-t', '--tag', default='',
                              help='source_dir is a git repository; report on the tag specified from that repository')
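
# Example invocations (illustrative only; assumes this plugin is loaded by the
# resulttool script and that test results exist at the paths shown):
#
#   resulttool report path/to/testresults/
#   resulttool report --branch master --commit <sha> path/to/results-repo
#   resulttool report -t <tag> path/to/results-repo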