  1. #
  2. # Copyright (C) 2016 Intel Corporation
  3. #
  4. # SPDX-License-Identifier: MIT
  5. #
  6. import os
  7. import time
  8. import unittest
  9. import logging
  10. import re
  11. import json
  12. import sys
  13. from unittest import TextTestResult as _TestResult
  14. from unittest import TextTestRunner as _TestRunner
  15. class OEStreamLogger(object):
  16. def __init__(self, logger):
  17. self.logger = logger
  18. self.buffer = ""
  19. def write(self, msg):
  20. if len(msg) > 1 and msg[0] != '\n':
  21. if '...' in msg:
  22. self.buffer += msg
  23. elif self.buffer:
  24. self.buffer += msg
  25. self.logger.log(logging.INFO, self.buffer)
  26. self.buffer = ""
  27. else:
  28. self.logger.log(logging.INFO, msg)
  29. def flush(self):
  30. for handler in self.logger.handlers:
  31. handler.flush()
  32. class OETestResult(_TestResult):
  33. def __init__(self, tc, *args, **kwargs):
  34. super(OETestResult, self).__init__(*args, **kwargs)
  35. self.successes = []
  36. self.starttime = {}
  37. self.endtime = {}
  38. self.progressinfo = {}
  39. # Inject into tc so that TestDepends decorator can see results
  40. tc.results = self
  41. self.tc = tc
  42. # stdout and stderr for each test case
  43. self.logged_output = {}
  44. def startTest(self, test):
  45. # May have been set by concurrencytest
  46. if test.id() not in self.starttime:
  47. self.starttime[test.id()] = time.time()
  48. super(OETestResult, self).startTest(test)
  49. def stopTest(self, test):
  50. self.endtime[test.id()] = time.time()
  51. if self.buffer:
  52. self.logged_output[test.id()] = (
  53. sys.stdout.getvalue(), sys.stderr.getvalue())
  54. super(OETestResult, self).stopTest(test)
  55. if test.id() in self.progressinfo:
  56. self.tc.logger.info(self.progressinfo[test.id()])
  57. # Print the errors/failures early to aid/speed debugging, its a pain
  58. # to wait until selftest finishes to see them.
  59. for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
  60. for (scase, msg) in getattr(self, t):
  61. if test.id() == scase.id():
  62. self.tc.logger.info(str(msg))
  63. break
  64. def logSummary(self, component, context_msg=''):
  65. elapsed_time = self.tc._run_end_time - self.tc._run_start_time
  66. self.tc.logger.info("SUMMARY:")
  67. self.tc.logger.info("%s (%s) - Ran %d test%s in %.3fs" % (component,
  68. context_msg, self.testsRun, self.testsRun != 1 and "s" or "",
  69. elapsed_time))
  70. if self.wasSuccessful():
  71. msg = "%s - OK - All required tests passed" % component
  72. else:
  73. msg = "%s - FAIL - Required tests failed" % component
  74. msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
  75. self.tc.logger.info(msg)
  76. def _getTestResultDetails(self, case):
  77. result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
  78. 'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED',
  79. 'unexpectedSuccesses' : 'PASSED'}
  80. for rtype in result_types:
  81. found = False
  82. for resultclass in getattr(self, rtype):
  83. # unexpectedSuccesses are just lists, not lists of tuples
  84. if isinstance(resultclass, tuple):
  85. scase, msg = resultclass
  86. else:
  87. scase, msg = resultclass, None
  88. if case.id() == scase.id():
  89. found = True
  90. break
  91. scase_str = str(scase.id())
  92. # When fails at module or class level the class name is passed as string
  93. # so figure out to see if match
  94. m = re.search(r"^setUpModule \((?P<module_name>.*)\).*$", scase_str)
  95. if m:
  96. if case.__class__.__module__ == m.group('module_name'):
  97. found = True
  98. break
  99. m = re.search(r"^setUpClass \((?P<class_name>.*)\).*$", scase_str)
  100. if m:
  101. class_name = "%s.%s" % (case.__class__.__module__,
  102. case.__class__.__name__)
  103. if class_name == m.group('class_name'):
  104. found = True
  105. break
  106. if found:
  107. return result_types[rtype], msg
  108. return 'UNKNOWN', None
  109. def addSuccess(self, test):
  110. #Added so we can keep track of successes too
  111. self.successes.append((test, None))
  112. super(OETestResult, self).addSuccess(test)
  113. def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
  114. dump_streams=False):
  115. self.tc.logger.info("RESULTS:")
  116. result = {}
  117. logs = {}
  118. if hasattr(self.tc, "extraresults"):
  119. result = self.tc.extraresults
  120. for case_name in self.tc._registry['cases']:
  121. case = self.tc._registry['cases'][case_name]
  122. (status, log) = self._getTestResultDetails(case)
  123. t = ""
  124. if case.id() in self.starttime and case.id() in self.endtime:
  125. t = " (" + "{0:.2f}".format(self.endtime[case.id()] - self.starttime[case.id()]) + "s)"
  126. if status not in logs:
  127. logs[status] = []
  128. logs[status].append("RESULTS - %s: %s%s" % (case.id(), status, t))
  129. report = {'status': status}
  130. if log:
  131. report['log'] = log
  132. if dump_streams and case.id() in self.logged_output:
  133. (stdout, stderr) = self.logged_output[case.id()]
  134. report['stdout'] = stdout
  135. report['stderr'] = stderr
  136. result[case.id()] = report
  137. for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
  138. if i not in logs:
  139. continue
  140. for l in logs[i]:
  141. self.tc.logger.info(l)
  142. if json_file_dir:
  143. tresultjsonhelper = OETestResultJSONHelper()
  144. tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
  145. def wasSuccessful(self):
  146. # Override as we unexpected successes aren't failures for us
  147. return (len(self.failures) == len(self.errors) == 0)
  148. class OEListTestsResult(object):
  149. def wasSuccessful(self):
  150. return True
  151. class OETestRunner(_TestRunner):
  152. streamLoggerClass = OEStreamLogger
  153. def __init__(self, tc, *args, **kwargs):
  154. kwargs['stream'] = self.streamLoggerClass(tc.logger)
  155. super(OETestRunner, self).__init__(*args, **kwargs)
  156. self.tc = tc
  157. self.resultclass = OETestResult
  158. def _makeResult(self):
  159. return self.resultclass(self.tc, self.stream, self.descriptions,
  160. self.verbosity)
  161. def _walk_suite(self, suite, func):
  162. for obj in suite:
  163. if isinstance(obj, unittest.suite.TestSuite):
  164. if len(obj._tests):
  165. self._walk_suite(obj, func)
  166. elif isinstance(obj, unittest.case.TestCase):
  167. func(self.tc.logger, obj)
  168. self._walked_cases = self._walked_cases + 1
  169. def _list_tests_name(self, suite):
  170. from oeqa.core.decorator.oetag import OETestTag
  171. self._walked_cases = 0
  172. def _list_cases(logger, case):
  173. oetag = None
  174. if hasattr(case, 'decorators'):
  175. for d in case.decorators:
  176. if isinstance(d, OETestTag):
  177. oetag = d.oetag
  178. logger.info("%s\t\t%s" % (oetag, case.id()))
  179. self.tc.logger.info("Listing all available tests:")
  180. self._walked_cases = 0
  181. self.tc.logger.info("id\ttag\t\ttest")
  182. self.tc.logger.info("-" * 80)
  183. self._walk_suite(suite, _list_cases)
  184. self.tc.logger.info("-" * 80)
  185. self.tc.logger.info("Total found:\t%s" % self._walked_cases)
  186. def _list_tests_class(self, suite):
  187. self._walked_cases = 0
  188. curr = {}
  189. def _list_classes(logger, case):
  190. if not 'module' in curr or curr['module'] != case.__module__:
  191. curr['module'] = case.__module__
  192. logger.info(curr['module'])
  193. if not 'class' in curr or curr['class'] != \
  194. case.__class__.__name__:
  195. curr['class'] = case.__class__.__name__
  196. logger.info(" -- %s" % curr['class'])
  197. logger.info(" -- -- %s" % case._testMethodName)
  198. self.tc.logger.info("Listing all available test classes:")
  199. self._walk_suite(suite, _list_classes)
  200. def _list_tests_module(self, suite):
  201. self._walked_cases = 0
  202. listed = []
  203. def _list_modules(logger, case):
  204. if not case.__module__ in listed:
  205. if case.__module__.startswith('_'):
  206. logger.info("%s (hidden)" % case.__module__)
  207. else:
  208. logger.info(case.__module__)
  209. listed.append(case.__module__)
  210. self.tc.logger.info("Listing all available test modules:")
  211. self._walk_suite(suite, _list_modules)
  212. def list_tests(self, suite, display_type):
  213. if display_type == 'name':
  214. self._list_tests_name(suite)
  215. elif display_type == 'class':
  216. self._list_tests_class(suite)
  217. elif display_type == 'module':
  218. self._list_tests_module(suite)
  219. return OEListTestsResult()
  220. class OETestResultJSONHelper(object):
  221. testresult_filename = 'testresults.json'
  222. def _get_existing_testresults_if_available(self, write_dir):
  223. testresults = {}
  224. file = os.path.join(write_dir, self.testresult_filename)
  225. if os.path.exists(file):
  226. with open(file, "r") as f:
  227. testresults = json.load(f)
  228. return testresults
  229. def _write_file(self, write_dir, file_name, file_content):
  230. file_path = os.path.join(write_dir, file_name)
  231. with open(file_path, 'w') as the_file:
  232. the_file.write(file_content)
  233. def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
  234. bb.utils.mkdirhier(write_dir)
  235. lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
  236. test_results = self._get_existing_testresults_if_available(write_dir)
  237. test_results[result_id] = {'configuration': configuration, 'result': test_result}
  238. json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
  239. self._write_file(write_dir, self.testresult_filename, json_testresults)
  240. bb.utils.unlockfile(lf)