# ptest.py — OEQA runtime test case that runs ptest-runner on the target image
import os
import pprint
import unittest

from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.oeid import OETestID
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.utils.logparser import Lparser, Result
  8. class PtestRunnerTest(OERuntimeTestCase):
  9. # a ptest log parser
  10. def parse_ptest(self, logfile):
  11. parser = Lparser(test_0_pass_regex="^PASS:(.+)",
  12. test_0_fail_regex="^FAIL:(.+)",
  13. test_0_skip_regex="^SKIP:(.+)",
  14. section_0_begin_regex="^BEGIN: .*/(.+)/ptest",
  15. section_0_end_regex="^END: .*/(.+)/ptest")
  16. parser.init()
  17. result = Result()
  18. with open(logfile, errors='replace') as f:
  19. for line in f:
  20. result_tuple = parser.parse_line(line)
  21. if not result_tuple:
  22. continue
  23. result_tuple = line_type, category, status, name = parser.parse_line(line)
  24. if line_type == 'section' and status == 'begin':
  25. current_section = name
  26. continue
  27. if line_type == 'section' and status == 'end':
  28. current_section = None
  29. continue
  30. if line_type == 'test' and status == 'pass':
  31. result.store(current_section, name, status)
  32. continue
  33. if line_type == 'test' and status == 'fail':
  34. result.store(current_section, name, status)
  35. continue
  36. if line_type == 'test' and status == 'skip':
  37. result.store(current_section, name, status)
  38. continue
  39. result.sort_tests()
  40. return result
  41. @OETestID(1600)
  42. @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
  43. @OETestDepends(['ssh.SSHTest.test_ssh'])
  44. @unittest.expectedFailure
  45. def test_ptestrunner(self):
  46. status, output = self.target.run('which ptest-runner', 0)
  47. if status != 0:
  48. self.skipTest("No -ptest packages are installed in the image")
  49. import datetime
  50. test_log_dir = self.td.get('TEST_LOG_DIR', '')
  51. # The TEST_LOG_DIR maybe NULL when testimage is added after
  52. # testdata.json is generated.
  53. if not test_log_dir:
  54. test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
  55. # Don't use self.td.get('DATETIME'), it's from testdata.json, not
  56. # up-to-date, and may cause "File exists" when re-reun.
  57. datetime = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
  58. ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
  59. ptest_log_dir = '%s.%s' % (ptest_log_dir_link, datetime)
  60. ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')
  61. status, output = self.target.run('ptest-runner', 0)
  62. os.makedirs(ptest_log_dir)
  63. with open(ptest_runner_log, 'w') as f:
  64. f.write(output)
  65. # status != 0 is OK since some ptest tests may fail
  66. self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")
  67. if not hasattr(self.tc, "extraresults"):
  68. self.tc.extraresults = {}
  69. extras = self.tc.extraresults
  70. extras['ptestresult.rawlogs'] = {'log': output}
  71. # Parse and save results
  72. parse_result = self.parse_ptest(ptest_runner_log)
  73. parse_result.log_as_files(ptest_log_dir, test_status = ['pass','fail', 'skip'])
  74. if os.path.exists(ptest_log_dir_link):
  75. # Remove the old link to create a new one
  76. os.remove(ptest_log_dir_link)
  77. os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)
  78. trans = str.maketrans("()", "__")
  79. resmap = {'pass': 'PASSED', 'skip': 'SKIPPED', 'fail': 'FAILED'}
  80. for section in parse_result.result_dict:
  81. for test, result in parse_result.result_dict[section]:
  82. testname = "ptestresult." + section + "." + "_".join(test.translate(trans).split())
  83. extras[testname] = {'status': resmap[result]}
  84. failed_tests = {}
  85. for section in parse_result.result_dict:
  86. failed_testcases = [ "_".join(test.translate(trans).split()) for test, result in parse_result.result_dict[section] if result == 'fail' ]
  87. if failed_testcases:
  88. failed_tests[section] = failed_testcases
  89. if failed_tests:
  90. self.fail("Failed ptests:\n%s" % pprint.pformat(failed_tests))