test_util.py

# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (c) 2016 Google, Inc
#

from contextlib import contextmanager
import doctest
import glob
from io import StringIO
import multiprocessing
import os
import sys
import unittest

from u_boot_pylib import command

use_concurrent = True
try:
    from concurrencytest import ConcurrentTestSuite, fork_for_tests
except ImportError:
    use_concurrent = False
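

# The concurrencytest import above is optional: when it is not installed,
# run_test_suites() below falls back to running all tests in a single process.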


def run_test_coverage(prog, filter_fname, exclude_list, build_dir,
                      required=None, extra_args=None, single_thread='-P1'):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of module names which must be in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the
            -t/test arg
        single_thread (str): Argument string to make the tests run
            single-threaded. This is necessary to get proper coverage results.
            The default is '-P1'

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s %s' % (prefix, ','.join(glob_list),
                                        prog, extra_args or '', test_cmd,
                                        single_thread or '-P1'))
    os.system(cmd)
    stdout = command.output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        # Copy the set so the caller's 'required' is not modified
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("To get a report in 'htmlcov/index.html', type: "
              'python3-coverage html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
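

# A minimal sketch of a call to run_test_coverage(); the paths are
# hypothetical, but tools such as binman invoke it in a similar way:
#   run_test_coverage('tools/binman/binman', None,
#                     ['tools/patman/*', 'tools/dtoc/*'], 'buildir')
# This raises ValueError unless the coverage report shows 100%.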


# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr):
#   ...do something...
@contextmanager
def capture_sys_output():
    """Temporarily redirect sys.stdout and sys.stderr to StringIO buffers"""
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err


class FullTextTestResult(unittest.TextTestResult):
    """A test result class that can print extended text results to a stream

    This is meant to be used by a TestRunner as a result class. Like
    TextTestResult, this prints out the names of tests as they are run,
    errors as they occur, and a summary of the results at the end of the
    test run. Beyond those, this prints information about skipped tests,
    expected failures and unexpected successes.

    Args:
        stream: A file-like object to write results to
        descriptions (bool): True to print descriptions with test names
        verbosity (int): Detail of printed output per test as they run
            Test stdout and stderr always get printed when buffering
            them is disabled by the test runner. In addition to that,
            0: Print nothing
            1: Print a dot per test
            2: Print test names
    """
    def __init__(self, stream, descriptions, verbosity):
        self.verbosity = verbosity
        super().__init__(stream, descriptions, verbosity)

    def printErrors(self):
        "Called by TestRunner after test run to summarize the tests"
        # The parent class doesn't keep unexpected successes in the same
        # format as the rest. Adapt it to what printErrorList expects.
        unexpected_successes = [
            (test, 'Test was expected to fail, but succeeded.\n')
            for test in self.unexpectedSuccesses
        ]

        super().printErrors()  # FAIL and ERROR
        self.printErrorList('SKIP', self.skipped)
        self.printErrorList('XFAIL', self.expectedFailures)
        self.printErrorList('XPASS', unexpected_successes)

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        # Add empty line to keep spacing consistent with other results
        if not reason.endswith('\n'):
            reason += '\n'
        super().addSkip(test, reason)
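

# A minimal sketch of using FullTextTestResult directly (run_test_suites()
# below wires it up for real; 'MyTests' is a made-up TestCase class):
#   runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2,
#                                    resultclass=FullTextTestResult)
#   runner.run(unittest.TestLoader().loadTestsFromTestCase(MyTests))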


def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
                    test_name, toolpath, class_and_module_list):
    """Run a series of test suites and collect the results

    Args:
        toolname: Name of the tool that ran the tests
        debug: True to enable debugging, which shows a full stack trace on
            error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        class_and_module_list: List of test classes (type class) and module
            names (type str) to run
    """
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        verbosity=(1 if verbosity is None else verbosity),
        resultclass=FullTextTestResult,
    )
    if use_concurrent and processes != 1:
        suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
    for module in class_and_module_list:
        if isinstance(module, str) and (not test_name or test_name == module):
            suite.addTests(doctest.DocTestSuite(module))

    for module in class_and_module_list:
        if isinstance(module, str):
            continue
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            # Since Python v3.5, if an ImportError or AttributeError occurs
            # while traversing a name, a synthetic test that raises that
            # error when run will be returned. Check that the requested test
            # exists, otherwise these errors are included in the results.
            if test_name in loader.getTestCaseNames(module):
                suite.addTests(loader.loadTestsFromName(test_name, module))
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))

    print(f" Running {toolname} tests ".center(70, "="))
    result = runner.run(suite)
    print()
    return result
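

# A minimal sketch of how a tool might drive run_test_suites(); the name
# 'mytool' and the MyToolTests class are hypothetical, but binman and
# buildman follow this pattern:
#   result = run_test_suites('mytool', debug=False, verbosity=2,
#                            test_preserve_dirs=False, processes=None,
#                            test_name=None, toolpath=[],
#                            class_and_module_list=[MyToolTests])
#   sys.exit(0 if result.wasSuccessful() else 1)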