# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (c) 2016 Google, Inc
#

from contextlib import contextmanager
import doctest
import glob
import multiprocessing
import os
import sys
import unittest

from io import StringIO

from u_boot_pylib import command

use_concurrent = True
try:
    from concurrencytest import ConcurrentTestSuite
    from concurrencytest import fork_for_tests
except ImportError:
    use_concurrent = False


def run_test_coverage(prog, filter_fname, exclude_list, build_dir, required=None,
                      extra_args=None, single_thread='-P1'):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (it will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: List of modules which must be in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the
            -t/test argument
        single_thread (str): Argument string to make the tests run
            single-threaded. This is necessary to get proper coverage results.
            The default is '-P1'

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s %s' % (prefix, ','.join(glob_list),
                                        prog, extra_args or '', test_cmd,
                                        single_thread or '-P1'))
    os.system(cmd)
    stdout = command.output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        # Take a copy so the caller's collection is not modified
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("To get a report in 'htmlcov/index.html', type: python3-coverage html")
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
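
# Example invocation (an illustrative sketch only; the tool name and paths
# below are assumptions, not part of this module):
#
#   run_test_coverage('tools/binman/binman', None,
#                     ['tools/patman/*.py', 'tools/dtoc/*.py'],
#                     'build-sandbox_spl')
#
# This re-runs the tool's own tests under python3-coverage and raises
# ValueError if the final report shows less than 100% coverage.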

# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
#   ...do something...
@contextmanager
def capture_sys_output():
    """Temporarily capture sys.stdout and sys.stderr in StringIO buffers"""
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err
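
# Minimal usage sketch (the assertion below is a hypothetical example, not
# part of this module):
#
#   with capture_sys_output() as (stdout, stderr):
#       print('hello')
#   assert stdout.getvalue() == 'hello\n'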


class FullTextTestResult(unittest.TextTestResult):
    """A test result class that can print extended text results to a stream

    This is meant to be used by a TestRunner as a result class. Like
    TextTestResult, this prints out the names of tests as they are run,
    errors as they occur, and a summary of the results at the end of the
    test run. Beyond those, this prints information about skipped tests,
    expected failures and unexpected successes.

    Args:
        stream: A file-like object to write results to
        descriptions (bool): True to print descriptions with test names
        verbosity (int): Detail of printed output per test as they run
            Test stdout and stderr always get printed when buffering
            them is disabled by the test runner. In addition to that,
            0: Print nothing
            1: Print a dot per test
            2: Print test names
    """
    def __init__(self, stream, descriptions, verbosity):
        self.verbosity = verbosity
        super().__init__(stream, descriptions, verbosity)

    def printErrors(self):
        "Called by TestRunner after test run to summarize the tests"
        # The parent class doesn't keep unexpected successes in the same
        # format as the rest. Adapt it to what printErrorList expects.
        unexpected_successes = [
            (test, 'Test was expected to fail, but succeeded.\n')
            for test in self.unexpectedSuccesses
        ]

        super().printErrors()  # FAIL and ERROR
        self.printErrorList('SKIP', self.skipped)
        self.printErrorList('XFAIL', self.expectedFailures)
        self.printErrorList('XPASS', unexpected_successes)

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        # Add empty line to keep spacing consistent with other results
        if not reason.endswith('\n'):
            reason += '\n'
        super().addSkip(test, reason)
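
# FullTextTestResult is hooked into a runner through the 'resultclass'
# argument, exactly as run_test_suites() does below. A standalone sketch
# ('MyTests' is a hypothetical test case, not part of this module):
#
#   runner = unittest.TextTestRunner(resultclass=FullTextTestResult,
#                                    verbosity=2)
#   runner.run(unittest.defaultTestLoader.loadTestsFromTestCase(MyTests))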


def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
                    test_name, toolpath, class_and_module_list):
    """Run a series of test suites and collect the results

    Args:
        toolname: Name of the tool that ran the tests
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (with test_name) it also
            preserves the output directory for this test. Both directories are
            displayed on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        class_and_module_list: List of test classes (type class) and module
            names (type str) to run
    """
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        verbosity=(1 if verbosity is None else verbosity),
        resultclass=FullTextTestResult,
    )
    if use_concurrent and processes != 1:
        suite = ConcurrentTestSuite(suite,
            fork_for_tests(processes or multiprocessing.cpu_count()))

    for module in class_and_module_list:
        if isinstance(module, str) and (not test_name or test_name == module):
            suite.addTests(doctest.DocTestSuite(module))

    for module in class_and_module_list:
        if isinstance(module, str):
            continue
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            # Since Python v3.5, if an ImportError or AttributeError occurs
            # while traversing a name then a synthetic test that raises that
            # error when run will be returned. Check that the requested test
            # exists, otherwise these errors are included in the results.
            if test_name in loader.getTestCaseNames(module):
                suite.addTests(loader.loadTestsFromName(test_name, module))
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))

    print(f" Running {toolname} tests ".center(70, "="))
    result = runner.run(suite)
    print()
    return result
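
# Typical call from a tool's test entry point (a sketch; 'FuncTest' and the
# doctest module name are assumptions for illustration):
#
#   result = run_test_suites('mytool', False, 2, False, None, None, [],
#                            ['mytool.doctest_module', FuncTest])
#   sys.exit(0 if result.wasSuccessful() else 1)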