# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

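# A typical invocation, assuming the standard test/py/test.py wrapper script
# is used to set up sys.path and launch pytest (the board type is illustrative):
#
#   ./test/py/test.py --bd sandbox --build
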
import atexit
import errno
import os
import os.path
import pexpect
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by directories that already exist are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

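    # A minimal usage sketch; on Python 3 the same effect could be had with
    # os.makedirs(path, exist_ok=True), but this file targets Python 2, hence
    # the explicit EEXIST check below:
    #
    #   mkdir_p('/tmp/u-boot-test/persistent-data')  # path is illustrative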
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
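
    # Illustrative direct pytest invocation using the options above (paths and
    # board names are hypothetical):
    #
    #   py.test test/py --bd sandbox --build --build-dir /tmp/ub-sandbox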

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        runner = log.get_runner('make', sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)
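
    # A hedged sketch of what one of the optional board config modules above
    # might contain; the module name is derived from --board-type as coded
    # above, but the variable names and values here are purely illustrative:
    #
    #   # u_boot_boardenv_sandbox.py
    #   env__net_uses_pci = True
    #   env__net_dhcp_server = {'fixture_id': 'dhcp', 'timeout': 50000}
    #
    # Every name defined by such a module is merged into ubconfig.env (or
    # ubconfig.brd for the u_boot_board_* module) by the update() call above.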

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
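
    # RawConfigParser lower-cases option names, so a build configuration line
    # such as (option name illustrative):
    #
    #   CONFIG_CMD_MEMORY=y
    #
    # ends up as ubconfig.buildconfig['config_cmd_memory'] == 'y', which is
    # what setup_buildconfigspec() below looks up.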

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
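
    # For example, with the defaults this exports variables along the lines of
    # U_BOOT_BOARD_TYPE=sandbox and U_BOOT_BUILD_DIR=<source>/build-sandbox,
    # making the configuration visible to any external commands the tests spawn.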

    if board_type == 'sandbox':
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

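    # Illustrative example (fixture and key names are hypothetical): a test
    # declared as
    #
    #   def test_dhcp(u_boot_console, env__net_dhcp_server):
    #       ...
    #
    # runs once with the value of env__net_dhcp_server from the board
    # environment, or once per entry of a list-valued env__net_dhcp_servers if
    # only the plural key exists, and is effectively skipped if neither key is
    # present (it is parametrized with an empty value list).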
    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    for fn in metafunc.fixturenames:
        parts = fn.split('__')
        if len(parts) < 2:
            continue
        if parts[0] not in subconfigs:
            continue
        subconfig = subconfigs[parts[0]]
        vals = []
        val = subconfig.get(fn, [])
        # If that exact name is a key in the data source:
        if val:
            # ... use the dict value as a single parameter value.
            vals = (val, )
        else:
            # ... otherwise, see if there's a key that contains a list of
            # values to use instead.
            vals = subconfig.get(fn + 's', [])
        def fixture_id(index, val):
            try:
                return val["fixture_id"]
            except:
                return fn + str(index)
        ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
        metafunc.parametrize(fn, vals, ids=ids)

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console

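# A hypothetical test consuming the fixture above; run_command() is provided
# by the console implementation classes imported in pytest_configure():
#
#   def test_version(u_boot_console):
#       response = u_boot_console.run_command('version')
#       assert 'U-Boot' in response
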
tests_not_run = set()
tests_failed = set()
tests_skipped = set()
tests_passed = set()

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.add(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        log.status_pass('%d passed' % len(tests_passed))
        if tests_skipped:
            log.status_skipped('%d skipped' % len(tests_skipped))
            for test in tests_skipped:
                log.status_skipped('... ' + test)
        if tests_failed:
            log.status_fail('%d failed' % len(tests_failed))
            for test in tests_failed:
                log.status_fail('... ' + test)
        if tests_not_run:
            log.status_fail('%d not run' % len(tests_not_run))
            for test in tests_not_run:
                log.status_fail('... ' + test)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

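    # Example marker usage (board names are illustrative):
    #
    #   @pytest.mark.boardspec('sandbox')      # run only on the sandbox board
    #   @pytest.mark.boardspec('!seaboard')    # run on anything except seaboard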
    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

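    # Example marker usage; the option name is illustrative and refers to
    # CONFIG_CMD_MEMORY in the build configuration parsed by pytest_configure():
    #
    #   @pytest.mark.buildconfigspec('cmd_memory')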
    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)
    failed = None
    skipped = None
    for report in reports:
        if report.outcome == 'failed':
            failed = report
            break
        if report.outcome == 'skipped':
            if not skipped:
                skipped = report

    if failed:
        console.drain_console()
        tests_failed.add(item.name)
    elif skipped:
        tests_skipped.add(item.name)
    else:
        tests_passed.add(item.name)
    tests_not_run.remove(item.name)

    try:
        if failed:
            msg = 'FAILED:\n' + str(failed.longrepr)
            log.status_fail(msg)
        elif skipped:
            msg = 'SKIPPED:\n' + str(skipped.longrepr)
            log.status_skipped(msg)
        else:
            log.status_pass('OK')
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failed:
        console.cleanup_spawn()

    return reports