# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.
import atexit
import errno
import os
import os.path
import pexpect
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by already-existing directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """
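    # Equivalent to 'mkdir -p'; handled manually because Python 2's
    # os.makedirs() has no exist_ok parameter.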

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '
        'over which gdbserver should communicate, e.g. localhost:1234')
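
# Example invocation (illustrative; assumes the test/py/test.py wrapper that
# normally drives this conftest):
#     test/py/test.py --bd sandbox --build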

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        runner = log.get_runner('make', sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
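            # ConfigParser needs a section header to parse the file, so
            # prepend a dummy [root] section around the flat key=value pairs.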
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
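    # Export the configuration via U_BOOT_* environment variables, e.g. so
    # that external hook/helper scripts invoked during testing can locate
    # these directories and the board identity.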
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type == 'sandbox':
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """
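    # Illustrative example (hypothetical fixture and board data): a test that
    # declares a fixture named env__net_dev is run once with the value of
    # env__net_dev from the board environment file, or, if that key is absent,
    # once per entry in the list env__net_devs (note the trailing 's'). Each
    # value may be a dict containing a 'fixture_id' key, used below to name
    # the parametrization.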

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    for fn in metafunc.fixturenames:
        parts = fn.split('__')
        if len(parts) < 2:
            continue
        if parts[0] not in subconfigs:
            continue
        subconfig = subconfigs[parts[0]]
        vals = []
        val = subconfig.get(fn, [])
        # If that exact name is a key in the data source:
        if val:
            # ... use the dict value as a single parameter value.
            vals = (val, )
        else:
            # ... otherwise, see if there's a key that contains a list of
            # values to use instead.
            vals = subconfig.get(fn + 's', [])
        def fixture_id(index, val):
            try:
                return val["fixture_id"]
            except:
                return fn + str(index)
        ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
        metafunc.parametrize(fn, vals, ids=ids)

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console
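
# Illustrative test using this fixture (hypothetical test; run_command() is
# assumed to be provided by the console object):
#     def test_version(u_boot_console):
#         response = u_boot_console.run_command('version')
#         assert 'U-Boot' in response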
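# Sets tracking the overall status of each test; a test's name moves from
# tests_not_run into exactly one result set as pytest_runtest_protocol()
# records its outcome.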
tests_not_run = set()
tests_failed = set()
tests_xpassed = set()
tests_xfailed = set()
tests_skipped = set()
tests_passed = set()

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.add(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        log.status_pass('%d passed' % len(tests_passed))
        if tests_skipped:
            log.status_skipped('%d skipped' % len(tests_skipped))
            for test in tests_skipped:
                log.status_skipped('... ' + test)
        if tests_xpassed:
            log.status_xpass('%d xpass' % len(tests_xpassed))
            for test in tests_xpassed:
                log.status_xpass('... ' + test)
        if tests_xfailed:
            log.status_xfail('%d xfail' % len(tests_xfailed))
            for test in tests_xfailed:
                log.status_xfail('... ' + test)
        if tests_failed:
            log.status_fail('%d failed' % len(tests_failed))
            for test in tests_failed:
                log.status_fail('... ' + test)
        if tests_not_run:
            log.status_fail('%d not run' % len(tests_not_run))
            for test in tests_not_run:
                log.status_fail('... ' + test)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
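    # Illustrative marker usage on a test (hypothetical board names):
    #     @pytest.mark.boardspec('sandbox')     # run only on sandbox
    #     @pytest.mark.boardspec('!seaboard')   # run anywhere except seaboard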

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
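    # Illustrative marker usage on a test; the following skips the test
    # unless CONFIG_CMD_MEMORY is enabled in the U-Boot build under test:
    #     @pytest.mark.buildconfigspec('cmd_memory')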

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)

    failure_cleanup = False
    test_list = tests_passed
    msg = 'OK'
    msg_log = log.status_pass
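    # runtestprotocol() returns one report per test phase (setup, call,
    # teardown); scan them and record the first failure or skip encountered.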
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.add(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this logging exception still occurs, it will then be
        # logged as part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports