# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import errno
import os
import os.path
import pexpect
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Errors caused
    by directories that already exist are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '+
        'over which gdbserver should communicate, e.g. localhost:1234')

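# Illustrative example (not part of the original file): with the options
# defined above, a typical sandbox run might be invoked roughly as follows.
# The exact wrapper/driver used to launch pytest is an assumption; only the
# custom flags come from this file.
#
#   py.test test/py --bd sandbox --build --build-dir /tmp/u-boot-sandbox
#
# Standard pytest options (e.g. -k/-m selection, -v) can be combined with
# these custom flags.
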
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')

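    # Illustrative example (assumption, not from the original file): when
    # --gdbserver is given, the sandbox process is run under gdbserver on the
    # named channel, and a debugger can then be attached from another
    # terminal, e.g.:
    #
    #   py.test test/py --bd sandbox --gdbserver localhost:1234
    #   gdb ./build-sandbox/u-boot -ex 'target remote localhost:1234'
    #
    # Only the channel syntax shown in the option help above is defined here;
    # the gdb invocation is a sketch.
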
    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

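    # Illustrative example (assumption, not from the original file): a board
    # environment module is a plain Python file on sys.path, e.g. a
    # hypothetical u_boot_boardenv_sandbox.py containing module-level
    # variables such as:
    #
    #   env__example_value = {'fixture_id': 'default', 'answer': 42}
    #
    # Whatever the module defines is merged into ubconfig.env (or
    # ubconfig.brd for u_boot_board_* modules) and can drive the test
    # parametrization implemented in pytest_generate_tests() below.
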
    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

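    # Illustrative note (not from the original file): RawConfigParser
    # lower-cases option names by default, so a line such as
    #
    #   CONFIG_CMD_MEMORY=y
    #
    # in .config ends up as buildconfig key 'config_cmd_memory' with value
    # 'y'. That is the form the 'buildconfigspec' marker handling below looks
    # up via 'config_' + option.lower().
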
    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

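    # Illustrative note (assumption): exporting these as U_BOOT_* environment
    # variables (e.g. U_BOOT_BOARD_TYPE, U_BOOT_BUILD_DIR) lets external
    # helpers spawned during the tests locate the build and result
    # directories without further plumbing.
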
    if board_type == 'sandbox':
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    for fn in metafunc.fixturenames:
        parts = fn.split('__')
        if len(parts) < 2:
            continue
        if parts[0] not in subconfigs:
            continue
        subconfig = subconfigs[parts[0]]
        vals = []
        val = subconfig.get(fn, [])
        # If that exact name is a key in the data source:
        if val:
            # ... use the dict value as a single parameter value.
            vals = (val, )
        else:
            # ... otherwise, see if there's a key that contains a list of
            # values to use instead.
            vals = subconfig.get(fn + 's', [])
        def fixture_id(index, val):
            try:
                return val["fixture_id"]
            except:
                return fn + str(index)
        ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
        metafunc.parametrize(fn, vals, ids=ids)

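# Illustrative example (assumption, not from the original file): given a board
# environment module defining
#
#   env__net_dhcp_server = {'fixture_id': 'dhcp', 'setup_done': True}
#   env__console_baudrates = [115200, 38400]
#
# a test declared as
#
#   def test_baudrate(u_boot_console, env__console_baudrate):
#       ...
#
# would be parametrized once per entry of env__console_baudrates (the
# trailing-'s' fallback above), while a test taking env__net_dhcp_server would
# receive the dict as a single parameter value with id 'dhcp'. All names here
# are hypothetical.
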
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console

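# Illustrative example (assumption): a test simply names the fixture to get a
# live console object, e.g.:
#
#   def test_version(u_boot_console):
#       response = u_boot_console.run_command('version')
#       assert 'U-Boot' in response
#
# The run_command() call is assumed to be provided by the console
# implementation classes imported in pytest_configure() above; the test shown
# here is a sketch, not part of this file.
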
anchors = {}
tests_not_run = set()
tests_failed = set()
tests_xpassed = set()
tests_xfailed = set()
tests_skipped = set()
tests_passed = set()

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.add(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')

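# Illustrative example (assumption, not from the original file): a test can
# restrict or exclude board types via this marker, e.g.:
#
#   @pytest.mark.boardspec('sandbox')      # run only on sandbox
#   @pytest.mark.boardspec('!seaboard')    # run on anything except seaboard
#   def test_something(u_boot_console):
#       ...
#
# The board names here are examples only.
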
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')

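# Illustrative example (assumption): requiring a Kconfig feature, e.g.:
#
#   @pytest.mark.buildconfigspec('cmd_memory')
#   def test_md(u_boot_console):
#       ...
#
# skips the test unless CONFIG_CMD_MEMORY is enabled in the build's .config
# (see the lower-cased 'config_*' keys collected in pytest_configure()).
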
def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    anchors[item.name] = log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)

    failure_cleanup = False
    test_list = tests_passed
    msg = 'OK'
    msg_log = log.status_pass
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.add(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports
466