/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_data.h>
#include <cpu_macros.S>
#include <debug.h>
#include <errata_report.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms.  After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
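
/*
 * Illustrative C equivalent of reset_handler above (a sketch only: the
 * real code runs before any stack is available, and the function
 * signature and member name reset_func are assumed from the
 * CPU_RESET_FUNC offset, not taken from a C header):
 *
 *	cpu_ops_t *ops = get_cpu_ops_ptr();
 *	assert(ops != NULL);
 *	if (ops->reset_func != NULL)
 *		ops->reset_func();
 */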

#ifdef IMAGE_BL31 /* Core and cluster power down handlers are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepares the CPU for power down, common to all platforms. The
	 * function takes the power domain level to be powered down as its
	 * parameter. After the cpu_ops pointer is retrieved from cpu_data,
	 * the handler for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi
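	/* x2 now holds min(power_level, CPU_MAX_PWR_DWN_OPS - 1) */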

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
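	/* Each handler entry is 8 bytes (a 64-bit pointer), hence lsl #3 */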
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn
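
/*
 * Roughly equivalent C for the function above (illustrative only: the
 * cpu_ops_t type, the pwr_dwn_ops member name, and the getter are
 * assumptions based on the CPU_PWR_DWN_OPS offset and per-CPU data
 * accessors, not definitive API):
 *
 *	void prepare_cpu_pwr_dwn(unsigned int power_level)
 *	{
 *		cpu_ops_t *ops = get_cpu_data(cpu_ops_ptr);
 *		unsigned int lvl = MIN(power_level, CPU_MAX_PWR_DWN_OPS - 1U);
 *
 *		ops->pwr_dwn_ops[lvl]();
 *	}
 */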


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
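	/*
	 * Note: the '!' writeback below also advances x6; this is harmless
	 * here since x6 is in the function's clobber list.
	 */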
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and searches the cpu_ops
	 * entries for a match. Only the implementer and part number
	 * fields are used to match the entries.
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)
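	/*
	 * x4 and x5 now point at the CPU_MIDR field of the first entry and
	 * one past the last entry, respectively.
	 */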

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
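	/* The post-index addressing advances x4 to the next cpu_ops entry */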
	and	w1, w1, w3

	/* Check if the MIDR matches that of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr
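
/*
 * The entries searched above are emitted into the cpu_ops linker section
 * by each CPU library file through the declare_cpu_ops macro from
 * cpu_macros.S. A minimal sketch (the exact argument list varies between
 * TF versions):
 *
 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 *		cortex_a53_reset_func, \
 *		cortex_a53_core_pwr_dwn, \
 *		cortex_a53_cluster_pwr_dwn
 */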

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
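
/*
 * Example: on an r2p1 part, MIDR variant = 2 and revision = 1, so
 * cpu_get_rev_var returns 0x21.
 */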

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
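
/*
 * Typical use (illustrative values): with the result of cpu_get_rev_var
 * in x0, a CPU errata check loads the last affected revision-variant
 * into x1; e.g. x1 = 0x12 makes cpu_rev_var_ls return ERRATA_APPLIES
 * for r1p2 and earlier parts.
 */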

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
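	/* xzr is stored only to keep the stack pointer 16-byte aligned */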
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data and, from that,
	 * the errata printing function. If it's non-NULL, jump to it.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
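
/*
 * Usage note (an observation about callers, not part of this file): the
 * result is typically consulted when deciding whether the
 * SMCCC_ARCH_WORKAROUND_1 mitigation needs to be advertised and invoked
 * on this CPU.
 */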