xref: /rk3399_ARM-atf/include/lib/cpus/aarch64/cpu_macros.S (revision 1c20f05c5a4d292688a982cf05b64df9fce0726e)
1/*
2 * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6#ifndef CPU_MACROS_S
7#define CPU_MACROS_S
8
9#include <assert_macros.S>
10#include <lib/cpus/cpu_ops.h>
11#include <lib/cpus/errata.h>
12
13	/*
14	 * Write given expressions as quad words
15	 *
16	 * _count:
17	 *	Write at least _count quad words. If the given number of
18	 *	expressions is less than _count, repeat the last expression to
19	 *	fill _count quad words in total
20	 * _rest:
21	 *	Optional list of expressions. _this is for parameter extraction
22	 *	only, and has no significance to the caller
23	 *
24	 * Invoked as:
25	 *	fill_constants 2, foo, bar, blah, ...
26	 */
27	.macro fill_constants _count:req, _this, _rest:vararg
28	  .ifgt \_count
29	    /* Write the current expression */
30	    .ifb \_this
31	      .error "Nothing to fill"
32	    .endif
33	    .quad \_this
34
35	    /* Invoke recursively for remaining expressions */
36	    .ifnb \_rest
37	      fill_constants \_count-1, \_rest
38	    .else
39	      fill_constants \_count-1, \_this
40	    .endif
41	  .endif
42	.endm
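
	/*
	 * For illustration only (hypothetical symbols): the invocation below
	 * emits four quad words - foo, bar, bar, bar - since the last
	 * expression is repeated once the argument list runs out:
	 *
	 *	fill_constants 4, foo, bar
	 */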
43
44	/*
45	 * Declare CPU operations
46	 *
47	 * _name:
48	 *	Name of the CPU for which operations are being specified
49	 * _midr:
50 *	Numeric value expected to be read from the CPU's MIDR
51	 * _resetfunc:
52	 *	Reset function for the CPU. If there's no CPU reset function,
53	 *	specify CPU_NO_RESET_FUNC
54	 * _extra1:
55	 *	This is a placeholder for future per CPU operations.  Currently,
56	 *	some CPUs use this entry to set a test function to determine if
57	 *	the workaround for CVE-2017-5715 needs to be applied or not.
58	 * _extra2:
59	 *	This is a placeholder for future per CPU operations. Currently
60	 *	some CPUs use this entry to set a function to disable the
61	 *	workaround for CVE-2018-3639.
62	 * _extra3:
63	 *	This is a placeholder for future per CPU operations. Currently,
64	 *	some CPUs use this entry to set a test function to determine if
65	 *	the workaround for CVE-2022-23960 needs to be applied or not.
66	 * _e_handler:
67	 *	This is a placeholder for future per CPU exception handlers.
68	 * _power_down_ops:
69	 *	Comma-separated list of functions to perform power-down
70 *	operations on the CPU. Between one and CPU_MAX_PWR_DWN_OPS
71 *	functions may be specified.
72	 *	Starting at power level 0, these functions shall handle power
73	 *	down at subsequent power levels. If there aren't exactly
74	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
75	 *	used to handle power down at subsequent levels
76	 */
77	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
78		_extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
79	.section .cpu_ops, "a"
80	.align 3
81	.type cpu_ops_\_name, %object
82	.quad \_midr
83#if defined(IMAGE_AT_EL3)
84	.quad \_resetfunc
85#endif
86	.quad \_extra1
87	.quad \_extra2
88	.quad \_extra3
89	.quad \_e_handler
90#ifdef IMAGE_BL31
91	/* Insert list of functions */
92	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
93#endif
94	/*
95	 * It is possible (although unlikely) that a cpu may have no errata in
96	 * code. In that case the start label will not be defined. The list is
97	 * intended to be used in a loop, so define it as zero-length for
98	 * predictable behaviour. Since this macro is always called at the end
99	 * of the cpu file (after all errata have been parsed) we can be sure
100	 * that we are at the end of the list. Some cpus call declare_cpu_ops
101	 * twice, so only do this once.
102	 */
103	.pushsection .rodata.errata_entries
104	.ifndef \_name\()_errata_list_start
105		\_name\()_errata_list_start:
106	.endif
107	.ifndef \_name\()_errata_list_end
108		\_name\()_errata_list_end:
109	.endif
110	.popsection
111
112	/* and now put them in cpu_ops */
113	.quad \_name\()_errata_list_start
114	.quad \_name\()_errata_list_end
115
116#if REPORT_ERRATA
117	.ifndef \_name\()_cpu_str
118	  /*
119	   * Place the errata reported flag, and the spinlock that arbitrates
120	   * access to it, in the data section.
121	   */
122	  .pushsection .data
123	  define_asm_spinlock \_name\()_errata_lock
124	  \_name\()_errata_reported:
125	  .word	0
126	  .popsection
127
128	  /* Place CPU string in rodata */
129	  .pushsection .rodata
130	  \_name\()_cpu_str:
131	  .asciz "\_name"
132	  .popsection
133	.endif
134
135
136	/*
137	 * Mandatory errata status printing function for CPUs of
138	 * this class.
139	 */
140	.quad \_name\()_errata_report
141	.quad \_name\()_cpu_str
142
143#ifdef IMAGE_BL31
144	/* Pointers to errata lock and reported flag */
145	.quad \_name\()_errata_lock
146	.quad \_name\()_errata_reported
147#endif /* IMAGE_BL31 */
148#endif /* REPORT_ERRATA */
149
150#if defined(IMAGE_BL31) && CRASH_REPORTING
151	.quad \_name\()_cpu_reg_dump
152#endif
153	.endm
154
155	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
156		_power_down_ops:vararg
157		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
158			\_power_down_ops
159	.endm
160
161	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
162		_e_handler:req, _power_down_ops:vararg
163		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
164			0, 0, 0, \_e_handler, \_power_down_ops
165	.endm
166
167	.macro declare_cpu_ops_wa _name:req, _midr:req, \
168		_resetfunc:req, _extra1:req, _extra2:req, \
169		_extra3:req, _power_down_ops:vararg
170		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
171			\_extra1, \_extra2, \_extra3, 0, \_power_down_ops
172	.endm
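
	/*
	 * Illustrative sketch only (hypothetical CPU name, MIDR define and
	 * function names); a typical cpu file would end with something like:
	 *
	 *	declare_cpu_ops cortex_foo, CORTEX_FOO_MIDR, \
	 *		cortex_foo_reset_func, \
	 *		cortex_foo_core_pwr_dwn, \
	 *		cortex_foo_cluster_pwr_dwn
	 */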
173
174	/*
175	 * This macro is used on some CPUs to detect if they are vulnerable
176	 * to CVE-2017-5715.
177	 */
178	.macro	cpu_check_csv2 _reg _label
179	mrs	\_reg, id_aa64pfr0_el1
180	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
181	/*
182	 * If the field equals 1, branch targets trained in one context cannot
183	 * affect speculative execution in a different context.
184	 *
185	 * If the field equals 2, it means that the system is also aware of
186	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
187	 * expect users of the registers to do the right thing.
188	 *
189	 * Only apply mitigations if the value of this field is 0.
190	 */
191#if ENABLE_ASSERTIONS
192	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
193	ASM_ASSERT(lo)
194#endif
195
196	cmp	\_reg, #0
197	bne	\_label
198	.endm
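
	/*
	 * Illustrative use only (hypothetical label/flow): skip the mitigation
	 * when the field is non-zero, i.e. when CSV2 already provides the
	 * guarantee:
	 *
	 *	cpu_check_csv2	x0, 1f
	 *	<apply the CVE-2017-5715 mitigation here>
	 * 1:
	 */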
199
200	/*
201	 * Helper macro that reads the part number of the current
202	 * CPU and jumps to the given label if it matches the CPU
203	 * MIDR provided.
204	 *
205	 * Clobbers x0.
206	 */
207	.macro  jump_if_cpu_midr _cpu_midr, _label
208	mrs	x0, midr_el1
209	ubfx	x0, x0, MIDR_PN_SHIFT, #12
210	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
211	b.eq	\_label
212	.endm
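
	/*
	 * Illustrative use only (hypothetical MIDR define and label):
	 *
	 *	jump_if_cpu_midr CORTEX_FOO_MIDR, handle_foo_core
	 */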
213
214
215/*
216 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
217 * will be applied automatically
218 *
219 * _cpu:
220 *	Name of cpu as given to declare_cpu_ops
221 *
222 * _cve:
223 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
224 *
225 * _id:
226 *	Erratum or CVE number. Please combine with the previous field using the
227 *	ERRATUM or CVE macros
228 *
229 * _chosen:
230 *	Compile time flag on whether the erratum is included
231 *
232 * _apply_at_reset:
233 *	Whether the erratum should be automatically applied at reset
234 */
235.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
236	.pushsection .rodata.errata_entries
237		.align	3
238		.ifndef \_cpu\()_errata_list_start
239		\_cpu\()_errata_list_start:
240		.endif
241
242		/* check if unused and compile out if no references */
243		.if \_apply_at_reset && \_chosen
244			.quad	erratum_\_cpu\()_\_id\()_wa
245		.else
246			.quad	0
247		.endif
248		/* TODO(errata ABI): this prevents all checker functions from
249		 * being optimised away. Can be done away with unless the ABI
250		 * needs them */
251		.quad	check_erratum_\_cpu\()_\_id
252		/* Will fit CVEs with up to 10 characters in the ID field */
253		.word	\_id
254		.hword	\_cve
255		.byte	\_chosen
256		/* TODO(errata ABI): mitigated field for known but unmitigated
257		 * errata */
258		.byte	0x1
259	.popsection
260.endm
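
/*
 * Illustrative use only (hypothetical CPU, erratum number and build flag):
 * record an erratum that is not applied automatically at reset (final
 * argument 0), e.g. because it is handled elsewhere:
 *
 *	add_erratum_entry cortex_foo, ERRATUM(1234567), ERRATA_FOO_1234567, 0
 */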
261
262.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
263	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset
264
265	func erratum_\_cpu\()_\_id\()_wa
266		mov	x8, x30
267
268		/* save rev_var for workarounds that might need it but don't
269		 * restore to x0 because few will care */
270		mov	x7, x0
271		bl	check_erratum_\_cpu\()_\_id
272		cbz	x0, erratum_\_cpu\()_\_id\()_skip
273.endm
274
275.macro _workaround_end _cpu:req, _id:req
276	erratum_\_cpu\()_\_id\()_skip:
277		ret	x8
278	endfunc erratum_\_cpu\()_\_id\()_wa
279.endm
280
281/*******************************************************************************
282 * Errata workaround wrappers
283 ******************************************************************************/
284/*
285 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
286 * will be applied automatically
287 *
288 * _cpu:
289 *	Name of cpu as given to declare_cpu_ops
290 *
291 * _cve:
292 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
293 *
294 * _id:
295 *	Erratum or CVE number. Please combine with the previous field using the
296 *	ERRATUM or CVE macros
297 *
298 * _chosen:
299 *	Compile time flag on whether the erratum is included
300 *
301 * in body:
302 *	clobber x0 to x7 (please only use those)
303 *	argument x7 - cpu_rev_var
304 *
305 * _wa clobbers: x0-x8 (PCS compliant)
306 */
307.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
308	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
309.endm
310
311/*
312 * See `workaround_reset_start` for usage info. Additional arguments:
313 *
314 * _midr:
315 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
316 *	for errata applied in generic code
317 */
318.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
319	/*
320	 * Let errata specify if they need MIDR checking. Sadly, storing the
321	 * MIDR in an .equ so it can be retrieved automatically blows up, as it
322	 * stores some brackets in the symbol
323	 */
324	.ifnb \_midr
325		jump_if_cpu_midr \_midr, 1f
326		b	erratum_\_cpu\()_\_id\()_skip
327
328		1:
329	.endif
330	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
331.endm
332
333/*
334 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
335 * is kept here so that the same #define can be used for both macros
336 */
337.macro workaround_reset_end _cpu:req, _cve:req, _id:req
338	_workaround_end \_cpu, \_id
339.endm
340
341/*
342 * See `workaround_reset_start` for usage info. The _cve argument is kept here
343 * so that the same #define can be used for both macros. Additional arguments:
344 *
345 * _no_isb:
346 *	Optionally do not include the trailing isb. Please disable with the
347 *	NO_ISB macro
348 */
349.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
350	/*
351	 * Runtime errata do not have a reset function to issue the isb for them,
352	 * and a missing isb could be very problematic. It is also easy to miss,
353	 * as runtime errata tend to be scattered in generic code.
354	 */
355	.ifb \_no_isb
356		isb
357	.endif
358	_workaround_end \_cpu, \_id
359.endm
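
/*
 * Illustrative sketch only (hypothetical CPU, erratum number, build flag and
 * register): a reset-time workaround body paired with its wrappers. A matching
 * check_erratum_* definition (see helpers below) provides the checker that the
 * wrapper branches through:
 *
 *	workaround_reset_start cortex_foo, ERRATUM(1234567), ERRATA_FOO_1234567
 *		sysreg_bit_set	foo_cpuactlr_el1, BIT(3)
 *	workaround_reset_end cortex_foo, ERRATUM(1234567)
 */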
360
361/*******************************************************************************
362 * Errata workaround helpers
363 ******************************************************************************/
364/*
365 * Set a bit in a system register. Can set multiple bits but is limited by the
366 *  way the ORR instruction encodes them.
367 *
368 * _reg:
369 *	Register to write to
370 *
371 * _bit:
372 *	Bit to set. Please use a descriptive #define
373 *
374 * _assert:
375 *	Optionally whether to read back and assert that the bit has been
376 *	written. Please disable with NO_ASSERT macro
377 *
378 * clobbers: x1
379 */
380.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
381	mrs	x1, \_reg
382	orr	x1, x1, #\_bit
383	msr	\_reg, x1
384.endm
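
/*
 * Illustrative use only (hypothetical implementation defined register and bit
 * define):
 *
 *	sysreg_bit_set	foo_cpuactlr_el1, BIT(18)
 */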
385
386/*
387 * Clear a bit in a system register. Can clear multiple bits but is limited by
388 *  the way the BIC instruction encodes them.
389 *
390 * see sysreg_bit_set for usage
391 */
392.macro sysreg_bit_clear _reg:req, _bit:req
393	mrs	x1, \_reg
394	bic	x1, x1, #\_bit
395	msr	\_reg, x1
396.endm
397
398.macro override_vector_table _table:req
399	adr	x1, \_table
400	msr	vbar_el3, x1
401.endm
402
403/*
404 * Insert a bitfield into a system register using BFI:
405 *
406 * BFI Xd, Xn, #lsb, #width
407 */
408.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
409	/* Source value for BFI */
410	mov	x1, #\_src
411	mrs	x0, \_reg
412	bfi	x0, x1, #\_lsb, #\_width
413	msr	\_reg, x0
414.endm
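
/*
 * Illustrative use only (hypothetical register and field): write the value 0x3
 * into a 2-bit field starting at bit 8:
 *
 *	sysreg_bitfield_insert	foo_cpuectlr_el1, 0x3, 8, 2
 */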
415
416.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
417	/* Source value in register for BFI */
418	mov	x1, \_gpr
419	mrs	x0, \_reg
420	bfi	x0, x1, #\_lsb, #\_width
421	msr	\_reg, x0
422.endm
423
424/*
425 * Apply erratum
426 *
427 * _cpu:
428 *	Name of cpu as given to declare_cpu_ops
429 *
430 * _cve:
431 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
432 *
433 * _id:
434 *	Erratum or CVE number. Please combine with the previous field using the
435 *	ERRATUM or CVE macros
436 *
437 * _chosen:
438 *	Compile time flag on whether the erratum is included
439 *
440 * _get_rev:
441 *	Optional parameter that determines whether to insert a call to the CPU revision fetching
442 *	procedure. Stores the result of this in the temporary register x10.
443 *
444 * clobbers: x0-x10 (PCS compliant)
445 */
446.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
447	.if (\_chosen & \_get_rev)
448		mov	x9, x30
449		bl	cpu_get_rev_var
450		mov	x10, x0
451	.elseif (\_chosen)
452		mov	x9, x30
453		mov	x0, x10
454	.endif
455
456	.if \_chosen
457		bl	erratum_\_cpu\()_\_id\()_wa
458		mov	x30, x9
459	.endif
460.endm
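
/*
 * Illustrative use only (hypothetical CPU, erratum number and build flag),
 * e.g. from a power-down hook; with the default _get_rev the wrapper fetches
 * the revision itself via cpu_get_rev_var:
 *
 *	apply_erratum cortex_foo, ERRATUM(1234567), ERRATA_FOO_1234567
 */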
461
462/*
463 * Helpers to select which revisions errata apply to. Don't set up a link
464 * register, as the cpu_rev_var_*** helpers issue the ret and we save one.
465 *
466 * _cpu:
467 *	Name of cpu as given to declare_cpu_ops
468 *
469 * _cve:
470 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
471 *
472 * _id:
473 *	Erratum or CVE number. Please combine with the previous field using the
474 *	ERRATUM or CVE macros
475 *
476 * _rev_num:
477 *	Revision to apply to
478 *
479 * in body:
480 *	clobber: x0 to x4
481 *	argument: x0 - cpu_rev_var
482 */
483.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
484	func check_erratum_\_cpu\()_\_id
485		mov	x1, #\_rev_num
486		b	cpu_rev_var_ls
487	endfunc check_erratum_\_cpu\()_\_id
488.endm
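
/*
 * Illustrative use only (hypothetical CPU, erratum number and revision):
 * report the erratum as applying to revisions up to and including r1p0,
 * encoded as 0x10, assuming the usual (variant << 4) | revision packing
 * returned by cpu_get_rev_var:
 *
 *	check_erratum_ls cortex_foo, ERRATUM(1234567), 0x10
 */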
489
490.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
491	func check_erratum_\_cpu\()_\_id
492		mov	x1, #\_rev_num
493		b	cpu_rev_var_hs
494	endfunc check_erratum_\_cpu\()_\_id
495.endm
496
497.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
498	func check_erratum_\_cpu\()_\_id
499		mov	x1, #\_rev_num_lo
500		mov	x2, #\_rev_num_hi
501		b	cpu_rev_var_range
502	endfunc check_erratum_\_cpu\()_\_id
503.endm
504
505.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
506	func check_erratum_\_cpu\()_\_id
507		.if \_chosen
508			mov	x0, #ERRATA_APPLIES
509		.else
510			mov	x0, #ERRATA_MISSING
511		.endif
512		ret
513	endfunc check_erratum_\_cpu\()_\_id
514.endm
515
516/* provide a shorthand for the name format for annoying errata */
517.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
518	func check_erratum_\_cpu\()_\_id
519.endm
520
521.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
522	endfunc check_erratum_\_cpu\()_\_id
523.endm
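
/*
 * Illustrative sketch only (hypothetical CPU and CVE number): wrap a
 * hand-written check that the rev_var helpers above cannot express; the body
 * is expected to return its verdict (e.g. ERRATA_APPLIES) in x0:
 *
 *	check_erratum_custom_start cortex_foo, CVE(2024, 12345)
 *		<custom detection logic>
 *		ret
 *	check_erratum_custom_end cortex_foo, CVE(2024, 12345)
 */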
524
525
526/*******************************************************************************
527 * CPU reset function wrapper
528 ******************************************************************************/
529
530/*
531 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
532 *
533 * _cpu:
534 *	Name of cpu as given to declare_cpu_ops
535 *
536 * in body:
537 *	clobber x8 to x14
538 *	argument x14 - cpu_rev_var
539 */
540.macro cpu_reset_func_start _cpu:req
541	func \_cpu\()_reset_func
542		mov	x15, x30
543		bl	cpu_get_rev_var
544		mov	x14, x0
545
546		/* short circuit the location to avoid searching the list */
547		adrp	x12, \_cpu\()_errata_list_start
548		add	x12, x12, :lo12:\_cpu\()_errata_list_start
549		adrp	x13, \_cpu\()_errata_list_end
550		add	x13, x13, :lo12:\_cpu\()_errata_list_end
551
552	errata_begin:
553		/* if head catches up with end of list, exit */
554		cmp	x12, x13
555		b.eq	errata_end
556
557		ldr	x10, [x12, #ERRATUM_WA_FUNC]
558		/* TODO(errata ABI): check mitigated and checker function fields
559		 * for 0 */
560		ldrb	w11, [x12, #ERRATUM_CHOSEN]
561
562		/* skip if not chosen */
563		cbz	x11, 1f
564		/* skip if runtime erratum */
565		cbz	x10, 1f
566
567		/* put cpu revision in x0 and call workaround */
568		mov	x0, x14
569		blr	x10
570	1:
571		add	x12, x12, #ERRATUM_ENTRY_SIZE
572		b	errata_begin
573	errata_end:
574.endm
575
576.macro cpu_reset_func_end _cpu:req
577		isb
578		ret	x15
579	endfunc \_cpu\()_reset_func
580.endm
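
/*
 * Illustrative sketch only (hypothetical CPU and register): errata registered
 * with the reset wrappers above are applied by the list walk automatically;
 * only additional, non-erratum setup needs to be written by hand:
 *
 *	cpu_reset_func_start cortex_foo
 *		sysreg_bit_set	foo_cpuectlr_el1, BIT(0)
 *	cpu_reset_func_end cortex_foo
 */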
581
582/*
583 * Maintain compatibility with the old scheme where each cpu has its own reporting.
584 * TODO remove entirely once all cpus have been converted. This includes the
585 * cpu_ops entry, as print_errata_status can call this directly for all cpus
586 */
587.macro errata_report_shim _cpu:req
588	#if REPORT_ERRATA
589	func \_cpu\()_errata_report
590		/* normal stack frame for pretty debugging */
591		stp	x29, x30, [sp, #-16]!
592		mov	x29, sp
593
594		bl	generic_errata_report
595
596		ldp	x29, x30, [sp], #16
597		ret
598	endfunc \_cpu\()_errata_report
599	#endif
600.endm
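
/*
 * Illustrative use only (hypothetical CPU name), typically placed at the end
 * of a cpu file that still relies on the generic reporting path:
 *
 *	errata_report_shim cortex_foo
 */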
601#endif /* CPU_MACROS_S */
602