xref: /rk3399_ARM-atf/include/lib/cpus/aarch64/cpu_macros.S (revision b62673c645752a78f649282cfa293e8da09e3bef)
1/*
2 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6#ifndef CPU_MACROS_S
7#define CPU_MACROS_S
8
9#include <assert_macros.S>
10#include <lib/cpus/cpu_ops.h>
11#include <lib/cpus/errata.h>
12
	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      /* List exhausted: repeat the last expression to pad to _count */
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm
43
	/*
	 * Declare CPU operations
	 *
	 * Emits one cpu_ops structure into the .cpu_ops section. The field
	 * order below must match the cpu_ops layout in lib/cpus/cpu_ops.h.
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU. If there's no CPU reset function,
	 *	specify CPU_NO_RESET_FUNC
	 * _extra1:
	 *	This is a placeholder for future per CPU operations.  Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _extra4:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2024-7881 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	/* Reset function is only present in images that run at EL3 */
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_extra4
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	/* Guard: some cpus call declare_cpu_ops twice; emit these only once */
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access to
	   * it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_lock
	  \_name\()_errata_reported:
	  .word	0
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/* Crash-reporting register dump routine, provided by the cpu file */
	.quad \_name\()_cpu_reg_dump
#endif
	.endm
154
	/* Declare CPU ops with no extra workaround hooks or exception handler */
	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
			\_power_down_ops
	.endm
160
	/* Declare CPU ops with an exception handler but no workaround hooks */
	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, 0, \_e_handler, \_power_down_ops
	.endm
166
	/* Declare CPU ops with three workaround hooks (_extra1 to _extra3) */
	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
	.endm
173
	/* Declare CPU ops with four workaround hooks (_extra1 to _extra4) */
	.macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _extra4:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
	.endm
180
	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 *
	 * Clobbers \_reg: on exit it holds the ID_AA64PFR0_EL1.CSV2 field.
	 * Branches to \_label when CSV2 != 0 (no mitigation needed); falls
	 * through when CSV2 == 0, i.e. when the mitigation must be applied.
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
206
207	/*
208	 * Helper macro that reads the part number of the current
209	 * CPU and jumps to the given label if it matches the CPU
210	 * MIDR provided.
211	 *
212	 * Clobbers x0.
213	 */
214	.macro  jump_if_cpu_midr _cpu_midr, _label
215	mrs	x0, midr_el1
216	ubfx	x0, x0, MIDR_PN_SHIFT, #12
217	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
218	b.eq	\_label
219	.endm
220
221
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * Emits one erratum_entry record into .rodata.errata_entries. The field
 * order and sizes below must match the erratum_entry layout consumed via
 * the ERRATUM_* offsets (see lib/cpus/errata.h).
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *	Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	.pushsection .rodata.errata_entries
		.align	3
		/* Define the list start label only once per cpu */
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		/* check if unused and compile out if no references */
		.if \_apply_at_reset && \_chosen
			.quad	erratum_\_cpu\()_\_id\()_wa
		.else
			/* NULL workaround pointer marks a runtime-only or
			 * compiled-out erratum */
			.quad	0
		.endif
		/* TODO(errata ABI): this prevents all checker functions from
		 * being optimised away. Can be done away with unless the ABI
		 * needs them */
		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 character in the ID field */
		.word	\_id
		.hword	\_cve
		.byte	\_chosen
		/* TODO(errata ABI): mitigated field for known but unmitigated
		 * errata */
		.byte	0x1
	.popsection
.endm
268
/*
 * Internal helper: register the erratum entry and open the workaround
 * function erratum_<cpu>_<id>_wa. Saves the return address in x8 and
 * skips the body if the checker reports the erratum does not apply.
 * Closed by _workaround_end.
 */
.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

	func erratum_\_cpu\()_\_id\()_wa
		/* Save the link register; restored by the ret in _workaround_end */
		mov	x8, x30

		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		/* checker returns 0 in x0 when erratum does not apply */
		cbz	x0, erratum_\_cpu\()_\_id\()_skip
.endm
281
/*
 * Internal helper: close a workaround opened by _workaround_start.
 * Defines the skip label targeted by the checker bail-out and returns
 * via x8 (saved link register).
 */
.macro _workaround_end _cpu:req, _id:req
	erratum_\_cpu\()_\_id\()_skip:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
287
/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	/* _apply_at_reset = 1: applied automatically by cpu_reset_func_start */
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm
317
/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		/* Wrong CPU: jump straight to the skip label of this erratum */
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip

		1:
	.endif
	/* _apply_at_reset = 0: workaround pointer is NULL in the entry */
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm
339
/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	_workaround_end \_cpu, \_id
.endm
347
/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to call the isb for them
	 * and missing the isb could be very problematic. It is also likely as
	 * they tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	_workaround_end \_cpu, \_id
.endm
367
/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 *  way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *	NOTE(review): the body below does not currently emit any read-back
 *	assertion regardless of _assert — confirm whether this is intended.
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm
392
/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 *  the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 *
 * clobbers: x1
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm
404
/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited by
 *  the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 * NOTE(review): as in sysreg_bit_set, the _assert parameter is accepted but
 * not acted upon in this body.
 *
 * clobbers: x1
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm
416
/*
 * Install a replacement EL3 vector table.
 *
 * _table:
 *	Label of the vector table to install into VBAR_EL3.
 *	NOTE(review): adr is PC-relative with a +/-1MB range — assumes the
 *	table is linked near the call site; confirm for each use.
 *
 * clobbers: x1
 */
.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm
421
/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 *
 * _reg:   system register to modify
 * _src:   immediate value to insert
 * _lsb:   least significant bit of the destination field
 * _width: width of the destination field
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
434
/*
 * Same as sysreg_bitfield_insert but the source value comes from a
 * general-purpose register (_gpr) instead of an immediate.
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
442
/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU revision fetching
 *	procedure. Stores the result of this in the temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
		/* Fetch revision; keep a copy in x10 for chained invocations */
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		/* Reuse revision cached in x10 by an earlier apply_erratum */
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		/* x0 = cpu_rev_var; x9 holds the caller's link register */
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm
480
/*
 * Helpers to select which revisions errata apply to. Don't leave a link
 * register as the cpu_rev_var_*** will call the ret and we can save on one.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x4
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num
		/* tail call: cpu_rev_var_ls performs the ret for us */
		b	cpu_rev_var_ls
	endfunc check_erratum_\_cpu\()_\_id
.endm
508
/* As check_erratum_ls, but applies to revisions >= _rev_num */
.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num
		/* tail call: cpu_rev_var_hs performs the ret for us */
		b	cpu_rev_var_hs
	endfunc check_erratum_\_cpu\()_\_id
.endm
515
/* As check_erratum_ls, but applies to revisions in [_rev_num_lo, _rev_num_hi] */
.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num_lo
		mov	x2, #\_rev_num_hi
		/* tail call: cpu_rev_var_range performs the ret for us */
		b	cpu_rev_var_range
	endfunc check_erratum_\_cpu\()_\_id
.endm
523
/*
 * Checker that depends only on the compile-time _chosen flag, not on the
 * CPU revision: returns ERRATA_APPLIES or ERRATA_MISSING in x0.
 */
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm
534
/* provide a shorthand for the name format for annoying errata */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm
539
/* Close a checker opened with check_erratum_custom_start */
.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm
543

/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 *
 * Register roles in the generated loop:
 *	x12 - cursor over errata entries, x13 - list end,
 *	x10 - workaround function pointer, x11 - chosen flag,
 *	x14 - cpu_rev_var, x15 - saved link register (used by
 *	cpu_reset_func_end).
 *
 * NOTE(review): errata_begin/errata_end are plain (non-unique) labels, so
 * this macro can be expanded at most once per translation unit — confirm
 * for cpu files that declare several CPUs.
 */
.macro cpu_reset_func_start _cpu:req
	func \_cpu\()_reset_func
		mov	x15, x30
		bl	cpu_get_rev_var
		mov	x14, x0

		/* short circuit the location to avoid searching the list */
		adrp	x12, \_cpu\()_errata_list_start
		add	x12, x12, :lo12:\_cpu\()_errata_list_start
		adrp	x13, \_cpu\()_errata_list_end
		add	x13, x13, :lo12:\_cpu\()_errata_list_end

	errata_begin:
		/* if head catches up with end of list, exit */
		cmp	x12, x13
		b.eq	errata_end

		ldr	x10, [x12, #ERRATUM_WA_FUNC]
		/* TODO(errata ABI): check mitigated and checker function fields
		 * for 0 */
		ldrb	w11, [x12, #ERRATUM_CHOSEN]

		/* skip if not chosen */
		cbz	x11, 1f
		/* skip if runtime erratum (NULL workaround pointer) */
		cbz	x10, 1f

		/* put cpu revision in x0 and call workaround */
		mov	x0, x14
		blr	x10
	1:
		add	x12, x12, #ERRATUM_ENTRY_SIZE
		b	errata_begin
	errata_end:
.endm
594
/*
 * Close a reset function opened with cpu_reset_func_start: synchronise
 * context (isb) and return via x15 (link register saved at entry).
 */
.macro cpu_reset_func_end _cpu:req
		isb
		ret	x15
	endfunc \_cpu\()_reset_func
.endm
600
601#endif /* CPU_MACROS_S */
602