/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to be read from the CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _extra4:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2024-7881 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS, functions may be specified. Starting at
	 *	power level 0, these functions shall handle power down at
	 *	subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at the remaining levels.
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_extra4
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access to
	   * it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_lock
	  \_name\()_errata_reported:
	  .word	0
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
			\_power_down_ops
	.endm
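
	/*
	 * Invoked as, for example (illustrative only; cpu_axx and its
	 * functions are hypothetical placeholders):
	 *
	 *	declare_cpu_ops cpu_axx, CPU_AXX_MIDR, cpu_axx_reset_func, \
	 *		cpu_axx_core_pwr_dwn, \
	 *		cpu_axx_cluster_pwr_dwn
	 */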

	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
	.endm

	.macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _extra4:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
	.endm

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
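
	/*
	 * Invoked as, for example (illustrative; execution falls through to
	 * the mitigation code only when CSV2 reads as 0):
	 *
	 *	cpu_check_csv2 x0, 1f
	 *	<apply CVE-2017-5715 mitigation here>
	 *	1:
	 */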

	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro  jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm
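
	/*
	 * Invoked as, for example (illustrative; CPU_AXX_MIDR is a
	 * hypothetical MIDR constant):
	 *
	 *	jump_if_cpu_midr CPU_AXX_MIDR, 1f
	 *	b	2f
	 *	1:
	 *	<CPU-specific handling here>
	 *	2:
	 */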

/*
 * Add an entry for an erratum to the errata framework, so it can be reported
 * to the user and queried through the errata ABI.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has a split workaround or not.
 *	Default value is 0.
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _split_wa=0
#if REPORT_ERRATA || ERRATA_ABI_SUPPORT
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		/*
		 * Encode _chosen in bit 0 and _split_wa in bit 1; both bits
		 * read as zero when the erratum is not chosen.
		 */
		.byte	((\_chosen * 0b11) & ((\_split_wa << 1) | \_chosen))
		.byte	0x0 /* alignment */
	.popsection
#endif
.endm
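
/*
 * Invoked as, for example (illustrative; the erratum number and build flag
 * are hypothetical placeholders, and a matching check_erratum_* function is
 * assumed to exist):
 *
 *	add_erratum_entry cpu_axx, ERRATUM(1234567), ERRATA_AXX_1234567
 */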

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has a split workaround or not.
 *	Default value is 0.
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, \
	_chosen:req, _split_wa=0

	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_split_wa

	.if \_chosen
		/* put errata directly into the reset function */
		.pushsection .text.asm.\_cpu\()_reset_func, "ax"
	.else
		/*
		 * or something else that will get garbage collected by the
		 * linker
		 */
		.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
	.endif
		/* revision is stored in x14, get it */
		mov	x0, x14
		bl	check_erratum_\_cpu\()_\_id
		/* save rev_var for workarounds that might need it */
		mov	x7, x14
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve it automatically blows up, as it stores
	 * some brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip_runtime

		1:
	.endif
		/*
		 * save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care
		 */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	erratum_\_cpu\()_\_id\()_skip_reset:
	.popsection
.endm
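
/*
 * A reset-time workaround is then written as, for example (illustrative;
 * cpu_axx, the erratum number, the build flag and the sysreg/bit names are
 * hypothetical placeholders):
 *
 *	workaround_reset_start cpu_axx, ERRATUM(1234567), ERRATA_AXX_1234567
 *		sysreg_bit_set CPU_AXX_CPUACTLR_EL1, CPU_AXX_CPUACTLR_EL1_BIT_1
 *	workaround_reset_end cpu_axx, ERRATUM(1234567)
 *
 *	check_erratum_ls cpu_axx, ERRATUM(1234567), CPU_REV(1, 0)
 */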

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to issue the isb for
	 * them, and a missing isb could be very problematic. Missing one is
	 * also likely, as runtime errata tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	erratum_\_cpu\()_\_id\()_skip_runtime:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
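
/*
 * A runtime workaround is then written as, for example (illustrative;
 * cpu_axx, the build flag, the MIDR constant and the vector table symbol are
 * hypothetical placeholders):
 *
 *	workaround_runtime_start cpu_axx, CVE(2022, 23960), \
 *		WORKAROUND_CVE_2022_23960, CPU_AXX_MIDR
 *		override_vector_table wa_cve_2022_23960_vbar_cpu_axx
 *	workaround_runtime_end cpu_axx, CVE(2022, 23960)
 */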

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm
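
/*
 * Invoked as, for example (illustrative; the bit name is a hypothetical
 * placeholder):
 *
 *	sysreg_bit_set actlr_el3, ACTLR_EL3_SOME_BIT
 */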

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited
 * by the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm

.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI: Inserts a bitfield into a system register.
 *
 * BFI Rd, Rn, #lsb, #width
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
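
/*
 * Invoked as, for example (illustrative; the field's shift and width defines
 * are hypothetical placeholders):
 *
 *	sysreg_bitfield_insert CPU_AXX_CPUECTLR_EL1, 0x2, \
 *		CPU_AXX_CPUECTLR_EL1_FIELD_SHIFT, CPU_AXX_CPUECTLR_EL1_FIELD_WIDTH
 */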

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack
	 * them as variant[7:4] and revision[3:0] of the result.
	 *
	 * First extract _tmp[23:16] to _res[7:0] and zero fill the rest. Then
	 * extract _tmp[3:0] into _res[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the
 *	CPU revision fetching procedure. Stores the result of this in the
 *	temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm
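
/*
 * Invoked as, for example (illustrative; cpu_axx and the build flag are
 * hypothetical placeholders; a subsequent application may pass 0 for
 * _get_rev to reuse the revision already cached in x10):
 *
 *	apply_erratum cpu_axx, ERRATUM(1234567), ERRATA_AXX_1234567
 */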

/*
 * Helpers to report if an erratum applies. Compare the given revision variant
 * to the given value(s). Return ERRATA_APPLIES or ERRATA_NOT_APPLIES
 * accordingly.
 *
 * _rev_num: the revision variant to compare against. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0 (and x1 in cpu_rev_var_range)
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm

.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm

/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_ls \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm
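
/*
 * Invoked as, for example (illustrative; the erratum number is a hypothetical
 * placeholder). This reports the erratum as applying to revisions up to and
 * including r1p0 of cpu_axx:
 *
 *	check_erratum_ls cpu_axx, ERRATUM(1234567), CPU_REV(1, 0)
 */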

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_hs \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*
 * provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Helper to register a cpu with the errata framework. Begins the definition of
 * the reset function.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 */
.macro cpu_reset_prologue _cpu:req
	func \_cpu\()_reset_func
		mov	x15, x30
		get_rev_var x14, x0
.endm

/*
 * Wrapper of the reset function to automatically apply all reset-time errata.
 * Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	/*
	 * The func/endfunc macros change sections, so change the section back
	 * to the reset function's.
	 */
	.section .text.asm.\_cpu\()_reset_func, "ax"
.endm

.macro cpu_reset_func_end _cpu:req
		isb
		ret	x15
	endfunc \_cpu\()_reset_func
.endm
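
/*
 * A CPU file then ties these together as, for example (illustrative; cpu_axx
 * and its constants are hypothetical placeholders):
 *
 *	cpu_reset_prologue cpu_axx
 *
 *	workaround_reset_start cpu_axx, ERRATUM(1234567), ERRATA_AXX_1234567
 *		sysreg_bit_set CPU_AXX_CPUACTLR_EL1, CPU_AXX_CPUACTLR_EL1_BIT_1
 *	workaround_reset_end cpu_axx, ERRATUM(1234567)
 *
 *	check_erratum_ls cpu_axx, ERRATUM(1234567), CPU_REV(1, 0)
 *
 *	cpu_reset_func_start cpu_axx
 *		<reset-time setup common to all revisions>
 *	cpu_reset_func_end cpu_axx
 */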

/*
 * Helper macro that enables Maximum Power Mitigation Mechanism (MPMM) on
 * compatible Arm cores.
 *
 * Clobbers x0.
 */
.macro enable_mpmm
#if ENABLE_MPMM
	mrs	x0, CPUPPMCR_EL3
	/* if CPUPPMCR_EL3.MPMMPINCTL != 0, skip enabling MPMM */
	ands	x0, x0, CPUPPMCR_EL3_MPMMPINCTL_BIT
	b.ne	1f
	sysreg_bit_set CPUMPMMCR_EL3, CPUMPMMCR_EL3_MPMM_EN_BIT
	1:
#endif
.endm

#endif /* CPU_MACROS_S */