/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm
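
	/*
	 * Illustrative only: `fill_constants 3, foo, bar` emits foo, bar, bar,
	 * repeating the last expression until three quad words have been
	 * written.
	 */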

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to be read from the CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _e_handler:
	 *	This is a placeholder for future per-CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS, functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access to
	   * it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_reported
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, \_power_down_ops
	.endm

	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_e_handler, \_power_down_ops
	.endm
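
	/*
	 * Illustrative only (hypothetical "foo" cpu; the MIDR and function
	 * names are placeholders, not from this file):
	 *
	 *	declare_cpu_ops foo, FOO_MIDR, foo_reset_func, \
	 *		foo_core_pwr_dwn, foo_cluster_pwr_dwn
	 *
	 * The two power-down entries cover power levels 0 and 1; the last one
	 * is reused for any higher levels, as described above.
	 */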

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2 or 3, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #4 /* Only values 0 to 3 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
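
	/*
	 * Illustrative only: a reset function can skip its CVE-2017-5715
	 * mitigation on cores that report CSV2 support, e.g.
	 *
	 *	cpu_check_csv2	x0, 1f
	 *	... install the mitigation vector table ...
	 *	1:
	 */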

	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro  jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm
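
	/*
	 * Illustrative only: generic or platform code can dispatch on the
	 * running core, e.g.
	 *
	 *	jump_if_cpu_midr CORTEX_A72_MIDR, handle_a72
	 *
	 * Execution falls through when the part number does not match;
	 * handle_a72 is a placeholder label.
	 */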

/*
 * Register an erratum with the errata framework. An entry is added to the
 * errata list so the erratum can be reported and its workaround applied.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has a split workaround or not.
 *	Default value is 0.
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _split_wa=0
#if INCLUDE_ERRATA_LIST
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		/*
		 * Encode _chosen in bit 0 and _split_wa in bit 1. If the
		 * erratum is not chosen, both bits read as zero.
		 */
		.byte	((\_chosen * 0b11) & ((\_split_wa << 1) | \_chosen))
		.byte	0x0 /* alignment */
	.popsection
#endif
.endm
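
/*
 * Illustrative only (hypothetical "foo" cpu with erratum 123456, gated on a
 * build flag ERRATA_FOO_123456): the ERRATUM/CVE macros from errata.h supply
 * the _cve and _id pair, e.g.
 *
 *	add_erratum_entry foo, ERRATUM(123456), ERRATA_FOO_123456
 *
 * A check_erratum_foo_123456 function must exist, as the entry points to it.
 */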

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has a split workaround or not.
 *	Default value is 0.
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, \
	_chosen:req, _split_wa=0

	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_split_wa

	.if \_chosen
		/* put errata directly into the reset function */
		.pushsection .text.asm.\_cpu\()_reset_func, "ax"
	.else
		/* or something else that will get garbage collected by the
		 * linker */
		.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
	.endif
		/* revision is stored in x14, get it */
		mov	x0, x14
		bl	check_erratum_\_cpu\()_\_id
		/* save rev_var for workarounds that might need it */
		mov	x7, x14
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if the CPU's MIDR matches the CPU it's meant for. Must be
 *	specified for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ so it can be retrieved automatically blows up, as
	 * the symbol ends up containing brackets.
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip_runtime

		1:
	.endif
		/* save rev_var for workarounds that might need it, but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as with that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	erratum_\_cpu\()_\_id\()_skip_reset:
	.popsection
.endm
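
/*
 * Illustrative only (hypothetical "foo" cpu; register and bit names are
 * placeholders): a reset-time workaround wraps its fix between the start/end
 * macros and provides a matching revision check, e.g.
 *
 *	workaround_reset_start foo, ERRATUM(123456), ERRATA_FOO_123456
 *		sysreg_bit_set FOO_CPUACTLR_EL1, FOO_CPUACTLR_EL1_BIT_22
 *	workaround_reset_end foo, ERRATUM(123456)
 *
 *	check_erratum_ls foo, ERRATUM(123456), CPU_REV(1, 0)
 */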

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as with that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to issue the isb for
	 * them, and a missing isb could be very problematic. Missing it is
	 * also likely, as runtime errata tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	erratum_\_cpu\()_\_id\()_skip_runtime:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
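
/*
 * Illustrative only (hypothetical "foo" cpu; placeholder names): a runtime
 * workaround is wrapped the same way, with the MIDR supplied so generic code
 * only applies it on the matching core:
 *
 *	workaround_runtime_start foo, ERRATUM(654321), ERRATA_FOO_654321, FOO_MIDR
 *		sysreg_bit_set FOO_CPUACTLR_EL1, FOO_CPUACTLR_EL1_BIT_7
 *	workaround_runtime_end foo, ERRATUM(654321)
 */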

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited
 * by the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm
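
/*
 * Illustrative only (placeholder names): erratum bodies frequently reduce to
 * a single helper invocation, e.g.
 *
 *	sysreg_bit_set	 FOO_CPUECTLR_EL1, FOO_CPUECTLR_EL1_EXTLLC_BIT
 *	sysreg_bit_clear FOO_CPUACTLR_EL1, FOO_CPUACTLR_EL1_BIT_38
 */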

.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI: inserts a bitfield into a system register.
 *
 * BFI Rd, Rn, #lsb, #width
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
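
/*
 * Illustrative only (placeholder register): write the value 0x3 into a 4-bit
 * field starting at bit 44:
 *
 *	sysreg_bitfield_insert FOO_CPUECTLR_EL1, 0x3, 44, 4
 */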

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack
	 * them as variant[7:4] and revision[3:0] of the result.
	 *
	 * First extract \_tmp[23:16] to \_res[7:0] and zero fill the rest.
	 * Then extract \_tmp[3:0] into \_res[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm
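
/*
 * Illustrative only: on an r2p1 part (variant 2, revision 1) the macro above
 * yields 0x21, so revision/variant pairs compare as plain integers, and
 * CPU_REV(2, 1) from errata.h produces the matching constant.
 */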

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU
 *	revision fetching procedure. Stores the result of this in the
 *	temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm
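
/*
 * Illustrative only (hypothetical "foo" cpu): generic code applies an erratum
 * on the running core with
 *
 *	apply_erratum foo, ERRATUM(654321), ERRATA_FOO_654321
 *
 * Passing 0 as _get_rev skips the cpu_get_rev_var call and reuses the
 * revision already cached in x10 by a previous invocation, which allows
 * chaining several errata cheaply.
 */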

/*
 * Helpers to report if an erratum applies. Compares the given revision variant
 * to the given value. Returns ERRATA_APPLIES or ERRATA_NOT_APPLIES
 * accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm

.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm


#if __clang_major__ < 17
/*
 * A problem with clang versions earlier than 17 can cause resolving nested
 * 'cfi_startproc' to fail compilation. So add compatibility variants of the
 * func and endfunc expansions that ignore `cfi_startproc` and `cfi_endproc`.
 * These are only to be used with the check_errata/reset macros when TF-A is
 * built with clang versions earlier than 17.
 */

.macro func_compat _name, _align=2
	.section .text.asm.\_name, "ax"
	.type \_name, %function
	.align \_align
	\_name:
#if ENABLE_BTI
	BTI	jc
#endif
.endm

/*
 * This macro is used to mark the end of a function.
 */
.macro endfunc_compat _name
	.size \_name, . - \_name
.endm

#else

#define func_compat func
#define endfunc_compat endfunc

#endif /* __clang_major__ < 17 */

/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_ls \_rev_num
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_hs \_rev_num
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func_compat check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
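
/*
 * Illustrative only: CVE workarounds are typically gated on a build flag
 * rather than a revision, e.g. (hypothetical "foo" cpu)
 *
 *	check_erratum_chosen foo, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 */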

/*
 * Provide a shorthand for the function name format, for errata whose check
 * cannot be expressed with the helpers above.
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Helper to register a cpu with the errata framework. Begins the definition of
 * the reset function.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 */
.macro cpu_reset_prologue _cpu:req
	func_compat \_cpu\()_reset_func
		mov	x15, x30
		get_rev_var x14, x0
.endm

/*
 * Wrapper of the reset function to automatically apply all reset-time errata.
 * Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	/* the func/endfunc macros change sections, so switch back to the
	 * reset function's section */
	.section .text.asm.\_cpu\()_reset_func, "ax"
.endm

.macro cpu_reset_func_end _cpu:req
		isb
		ret	x15
	endfunc_compat \_cpu\()_reset_func
.endm
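
/*
 * Illustrative only (hypothetical "foo" cpu): a cpu file combines the pieces
 * above roughly as
 *
 *	cpu_reset_prologue foo
 *	... errata declared with workaround_reset_start/end ...
 *	cpu_reset_func_start foo
 *		... unconditional reset-time setup ...
 *	cpu_reset_func_end foo
 */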

/*
 * Helper macro that enables Maximum Power Mitigation Mechanism (MPMM) on
 * compatible Arm cores.
 *
 * Clobbers x0.
 */
.macro enable_mpmm
#if ENABLE_MPMM
	mrs	x0, CPUPPMCR_EL3
	/* if CPUPPMCR_EL3.MPMMPINCTL != 0, skip enabling MPMM */
	ands	x0, x0, CPUPPMCR_EL3_MPMMPINCTL_BIT
	b.ne	1f
	sysreg_bit_set CPUMPMMCR_EL3, CPUMPMMCR_EL3_MPMM_EN_BIT
	1:
#endif
.endm

/*
 * Call this just before a return to indicate support for pabandon. Only
 * necessary on an abandon call, but harmless on a powerdown call.
 *
 * PSCI wants us to tell it we handled a pabandon by returning 0. This is the
 * only way support for it is indicated.
 */
.macro signal_pabandon_handled
	mov_imm	x0, PABANDON_ACK
.endm

#endif /* CPU_MACROS_S */