/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm
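	/*
	 * Expansion sketch (derived from the recursion above):
	 *	fill_constants 3, foo, bar
	 * emits
	 *	.quad foo
	 *	.quad bar
	 *	.quad bar
	 * repeating the last expression to fill all three slots.
	 */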

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to be read from the CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access to
	   * it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_lock
	  \_name\()_errata_reported:
	  .word	0
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, \_power_down_ops
	.endm

	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_e_handler, \_power_down_ops
	.endm
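	/*
	 * Illustrative invocation, following the pattern of the cpu library's
	 * Cortex-A53 file (names assumed, not verified against this tree):
	 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
	 *		cortex_a53_reset_func, \
	 *		cortex_a53_core_pwr_dwn, \
	 *		cortex_a53_cluster_pwr_dwn
	 */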

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2 or 3, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #4 /* Only values 0 to 3 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
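	/*
	 * Example use in a reset function (sketch; the label number is the
	 * caller's choice). The mitigation code runs only when CSV2 reads 0:
	 *	cpu_check_csv2 x0, 1f
	 *	...apply the CVE-2017-5715 mitigation here...
	 *	1:
	 */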

	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro  jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm
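	/*
	 * Illustrative use (the MIDR constant is assumed to come from the
	 * CPU's header; only the part number field is compared):
	 *	jump_if_cpu_midr CORTEX_A57_MIDR, 1f
	 */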


/*
 * Add an entry for this erratum to the errata list that the errata framework
 * and reporting mechanisms iterate over
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has a split workaround or not.
 *	Default value is 0.
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _split_wa=0
#if INCLUDE_ERRATA_LIST
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		/* bit magic that appends the _split_wa flag to the chosen field */
		.byte	((\_chosen * 0b11) & ((\_split_wa << 1) | \_chosen))
		.byte	0x0 /* alignment */
	.popsection
#endif
.endm
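/*
 * The encoded byte, worked through from the expression above:
 *	_chosen = 0                -> 0b00 (not compiled in, _split_wa masked out)
 *	_chosen = 1, _split_wa = 0 -> 0b01 (compiled in)
 *	_chosen = 1, _split_wa = 1 -> 0b11 (compiled in, split workaround)
 */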

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has a split workaround or not.
 *	Default value is 0.
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, \
	_chosen:req, _split_wa=0

	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_split_wa

	.if \_chosen
		/* put errata directly into the reset function */
		.pushsection .text.asm.\_cpu\()_reset_func, "ax"
	.else
		/* or something else that will get garbage collected by the
		 * linker */
		.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
	.endif
		/* revision is stored in x14, get it */
		mov	x0, x14
		bl	check_erratum_\_cpu\()_\_id
		/* save rev_var for workarounds that might need it */
		mov	x7, x14
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm
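/*
 * Sketch of a reset-time erratum built with these wrappers (the cpu name,
 * erratum number, build flag and sysreg bit are all hypothetical):
 *	workaround_reset_start cpu_x, ERRATUM(1234567), ERRATA_X_1234567
 *		sysreg_bit_set CPU_X_CPUACTLR_EL1, CPU_X_CPUACTLR_EL1_BIT_22
 *	workaround_reset_end cpu_x, ERRATUM(1234567)
 *	check_erratum_ls cpu_x, ERRATUM(1234567), CPU_REV(1, 0)
 */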

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ so it can be retrieved automatically blows up, as
	 * the .equ stores some brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip_runtime

		1:
	.endif
		/* save rev_var for workarounds that might need it, but don't
		 * restore it to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as for that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	erratum_\_cpu\()_\_id\()_skip_reset:
	.popsection
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as for that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to issue the isb for
	 * them, and a missing isb could be very problematic. Missing one is
	 * also likely, as runtime errata tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	erratum_\_cpu\()_\_id\()_skip_runtime:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
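/*
 * Sketch of a runtime erratum wrapper (all names hypothetical; generic code
 * would later invoke the resulting _wa function via apply_erratum):
 *	workaround_runtime_start cpu_x, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 *		override_vector_table wa_cve_2022_23960_vbar
 *	workaround_runtime_end cpu_x, CVE(2022, 23960)
 */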

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm
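/*
 * For instance, setting a (hypothetical) chicken bit:
 *	sysreg_bit_set CPU_X_CPUACTLR_EL1, BIT(22)
 * reads CPU_X_CPUACTLR_EL1, ORs in bit 22 and writes the result back,
 * clobbering x1 along the way.
 */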

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited
 * by the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/* Set VBAR_EL3 to the given vector table. Clobbers x1. */
.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * Insert an immediate bitfield value into a system register, via
 * BFI Xd, Xn, #lsb, #width.
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack
	 * them as variant[7:4] and revision[3:0] of the result.
	 *
	 * First extract MIDR[23:16] to _res[7:0] and zero fill the rest. Then
	 * extract MIDR[3:0] into _res[3:0], retaining the other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm
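/*
 * Worked example of the packing: an r2p1 part has MIDR variant = 0x2 and
 * revision = 0x1, so _res ends up as 0x21. Comparisons against values built
 * the same way (e.g. via the CPU_REV(2, 1) helper, assumed to produce 0x21)
 * then become a single integer compare.
 */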

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU
 *	revision fetching procedure. Stores the result of this in the temporary
 *	register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm
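/*
 * Typical invocations (illustrative names; NO_GET_CPU_REV is assumed to be
 * the zero-valued companion of GET_CPU_REV, and the chained form relies on
 * x10 already holding the revision from a previous call):
 *	apply_erratum cpu_x, ERRATUM(1234567), ERRATA_X_1234567
 *	apply_erratum cpu_x, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_GET_CPU_REV
 */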

/*
 * Helpers to report if an erratum applies. Compares the given revision variant
 * to the given value. Returns ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0 (cpu_rev_var_range also clobbers x1)
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm

.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm
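/*
 * Worked example, using the rev_var packing from get_rev_var and the
 * cset-produced 1/0 for ERRATA_APPLIES/ERRATA_NOT_APPLIES: with
 * x0 = 0x21 (r2p1),
 *	cpu_rev_var_ls 0x21          -> 1 (applies, r2p1 <= r2p1)
 *	cpu_rev_var_hs 0x30          -> 0 (does not apply, r2p1 < r3p0)
 *	cpu_rev_var_range 0x10, 0x20 -> 0 (does not apply, r2p1 > r2p0)
 */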


#if __clang_major__ < 17
/*
 * Clang versions older than 17 can fail to compile nested 'cfi_startproc'
 * directives. So provide compatibility variants of the func and endfunc
 * expansions that omit `cfi_startproc` and `cfi_endproc`; these are to be
 * used only with the check_errata/reset macros when TF-A is built with a
 * clang version < 17.
 */

.macro func_compat _name, _align=2
	.section .text.asm.\_name, "ax"
	.type \_name, %function
	.align \_align
	\_name:
#if ENABLE_BTI
	BTI	jc
#endif
.endm

/*
 * This macro is used to mark the end of a function.
 */
.macro endfunc_compat _name
	.size \_name, . - \_name
.endm

#else

#define func_compat func
#define endfunc_compat endfunc

#endif /* __clang_major__ < 17 */

/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_ls \_rev_num
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_hs \_rev_num
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func_compat check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
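/*
 * For example, a (hypothetical) erratum that applies up to and including
 * r1p1:
 *	check_erratum_ls cpu_x, ERRATUM(1234567), CPU_REV(1, 1)
 * This defines check_erratum_cpu_x_1234567, the symbol referenced by both
 * the workaround wrappers and the errata list entry.
 */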

/*
 * Provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Helper to register a cpu with the errata framework. Begins the definition of
 * the reset function.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 */
.macro cpu_reset_prologue _cpu:req
	func_compat \_cpu\()_reset_func
		mov	x15, x30
		get_rev_var x14, x0
.endm

/*
 * Wrapper of the reset function to automatically apply all reset-time errata.
 * Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	/* the func/endfunc macros change sections, so switch back to the
	 * reset function's section */
	.section .text.asm.\_cpu\()_reset_func, "ax"
.endm

.macro cpu_reset_func_end _cpu:req
		isb
		ret	x15
	endfunc_compat \_cpu\()_reset_func
.endm
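/*
 * Putting the wrappers together, a minimal cpu file might look like this
 * sketch (every name is hypothetical):
 *	cpu_reset_prologue cpu_x
 *
 *	workaround_reset_start cpu_x, ERRATUM(1234567), ERRATA_X_1234567
 *		sysreg_bit_set CPU_X_CPUACTLR_EL1, BIT(22)
 *	workaround_reset_end cpu_x, ERRATUM(1234567)
 *
 *	check_erratum_ls cpu_x, ERRATUM(1234567), CPU_REV(1, 0)
 *
 *	cpu_reset_func_start cpu_x
 *		...reset-time setup common to all revisions...
 *	cpu_reset_func_end cpu_x
 */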

/*
 * Helper macro that enables Maximum Power Mitigation Mechanism (MPMM) on
 * compatible Arm cores.
 *
 * Clobbers x0 and x1 (via sysreg_bit_set).
 */
.macro enable_mpmm
#if ENABLE_MPMM
	mrs	x0, CPUPPMCR_EL3
	/* if CPUPPMCR_EL3.MPMMPINCTL != 0, skip enabling MPMM */
	ands	x0, x0, CPUPPMCR_EL3_MPMMPINCTL_BIT
	b.ne	1f
	sysreg_bit_set CPUMPMMCR_EL3, CPUMPMMCR_EL3_MPMM_EN_BIT
	1:
#endif
.endm

#endif /* CPU_MACROS_S */
657