xref: /rk3399_ARM-atf/include/lib/cpus/aarch64/cpu_macros.S (revision c4351f7f62449e8c8e58e71c398f7fc5c96bbfe8)
1/*
2 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6#ifndef CPU_MACROS_S
7#define CPU_MACROS_S
8
9#include <assert_macros.S>
10#include <lib/cpus/cpu_ops.h>
11#include <lib/cpus/errata.h>
12
	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      /* Ran out of expressions: repeat the last one to pad to _count */
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm
43
	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 *
	 * NOTE(review): the fields emitted below are laid out to match the
	 * cpu_ops structure -- keep order and build-time conditions in sync
	 * with lib/cpus/cpu_ops.h when changing this macro.
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions, padded to CPU_MAX_PWR_DWN_OPS entries */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access to
	   * it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_reported
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm
129
	/*
	 * Convenience wrapper around declare_cpu_ops_base for CPUs without a
	 * per-CPU exception handler (passes 0 for _e_handler).
	 */
	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, \_power_down_ops
	.endm
134
	/*
	 * Variant of declare_cpu_ops for CPUs that supply an exception handler;
	 * _e_handler is forwarded to declare_cpu_ops_base.
	 */
	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_e_handler, \_power_down_ops
	.endm
140
	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 *
	 * _reg:
	 *	Scratch register; holds the extracted CSV2 field on exit
	 * _label:
	 *	Branched to when CSV2 != 0, i.e. mitigation is not required
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2 or 3, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #4 /* Only values 0 to 3 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
166
167	/*
168	 * Helper macro that reads the part number of the current
169	 * CPU and jumps to the given label if it matches the CPU
170	 * MIDR provided.
171	 *
172	 * Clobbers x0.
173	 */
174	.macro  jump_if_cpu_midr _cpu_midr, _label
175	mrs	x0, midr_el1
176	ubfx	x0, x0, MIDR_PN_SHIFT, #12
177	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
178	b.eq	\_label
179	.endm
180
181
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has split workaround or not.
 *	Default value is 0.
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _split_wa=0
#if INCLUDE_ERRATA_LIST
	.pushsection .rodata.errata_entries
		.align	3
		/* Open the per-cpu list on the first entry only */
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		/* 8-byte pointer to the erratum check function */
		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		/* bit magic that appends chosen field based on _split_wa */
		.byte	((\_chosen * 0b11) & ((\_split_wa << 1) | \_chosen))
		.byte	0x0 /* alignment */
	.popsection
#endif
.endm
221
/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has split workaround or not.
 *	Default value is 0.
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, \
	_chosen:req, _split_wa=0

	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_split_wa

	.if \_chosen
		/* put errata directly into the reset function */
		.pushsection .text.asm.\_cpu\()_reset_func, "ax"
	.else
		/* or something else that will get garbage collected by the
		 * linker */
		.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
	.endif
		/* revision is stored in x14, get it */
		mov	x0, x14
		bl	check_erratum_\_cpu\()_\_id
		/* save rev_var for workarounds that might need it */
		mov	x7, x14
		/* check result: 0 means erratum does not apply, skip the body */
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm
272
/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen

	func erratum_\_cpu\()_\_id\()_wa
		/* keep the return address in x8; workaround_runtime_end does "ret x8" */
		mov	x8, x30
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip_runtime

		1:
	.endif
		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		/* 0 means erratum does not apply: skip the workaround body */
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm
302
/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	/* land here when the check function reported "not affected" */
	erratum_\_cpu\()_\_id\()_skip_reset:
	.popsection
.endm
311
/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to call the isb for them
	 * and missing the isb could be very problematic. It is also likely as
	 * they tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	erratum_\_cpu\()_\_id\()_skip_runtime:
		/* return via x8, saved by workaround_runtime_start */
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
333
/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 *  way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * NOTE(review): _assert is accepted but not acted upon in this body -- no
 * read-back check is emitted. Confirm whether the assert path was meant to
 * be implemented here.
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm
358
/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 *  the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 *
 * clobbers: x1
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm
370
/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited by
 *  the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 *
 * NOTE(review): like sysreg_bit_set, the _assert parameter is accepted but
 * unused in this body.
 *
 * clobbers: x1
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm
382
/*
 * Install a different EL3 vector table by writing its address to vbar_el3.
 *
 * _table:
 *	Label of the replacement vector table
 *
 * clobbers: x1
 */
.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm
387
/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 *
 * _reg:	system register to modify
 * _src:	immediate source value for the field
 * _lsb/_width:	bitfield position and size
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
400
/*
 * Same as sysreg_bitfield_insert but the source value comes from a general
 * purpose register instead of an immediate.
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
408
/*
 * Lazy read-modify-write helpers for system registers.
 *
 * When multiple bit operations target the same register, these macros collapse
 * them into a single mrs at start and a single msr at commit, with pure
 * register-form operations in between.  This avoids the redundant reads and
 * writes that would result from calling sysreg_bit_set / sysreg_bit_clear /
 * sysreg_bitfield_insert once per bit.
 *
 * Usage:
 *   sysreg_lazy_start  _reg
 *   sysreg_lazy_set    _bit
 *   sysreg_lazy_clear  _bit
 *   sysreg_lazy_insert _src, _lsb, _width
 *   sysreg_lazy_commit _reg
 *
 * _reg must be the same identifier in sysreg_lazy_start and
 * sysreg_lazy_commit.  Any combination of sysreg_lazy_set,
 * sysreg_lazy_clear, and sysreg_lazy_insert may appear between them.
 *
 * Clobbers: x0 (scratch), x1 (holds register value between start and commit)
 */

/* Read _reg into x1 to begin a lazy sequence. */
.macro sysreg_lazy_start _reg:req
	mrs	x1, \_reg
.endm
436
/*
 * ORR _bit into x1.  _bit may be any 64-bit mask (materialised via mov_imm,
 * so it is not limited to ORR-immediate encodable values).  Clobbers x0.
 */
.macro sysreg_lazy_set _bit:req
	mov_imm	x0, (\_bit)
	orr	x1, x1, x0
.endm
442
/*
 * BIC _bit from x1.  _bit may be any 64-bit mask (materialised via mov_imm,
 * so it is not limited to BIC-immediate encodable values).  Clobbers x0.
 */
.macro sysreg_lazy_clear _bit:req
	mov_imm	x0, (\_bit)
	bic	x1, x1, x0
.endm
448
/* BFI _src into x1 at position _lsb for _width bits.  Clobbers x0. */
.macro sysreg_lazy_insert _src:req, _lsb:req, _width:req
	mov_imm	x0, (\_src)
	bfi	x1, x0, #\_lsb, #\_width
.endm
454
/* Write x1 back to _reg to end a lazy sequence. */
.macro sysreg_lazy_commit _reg:req
	msr	\_reg, x1
.endm
459
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm
482
/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU revision fetching
 *	procedure. Stores the result of this in the temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
		/* x9 preserves the link register across the two bl calls */
		mov	x9, x30
		bl	cpu_get_rev_var
		/* cache rev_var in x10 so later apply_erratum calls can reuse it */
		mov	x10, x0
	.elseif (\_chosen)
		mov	x9, x30
		/* reuse rev_var cached in x10 by an earlier invocation */
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm
520
/*
 * Helpers to report if an erratum applies. Compares the given revision variant
 * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	/* x0 = 1 when rev_var <= _rev_num (NOTE(review): relies on
	 * ERRATA_APPLIES == 1 / ERRATA_NOT_APPLIES == 0 -- see errata.h) */
	cset	x0, ls
.endm
536
/* As cpu_rev_var_ls but true when rev_var >= _rev_num. */
.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm
541
/* True when _rev_num_lo <= rev_var <= _rev_num_hi (see cpu_rev_var_ls). */
.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	/* if rev_var < lo, force NZCV = 0b0010 (C set, Z clear) so that the
	 * following 'ls' test fails; otherwise compare against hi */
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm
548
549
#if __clang_major__ < 17
/*
 * A problem with clang version < 17 can cause resolving nested
 * 'cfi_startproc' to fail compilation.
 * So add a compatibility variant for start and endfunc expansions
 * to ignore `cfi_startproc` and `cfi_endproc`, this to be used only with
 * check_errata/reset macros if we build TF-A with clang version < 17
 *
 * NOTE(review): __clang_major__ is undefined for non-clang toolchains and
 * evaluates to 0 in #if, so GCC builds also take this path -- confirm that
 * is intended.
 */

/* As the regular func macro but without emitting CFI directives. */
.macro func_compat _name, _align=2
	.section .text.asm.\_name, "ax"
	.type \_name, %function
	.align \_align
	\_name:
#if ENABLE_BTI
	BTI	jc
#endif
.endm

/*
 * This macro is used to mark the end of a function.
 */
.macro endfunc_compat _name
	.size \_name, . - \_name
.endm

#else

#define func_compat func
#define endfunc_compat endfunc

#endif /* __clang_major__ < 17 */
582
/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_ls \_rev_num
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
609
/* Erratum applies to revision _rev_num and later (see check_erratum_ls). */
.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_hs \_rev_num
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
616
/* Erratum applies to an inclusive revision range (see check_erratum_ls). */
.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func_compat check_erratum_\_cpu\()_\_id
		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
623
/*
 * Check function whose result is decided entirely at build time by _chosen:
 * returns ERRATA_APPLIES when the erratum is compiled in, ERRATA_MISSING
 * otherwise. Clobbers x0.
 */
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func_compat check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
634
/*
 * provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func_compat check_erratum_\_cpu\()_\_id
.endm
642
/* Closes a function opened by check_erratum_custom_start. */
.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
646
/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Helper to register a cpu with the errata framework. Begins the definition of
 * the reset function.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * Register contract with the rest of the reset wrapper:
 *	x15 - saved link register (restored by cpu_reset_func_end's "ret x15")
 *	x14 - cpu_rev_var, consumed by workaround_reset_start
 */
.macro cpu_reset_prologue _cpu:req
	func_compat \_cpu\()_reset_func
		mov	x15, x30
		get_rev_var x14, x0
.endm
663
/*
 * Wrapper of the reset function to automatically apply all reset-time errata.
 * Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	/* the func/endfunc macros will change sections. So change the section
	 * back to the reset function's */
	.section .text.asm.\_cpu\()_reset_func, "ax"
.endm
680
/* End the reset function: synchronise with isb, return via x15 (saved lr). */
.macro cpu_reset_func_end _cpu:req
		isb
		ret	x15
	endfunc_compat \_cpu\()_reset_func
.endm
686
/*
 * Helper macro that enables Maximum Power Mitigation Mechanism (MPMM) on
 * compatible Arm cores.
 *
 * Clobbers x0 (and x1 via sysreg_bit_set).
 */
.macro enable_mpmm
#if ENABLE_MPMM
	mrs	x0, CPUPPMCR_EL3
	/* if CPUPPMCR_EL3.MPMMPINCTL != 0, skip enabling MPMM */
	ands	x0, x0, CPUPPMCR_EL3_MPMMPINCTL_BIT
	b.ne	1f
	/*
	 * The enable bit lives in CPUMPMMCR_EL3; the original wrote it to
	 * CPUPPMCR_EL3 (the pin-control register read above), which would
	 * never actually enable MPMM.
	 */
	sysreg_bit_set CPUMPMMCR_EL3, CPUMPMMCR_EL3_MPMM_EN_BIT
	1:
#endif
.endm
703
/*
 * Call this just before a return to indicate support for pabandon. Only
 * necessary on an abandon call, but harmless on a powerdown call.
 *
 * PSCI wants us to tell it we handled a pabandon by returning 0. This is the
 * only way support for it is indicated.
 *
 * Clobbers x0 (the return value register).
 */
.macro signal_pabandon_handled
	mov_imm	x0, PABANDON_ACK
.endm
714
715#endif /* CPU_MACROS_S */
716