xref: /rk3399_ARM-atf/lib/cpus/aarch64/cpu_helpers.S (revision 1123a5e2f973dc9f0223467f4782f6b2df542620)
/*
 * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* The reset function is needed by BL images running at the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
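
/*
 * Note: plat_reset_handler, invoked above, is a platform hook. In upstream
 * TF-A a default empty implementation is provided (see
 * plat/common/aarch64/platform_helpers.S) and platforms may override it for
 * early CPU-specific setup. Per the contract above it may clobber only
 * x0 - x18 and x30; it must preserve x19 - x29, as the reset handler relies
 * on x19 surviving the call.
 */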

#ifdef IMAGE_BL31 /* The core and cluster power down sequences are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
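
/*
 * Worked example: the cpu_ops power down handlers form an array of 8-byte
 * pointers starting at offset CPU_PWR_DWN_OPS, so for power level N the
 * handler is loaded from cpu_ops + CPU_PWR_DWN_OPS + (N << 3). An
 * illustrative, hypothetical call site requesting a cluster-level power
 * down (assuming level 1 denotes the cluster) would be:
 *
 *	mov	x0, #1			// power level 1: cluster
 *	bl	prepare_cpu_pwr_dwn
 */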

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the cpu_ops entries. Only the implementer and part number
	 * are used to match the entries.
	 * Return:
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the MIDR from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the MIDR matches the MIDR of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	ret
endfunc get_cpu_ops_ptr
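
/*
 * Note: the entries between __CPU_OPS_START__ and __CPU_OPS_END__ searched
 * above are emitted into the cpu_ops section by the declare_cpu_ops family
 * of macros in cpu_macros.S. A representative registration, modeled on the
 * upstream Cortex-A53 library, looks like:
 *
 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 *		cortex_a53_reset_func, \
 *		cortex_a53_core_pwr_dwn, \
 *		cortex_a53_cluster_pwr_dwn
 */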

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
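
/*
 * Worked example: a core reporting variant 0x1 (MIDR[23:20]) and revision
 * 0x2 (MIDR[3:0]), i.e. r1p2, yields x0 = 0x12. This packed form is what
 * the cpu_rev_var_* comparison helpers below expect in x0.
 */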

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
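
/*
 * Illustrative, hypothetical sketch: a typical per-CPU erratum check built
 * on the helper above, reporting that an erratum applies to revisions r1p2
 * and earlier. The function name and cut-off revision are invented for
 * illustration; x0 is expected to hold the packed revision-variant returned
 * by cpu_get_rev_var.
 */
func check_errata_example
	/* Cut-off revision-variant: variant 1, revision 2 (r1p2) */
	mov	x1, #0x12
	b	cpu_rev_var_ls
endfunc check_errata_example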

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range, inclusive of both bounds, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
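
/*
 * Illustrative, hypothetical sketch: a check for an invented erratum
 * affecting only revisions r1p0 through r1p2 inclusive, using the range
 * helper above.
 */
func check_errata_range_example
	mov	x1, #0x10	/* lower bound: r1p0 */
	mov	x2, #0x12	/* upper bound: r1p2 */
	b	cpu_rev_var_range
endfunc check_errata_range_example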

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the cpu_ops pointer from per-CPU data and, from it, the
	 * errata printing function. If it's non-NULL, jump to the function
	 * in turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif
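
/*
 * Note: errata_needs_reporting, called above, is implemented in C
 * (lib/cpus/errata_report.c in upstream TF-A). Under the given lock it
 * atomically tests and sets the per-class printed flag; its prototype is
 * of the form:
 *
 *	int errata_needs_reporting(spinlock_t *lock, uint32_t *reported);
 */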

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when the firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when the firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when the firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715, so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable the mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
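/*
 * Note: the CPU_EXTRA1_FUNC and CPU_EXTRA2_FUNC slots read by the two
 * functions above are populated via the declare_cpu_ops_wa macro in
 * cpu_macros.S. A representative registration, modeled on the upstream
 * Cortex-A57 library (which needs the CVE-2017-5715 check but no dynamic
 * CVE-2018-3639 disable hook), looks like:
 *
 *	declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \
 *		cortex_a57_reset_func, \
 *		check_errata_cve_2017_5715, \
 *		CPU_NO_EXTRA2_FUNC, \
 *		cortex_a57_core_pwr_dwn, \
 *		cortex_a57_cluster_pwr_dwn
 */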