xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision 81e2ff1f364fdf18e086f690eb3715bc89307592)
1 /*
2  * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <cdefs.h>
9 #include <stdbool.h>
10 
11 #include <arch.h>
12 #include <arch_features.h>
13 #include <arch_helpers.h>
14 
15 #include <lib/el3_runtime/pubsub_events.h>
16 #include <lib/extensions/amu.h>
17 #include <lib/extensions/amu_private.h>
18 
19 #include <plat/common/platform.h>
20 
/* Per-core AMU context, indexed by core position; used across suspend/resume. */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
22 
23 static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
24 {
25 	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
26 		ID_AA64PFR0_AMU_MASK;
27 }
28 
29 static inline __unused uint64_t read_hcr_el2_amvoffen(void)
30 {
31 	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
32 		HCR_AMVOFFEN_SHIFT;
33 }
34 
35 static inline __unused void write_cptr_el2_tam(uint64_t value)
36 {
37 	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
38 		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
39 }
40 
41 static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
42 {
43 	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
44 
45 	value &= ~TAM_BIT;
46 	value |= (tam << TAM_SHIFT) & TAM_BIT;
47 
48 	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
49 }
50 
51 static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
52 {
53 	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
54 		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
55 }
56 
57 static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
58 {
59 	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
60 		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
61 }
62 
63 static inline __unused uint64_t read_amcfgr_el0_ncg(void)
64 {
65 	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
66 		AMCFGR_EL0_NCG_MASK;
67 }
68 
69 static inline uint64_t read_amcgcr_el0_cg0nc(void)
70 {
71 	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
72 		AMCGCR_EL0_CG0NC_MASK;
73 }
74 
75 static inline __unused uint64_t read_amcg1idr_el0_voff(void)
76 {
77 	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
78 		AMCG1IDR_VOFF_MASK;
79 }
80 
81 static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
82 {
83 	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
84 		AMCGCR_EL0_CG1NC_MASK;
85 }
86 
87 static inline __unused uint64_t read_amcntenset0_el0_px(void)
88 {
89 	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
90 		AMCNTENSET0_EL0_Pn_MASK;
91 }
92 
93 static inline __unused uint64_t read_amcntenset1_el0_px(void)
94 {
95 	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
96 		AMCNTENSET1_EL0_Pn_MASK;
97 }
98 
99 static inline __unused void write_amcntenset0_el0_px(uint64_t px)
100 {
101 	uint64_t value = read_amcntenset0_el0();
102 
103 	value &= ~AMCNTENSET0_EL0_Pn_MASK;
104 	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;
105 
106 	write_amcntenset0_el0(value);
107 }
108 
109 static inline __unused void write_amcntenset1_el0_px(uint64_t px)
110 {
111 	uint64_t value = read_amcntenset1_el0();
112 
113 	value &= ~AMCNTENSET1_EL0_Pn_MASK;
114 	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;
115 
116 	write_amcntenset1_el0(value);
117 }
118 
119 static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
120 {
121 	uint64_t value = read_amcntenclr0_el0();
122 
123 	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
124 	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;
125 
126 	write_amcntenclr0_el0(value);
127 }
128 
129 static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
130 {
131 	uint64_t value = read_amcntenclr1_el0();
132 
133 	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
134 	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;
135 
136 	write_amcntenclr1_el0(value);
137 }
138 
139 static bool amu_supported(void)
140 {
141 	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
142 }
143 
144 static bool amu_v1p1_supported(void)
145 {
146 	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
147 }
148 
149 #if ENABLE_AMU_AUXILIARY_COUNTERS
static bool amu_group1_supported(void)
{
	/* Any counter group beyond group 0 implies group 1 exists. */
	return read_amcfgr_el0_ncg() != 0U;
}
154 #endif
155 
/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 *
 * el2_unused: true if EL2 is not in use, in which case the EL2 trap
 *             control and virtual offset enable are programmed directly.
 * ctx:        context whose saved CPTR_EL3 value is updated so AMU
 *             register accesses do not trap to EL3 after exiting.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	/* Nothing to do if FEAT_AMUv1 is not implemented. */
	if (!amu_supported()) {
		return;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Check and set presence of group 1 counters */
		if (!amu_group1_supported()) {
			ERROR("AMU Counter Group 1 is not implemented\n");
			panic();
		}

		/* Check number of group 1 counters */
		uint64_t cnt_num = read_amcgcr_el0_cg1nc();

		VERBOSE("%s%llu. %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);

		/*
		 * The build must not request more group 1 counters than the
		 * hardware implements; this is a configuration error.
		 */
		if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
			ERROR("%s%llu is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
			panic();
		}
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Enable group 1 counters */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL.  This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system
	 * register reads at lower ELs return zero.  Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}
239 
/* Return the current value of group 0 counter `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	/* Only valid with FEAT_AMUv1 and an implemented counter index. */
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	uint64_t cnt = amu_group0_cnt_read_internal(idx);

	return cnt;
}
248 
/* Program group 0 counter `idx` with the value `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	/* Only valid with FEAT_AMUv1 and an implemented counter index. */
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);

	/* Ensure the write completes before subsequent instructions. */
	isb();
}
258 
/*
 * Return the group 0 virtual offset for index `idx`. Valid indices are 0,
 * 2 and 3; no offset register exists for index 1.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	uint64_t voffset = amu_group0_voffset_read_internal(idx);

	return voffset;
}
273 
/*
 * Program the group 0 virtual offset for index `idx` with `val`. Valid
 * indices are 0, 2 and 3; no offset register exists for index 1.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);

	/* Ensure the write completes before subsequent instructions. */
	isb();
}
289 
290 #if ENABLE_AMU_AUXILIARY_COUNTERS
291 /* Read the group 1 counter identified by the given `idx` */
292 static uint64_t amu_group1_cnt_read(unsigned int idx)
293 {
294 	assert(amu_supported());
295 	assert(amu_group1_supported());
296 	assert(idx < AMU_GROUP1_NR_COUNTERS);
297 
298 	return amu_group1_cnt_read_internal(idx);
299 }
300 
301 /* Write the group 1 counter identified by the given `idx` with `val` */
302 static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
303 {
304 	assert(amu_supported());
305 	assert(amu_group1_supported());
306 	assert(idx < AMU_GROUP1_NR_COUNTERS);
307 
308 	amu_group1_cnt_write_internal(idx, val);
309 	isb();
310 }
311 
312 /*
313  * Read the group 1 offset register for a given index.
314  *
315  * Using this function requires FEAT_AMUv1p1 support.
316  */
317 static uint64_t amu_group1_voffset_read(unsigned int idx)
318 {
319 	assert(amu_v1p1_supported());
320 	assert(amu_group1_supported());
321 	assert(idx < AMU_GROUP1_NR_COUNTERS);
322 	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
323 
324 	return amu_group1_voffset_read_internal(idx);
325 }
326 
327 /*
328  * Write the group 1 offset register for a given index.
329  *
330  * Using this function requires FEAT_AMUv1p1 support.
331  */
332 static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
333 {
334 	assert(amu_v1p1_supported());
335 	assert(amu_group1_supported());
336 	assert(idx < AMU_GROUP1_NR_COUNTERS);
337 	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
338 
339 	amu_group1_voffset_write_internal(idx, val);
340 	isb();
341 }
342 #endif
343 
/*
 * PSCI suspend hook: disable all AMU counters, then save their values (and
 * virtual offsets, when FEAT_AMUv1p1 offsets are enabled) into the per-core
 * context so they can be restored after power down.
 *
 * Returns (void *)0 on success, (void *)-1 if the AMU (or the expected
 * group 1 counters) is not present. `arg` is unused.
 */
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0_px() ==
		((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U));

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Ensure counters are disabled before their values are sampled. */
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* Not using a loop because count is fixed and index 1 DNE. */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Save group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}

		/* Save group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			/* Only counters that implement a virtual offset. */
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					ctx->group1_voffsets[i] =
						amu_group1_voffset_read(i);
				}
			}
		}
	}
#endif

	return (void *)0;
}
424 
/*
 * PSCI resume hook: restore the AMU counter values (and virtual offsets,
 * when FEAT_AMUv1p1 offsets are enabled) saved by `amu_context_save()`,
 * then re-enable the counters.
 *
 * Returns (void *)0 on success, (void *)-1 if the AMU (or the expected
 * group 1 counters) is not present. `arg` is unused.
 */
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* Not using a loop because count is fixed and index 1 DNE. */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Restore group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			/* Only counters that implement a virtual offset. */
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					amu_group1_voffset_write(i,
						ctx->group1_voffsets[i]);
				}
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}
496 
/*
 * Register the save/restore hooks with the PSCI suspend events so that AMU
 * state survives core power down/up cycles.
 */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
499