xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision 1fd685a74dd33c9c26a0ec0e82e7a5a378461362)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

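/* Per-core AMU context, saved on power-down and restored on power-up. */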
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

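/*
 * Helpers for reading and writing individual AMU-related system register
 * fields. Each write helper performs a read-modify-write that touches only
 * the field it names, leaving the remaining bits of the register intact.
 */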
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

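/* AMCFGR_EL0.NCG: number of implemented counter groups, minus one. */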
static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

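/*
 * AMCG1IDR_EL0 VOFF field: bitmap of the group 1 counters for which a
 * virtual offset register is implemented.
 */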
static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

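/* AMCGCR_EL0.CG1NC: number of counters implemented in group 1. */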
static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

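/*
 * Helpers for the counter enable set/clear registers. Writing a 1 to a bit
 * in AMCNTENSET{0,1}_EL0 enables the corresponding counter; writing a 1 to
 * a bit in AMCNTENCLR{0,1}_EL0 disables it.
 */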
static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

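/* FEAT_AMU and FEAT_AMUv1p1 detection, based on ID_AA64PFR0_EL1.AMU. */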
static bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
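/* Group 1 (auxiliary) counters exist if more than one counter group is implemented. */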
static bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	if (!amu_supported()) {
		return;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Check presence of group 1 counters */
		if (!amu_group1_supported()) {
			ERROR("AMU Counter Group 1 is not implemented\n");
			panic();
		}

		/* Check number of group 1 counters */
		uint64_t cnt_num = read_amcgcr_el0_cg1nc();

		VERBOSE("Number of AMU Group 1 Counters: %llu, requested number: %u\n",
			cnt_num, AMU_GROUP1_NR_COUNTERS);

		if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
			ERROR("Number of AMU Group 1 Counters %llu is less than requested number %u\n",
				cnt_num, AMU_GROUP1_NR_COUNTERS);
			panic();
		}
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so that any accesses to the
		 * Activity Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context pointed to
	 * by 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to the
	 * Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Enable group 1 counters */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 is not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index. The counter must have
 * a virtual offset implemented, as reported by AMCG1IDR_EL0.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index. The counter must have
 * a virtual offset implemented, as reported by AMCG1IDR_EL0.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

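/*
 * Save the AMU state (counters and, where implemented, virtual offsets) for
 * the calling core, disabling the counters first so that the saved values
 * stay consistent. Runs on the PSCI suspend power-down path.
 */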
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Assert that the group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0_px() == AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/*
	 * Disable the group 0/1 counters so that other observers, such as an
	 * SCP reading the memory-mapped view, cannot sample counter values
	 * from the future.
	 */
	write_amcntenclr0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/*
		 * Not using a loop because the count is fixed and the offset
		 * register for index 1 does not exist.
		 */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Save group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}

		/* Save group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					ctx->group1_voffsets[i] =
						amu_group1_voffset_read(i);
				}
			}
		}
	}
#endif

	return (void *)0;
}

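/*
 * Restore the AMU state saved by `amu_context_save()` for the calling core,
 * then re-enable the counters. Runs when the core finishes resuming from
 * suspend.
 */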
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/*
		 * Not using a loop because the count is fixed and the offset
		 * register for index 1 does not exist.
		 */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Restore group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					amu_group1_voffset_write(i,
						ctx->group1_voffsets[i]);
				}
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}

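/*
 * Hook the save/restore handlers into the PSCI suspend power-down sequence
 * via the EL3 runtime publish-subscribe framework.
 */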
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);