xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision e747a59be4ab8e9fa6edc7f4fb04478cd0f823c2)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

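/*
 * Per-core AMU context, used to preserve the counter values (and, where
 * implemented, their virtual offsets) across a power-down/power-up cycle.
 */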
struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

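/*
 * Compile-time sanity checks tying the width of the `group0_enable` and
 * `group1_enable` bitmask fields to the maximum number of counters in each
 * group.
 */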
CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

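/*
 * Helpers for reading and writing individual fields of the AMU-related system
 * registers. They are thin wrappers around the raw register accessors and are
 * marked __unused because not every build configuration calls all of them.
 */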
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

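/*
 * Feature detection helpers: FEAT_AMUv1 and FEAT_AMUv1p1 are discovered from
 * ID_AA64PFR0_EL1.AMU, and the presence of auxiliary (group 1) counters from
 * AMCFGR_EL0.NCG.
 */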
static __unused bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static __unused bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	uint64_t id_aa64pfr0_el1_amu;		/* AMU version */

	uint64_t amcfgr_el0_ncg;		/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;		/* Number of group 0 counters */

	uint64_t amcntenset0_el0_px = 0x0;	/* Group 0 enable mask */
	uint64_t amcntenset1_el0_px = 0x0;	/* Group 1 enable mask */

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */

		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;

	assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_el0_px(amcntenset0_el0_px);

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(amcntenset1_el0_px);
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (id_aa64pfr0_el1_amu < ID_AA64PFR0_AMU_V1P1) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %llu!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3, as the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2, or
 * 3, as the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

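/*
 * Context-save hook, run on the PSCI suspend power-down path. Disables all
 * currently-enabled counters, then snapshots the counter values (and any
 * virtual offsets) into this core's amu_ctx entry.
 */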
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */
	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

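/*
 * Context-restore hook, run when a core resumes from power-down. Writes the
 * saved counter values and virtual offsets back, then re-enables exactly the
 * counters that were enabled at save time.
 */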
static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */

	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */

	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_el0_px() == 0U);

	if (amcfgr_el0_ncg > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

	return (void *)0;
}

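/*
 * Hook the save/restore routines into the PSCI suspend power-down events so
 * that they run automatically around each power-down cycle.
 */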
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);