xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision 33b9be6d758d4fcef1f5a9802a54bb56f2c4ff8d)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

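/*
 * Per-core AMU register context, saved by amu_context_save() on suspend and
 * restored by amu_context_restore() on resume.
 */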
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

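/*
 * Accessor helpers for the AMU-related system register fields used below.
 * Each read helper extracts only the named bitfield; each write helper
 * updates that field with a read-modify-write, leaving the rest of the
 * register unchanged.
 */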
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

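/*
 * Feature detection helpers: the AMU architecture version is read from
 * ID_AA64PFR0_EL1, while the presence of auxiliary (group 1) counters is
 * determined from AMCFGR_EL0.NCG.
 */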
static bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if AMU_GROUP1_NR_COUNTERS
static bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check for the presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check the number of group 1 counters */
	uint64_t cnt_num = read_amcgcr_el0_cg1nc();
	VERBOSE("%s%llu. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%llu is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve the CPTR_EL3 value from the context passed in 'ctx' and
	 * clear CPTR_EL3.TAM so that accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 is not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile-time flag; when it is set,
	 * system register reads at lower ELs return zero. Reads from the
	 * memory-mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 0 offset register for a given index. The index must be 0, 2
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. The index must be 0, 2
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif	/* AMU_GROUP1_NR_COUNTERS */

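/*
 * PSCI suspend hook: disable the enabled group 0/1 counters and save them
 * (and, on FEAT_AMUv1p1, their virtual offsets) into this core's amu_ctx
 * entry before power down.
 */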
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that the group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0_px() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable the group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values from the future via the
	 * memory-mapped view.
	 */
	write_amcntenclr0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/*
		 * Not using a loop because the count is fixed and the register
		 * for index 1 does not exist.
		 */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}

	/* Save group 1 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		uint64_t amcg1idr = read_amcg1idr_el0_voff() &
			AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				ctx->group1_voffsets[i] =
					amu_group1_voffset_read(i);
			}
		}
	}
#endif
	return (void *)0;
}

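/*
 * PSCI resume hook: restore the group 0/1 counters (and, on FEAT_AMUv1p1,
 * their virtual offsets) from this core's amu_ctx entry and re-enable them.
 */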
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0_px() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0_px() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/*
		 * Not using a loop because the count is fixed and the register
		 * for index 1 does not exist.
		 */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		uint64_t amcg1idr = read_amcg1idr_el0_voff() &
			AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				amu_group1_voffset_write(i,
					ctx->group1_voffsets[i]);
			}
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

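/* Register the save/restore hooks with the PSCI suspend power-down events. */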
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);