/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

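/*
 * Per-core AMU context, indexed by plat_my_core_pos(). Counter values (and,
 * with FEAT_AMUv1p1, virtual offsets) are stashed here across a power-down
 * suspend and written back on resume.
 */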
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

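/*
 * Helpers for accessing individual fields of the AMU-related system
 * registers. Each write helper performs a read-modify-write of the named
 * field, leaving the rest of the register untouched.
 */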
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

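/*
 * Unlike the helpers above, this one updates CPTR_EL3.TAM in the saved EL3
 * context image (`ctx`) rather than in the live register; the new value takes
 * effect when that context is restored on exit from EL3.
 */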
static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

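/* Check whether FEAT_AMUv1 (or later) is present, based on ID_AA64PFR0_EL1.AMU. */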
static bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

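/*
 * Check whether FEAT_AMUv1p1 (or later) is present. AMUv1p1 adds the virtual
 * offset registers and the group 1 access restriction used further below.
 */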
static bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
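/* Auxiliary (group 1) counters exist if AMCFGR_EL0 reports more than one counter group. */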
static bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	if (!amu_supported()) {
		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context pointed to
	 * by 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Enable group 1 counters */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 is not used. */
		write_hcr_el2_amvoffen(0U);
	}
#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

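/*
 * Save the AMU state ahead of a core power down. The counters are disabled
 * first so that the memory-mapped view cannot expose values newer than the
 * ones captured here; the group 0/1 counters and, where implemented and
 * enabled, their virtual offsets are then copied into the per-core context.
 */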
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

	/* Assert that the group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0_px() ==
		((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U));

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/*
	 * Disable the group 0/1 counters so that other observers (e.g. an SCP
	 * reading the memory-mapped view) cannot sample counter values newer
	 * than the ones saved below.
	 */
	write_amcntenclr0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* Not using a loop: count is fixed and index 1 does not exist. */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Save group 1 counters */
		for (i = 0U; i < read_amcgcr_el0_cg1nc(); i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}

		/* Save group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < read_amcgcr_el0_cg1nc(); i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					ctx->group1_voffsets[i] =
						amu_group1_voffset_read(i);
				}
			}
		}
	}
#endif

	return (void *)0;
}

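/*
 * Restore the AMU state saved by amu_context_save() once the core has
 * resumed: counters and virtual offsets are written back before the counters
 * are re-enabled.
 */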
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

	/* Counters were disabled in `amu_context_save()`. */
	assert(read_amcntenset0_el0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		assert(read_amcntenset1_el0_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* Not using a loop: count is fixed and index 1 does not exist. */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Restore group 1 counters */
		for (i = 0U; i < read_amcgcr_el0_cg1nc(); i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < read_amcgcr_el0_cg1nc(); i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					amu_group1_voffset_write(i,
						ctx->group1_voffsets[i]);
				}
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}

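/*
 * Hook the save/restore handlers into the PSCI suspend-to-power-down
 * sequence via the EL3 runtime pubsub framework.
 */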
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);