xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision 3dbbbca29e3c42a6f9976878f27e1f1fd75b5c8e)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

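/* Per-core AMU counter context, saved/restored across power-down suspend. */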
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

/*
 * Get the AMU version from ID_AA64PFR0_EL1.
 * Return values
 *   ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in Armv8.4)
 *   ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in Armv8.6)
 *   ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported
 */
unsigned int amu_get_version(void)
{
	return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}
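
/*
 * Illustrative sketch (not code from this file): a caller that needs the
 * FEAT_AMUv1p1 virtual offset registers would gate on the version like so:
 *
 *	if (amu_get_version() >= ID_AA64PFR0_AMU_V1P1) {
 *		... use amu_group0_voffset_read()/amu_group0_voffset_write() ...
 *	}
 */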

#if AMU_GROUP1_NR_COUNTERS
/*
 * Check if group 1 counters are implemented: AMCFGR_EL0.NCG holds the number
 * of implemented counter groups minus one, so a value of 1 means that both
 * group 0 and group 1 are present.
 */
bool amu_group1_supported(void)
{
	uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;

	return (features & AMCFGR_EL0_NCG_MASK) == 1U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	uint64_t v;
	unsigned int amu_version = amu_get_version();

	if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check the presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check that enough group 1 counters are implemented */
	uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
				AMCGCR_EL0_CG1NC_MASK;
	VERBOSE("Number of AMU Group 1 Counters %llu, requested number %u\n",
		cnt_num, AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("Number of AMU Group 1 Counters %llu is less than the "
			"requested number %u\n",
			cnt_num, AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		v = read_cptr_el2();
		v &= ~CPTR_EL2_TAM_BIT;
		write_cptr_el2(v);
	}

	/*
	 * CPTR_EL3.TAM: Set to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	v = read_cptr_el3();
	v &= ~TAM_BIT;
	write_cptr_el3(v);

	/* Enable group 0 counters */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (amu_version < ID_AA64PFR0_AMU_V1P1) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 is not used. */
		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag: when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT);
#else
	write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT);
#endif
}
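
/*
 * Illustrative call-site sketch (an assumption, not code from this file):
 * the EL3 context management code would typically call amu_enable() while
 * preparing the non-secure world, e.g.:
 *
 *	bool el2_unused = ...;	// hypothetical: derived from the EL2 state
 *	amu_enable(el2_unused);
 */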

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}
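
/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * group 0 counter 0 counts processor cycles, so a caller could measure a
 * code region roughly as follows, provided counters were enabled first:
 *
 *	uint64_t start = amu_group0_cnt_read(0U);
 *	do_work();	// hypothetical workload
 *	uint64_t cycles = amu_group0_cnt_read(0U) - start;
 */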

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}
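
/*
 * Group 0 counter 1 is the constant-frequency cycle counter, which the
 * architecture does not permit to be offset; that is why index 1 is
 * skipped above. As an illustrative sketch (not code from this file), a
 * hypervisor could zero-base the cycle counter seen by a guest with:
 *
 *	amu_group0_voffset_write(0U, amu_group0_cnt_read(0U));
 */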

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		(1ULL << idx)) != 0ULL);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		(1ULL << idx)) != 0ULL);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`.
 */
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}
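
/*
 * Illustrative sketch (an assumption, not code from this file): a platform
 * could bind a group 1 counter to an IMPLEMENTATION DEFINED event and read
 * it back, e.g. with a hypothetical event number PLAT_AMU_EVT:
 *
 *	amu_group1_set_evtype(0U, PLAT_AMU_EVT);
 *	uint64_t count = amu_group1_cnt_read(0U);
 */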
#endif	/* AMU_GROUP1_NR_COUNTERS */

static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values newer than the saved context
	 * via the memory mapped view.
	 */
	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		/* Not using a loop: the count is fixed and index 1 does not exist. */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}

	/* Save group 1 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		u_register_t amcg1idr = read_amcg1idr_el0() >>
			AMCG1IDR_VOFF_SHIFT;
		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				ctx->group1_voffsets[i] =
					amu_group1_voffset_read(i);
			}
		}
	}
#endif
	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		/* Not using a loop: the count is fixed and index 1 does not exist. */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		u_register_t amcg1idr = read_amcg1idr_el0() >>
			AMCG1IDR_VOFF_SHIFT;
		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				amu_group1_voffset_write(i,
					ctx->group1_voffsets[i]);
			}
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

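/*
 * Hook the save/restore handlers into the PSCI power-down sequence: the
 * PSCI library publishes these events around CPU suspend, so the per-core
 * AMU context is preserved across the power cycle.
 */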
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);