xref: /rk3399_ARM-atf/lib/extensions/amu/aarch32/amu.c (revision e747a59be4ab8e9fa6edc7f4fb04478cd0f823c2)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

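/*
 * Per-core AMU context, used by the context save/restore handlers below to
 * preserve the group 0 (and, if implemented, group 1) counter values and
 * their enable bits across a core power down.
 */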
struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

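/*
 * Accessors for the individual register fields used below: ID_PFR0.AMU,
 * HCPTR.TAM, AMCR.CG1RZ, AMCFGR.NCG, AMCGCR.CG0NC/CG1NC and the Pn enable
 * bits in AMCNTENSET0/1 and AMCNTENCLR0/1.
 */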
static inline __unused uint32_t read_id_pfr0_amu(void)
{
	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
		ID_PFR0_AMU_MASK;
}

static inline __unused void write_hcptr_tam(uint32_t value)
{
	write_hcptr((read_hcptr() & ~TAM_BIT) |
		((value << TAM_SHIFT) & TAM_BIT));
}

static inline __unused void write_amcr_cg1rz(uint32_t value)
{
	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint32_t read_amcfgr_ncg(void)
{
	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
		AMCFGR_NCG_MASK;
}

static inline __unused uint32_t read_amcgcr_cg0nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
		AMCGCR_CG0NC_MASK;
}

static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

static inline __unused uint32_t read_amcntenset0_px(void)
{
	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;
}

static inline __unused uint32_t read_amcntenset1_px(void)
{
	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;
}

static inline __unused void write_amcntenset0_px(uint32_t px)
{
	uint32_t value = read_amcntenset0();

	value &= ~AMCNTENSET0_Pn_MASK;
	value |= (px << AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;

	write_amcntenset0(value);
}

static inline __unused void write_amcntenset1_px(uint32_t px)
{
	uint32_t value = read_amcntenset1();

	value &= ~AMCNTENSET1_Pn_MASK;
	value |= (px << AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;

	write_amcntenset1(value);
}

static inline __unused void write_amcntenclr0_px(uint32_t px)
{
	uint32_t value = read_amcntenclr0();

	value &= ~AMCNTENCLR0_Pn_MASK;
	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;

	write_amcntenclr0(value);
}

static inline __unused void write_amcntenclr1_px(uint32_t px)
{
	uint32_t value = read_amcntenclr1();

	value &= ~AMCNTENCLR1_Pn_MASK;
	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;

	write_amcntenclr1(value);
}

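/* An AMU is available if ID_PFR0.AMU reports at least AMUv1. */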
static __unused bool amu_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
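/*
 * Auxiliary (group 1) counters are implemented if AMCFGR.NCG reports more
 * than one counter group.
 */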
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	uint32_t id_pfr0_amu;		/* AMU version */

	uint32_t amcfgr_ncg;		/* Number of counter groups */
	uint32_t amcgcr_cg0nc;		/* Number of group 0 counters */

	uint32_t amcntenset0_px = 0x0;	/* Group 0 enable mask */
	uint32_t amcntenset1_px = 0x0;	/* Group 1 enable mask */

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */

		return;
	}

	if (el2_unused) {
		/*
		 * HCPTR.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_cg0nc = read_amcgcr_cg0nc();
	amcntenset0_px = (UINT32_C(1) << (amcgcr_cg0nc)) - 1U;

	assert(amcgcr_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_px(amcntenset0_px);

	amcfgr_ncg = read_amcfgr_ncg();
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(amcntenset1_px);
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (id_pfr0_amu < ID_PFR0_AMU_V1P1) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * by the AMU_RESTRICT_COUNTERS compile-time flag; when it is set,
	 * system register reads at lower ELs return zero. Reads from the
	 * memory-mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}
#endif

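/*
 * PSCI power-down hook: record which counters are currently enabled, stop
 * them all, and save the counter values into this core's context.
 */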
static void *amu_context_save(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t id_pfr0_amu;	/* AMU version */
	uint32_t amcgcr_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcfgr_ncg;	/* Number of counter groups */
	uint32_t amcgcr_cg1nc;	/* Number of group 1 counters */
#endif

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_px();
	write_amcntenclr0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_px();
		write_amcntenclr1_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	return (void *)0;
}

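/*
 * PSCI wake-up hook: write the saved counter values back, then re-enable the
 * counters that were running when the context was saved.
 */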
static void *amu_context_restore(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t id_pfr0_amu;	/* AMU version */

	uint32_t amcfgr_ncg;	/* Number of counter groups */
	uint32_t amcgcr_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcgcr_cg1nc;	/* Number of group 1 counters */
#endif

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_px() == 0U);

	if (amcfgr_ncg > 0U) {
		assert(read_amcntenset1_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(ctx->group1_enable);
	}
#endif

	return (void *)0;
}

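/*
 * Register the save/restore handlers with the EL3 runtime publish-subscribe
 * framework so they run around PSCI suspend power down.
 */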
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);