xref: /rk3399_ARM-atf/lib/extensions/amu/aarch32/amu.c (revision 33b9be6d758d4fcef1f5a9802a54bb56f2c4ff8d)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

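/* Per-core AMU counter context, saved before power down and restored after. */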
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

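/* Read the AMU field of ID_PFR0, which reports the implemented AMU version. */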
static inline __unused uint32_t read_id_pfr0_amu(void)
{
	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
		ID_PFR0_AMU_MASK;
}

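/* Set or clear HCPTR.TAM, which traps NS EL0/EL1 AMU accesses to EL2. */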
static inline __unused void write_hcptr_tam(uint32_t value)
{
	write_hcptr((read_hcptr() & ~TAM_BIT) |
		((value << TAM_SHIFT) & TAM_BIT));
}

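/* Set or clear AMCR.CG1RZ (group 1 counters read as zero at lower ELs). */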
static inline __unused void write_amcr_cg1rz(uint32_t value)
{
	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

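/* Read AMCFGR.NCG, the number of implemented counter groups minus one. */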
static inline __unused uint32_t read_amcfgr_ncg(void)
{
	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
		AMCFGR_NCG_MASK;
}

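/* Read AMCGCR.CG1NC, the number of implemented group 1 counters. */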
static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

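/*
 * Accessors for the Pn fields of the counter enable set/clear registers
 * (AMCNTENSET0/1, AMCNTENCLR0/1), one bit per counter in the given group.
 */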
static inline __unused uint32_t read_amcntenset0_px(void)
{
	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;
}

static inline __unused uint32_t read_amcntenset1_px(void)
{
	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;
}

static inline __unused void write_amcntenset0_px(uint32_t px)
{
	uint32_t value = read_amcntenset0();

	value &= ~AMCNTENSET0_Pn_MASK;
	value |= (px << AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;

	write_amcntenset0(value);
}

static inline __unused void write_amcntenset1_px(uint32_t px)
{
	uint32_t value = read_amcntenset1();

	value &= ~AMCNTENSET1_Pn_MASK;
	value |= (px << AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;

	write_amcntenset1(value);
}

static inline __unused void write_amcntenclr0_px(uint32_t px)
{
	uint32_t value = read_amcntenclr0();

	value &= ~AMCNTENCLR0_Pn_MASK;
	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;

	write_amcntenclr0(value);
}

static inline __unused void write_amcntenclr1_px(uint32_t px)
{
	uint32_t value = read_amcntenclr1();

	value &= ~AMCNTENCLR1_Pn_MASK;
	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;

	write_amcntenclr1(value);
}

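/* An AMU is available if ID_PFR0.AMU reports at least AMUv1. */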
static bool amu_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
}

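/* FEAT_AMUv1p1 is available if ID_PFR0.AMU reports at least AMUv1p1. */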
static bool amu_v1p1_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1P1;
}

#if AMU_GROUP1_NR_COUNTERS
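/* Group 1 (auxiliary) counters are implemented if AMCFGR.NCG is nonzero. */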
static bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check for presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint32_t cnt_num = read_amcgcr_cg1nc();
	VERBOSE("%s%u. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%u is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * Non-secure accesses from EL0 or EL1 to the Activity Monitor
		 * registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/* Enable group 0 counters */
	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}
#endif	/* AMU_GROUP1_NR_COUNTERS */

static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_px() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_px() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_px(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_px() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_px() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

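/* Save/restore the AMU context around PSCI suspend-to-power-down. */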
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);