xref: /rk3399_ARM-atf/lib/extensions/amu/aarch32/amu.c (revision 31d3cc2570dd61ac30efab030708ef32fcc987e5)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

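/* Per-core AMU counter context, saved and restored across power-down suspend. */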
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

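/* Read the ID_PFR0.AMU field, which reports the AMU version implemented. */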
static inline __unused uint32_t read_id_pfr0_amu(void)
{
	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
		ID_PFR0_AMU_MASK;
}

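/*
 * Write HCPTR.TAM, which controls whether Non-secure EL0/EL1 accesses to the
 * Activity Monitor registers trap to EL2.
 */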
static inline __unused void write_hcptr_tam(uint32_t value)
{
	write_hcptr((read_hcptr() & ~TAM_BIT) |
		((value << TAM_SHIFT) & TAM_BIT));
}

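/*
 * Write AMCR.CG1RZ; when set, group 1 counters read as zero at all but the
 * highest implemented EL (FEAT_AMUv1p1).
 */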
static inline __unused void write_amcr_cg1rz(uint32_t value)
{
	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

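/* Read AMCFGR.NCG: the number of implemented counter groups, minus one. */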
static inline __unused uint32_t read_amcfgr_ncg(void)
{
	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
		AMCFGR_NCG_MASK;
}

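/* Read AMCGCR.CG0NC: the number of architected (group 0) counters. */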
static inline __unused uint32_t read_amcgcr_cg0nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
		AMCGCR_CG0NC_MASK;
}

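/* Read AMCGCR.CG1NC: the number of auxiliary (group 1) counters. */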
static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

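/*
 * Accessors for the per-counter enable bits: writing AMCNTENSET0/1 enables
 * counters, writing AMCNTENCLR0/1 disables them.
 */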
static inline __unused uint32_t read_amcntenset0_px(void)
{
	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;
}

static inline __unused uint32_t read_amcntenset1_px(void)
{
	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;
}

static inline __unused void write_amcntenset0_px(uint32_t px)
{
	uint32_t value = read_amcntenset0();

	value &= ~AMCNTENSET0_Pn_MASK;
	value |= (px << AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;

	write_amcntenset0(value);
}

static inline __unused void write_amcntenset1_px(uint32_t px)
{
	uint32_t value = read_amcntenset1();

	value &= ~AMCNTENSET1_Pn_MASK;
	value |= (px << AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;

	write_amcntenset1(value);
}

static inline __unused void write_amcntenclr0_px(uint32_t px)
{
	uint32_t value = read_amcntenclr0();

	value &= ~AMCNTENCLR0_Pn_MASK;
	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;

	write_amcntenclr0(value);
}

static inline __unused void write_amcntenclr1_px(uint32_t px)
{
	uint32_t value = read_amcntenclr1();

	value &= ~AMCNTENCLR1_Pn_MASK;
	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;

	write_amcntenclr1(value);
}

static bool amu_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (!amu_supported()) {
		return;
	}

	if (el2_unused) {
		/*
		 * Non-secure accesses from EL0 or EL1 to the Activity Monitor
		 * registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/* Enable group 0 counters */
	write_amcntenset0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Enable group 1 counters */
		write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile-time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the
	 * memory-mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}
#endif

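/*
 * Save this core's AMU context. Registered below as a handler for the PSCI
 * power-down suspend start event.
 */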
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_px() ==
		((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U));

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		assert(read_amcntenset1_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/*
	 * Disable group 0/1 counters to prevent other observers, such as the
	 * SCP, from sampling counter values from the future via the
	 * memory-mapped view.
	 */
	write_amcntenclr0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		write_amcntenclr1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < read_amcgcr_cg0nc(); i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Save group 1 counters */
		for (i = 0U; i < read_amcgcr_cg1nc(); i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}
	}
#endif

	return (void *)0;
}

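/*
 * Restore this core's AMU context. Registered below as a handler for the PSCI
 * power-down suspend finish event.
 */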
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		assert(read_amcntenset1_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < read_amcgcr_cg0nc(); i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Restore group 1 counters */
		for (i = 0U; i < read_amcgcr_cg1nc(); i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}

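/* Hook the save/restore handlers into the PSCI suspend power-down events. */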
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);