xref: /rk3399_ARM-atf/lib/extensions/amu/aarch32/amu.c (revision 33b9be6d758d4fcef1f5a9802a54bb56f2c4ff8d)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <common/debug.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

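/*
 * One AMU context is reserved per core, indexed by plat_my_core_pos().
 * `struct amu_ctx` is declared in <lib/extensions/amu_private.h>; as a rough
 * sketch (the header holds the authoritative definition), it simply carries
 * one saved value per counter handled by the suspend save/restore path below:
 *
 *	struct amu_ctx {
 *		uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
 *	#if AMU_GROUP1_NR_COUNTERS
 *		uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
 *	#endif
 *	};
 */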
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

static inline __unused uint32_t read_id_pfr0_amu(void)
{
	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
		ID_PFR0_AMU_MASK;
}

static inline __unused void write_hcptr_tam(uint32_t value)
{
	write_hcptr((read_hcptr() & ~TAM_BIT) |
		((value << TAM_SHIFT) & TAM_BIT));
}

static inline __unused void write_amcr_cg1rz(uint32_t value)
{
	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint32_t read_amcfgr_ncg(void)
{
	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
		AMCFGR_NCG_MASK;
}

static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

static inline __unused uint32_t read_amcntenset0_px(void)
{
	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;
}

static inline __unused uint32_t read_amcntenset1_px(void)
{
	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;
}

static inline __unused void write_amcntenset0_px(uint32_t px)
{
	uint32_t value = read_amcntenset0();

	value &= ~AMCNTENSET0_Pn_MASK;
	value |= (px << AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;

	write_amcntenset0(value);
}

static inline __unused void write_amcntenset1_px(uint32_t px)
{
	uint32_t value = read_amcntenset1();

	value &= ~AMCNTENSET1_Pn_MASK;
	value |= (px << AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;

	write_amcntenset1(value);
}

static inline __unused void write_amcntenclr0_px(uint32_t px)
{
	uint32_t value = read_amcntenclr0();

	value &= ~AMCNTENCLR0_Pn_MASK;
	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;

	write_amcntenclr0(value);
}

static inline __unused void write_amcntenclr1_px(uint32_t px)
{
	uint32_t value = read_amcntenclr1();

	value &= ~AMCNTENCLR1_Pn_MASK;
	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;

	write_amcntenclr1(value);
}
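/*
 * The accessors above all follow the same pattern over the raw system
 * register helpers from <arch_helpers.h>: reads shift the field down and
 * mask it, writes do a read-modify-write that clears the field before
 * inserting the new value. For illustration only (FOO and BAR are
 * hypothetical names, not registers defined by this port), a read and a
 * write of a field expand to roughly:
 *
 *	uint32_t bar = (read_foo() >> FOO_BAR_SHIFT) & FOO_BAR_MASK;
 *
 *	write_foo((read_foo() & ~(FOO_BAR_MASK << FOO_BAR_SHIFT)) |
 *		((bar << FOO_BAR_SHIFT) & (FOO_BAR_MASK << FOO_BAR_SHIFT)));
 */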

static bool amu_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1P1;
}

#if AMU_GROUP1_NR_COUNTERS
static bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint32_t cnt_num = read_amcgcr_cg1nc();
	VERBOSE("%s%u. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%u is less than %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * Non-secure accesses from EL0 or EL1 to the Activity Monitor
		 * registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/* Enable group 0 counters */
	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}
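/*
 * amu_enable() is expected to be called once per core by the EL3 runtime
 * context management code, before the first exit to the non-secure world.
 * A minimal sketch of such a caller (the wrapper name below is hypothetical,
 * not part of this library; ENABLE_AMU is the build flag that pulls this
 * file in):
 *
 *	static void enable_extensions_nonsecure(bool el2_unused)
 *	{
 *	#if ENABLE_AMU
 *		amu_enable(el2_unused);
 *	#endif
 *	}
 */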

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}
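/*
 * These wrappers pair the lower-level accessors declared in
 * <lib/extensions/amu_private.h> with the sanity assertions above. As an
 * illustrative sketch only (not code from this file), a caller could measure
 * the activity accrued over a region of EL3 code with:
 *
 *	uint64_t before = amu_group0_cnt_read(0U);
 *	// ... work ...
 *	uint64_t delta = amu_group0_cnt_read(0U) - before;
 *
 * In practice they are only used by the suspend save/restore handlers below.
 */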

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}
#endif	/* AMU_GROUP1_NR_COUNTERS */

static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_px() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_px() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values ahead of this snapshot via the
	 * memory mapped view.
	 */
	write_amcntenclr0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_px(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_px() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_px() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

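/*
 * Hook the save/restore handlers into the EL3 runtime publish-subscribe
 * framework (<lib/el3_runtime/pubsub_events.h>). Conceptually, the PSCI
 * implementation brackets a power-down suspend with:
 *
 *	PUBLISH_EVENT(psci_suspend_pwrdown_start);   // before context is lost
 *	PUBLISH_EVENT(psci_suspend_pwrdown_finish);  // after the warm boot
 *
 * so the counters are captured before power-down and rewound on resume.
 */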
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);