xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision 33b9be6d758d4fcef1f5a9802a54bb56f2c4ff8d)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

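/*
 * Per-core AMU register state, saved by amu_context_save() before a core
 * powers down and restored by amu_context_restore() when it resumes (see the
 * pubsub subscriptions at the end of this file).
 */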
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

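/*
 * The helpers below read or read-modify-write a single named field of a
 * system or context register, so that callers deal only with the field value
 * rather than raw shifts and masks.
 */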
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

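/*
 * ID_AA64PFR0_EL1.AMU reports 0b0001 for FEAT_AMUv1 and 0b0010 for
 * FEAT_AMUv1p1, so a simple >= comparison detects each feature level.
 */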
static bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

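/*
 * Group 1 (auxiliary) counters are implemented only if AMCFGR_EL0.NCG
 * indicates more than one counter group.
 */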
#if AMU_GROUP1_NR_COUNTERS
static bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check and set presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint64_t cnt_num = read_amcgcr_el0_cg1nc();
	VERBOSE("%s%llu. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%llu is less than %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif	/* AMU_GROUP1_NR_COUNTERS */

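/*
 * Save the per-core AMU state. Invoked via the psci_suspend_pwrdown_start
 * pubsub event (see the subscription at the end of this file).
 */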
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0_px() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, do not sample counter values from the future via the
	 * memory-mapped view.
	 */
	write_amcntenclr0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* Not using a loop because count is fixed and index 1 DNE. */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}

	/* Save group 1 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		uint64_t amcg1idr = read_amcg1idr_el0_voff() &
			AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				ctx->group1_voffsets[i] =
					amu_group1_voffset_read(i);
			}
		}
	}
#endif
	return (void *)0;
}

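/*
 * Restore the per-core AMU state. Invoked via the psci_suspend_pwrdown_finish
 * pubsub event (see the subscription at the end of this file).
 */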
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0_px() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0_px() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* Not using a loop because count is fixed and index 1 DNE. */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		uint64_t amcg1idr = read_amcg1idr_el0_voff() &
			AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				amu_group1_voffset_write(i,
					ctx->group1_voffsets[i]);
			}
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

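/*
 * Save and restore the AMU context around the PSCI suspend power-down
 * sequence.
 */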
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);