xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision b57e16a4f96b6cfa4da9e3b2cc6d6d4533da1950)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

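/*
 * Per-core AMU context. It is saved by amu_context_save() on the PSCI suspend
 * power-down path and restored by amu_context_restore() on resume, and is
 * indexed by plat_my_core_pos().
 */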
static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

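/*
 * Each helper below reads or writes a single named field of a system
 * register, following the usual read-modify-write pattern with the field's
 * _SHIFT/_MASK (or _BIT) definitions. For example, write_amcr_el0_cg1rz(1U)
 * sets AMCR_EL0.CG1RZ without disturbing the other fields. The helpers are
 * marked __unused because, depending on the build options selected, not all
 * of them are referenced.
 */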
static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	uint64_t amcfgr_el0_ncg;		/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;		/* Number of group 0 counters */

	uint64_t amcntenset0_el0_px = 0x0;	/* Group 0 enable mask */
	uint64_t amcntenset1_el0_px = 0x0;	/* Group 1 enable mask */

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	ctx_write_cptr_el3_tam(ctx, 0U);

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;

	assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		amcntenset1_el0_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */
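
	/*
	 * As an illustration only (a sketch, not part of this file), a
	 * platform built without ENABLE_AMU_FCONF could supply the hook used
	 * above along the following lines, enabling auxiliary counter 0 on
	 * every core. The `cores[...].enable` field follows its usage above;
	 * the array bound and the exact hook signature are assumptions here.
	 *
	 *	static const struct amu_topology topology_ = {
	 *		.cores = {
	 *			[0] = { .enable = 0x1 },
	 *			// ... one entry per core, up to PLATFORM_CORE_COUNT
	 *		},
	 *	};
	 *
	 *	const struct amu_topology *plat_amu_topology(void)
	 *	{
	 *		return &topology_;
	 *	}
	 */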

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_el0_px(amcntenset0_el0_px);

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(amcntenset1_el0_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		if (el2_unused) {
			/*
			 * Make sure virtual offsets are disabled if EL2 is
			 * not used.
			 */
			write_hcr_el2_amvoffen(0U);
		} else {
			/*
			 * Virtual offset registers are only accessible from
			 * EL3 and EL2. When SCR_EL3.AMVOFFEN is clear, EL2
			 * accesses to them are trapped to EL3, so set the bit
			 * to 1 when EL2 is present.
			 */
			ctx_write_scr_el3_amvoffen(ctx, 1U);
		}

#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag; when set, system register reads at lower ELs return
		 * zero. Reads from the memory-mapped view are unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}

#if ENABLE_MPMM
	mpmm_enable();
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

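/*
 * Save the AMU context for the calling core: disable the counters, record
 * which ones were enabled, then capture the counter values and (where
 * implemented) their virtual offsets into the per-core context.
 */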
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

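/*
 * Restore the AMU context for the calling core: write back the saved counter
 * values and virtual offsets, then re-enable exactly those counters that were
 * enabled when the context was saved.
 */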
static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */

	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_el0_px() == 0U);

	if (amcfgr_el0_ncg > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}

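/*
 * Hook the context save and restore handlers into the PSCI suspend power-down
 * path via the EL3 runtime publish-subscribe framework.
 */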
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);