/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/utils_def.h>
#include <platform_def.h>

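/*
 * Per-core save area for the AMU counters, used across a suspend to power
 * down (see the pubsub subscriptions at the bottom of this file).
 */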
amu_regs_t amu_ctx[PLATFORM_CORE_COUNT];

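/* Read AMCGCR_EL0.CG1NC: the number of implemented group 1 counters. */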
static inline uint8_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

void amu_enable(cpu_context_t *ctx)
{
	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		el3_state_t *state = get_el3state_ctx(ctx);
		u_register_t reg;

		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to the virtual
		 * offset registers at EL2 do not trap to EL3.
		 */
		reg = read_ctx_reg(state, CTX_SCR_EL3);
		reg |= SCR_AMVOFFEN_BIT;
		write_ctx_reg(state, CTX_SCR_EL3, reg);
	}
}

void amu_enable_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	uint64_t cptr_el3 = per_world_ctx->ctx_cptr_el3;

	cptr_el3 &= ~TAM_BIT;
	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}

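/*
 * Per-core EL3 initialisation: enable the implemented counters and, where
 * FEAT_AMUv1p1 is present, apply the group 1 access restriction policy
 * selected at build time.
 */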
void amu_init_el3(unsigned int core_pos)
{
	/* The architecture currently pins the number of group 0 counters to 4. */
	assert((read_amcgcr_el0() & AMCGCR_EL0_CG0NC_MASK) == CTX_AMU_GRP0_ALL);

	/* Enable all architected counters by default. */
	write_amcntenset0_el0(AMCNTENSET0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		/* The enable mask must not select unimplemented counters. */
		assert((get_amu_aux_enables(core_pos) & ~AMCNTENSET1_EL0_Pn_MASK) == 0);
		write_amcntenset1_el0(get_amu_aux_enables(core_pos));
	}

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag: when it is set, system register reads at lower ELs
		 * return zero. Reads from the memory-mapped view are
		 * unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0(AMCR_CG1RZ_BIT);
#else
		/* AMCR_EL0.HDBG is zero in both cases. */
		write_amcr_el0(0);
#endif
	}
}

void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TAM_BIT);

	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled. */
		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
	}
}

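/*
 * Save the AMU counters ahead of a suspend to power down. The counters are
 * disabled first so that the values read below are stable.
 */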
static void *amu_context_save(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = &amu_ctx[core_pos];

	/* Disable all counters so that they can be written back safely later. */
	write_amcntenclr0_el0(AMCNTENCLR0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenclr1_el0(get_amu_aux_enables(core_pos));
	}

	/* Make sure the disables have taken effect before reading the counters. */
	isb();

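	/* Save the four architected (group 0) counters. */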
	write_amu_grp0_ctx_reg(ctx, 0, read_amevcntr00_el0());
	write_amu_grp0_ctx_reg(ctx, 1, read_amevcntr01_el0());
	write_amu_grp0_ctx_reg(ctx, 2, read_amevcntr02_el0());
	write_amu_grp0_ctx_reg(ctx, 3, read_amevcntr03_el0());

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_el0_cg1nc();

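		/*
		 * Save only the implemented group 1 counters: each case
		 * deliberately falls through to the one below it, so counting
		 * down from CG1NC covers every implemented counter.
		 */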
		switch (num_counters) {
		case 0x10:
			write_amu_grp1_ctx_reg(ctx, 0xf, read_amevcntr1f_el0());
			__fallthrough;
		case 0x0f:
			write_amu_grp1_ctx_reg(ctx, 0xe, read_amevcntr1e_el0());
			__fallthrough;
		case 0x0e:
			write_amu_grp1_ctx_reg(ctx, 0xd, read_amevcntr1d_el0());
			__fallthrough;
		case 0x0d:
			write_amu_grp1_ctx_reg(ctx, 0xc, read_amevcntr1c_el0());
			__fallthrough;
		case 0x0c:
			write_amu_grp1_ctx_reg(ctx, 0xb, read_amevcntr1b_el0());
			__fallthrough;
		case 0x0b:
			write_amu_grp1_ctx_reg(ctx, 0xa, read_amevcntr1a_el0());
			__fallthrough;
		case 0x0a:
			write_amu_grp1_ctx_reg(ctx, 0x9, read_amevcntr19_el0());
			__fallthrough;
		case 0x09:
			write_amu_grp1_ctx_reg(ctx, 0x8, read_amevcntr18_el0());
			__fallthrough;
		case 0x08:
			write_amu_grp1_ctx_reg(ctx, 0x7, read_amevcntr17_el0());
			__fallthrough;
		case 0x07:
			write_amu_grp1_ctx_reg(ctx, 0x6, read_amevcntr16_el0());
			__fallthrough;
		case 0x06:
			write_amu_grp1_ctx_reg(ctx, 0x5, read_amevcntr15_el0());
			__fallthrough;
		case 0x05:
			write_amu_grp1_ctx_reg(ctx, 0x4, read_amevcntr14_el0());
			__fallthrough;
		case 0x04:
			write_amu_grp1_ctx_reg(ctx, 0x3, read_amevcntr13_el0());
			__fallthrough;
		case 0x03:
			write_amu_grp1_ctx_reg(ctx, 0x2, read_amevcntr12_el0());
			__fallthrough;
		case 0x02:
			write_amu_grp1_ctx_reg(ctx, 0x1, read_amevcntr11_el0());
			__fallthrough;
		case 0x01:
			write_amu_grp1_ctx_reg(ctx, 0x0, read_amevcntr10_el0());
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* unreachable: at most 16 group 1 counters */
		}
	}

	return (void *)0;
}

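/*
 * Mirror image of amu_context_save(): restore the saved counter values on
 * wake-up from a power-down suspend, then re-enable the counters.
 */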
static void *amu_context_restore(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = &amu_ctx[core_pos];

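	/* Restore the four architected (group 0) counters. */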
	write_amevcntr00_el0(read_amu_grp0_ctx_reg(ctx, 0));
	write_amevcntr01_el0(read_amu_grp0_ctx_reg(ctx, 1));
	write_amevcntr02_el0(read_amu_grp0_ctx_reg(ctx, 2));
	write_amevcntr03_el0(read_amu_grp0_ctx_reg(ctx, 3));

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_el0_cg1nc();

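		/*
		 * Restore only the implemented group 1 counters; the cases
		 * fall through exactly as in amu_context_save().
		 */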
		switch (num_counters) {
		case 0x10:
			write_amevcntr1f_el0(read_amu_grp1_ctx_reg(ctx, 0xf));
			__fallthrough;
		case 0x0f:
			write_amevcntr1e_el0(read_amu_grp1_ctx_reg(ctx, 0xe));
			__fallthrough;
		case 0x0e:
			write_amevcntr1d_el0(read_amu_grp1_ctx_reg(ctx, 0xd));
			__fallthrough;
		case 0x0d:
			write_amevcntr1c_el0(read_amu_grp1_ctx_reg(ctx, 0xc));
			__fallthrough;
		case 0x0c:
			write_amevcntr1b_el0(read_amu_grp1_ctx_reg(ctx, 0xb));
			__fallthrough;
		case 0x0b:
			write_amevcntr1a_el0(read_amu_grp1_ctx_reg(ctx, 0xa));
			__fallthrough;
		case 0x0a:
			write_amevcntr19_el0(read_amu_grp1_ctx_reg(ctx, 0x9));
			__fallthrough;
		case 0x09:
			write_amevcntr18_el0(read_amu_grp1_ctx_reg(ctx, 0x8));
			__fallthrough;
		case 0x08:
			write_amevcntr17_el0(read_amu_grp1_ctx_reg(ctx, 0x7));
			__fallthrough;
		case 0x07:
			write_amevcntr16_el0(read_amu_grp1_ctx_reg(ctx, 0x6));
			__fallthrough;
		case 0x06:
			write_amevcntr15_el0(read_amu_grp1_ctx_reg(ctx, 0x5));
			__fallthrough;
		case 0x05:
			write_amevcntr14_el0(read_amu_grp1_ctx_reg(ctx, 0x4));
			__fallthrough;
		case 0x04:
			write_amevcntr13_el0(read_amu_grp1_ctx_reg(ctx, 0x3));
			__fallthrough;
		case 0x03:
			write_amevcntr12_el0(read_amu_grp1_ctx_reg(ctx, 0x2));
			__fallthrough;
		case 0x02:
			write_amevcntr11_el0(read_amu_grp1_ctx_reg(ctx, 0x1));
			__fallthrough;
		case 0x01:
			write_amevcntr10_el0(read_amu_grp1_ctx_reg(ctx, 0x0));
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* unreachable: at most 16 group 1 counters */
		}
	}

	/* With the values restored, re-enable the counters. */
	write_amcntenset0_el0(AMCNTENSET0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenset1_el0(get_amu_aux_enables(core_pos));
	}

	isb();
	return (void *)0;
}

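/* Hook the save/restore handlers into the PSCI power-down suspend sequence. */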
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);