/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/per_cpu/per_cpu.h>
#include <lib/utils_def.h>
#include <platform_def.h>

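/*
 * Per-CPU storage for the AMU counter context, saved on entry to a power-down
 * suspend and restored when the core wakes up.
 */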
PER_CPU_DEFINE(amu_regs_t, amu_ctx);

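/*
 * Read AMCGCR_EL0.CG1NC, the number of implemented group 1 (auxiliary)
 * counters.
 */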
static inline uint8_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

void amu_enable(cpu_context_t *ctx)
{
	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		el3_state_t *state = get_el3state_ctx(ctx);
		u_register_t reg;

		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to virtual
		 * offset registers at EL2 do not trap to EL3.
		 */
		reg = read_ctx_reg(state, CTX_SCR_EL3);
		reg |= SCR_AMVOFFEN_BIT;
		write_ctx_reg(state, CTX_SCR_EL3, reg);
	}
}

void amu_enable_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	uint64_t cptr_el3 = per_world_ctx->ctx_cptr_el3;

	cptr_el3 &= ~TAM_BIT;
	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}

void amu_init_el3(unsigned int core_pos)
{
	/*
	 * Enable all architected counters by default. Currently, only the
	 * bottom 4 are defined and enabled. Higher bits are allocated but
	 * RAZ/WI in the latest (AMUv1p1) revision. Future revisions should
	 * consider whether the new counters need to be enabled and/or context
	 * switched.
	 */
	write_amcntenset0_el0(AMCNTENSET0_EL0_Pn_ALWAYS_ON);

	if (!is_feat_amuv1p1_supported()) {
		return;
	}

	if (is_feat_amu_aux_supported()) {
		/* Something is wrong if we are trying to write higher bits. */
		assert((get_amu_aux_enables(core_pos) & ~AMCNTENSET1_EL0_Pn_MASK) == 0);
		write_amcntenset1_el0(get_amu_aux_enables(core_pos));
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to
	 * group 1 counters at all but the highest implemented EL. This
	 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
	 * flag; when set, system register reads at lower ELs return
	 * zero. Reads from the memory-mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0(AMCR_CG1RZ_BIT);
#else
	/* HDBG = 0 in both cases */
	write_amcr_el0(0);
#endif
}

void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TAM_BIT);

	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled. */
		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
	}
}

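/*
 * Disable all architected (group 0) counters and, where FEAT_AMU_AUX is
 * present, the platform's enabled group 1 counters, so that they can be
 * read or written without changing underneath us.
 */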
static void amu_disable_counters(unsigned int core_pos)
{
	write_amcntenclr0_el0(AMCNTENCLR0_EL0_Pn_ALL);

	if (is_feat_amu_aux_supported()) {
		write_amcntenclr1_el0(get_amu_aux_enables(core_pos));
	}

	isb(); /* wait for the disable to take effect */
}

static void *amu_context_save(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = PER_CPU_CUR(amu_ctx);

	/* Disable counters so the save is a static snapshot of all counters. */
	amu_disable_counters(core_pos);

	write_amu_grp0_ctx_reg(ctx, 0, read_amevcntr00_el0());
	write_amu_grp0_ctx_reg(ctx, 1, read_amevcntr01_el0());
	/*
	 * Update the NS copy of counters 2 and 3 since that will be picked up
	 * by context management when the core wakes up.
	 */
	cm_sysregs_context_save_amu(NON_SECURE);

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_el0_cg1nc();

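		/*
		 * Save only the implemented group 1 counters, starting from
		 * the highest. Each case falls through, so a CG1NC value of N
		 * saves counters N-1 down to 0.
		 */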
		switch (num_counters) {
		case 0x10:
			write_amu_grp1_ctx_reg(ctx, 0xf, read_amevcntr1f_el0());
			__fallthrough;
		case 0x0f:
			write_amu_grp1_ctx_reg(ctx, 0xe, read_amevcntr1e_el0());
			__fallthrough;
		case 0x0e:
			write_amu_grp1_ctx_reg(ctx, 0xd, read_amevcntr1d_el0());
			__fallthrough;
		case 0x0d:
			write_amu_grp1_ctx_reg(ctx, 0xc, read_amevcntr1c_el0());
			__fallthrough;
		case 0x0c:
			write_amu_grp1_ctx_reg(ctx, 0xb, read_amevcntr1b_el0());
			__fallthrough;
		case 0x0b:
			write_amu_grp1_ctx_reg(ctx, 0xa, read_amevcntr1a_el0());
			__fallthrough;
		case 0x0a:
			write_amu_grp1_ctx_reg(ctx, 0x9, read_amevcntr19_el0());
			__fallthrough;
		case 0x09:
			write_amu_grp1_ctx_reg(ctx, 0x8, read_amevcntr18_el0());
			__fallthrough;
		case 0x08:
			write_amu_grp1_ctx_reg(ctx, 0x7, read_amevcntr17_el0());
			__fallthrough;
		case 0x07:
			write_amu_grp1_ctx_reg(ctx, 0x6, read_amevcntr16_el0());
			__fallthrough;
		case 0x06:
			write_amu_grp1_ctx_reg(ctx, 0x5, read_amevcntr15_el0());
			__fallthrough;
		case 0x05:
			write_amu_grp1_ctx_reg(ctx, 0x4, read_amevcntr14_el0());
			__fallthrough;
		case 0x04:
			write_amu_grp1_ctx_reg(ctx, 0x3, read_amevcntr13_el0());
			__fallthrough;
		case 0x03:
			write_amu_grp1_ctx_reg(ctx, 0x2, read_amevcntr12_el0());
			__fallthrough;
		case 0x02:
			write_amu_grp1_ctx_reg(ctx, 0x1, read_amevcntr11_el0());
			__fallthrough;
		case 0x01:
			write_amu_grp1_ctx_reg(ctx, 0x0, read_amevcntr10_el0());
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* something is wrong */
		}
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = PER_CPU_CUR(amu_ctx);

	/*
	 * Counters must be disabled to write them safely. All counters start
	 * disabled on an AMU reset, but an AMU reset doesn't have to happen
	 * with a PE reset, so don't bother disabling them if they already are.
	 */
	if (read_amcntenclr0_el0() != 0) {
		amu_disable_counters(core_pos);
	}

	/* Context management will pick up counters 2 and 3 later. */
	write_amevcntr00_el0(read_amu_grp0_ctx_reg(ctx, 0));
	write_amevcntr01_el0(read_amu_grp0_ctx_reg(ctx, 1));

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_el0_cg1nc();

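		/*
		 * Restore the implemented group 1 counters, mirroring the
		 * fall-through save above.
		 */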
		switch (num_counters) {
		case 0x10:
			write_amevcntr1f_el0(read_amu_grp1_ctx_reg(ctx, 0xf));
			__fallthrough;
		case 0x0f:
			write_amevcntr1e_el0(read_amu_grp1_ctx_reg(ctx, 0xe));
			__fallthrough;
		case 0x0e:
			write_amevcntr1d_el0(read_amu_grp1_ctx_reg(ctx, 0xd));
			__fallthrough;
		case 0x0d:
			write_amevcntr1c_el0(read_amu_grp1_ctx_reg(ctx, 0xc));
			__fallthrough;
		case 0x0c:
			write_amevcntr1b_el0(read_amu_grp1_ctx_reg(ctx, 0xb));
			__fallthrough;
		case 0x0b:
			write_amevcntr1a_el0(read_amu_grp1_ctx_reg(ctx, 0xa));
			__fallthrough;
		case 0x0a:
			write_amevcntr19_el0(read_amu_grp1_ctx_reg(ctx, 0x9));
			__fallthrough;
		case 0x09:
			write_amevcntr18_el0(read_amu_grp1_ctx_reg(ctx, 0x8));
			__fallthrough;
		case 0x08:
			write_amevcntr17_el0(read_amu_grp1_ctx_reg(ctx, 0x7));
			__fallthrough;
		case 0x07:
			write_amevcntr16_el0(read_amu_grp1_ctx_reg(ctx, 0x6));
			__fallthrough;
		case 0x06:
			write_amevcntr15_el0(read_amu_grp1_ctx_reg(ctx, 0x5));
			__fallthrough;
		case 0x05:
			write_amevcntr14_el0(read_amu_grp1_ctx_reg(ctx, 0x4));
			__fallthrough;
		case 0x04:
			write_amevcntr13_el0(read_amu_grp1_ctx_reg(ctx, 0x3));
			__fallthrough;
		case 0x03:
			write_amevcntr12_el0(read_amu_grp1_ctx_reg(ctx, 0x2));
			__fallthrough;
		case 0x02:
			write_amevcntr11_el0(read_amu_grp1_ctx_reg(ctx, 0x1));
			__fallthrough;
		case 0x01:
			write_amevcntr10_el0(read_amu_grp1_ctx_reg(ctx, 0x0));
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* something is wrong */
		}
	}

	/* Now enable them again. */
	write_amcntenset0_el0(AMCNTENSET0_EL0_Pn_ALWAYS_ON);
	if (is_feat_amu_aux_supported()) {
		write_amcntenset1_el0(get_amu_aux_enables(core_pos));
	}

	isb();
	return (void *)0;
}

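/*
 * Hook the context save/restore into the PSCI power-down flow: save on entry
 * to a power-down suspend, restore once the core has finished powering up.
 */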
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);