xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision fcb7b26021c842c843db5d2df5aeb553785c0084)
/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/per_cpu/per_cpu.h>
#include <lib/utils_def.h>
#include <platform_def.h>
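
/*
 * Per-core storage for the AMU counter values saved around a power-down
 * suspend. PER_CPU_DEFINE/PER_CPU_CUR are taken here to provide one
 * amu_regs_t instance per core, addressed by the calling core.
 */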
PER_CPU_DEFINE(amu_regs_t, amu_ctx);
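
/*
 * AMCGCR_EL0.CG1NC holds the number of implemented auxiliary (group 1)
 * counters, from 0 up to 16.
 */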
static inline uint8_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}
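
/*
 * Enable AMU-related controls in the EL3 context of the world described by
 * `ctx`. With FEAT_AMUv1p1, virtual offset registers (AMEVCNTVOFF<n>_EL2)
 * exist at EL2 and their trapping to EL3 is governed by SCR_EL3.AMVOFFEN.
 */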
void amu_enable(cpu_context_t *ctx)
{
	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		el3_state_t *state = get_el3state_ctx(ctx);
		u_register_t reg;

		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to the virtual
		 * offset registers at EL2 do not trap to EL3.
		 */
		reg = read_ctx_reg(state, CTX_SCR_EL3);
		reg |= SCR_AMVOFFEN_BIT;
		write_ctx_reg(state, CTX_SCR_EL3, reg);
	}
}

void amu_enable_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	uint64_t cptr_el3 = per_world_ctx->ctx_cptr_el3;

	cptr_el3 &= ~TAM_BIT;
	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}
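
/*
 * Per-core EL3 initialization of the AMU, expected to run on each core
 * during boot before the counters are first used.
 */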
void amu_init_el3(unsigned int core_pos)
{
	/* The architecture fixes the number of group 0 counters at 4 */
	assert((read_amcgcr_el0() & AMCGCR_EL0_CG0NC_MASK) == CTX_AMU_GRP0_ALL);

	/* Enable all architected counters by default */
	write_amcntenset0_el0(AMCNTENSET0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		/* something went wrong if we're trying to write higher bits */
		assert((get_amu_aux_enables(core_pos) & ~AMCNTENSET1_EL0_Pn_MASK) == 0);
		write_amcntenset1_el0(get_amu_aux_enables(core_pos));
	}

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag; when set, system register reads at lower ELs return
		 * zero. Reads from the memory-mapped view are unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0(AMCR_CG1RZ_BIT);
#else
		/* AMCR_EL0.HDBG is left at zero in both cases */
		write_amcr_el0(0);
#endif
	}
}
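
/*
 * When no software runs at EL2, EL3 is expected to program the EL2 trap
 * controls itself so that lower ELs can still use the AMU.
 */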
void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TAM_BIT);

	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled */
		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
	}
}
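
/*
 * Pubsub handler run at the start of a power-down suspend; `arg` points to
 * the core position published by the PSCI layer.
 */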
static void *amu_context_save(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = PER_CPU_CUR(amu_ctx);

	/*
	 * Disable all counters so they stop counting and can be written back
	 * safely later.
	 */
	write_amcntenclr0_el0(AMCNTENCLR0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenclr1_el0(get_amu_aux_enables(core_pos));
	}
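
	/*
	 * Synchronize so that the counters have stopped before their values
	 * are read below.
	 */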
	isb();

	write_amu_grp0_ctx_reg(ctx, 0, read_amevcntr00_el0());
	write_amu_grp0_ctx_reg(ctx, 1, read_amevcntr01_el0());
	write_amu_grp0_ctx_reg(ctx, 2, read_amevcntr02_el0());
	write_amu_grp0_ctx_reg(ctx, 3, read_amevcntr03_el0());

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_el0_cg1nc();
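
		/*
		 * Deliberate fallthrough ladder: entry at `num_counters` saves
		 * the highest implemented group 1 counter, then falls through
		 * to each lower one in turn.
		 */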
		switch (num_counters) {
		case 0x10:
			write_amu_grp1_ctx_reg(ctx, 0xf, read_amevcntr1f_el0());
			__fallthrough;
		case 0x0f:
			write_amu_grp1_ctx_reg(ctx, 0xe, read_amevcntr1e_el0());
			__fallthrough;
		case 0x0e:
			write_amu_grp1_ctx_reg(ctx, 0xd, read_amevcntr1d_el0());
			__fallthrough;
		case 0x0d:
			write_amu_grp1_ctx_reg(ctx, 0xc, read_amevcntr1c_el0());
			__fallthrough;
		case 0x0c:
			write_amu_grp1_ctx_reg(ctx, 0xb, read_amevcntr1b_el0());
			__fallthrough;
		case 0x0b:
			write_amu_grp1_ctx_reg(ctx, 0xa, read_amevcntr1a_el0());
			__fallthrough;
		case 0x0a:
			write_amu_grp1_ctx_reg(ctx, 0x9, read_amevcntr19_el0());
			__fallthrough;
		case 0x09:
			write_amu_grp1_ctx_reg(ctx, 0x8, read_amevcntr18_el0());
			__fallthrough;
		case 0x08:
			write_amu_grp1_ctx_reg(ctx, 0x7, read_amevcntr17_el0());
			__fallthrough;
		case 0x07:
			write_amu_grp1_ctx_reg(ctx, 0x6, read_amevcntr16_el0());
			__fallthrough;
		case 0x06:
			write_amu_grp1_ctx_reg(ctx, 0x5, read_amevcntr15_el0());
			__fallthrough;
		case 0x05:
			write_amu_grp1_ctx_reg(ctx, 0x4, read_amevcntr14_el0());
			__fallthrough;
		case 0x04:
			write_amu_grp1_ctx_reg(ctx, 0x3, read_amevcntr13_el0());
			__fallthrough;
		case 0x03:
			write_amu_grp1_ctx_reg(ctx, 0x2, read_amevcntr12_el0());
			__fallthrough;
		case 0x02:
			write_amu_grp1_ctx_reg(ctx, 0x1, read_amevcntr11_el0());
			__fallthrough;
		case 0x01:
			write_amu_grp1_ctx_reg(ctx, 0x0, read_amevcntr10_el0());
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* something is wrong: CG1NC is at most 16 */
		}
	}

	return (void *)0;
}
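
/*
 * Pubsub handler run once a core has finished resuming from a power-down
 * suspend; the mirror image of amu_context_save().
 */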
static void *amu_context_restore(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = PER_CPU_CUR(amu_ctx);

	write_amevcntr00_el0(read_amu_grp0_ctx_reg(ctx, 0));
	write_amevcntr01_el0(read_amu_grp0_ctx_reg(ctx, 1));
	write_amevcntr02_el0(read_amu_grp0_ctx_reg(ctx, 2));
	write_amevcntr03_el0(read_amu_grp0_ctx_reg(ctx, 3));

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_el0_cg1nc();
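
		/*
		 * Deliberate fallthrough ladder, mirroring the save path:
		 * restore the highest implemented group 1 counter first, then
		 * each lower one in turn.
		 */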
		switch (num_counters) {
		case 0x10:
			write_amevcntr1f_el0(read_amu_grp1_ctx_reg(ctx, 0xf));
			__fallthrough;
		case 0x0f:
			write_amevcntr1e_el0(read_amu_grp1_ctx_reg(ctx, 0xe));
			__fallthrough;
		case 0x0e:
			write_amevcntr1d_el0(read_amu_grp1_ctx_reg(ctx, 0xd));
			__fallthrough;
		case 0x0d:
			write_amevcntr1c_el0(read_amu_grp1_ctx_reg(ctx, 0xc));
			__fallthrough;
		case 0x0c:
			write_amevcntr1b_el0(read_amu_grp1_ctx_reg(ctx, 0xb));
			__fallthrough;
		case 0x0b:
			write_amevcntr1a_el0(read_amu_grp1_ctx_reg(ctx, 0xa));
			__fallthrough;
		case 0x0a:
			write_amevcntr19_el0(read_amu_grp1_ctx_reg(ctx, 0x9));
			__fallthrough;
		case 0x09:
			write_amevcntr18_el0(read_amu_grp1_ctx_reg(ctx, 0x8));
			__fallthrough;
		case 0x08:
			write_amevcntr17_el0(read_amu_grp1_ctx_reg(ctx, 0x7));
			__fallthrough;
		case 0x07:
			write_amevcntr16_el0(read_amu_grp1_ctx_reg(ctx, 0x6));
			__fallthrough;
		case 0x06:
			write_amevcntr15_el0(read_amu_grp1_ctx_reg(ctx, 0x5));
			__fallthrough;
		case 0x05:
			write_amevcntr14_el0(read_amu_grp1_ctx_reg(ctx, 0x4));
			__fallthrough;
		case 0x04:
			write_amevcntr13_el0(read_amu_grp1_ctx_reg(ctx, 0x3));
			__fallthrough;
		case 0x03:
			write_amevcntr12_el0(read_amu_grp1_ctx_reg(ctx, 0x2));
			__fallthrough;
		case 0x02:
			write_amevcntr11_el0(read_amu_grp1_ctx_reg(ctx, 0x1));
			__fallthrough;
		case 0x01:
			write_amevcntr10_el0(read_amu_grp1_ctx_reg(ctx, 0x0));
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* something is wrong: CG1NC is at most 16 */
		}
	}

	/* Now that the values are back, re-enable the counters */
	write_amcntenset0_el0(AMCNTENSET0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenset1_el0(get_amu_aux_enables(core_pos));
	}

	isb();
	return (void *)0;
}
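
/*
 * Hook the save/restore handlers into the pubsub framework so that they run
 * around every power-down suspend.
 */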
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);