/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/per_cpu/per_cpu.h>

#include <plat/common/platform.h>

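/* Per-core buffer holding the saved AMU counter context across powerdown. */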
PER_CPU_DEFINE(amu_regs_t, amu_ctx);

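/*
 * AMCGCR.CG1NC reports the number of implemented group 1 (auxiliary)
 * counters, from 0 up to 16.
 */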
static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (el2_unused) {
		/*
		 * HCPTR.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr(read_hcptr() & ~TAM_BIT);
	}

	/* The number of group 0 counters is architecturally pinned to 4 */
	assert((read_amcgcr() & AMCGCR_CG0NC_MASK) == CTX_AMU_GRP0_ALL);
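	/*
	 * The four architected group 0 counters count processor cycles,
	 * constant-frequency cycles, instructions retired and memory stall
	 * cycles, in that order.
	 */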

	/* Enable all architected counters by default */
	write_amcntenset0(AMCNTENSET0_Pn_MASK);

	/* Bail out if FEAT_AMUv1p1 features are not present. */
	if (!is_feat_amuv1p1_supported()) {
		return;
	}

	if (is_feat_amu_aux_supported()) {
		unsigned int core_pos = plat_my_core_pos();

		/* Something went wrong if we're trying to write higher bits */
		assert((get_amu_aux_enables(core_pos) & ~AMCNTENSET1_Pn_MASK) == 0);
		write_amcntenset1(get_amu_aux_enables(core_pos));
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds the AMCR.CG1RZ field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
#else
	write_amcr(read_amcr() & ~AMCR_CG1RZ_BIT);
#endif
}

static void amu_disable_counters(unsigned int core_pos)
{
	/* Disable all counters so we can write to them safely later */
	write_amcntenclr0(AMCNTENCLR0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenclr1(get_amu_aux_enables(core_pos));
	}

	isb(); /* Ensure counters have been stopped */
}

static void *amu_context_save(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = PER_CPU_CUR(amu_ctx);

	/* Disable counters so the save captures a consistent snapshot of all of them */
	amu_disable_counters(core_pos);

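	/* Counters are 64 bits wide, hence the read64_/write64_ accessors. */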
	write_amu_grp0_ctx_reg(ctx, 0, read64_amevcntr00());
	write_amu_grp0_ctx_reg(ctx, 1, read64_amevcntr01());
	write_amu_grp0_ctx_reg(ctx, 2, read64_amevcntr02());
	write_amu_grp0_ctx_reg(ctx, 3, read64_amevcntr03());

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_cg1nc();

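		/*
		 * Save only the implemented counters: each case saves the
		 * highest remaining counter and deliberately falls through to
		 * the one below it, covering indices num_counters - 1 down
		 * to 0.
		 */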
		switch (num_counters) {
		case 0x10:
			write_amu_grp1_ctx_reg(ctx, 0xf, read64_amevcntr1f());
			__fallthrough;
		case 0x0f:
			write_amu_grp1_ctx_reg(ctx, 0xe, read64_amevcntr1e());
			__fallthrough;
		case 0x0e:
			write_amu_grp1_ctx_reg(ctx, 0xd, read64_amevcntr1d());
			__fallthrough;
		case 0x0d:
			write_amu_grp1_ctx_reg(ctx, 0xc, read64_amevcntr1c());
			__fallthrough;
		case 0x0c:
			write_amu_grp1_ctx_reg(ctx, 0xb, read64_amevcntr1b());
			__fallthrough;
		case 0x0b:
			write_amu_grp1_ctx_reg(ctx, 0xa, read64_amevcntr1a());
			__fallthrough;
		case 0x0a:
			write_amu_grp1_ctx_reg(ctx, 0x9, read64_amevcntr19());
			__fallthrough;
		case 0x09:
			write_amu_grp1_ctx_reg(ctx, 0x8, read64_amevcntr18());
			__fallthrough;
		case 0x08:
			write_amu_grp1_ctx_reg(ctx, 0x7, read64_amevcntr17());
			__fallthrough;
		case 0x07:
			write_amu_grp1_ctx_reg(ctx, 0x6, read64_amevcntr16());
			__fallthrough;
		case 0x06:
			write_amu_grp1_ctx_reg(ctx, 0x5, read64_amevcntr15());
			__fallthrough;
		case 0x05:
			write_amu_grp1_ctx_reg(ctx, 0x4, read64_amevcntr14());
			__fallthrough;
		case 0x04:
			write_amu_grp1_ctx_reg(ctx, 0x3, read64_amevcntr13());
			__fallthrough;
		case 0x03:
			write_amu_grp1_ctx_reg(ctx, 0x2, read64_amevcntr12());
			__fallthrough;
		case 0x02:
			write_amu_grp1_ctx_reg(ctx, 0x1, read64_amevcntr11());
			__fallthrough;
		case 0x01:
			write_amu_grp1_ctx_reg(ctx, 0x0, read64_amevcntr10());
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* CG1NC reported an out-of-range counter count */
		}
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = PER_CPU_CUR(amu_ctx);

	/*
	 * Counters must be disabled before they can be written safely. All
	 * counters start disabled after an AMU reset, but an AMU reset does
	 * not necessarily accompany a PE reset, so only disable them if any
	 * are still enabled.
	 */
	if (read_amcntenclr0() != 0) {
		amu_disable_counters(core_pos);
	}

	write64_amevcntr00(read_amu_grp0_ctx_reg(ctx, 0));
	write64_amevcntr01(read_amu_grp0_ctx_reg(ctx, 1));
	write64_amevcntr02(read_amu_grp0_ctx_reg(ctx, 2));
	write64_amevcntr03(read_amu_grp0_ctx_reg(ctx, 3));

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_cg1nc();

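		/*
		 * As on the save path, each case restores the highest
		 * remaining counter and deliberately falls through, covering
		 * indices num_counters - 1 down to 0.
		 */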
		switch (num_counters) {
		case 0x10:
			write64_amevcntr1f(read_amu_grp1_ctx_reg(ctx, 0xf));
			__fallthrough;
		case 0x0f:
			write64_amevcntr1e(read_amu_grp1_ctx_reg(ctx, 0xe));
			__fallthrough;
		case 0x0e:
			write64_amevcntr1d(read_amu_grp1_ctx_reg(ctx, 0xd));
			__fallthrough;
		case 0x0d:
			write64_amevcntr1c(read_amu_grp1_ctx_reg(ctx, 0xc));
			__fallthrough;
		case 0x0c:
			write64_amevcntr1b(read_amu_grp1_ctx_reg(ctx, 0xb));
			__fallthrough;
		case 0x0b:
			write64_amevcntr1a(read_amu_grp1_ctx_reg(ctx, 0xa));
			__fallthrough;
		case 0x0a:
			write64_amevcntr19(read_amu_grp1_ctx_reg(ctx, 0x9));
			__fallthrough;
		case 0x09:
			write64_amevcntr18(read_amu_grp1_ctx_reg(ctx, 0x8));
			__fallthrough;
		case 0x08:
			write64_amevcntr17(read_amu_grp1_ctx_reg(ctx, 0x7));
			__fallthrough;
		case 0x07:
			write64_amevcntr16(read_amu_grp1_ctx_reg(ctx, 0x6));
			__fallthrough;
		case 0x06:
			write64_amevcntr15(read_amu_grp1_ctx_reg(ctx, 0x5));
			__fallthrough;
		case 0x05:
			write64_amevcntr14(read_amu_grp1_ctx_reg(ctx, 0x4));
			__fallthrough;
		case 0x04:
			write64_amevcntr13(read_amu_grp1_ctx_reg(ctx, 0x3));
			__fallthrough;
		case 0x03:
			write64_amevcntr12(read_amu_grp1_ctx_reg(ctx, 0x2));
			__fallthrough;
		case 0x02:
			write64_amevcntr11(read_amu_grp1_ctx_reg(ctx, 0x1));
			__fallthrough;
		case 0x01:
			write64_amevcntr10(read_amu_grp1_ctx_reg(ctx, 0x0));
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* CG1NC reported an out-of-range counter count */
		}
	}

	/* Now that the values have been restored, enable the counters again */
	write_amcntenset0(AMCNTENSET0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenset1(get_amu_aux_enables(core_pos));
	}

	isb();
	return (void *)0;
}
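
/*
 * Save the counters when a core starts powering down for suspend and restore
 * them once it has finished powering back up.
 */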
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);