/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

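/*
 * Per-core storage for the AMU counter context, saved on suspend-to-powerdown
 * and restored on resume by the handlers below.
 */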
static amu_regs_t amu_ctx[PLATFORM_CORE_COUNT];

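/*
 * Read the number of implemented group 1 (auxiliary) counters from the
 * AMCGCR.CG1NC field.
 */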
static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (el2_unused) {
		/*
		 * HCPTR.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr(read_hcptr() & ~TAM_BIT);
	}

	/* The architecture currently pins the number of group 0 counters to 4 */
	assert((read_amcgcr() & AMCGCR_CG0NC_MASK) == CTX_AMU_GRP0_ALL);

	/* Enable all architected counters by default */
	write_amcntenset0(AMCNTENSET0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		unsigned int core_pos = plat_my_core_pos();

		/* Something went wrong if we're trying to write higher bits */
		assert((get_amu_aux_enables(core_pos) & ~AMCNTENSET1_Pn_MASK) == 0);
		write_amcntenset1(get_amu_aux_enables(core_pos));
	}

	/* Bail out if FEAT_AMUv1p1 features are not present. */
	if (!is_feat_amuv1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * by the AMU_RESTRICT_COUNTERS compile-time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
#else
	write_amcr(0);
#endif
}

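/*
 * Save the AMU context on entry to a suspend-to-powerdown. The counters are
 * disabled first so that a consistent snapshot can be taken into this core's
 * amu_ctx entry.
 */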
static void *amu_context_save(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = &amu_ctx[core_pos];

	/* Disable all counters so we can write to them safely later */
	write_amcntenclr0(AMCNTENCLR0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenclr1(get_amu_aux_enables(core_pos));
	}

	isb(); /* Ensure counters have been stopped */

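	/* Snapshot the four architected (group 0) counters */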
	write_amu_grp0_ctx_reg(ctx, 0, read64_amevcntr00());
	write_amu_grp0_ctx_reg(ctx, 1, read64_amevcntr01());
	write_amu_grp0_ctx_reg(ctx, 2, read64_amevcntr02());
	write_amu_grp0_ctx_reg(ctx, 3, read64_amevcntr03());

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_cg1nc();

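		/*
		 * Save the implemented group 1 counters, starting from the
		 * highest-numbered one; each case falls through so that every
		 * lower-numbered counter is saved as well.
		 */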
		switch (num_counters) {
		case 0x10:
			write_amu_grp1_ctx_reg(ctx, 0xf, read64_amevcntr1f());
			__fallthrough;
		case 0x0f:
			write_amu_grp1_ctx_reg(ctx, 0xe, read64_amevcntr1e());
			__fallthrough;
		case 0x0e:
			write_amu_grp1_ctx_reg(ctx, 0xd, read64_amevcntr1d());
			__fallthrough;
		case 0x0d:
			write_amu_grp1_ctx_reg(ctx, 0xc, read64_amevcntr1c());
			__fallthrough;
		case 0x0c:
			write_amu_grp1_ctx_reg(ctx, 0xb, read64_amevcntr1b());
			__fallthrough;
		case 0x0b:
			write_amu_grp1_ctx_reg(ctx, 0xa, read64_amevcntr1a());
			__fallthrough;
		case 0x0a:
			write_amu_grp1_ctx_reg(ctx, 0x9, read64_amevcntr19());
			__fallthrough;
		case 0x09:
			write_amu_grp1_ctx_reg(ctx, 0x8, read64_amevcntr18());
			__fallthrough;
		case 0x08:
			write_amu_grp1_ctx_reg(ctx, 0x7, read64_amevcntr17());
			__fallthrough;
		case 0x07:
			write_amu_grp1_ctx_reg(ctx, 0x6, read64_amevcntr16());
			__fallthrough;
		case 0x06:
			write_amu_grp1_ctx_reg(ctx, 0x5, read64_amevcntr15());
			__fallthrough;
		case 0x05:
			write_amu_grp1_ctx_reg(ctx, 0x4, read64_amevcntr14());
			__fallthrough;
		case 0x04:
			write_amu_grp1_ctx_reg(ctx, 0x3, read64_amevcntr13());
			__fallthrough;
		case 0x03:
			write_amu_grp1_ctx_reg(ctx, 0x2, read64_amevcntr12());
			__fallthrough;
		case 0x02:
			write_amu_grp1_ctx_reg(ctx, 0x1, read64_amevcntr11());
			__fallthrough;
		case 0x01:
			write_amu_grp1_ctx_reg(ctx, 0x0, read64_amevcntr10());
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* CG1NC reported more counters than the architecture allows */
		}
	}

	return (void *)0;
}

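/*
 * Restore the AMU context after waking from a powerdown suspend. The counter
 * values are written back while the counters are still disabled, and the
 * enables are reinstated only once all values are in place.
 */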
static void *amu_context_restore(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = &amu_ctx[core_pos];

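	/* Write back the four architected (group 0) counters */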
	write64_amevcntr00(read_amu_grp0_ctx_reg(ctx, 0));
	write64_amevcntr01(read_amu_grp0_ctx_reg(ctx, 1));
	write64_amevcntr02(read_amu_grp0_ctx_reg(ctx, 2));
	write64_amevcntr03(read_amu_grp0_ctx_reg(ctx, 3));

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_cg1nc();

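		/*
		 * Mirror of the save path: restore the implemented group 1
		 * counters from the highest-numbered one downwards, relying on
		 * the same fall-through pattern.
		 */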
		switch (num_counters) {
		case 0x10:
			write64_amevcntr1f(read_amu_grp1_ctx_reg(ctx, 0xf));
			__fallthrough;
		case 0x0f:
			write64_amevcntr1e(read_amu_grp1_ctx_reg(ctx, 0xe));
			__fallthrough;
		case 0x0e:
			write64_amevcntr1d(read_amu_grp1_ctx_reg(ctx, 0xd));
			__fallthrough;
		case 0x0d:
			write64_amevcntr1c(read_amu_grp1_ctx_reg(ctx, 0xc));
			__fallthrough;
		case 0x0c:
			write64_amevcntr1b(read_amu_grp1_ctx_reg(ctx, 0xb));
			__fallthrough;
		case 0x0b:
			write64_amevcntr1a(read_amu_grp1_ctx_reg(ctx, 0xa));
			__fallthrough;
		case 0x0a:
			write64_amevcntr19(read_amu_grp1_ctx_reg(ctx, 0x9));
			__fallthrough;
		case 0x09:
			write64_amevcntr18(read_amu_grp1_ctx_reg(ctx, 0x8));
			__fallthrough;
		case 0x08:
			write64_amevcntr17(read_amu_grp1_ctx_reg(ctx, 0x7));
			__fallthrough;
		case 0x07:
			write64_amevcntr16(read_amu_grp1_ctx_reg(ctx, 0x6));
			__fallthrough;
		case 0x06:
			write64_amevcntr15(read_amu_grp1_ctx_reg(ctx, 0x5));
			__fallthrough;
		case 0x05:
			write64_amevcntr14(read_amu_grp1_ctx_reg(ctx, 0x4));
			__fallthrough;
		case 0x04:
			write64_amevcntr13(read_amu_grp1_ctx_reg(ctx, 0x3));
			__fallthrough;
		case 0x03:
			write64_amevcntr12(read_amu_grp1_ctx_reg(ctx, 0x2));
			__fallthrough;
		case 0x02:
			write64_amevcntr11(read_amu_grp1_ctx_reg(ctx, 0x1));
			__fallthrough;
		case 0x01:
			write64_amevcntr10(read_amu_grp1_ctx_reg(ctx, 0x0));
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* CG1NC reported more counters than the architecture allows */
		}
	}

	/* Now enable the counters again */
	write_amcntenset0(AMCNTENSET0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenset1(get_amu_aux_enables(core_pos));
	}

	isb(); /* Ensure counters are enabled before returning */
	return (void *)0;
}

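/*
 * Subscribe the handlers to the PSCI suspend-to-powerdown events so that the
 * AMU context survives a core power cycle.
 */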
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);