xref: /rk3399_ARM-atf/lib/extensions/amu/aarch32/amu.c (revision bc3dac6c24f1941d21bbc6169fdf2dac6ad72f30)
/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

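/* Per-core storage for the AMU counter context saved across power down */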
amu_regs_t amu_ctx[PLATFORM_CORE_COUNT];

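/* Read the number of group 1 (auxiliary) counters from AMCGCR.CG1NC */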
static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (el2_unused) {
		/*
		 * HCPTR.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr(read_hcptr() & ~TAM_BIT);
	}

	/* The number of group 0 counters is architecturally pinned to 4 */
	assert((read_amcgcr() & AMCGCR_CG0NC_MASK) == CTX_AMU_GRP0_ALL);

	/* Enable all architected counters by default */
	write_amcntenset0(AMCNTENSET0_Pn_MASK);

	/* Bail out if FEAT_AMUv1p1 features are not present. */
	if (!is_feat_amuv1p1_supported()) {
		return;
	}

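	/*
	 * Auxiliary (group 1) counters are optional; enable the
	 * platform-defined set for this core if they are implemented.
	 */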
	if (is_feat_amu_aux_supported()) {
		unsigned int core_pos = plat_my_core_pos();

		/* Something went wrong if we're trying to write higher bits */
		assert((get_amu_aux_enables(core_pos) & ~AMCNTENSET1_Pn_MASK) == 0);
		write_amcntenset1(get_amu_aux_enables(core_pos));
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * by the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the
	 * memory-mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr(read_amcr() | 1U);
#else
	write_amcr(0);
#endif
}

static void *amu_context_save(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = &amu_ctx[core_pos];

	/* Disable all counters so we can write to them safely later */
	write_amcntenclr0(AMCNTENCLR0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenclr1(get_amu_aux_enables(core_pos));
	}

	isb(); /* Ensure counters have been stopped */

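	/* Save the four architected (group 0) counters */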
	write_amu_grp0_ctx_reg(ctx, 0, read64_amevcntr00());
	write_amu_grp0_ctx_reg(ctx, 1, read64_amevcntr01());
	write_amu_grp0_ctx_reg(ctx, 2, read64_amevcntr02());
	write_amu_grp0_ctx_reg(ctx, 3, read64_amevcntr03());

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_cg1nc();

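		/*
		 * Each case falls through so that, starting from the number
		 * of implemented counters, every group 1 counter from index
		 * (CG1NC - 1) down to 0 is saved.
		 */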
		switch (num_counters) {
		case 0x10:
			write_amu_grp1_ctx_reg(ctx, 0xf, read64_amevcntr1f());
			__fallthrough;
		case 0x0f:
			write_amu_grp1_ctx_reg(ctx, 0xe, read64_amevcntr1e());
			__fallthrough;
		case 0x0e:
			write_amu_grp1_ctx_reg(ctx, 0xd, read64_amevcntr1d());
			__fallthrough;
		case 0x0d:
			write_amu_grp1_ctx_reg(ctx, 0xc, read64_amevcntr1c());
			__fallthrough;
		case 0x0c:
			write_amu_grp1_ctx_reg(ctx, 0xb, read64_amevcntr1b());
			__fallthrough;
		case 0x0b:
			write_amu_grp1_ctx_reg(ctx, 0xa, read64_amevcntr1a());
			__fallthrough;
		case 0x0a:
			write_amu_grp1_ctx_reg(ctx, 0x9, read64_amevcntr19());
			__fallthrough;
		case 0x09:
			write_amu_grp1_ctx_reg(ctx, 0x8, read64_amevcntr18());
			__fallthrough;
		case 0x08:
			write_amu_grp1_ctx_reg(ctx, 0x7, read64_amevcntr17());
			__fallthrough;
		case 0x07:
			write_amu_grp1_ctx_reg(ctx, 0x6, read64_amevcntr16());
			__fallthrough;
		case 0x06:
			write_amu_grp1_ctx_reg(ctx, 0x5, read64_amevcntr15());
			__fallthrough;
		case 0x05:
			write_amu_grp1_ctx_reg(ctx, 0x4, read64_amevcntr14());
			__fallthrough;
		case 0x04:
			write_amu_grp1_ctx_reg(ctx, 0x3, read64_amevcntr13());
			__fallthrough;
		case 0x03:
			write_amu_grp1_ctx_reg(ctx, 0x2, read64_amevcntr12());
			__fallthrough;
		case 0x02:
			write_amu_grp1_ctx_reg(ctx, 0x1, read64_amevcntr11());
			__fallthrough;
		case 0x01:
			write_amu_grp1_ctx_reg(ctx, 0x0, read64_amevcntr10());
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* something is wrong */
		}
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = &amu_ctx[core_pos];

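	/* Restore the four architected (group 0) counters */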
	write64_amevcntr00(read_amu_grp0_ctx_reg(ctx, 0));
	write64_amevcntr01(read_amu_grp0_ctx_reg(ctx, 1));
	write64_amevcntr02(read_amu_grp0_ctx_reg(ctx, 2));
	write64_amevcntr03(read_amu_grp0_ctx_reg(ctx, 3));

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_cg1nc();

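		/*
		 * Mirror image of the save path: fall through from the
		 * number of implemented counters so that group 1 counters
		 * (CG1NC - 1) down to 0 are restored.
		 */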
		switch (num_counters) {
		case 0x10:
			write64_amevcntr1f(read_amu_grp1_ctx_reg(ctx, 0xf));
			__fallthrough;
		case 0x0f:
			write64_amevcntr1e(read_amu_grp1_ctx_reg(ctx, 0xe));
			__fallthrough;
		case 0x0e:
			write64_amevcntr1d(read_amu_grp1_ctx_reg(ctx, 0xd));
			__fallthrough;
		case 0x0d:
			write64_amevcntr1c(read_amu_grp1_ctx_reg(ctx, 0xc));
			__fallthrough;
		case 0x0c:
			write64_amevcntr1b(read_amu_grp1_ctx_reg(ctx, 0xb));
			__fallthrough;
		case 0x0b:
			write64_amevcntr1a(read_amu_grp1_ctx_reg(ctx, 0xa));
			__fallthrough;
		case 0x0a:
			write64_amevcntr19(read_amu_grp1_ctx_reg(ctx, 0x9));
			__fallthrough;
		case 0x09:
			write64_amevcntr18(read_amu_grp1_ctx_reg(ctx, 0x8));
			__fallthrough;
		case 0x08:
			write64_amevcntr17(read_amu_grp1_ctx_reg(ctx, 0x7));
			__fallthrough;
		case 0x07:
			write64_amevcntr16(read_amu_grp1_ctx_reg(ctx, 0x6));
			__fallthrough;
		case 0x06:
			write64_amevcntr15(read_amu_grp1_ctx_reg(ctx, 0x5));
			__fallthrough;
		case 0x05:
			write64_amevcntr14(read_amu_grp1_ctx_reg(ctx, 0x4));
			__fallthrough;
		case 0x04:
			write64_amevcntr13(read_amu_grp1_ctx_reg(ctx, 0x3));
			__fallthrough;
		case 0x03:
			write64_amevcntr12(read_amu_grp1_ctx_reg(ctx, 0x2));
			__fallthrough;
		case 0x02:
			write64_amevcntr11(read_amu_grp1_ctx_reg(ctx, 0x1));
			__fallthrough;
		case 0x01:
			write64_amevcntr10(read_amu_grp1_ctx_reg(ctx, 0x0));
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* something is wrong */
		}
	}

	/* Now enable the counters again */
	write_amcntenset0(AMCNTENSET0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenset1(get_amu_aux_enables(core_pos));
	}

	isb();
	return (void *)0;
}

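/*
 * Save the AMU context when a core starts entry into a power-down suspend
 * state, and restore it on the way back up, via the EL3 pubsub framework.
 */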
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);