xref: /rk3399_ARM-atf/lib/extensions/amu/aarch32/amu.c (revision b3385aa08ec68d2c80f9b6c35f0cc4102fa14d36)
1 /*
2  * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <stdbool.h>
9 
10 #include <arch.h>
11 #include <arch_helpers.h>
12 
13 #include <lib/el3_runtime/pubsub_events.h>
14 #include <lib/extensions/amu.h>
15 #include <lib/extensions/amu_private.h>
16 
17 #include <plat/common/platform.h>
18 
/* Per-core AMU counter context, saved/restored across CPU powerdown suspend. */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
20 
21 /* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
22 bool amu_supported(void)
23 {
24 	uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
25 
26 	features &= ID_PFR0_AMU_MASK;
27 	return ((features == 1U) || (features == 2U));
28 }
29 
#if AMU_GROUP1_NR_COUNTERS
/*
 * Report whether auxiliary (group 1) counters are implemented: the
 * AMCFGR.NCG field must indicate exactly one additional counter group.
 */
bool amu_group1_supported(void)
{
	uint32_t num_groups = (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
				AMCFGR_NCG_MASK;

	return num_groups == 1U;
}
#endif
39 
40 /*
41  * Enable counters. This function is meant to be invoked
42  * by the context management library before exiting from EL3.
43  */
44 void amu_enable(bool el2_unused)
45 {
46 	if (!amu_supported()) {
47 		INFO("AMU is not implemented\n");
48 		return;
49 	}
50 
51 #if AMU_GROUP1_NR_COUNTERS
52 	/* Check and set presence of group 1 counters */
53 	if (!amu_group1_supported()) {
54 		ERROR("AMU Counter Group 1 is not implemented\n");
55 		panic();
56 	}
57 
58 	/* Check number of group 1 counters */
59 	uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
60 				AMCGCR_CG1NC_MASK;
61 	VERBOSE("%s%u. %s%u\n",
62 		"Number of AMU Group 1 Counters ", cnt_num,
63 		"Requested number ", AMU_GROUP1_NR_COUNTERS);
64 
65 	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
66 		ERROR("%s%u is less than %s%u\n",
67 		"Number of AMU Group 1 Counters ", cnt_num,
68 		"Requested number ", AMU_GROUP1_NR_COUNTERS);
69 		panic();
70 	}
71 #endif
72 
73 	if (el2_unused) {
74 		uint64_t v;
75 		/*
76 		 * Non-secure access from EL0 or EL1 to the Activity Monitor
77 		 * registers do not trap to EL2.
78 		 */
79 		v = read_hcptr();
80 		v &= ~TAM_BIT;
81 		write_hcptr(v);
82 	}
83 
84 	/* Enable group 0 counters */
85 	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
86 
87 #if AMU_GROUP1_NR_COUNTERS
88 	/* Enable group 1 counters */
89 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
90 #endif
91 }
92 
93 /* Read the group 0 counter identified by the given `idx`. */
94 uint64_t amu_group0_cnt_read(unsigned int idx)
95 {
96 	assert(amu_supported());
97 	assert(idx < AMU_GROUP0_NR_COUNTERS);
98 
99 	return amu_group0_cnt_read_internal(idx);
100 }
101 
102 /* Write the group 0 counter identified by the given `idx` with `val` */
103 void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
104 {
105 	assert(amu_supported());
106 	assert(idx < AMU_GROUP0_NR_COUNTERS);
107 
108 	amu_group0_cnt_write_internal(idx, val);
109 	isb();
110 }
111 
112 #if AMU_GROUP1_NR_COUNTERS
113 /* Read the group 1 counter identified by the given `idx` */
114 uint64_t amu_group1_cnt_read(unsigned  int idx)
115 {
116 	assert(amu_supported());
117 	assert(amu_group1_supported());
118 	assert(idx < AMU_GROUP1_NR_COUNTERS);
119 
120 	return amu_group1_cnt_read_internal(idx);
121 }
122 
123 /* Write the group 1 counter identified by the given `idx` with `val` */
124 void amu_group1_cnt_write(unsigned  int idx, uint64_t val)
125 {
126 	assert(amu_supported());
127 	assert(amu_group1_supported());
128 	assert(idx < AMU_GROUP1_NR_COUNTERS);
129 
130 	amu_group1_cnt_write_internal(idx, val);
131 	isb();
132 }
133 
134 /*
135  * Program the event type register for the given `idx` with
136  * the event number `val`
137  */
138 void amu_group1_set_evtype(unsigned int idx, unsigned int val)
139 {
140 	assert(amu_supported());
141 	assert(amu_group1_supported());
142 	assert(idx < AMU_GROUP1_NR_COUNTERS);
143 
144 	amu_group1_set_evtype_internal(idx, val);
145 	isb();
146 }
147 #endif	/* AMU_GROUP1_NR_COUNTERS */
148 
/*
 * PSCI pubsub hook run on the powering-down core at the start of a
 * suspend-to-powerdown sequence. Disables the AMU counters and snapshots
 * their values into this core's slot in amu_ctxs[].
 *
 * Returns (void *)0 on success, or (void *)-1 if the AMU (or required
 * group 1 support) is not implemented. `arg` is unused.
 */
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/*
	 * Assert that group 0/1 counter configuration is what we expect.
	 * NOTE(review): these read accessors carry an AArch64-style `_el0`
	 * suffix while the writes below use the suffix-less
	 * write_amcntenclr0/1() — confirm read_amcntenset0_el0()/
	 * read_amcntenset1_el0() are actually defined for AArch32.
	 */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
#endif
	/* Ensure the disables take effect before the counters are sampled */
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters (only those selected by the build-time mask) */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}
195 
/*
 * PSCI pubsub hook run on a core as it finishes resuming from a
 * suspend-to-powerdown sequence. Restores the counter values saved by
 * amu_context_save() and re-enables the counters.
 *
 * Returns (void *)0 on success, or (void *)-1 if the AMU (or required
 * group 1 support) is not implemented. `arg` is unused.
 */
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/*
	 * Counters were disabled in `amu_context_save()`.
	 * NOTE(review): as in amu_context_save(), these `_el0`-suffixed read
	 * accessors look AArch64-style — confirm they exist for AArch32.
	 */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters (only those selected by the build-time mask) */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}
239 
/* Save AMU state when a core enters powerdown suspend; restore it on wake-up. */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
242