xref: /rk3399_ARM-atf/lib/extensions/amu/aarch64/amu.c (revision b6eb39327c5b009bc0bbecdb6867d8bf7c05f2fd)
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <amu.h>
#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <pubsub_events.h>

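/*
 * Group 0 counters are architecturally fixed in AMUv1: processor cycles,
 * constant-frequency cycles, instructions retired and memory stall cycles.
 */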
#define AMU_GROUP0_NR_COUNTERS	4

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};

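/* Per-core counter context, saved and restored across power-down suspend. */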
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

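/*
 * Report whether the Activity Monitors Unit is present, based on
 * ID_AA64PFR0_EL1.AMU (a value of 1 indicates AMUv1).
 */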
int amu_supported(void)
{
	uint64_t features;

	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
	return (features & ID_AA64PFR0_AMU_MASK) == 1;
}

/*
 * Enable counters.  This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(int el2_unused)
{
	uint64_t v;

	if (!amu_supported()) {
		WARN("Cannot enable AMU - not supported\n");
		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		v = read_cptr_el2();
		v &= ~CPTR_EL2_TAM_BIT;
		write_cptr_el2(v);
	}

	/*
	 * CPTR_EL3.TAM: Set to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	v = read_cptr_el3();
	v &= ~TAM_BIT;
	write_cptr_el3(v);

	/* Enable group 0 counters */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
	/* Enable group 1 counters */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
}
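
/*
 * Illustrative usage sketch (not part of this file): the registers written
 * above are banked per core, so the context management code is expected to
 * call this on each CPU before ERET'ing to a lower EL, e.g.:
 *
 *	amu_enable(el2_unused);
 */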

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}
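
/*
 * Example (illustrative only): count instructions retired over a region of
 * interest using the architected group 0 counter at index 2:
 *
 *	uint64_t start = amu_group0_cnt_read(2);
 *	... region of interest ...
 *	uint64_t retired = amu_group0_cnt_read(2) - start;
 */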

/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Program the group 1 event type register for the counter at the given
 * `idx` with the event number `val`.
 */
void amu_group1_set_evtype(int idx, unsigned int val)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}

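/*
 * Example (illustrative only; group 1 event numbers are IMPLEMENTATION
 * DEFINED, so the 0x11 below is a placeholder):
 *
 *	amu_group1_set_evtype(0, 0x11);
 *	uint64_t v = amu_group1_cnt_read(0);
 */

/*
 * Pub-sub handler: save the AMU context of the calling core. Returns 0 on
 * success and (void *)-1 if the AMU is not supported.
 */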
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	int i;

	if (!amu_supported())
		return (void *)-1;

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
	       read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);

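	/*
	 * Sanity check: the position of the highest set bit in
	 * AMU_GROUP1_COUNTERS_MASK must not exceed the number of
	 * implemented group 1 counters.
	 */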
	assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
		<= AMU_GROUP1_NR_COUNTERS);

	/*
	 * Disable group 0/1 counters so that other observers (e.g. the SCP,
	 * via the memory-mapped view) do not sample counter values from the
	 * future.
	 */
	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
	isb();

	/* Save group 0 counters */
	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);

	/* Save group 1 counters */
	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);

	return 0;
}

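/*
 * Pub-sub handler: restore the AMU context of the calling core, mirroring
 * amu_context_save().
 */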
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	int i;

	if (!amu_supported())
		return (void *)-1;

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);

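	/* Same bounds sanity check as in amu_context_save(). */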
	assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
		<= AMU_GROUP1_NR_COUNTERS);

	/* Restore group 0 counters */
	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
			amu_group0_cnt_write(i, ctx->group0_cnts[i]);

	/* Restore group 1 counters */
	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	isb();

	/* Restore group 0/1 counter configuration */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);

	return 0;
}

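/*
 * Register the save/restore hooks with the PSCI framework: counters are
 * saved when a core starts a powerdown suspend sequence and restored when
 * it completes.
 */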
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);