/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <amu.h>
#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <platform.h>
#include <pubsub_events.h>
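
/* The AMU extension defines four architected group 0 counters. */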
#define AMU_GROUP0_NR_COUNTERS	4

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};
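
/* Per-core counter context, saved and restored over a power-down cycle. */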
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
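
/*
 * Check the ID_AA64PFR0_EL1.AMU field; a value of 1 indicates that
 * AMUv1 is implemented.
 */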
int amu_supported(void)
{
	uint64_t features;

	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
	return (features & ID_AA64PFR0_AMU_MASK) == 1;
}

/*
 * Enable counters.  This function is meant to be invoked
 * by the context management library before exiting from EL3. Pass a
 * non-zero `el2_unused` when EL2 is implemented but unused, so that
 * the EL2 trap controls are configured here as well.
 */
void amu_enable(int el2_unused)
{
	uint64_t v;

	if (amu_supported() == 0)
		return;

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		v = read_cptr_el2();
		v &= ~CPTR_EL2_TAM_BIT;
		write_cptr_el2(v);
	}

	/*
	 * CPTR_EL3.TAM: Set to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	v = read_cptr_el3();
	v &= ~TAM_BIT;
	write_cptr_el3(v);

	/* Enable group 0 counters */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
	/* Enable group 1 counters */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
}

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
{
	assert(amu_supported() != 0);
	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported() != 0);
	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
{
	assert(amu_supported() != 0);
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported() != 0);
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`.
 */
void amu_group1_set_evtype(int idx, unsigned int val)
{
	assert(amu_supported() != 0);
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}
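
/*
 * Illustrative usage of the group 1 interface: program a counter with a
 * platform-defined event number, then read back the accumulated count:
 *
 *	amu_group1_set_evtype(idx, event_nr);
 *	...
 *	uint64_t count = amu_group1_cnt_read(idx);
 *
 * `idx` and `event_nr` are placeholders here; valid event numbers are
 * implementation defined.
 */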
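
/*
 * Save the AMU counter state of the current core. Registered below as the
 * handler for the `psci_suspend_pwrdown_start` event.
 */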
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	int i;

	if (amu_supported() == 0)
		return (void *)-1;

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
	       read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
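
	/*
	 * Ensure the group 1 counters mask does not select a counter beyond
	 * the number of implemented group 1 counters.
	 */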
	assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
		<= AMU_GROUP1_NR_COUNTERS);

	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values newer than the ones saved here
	 * through the memory-mapped view.
	 */
	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
	isb();

	/* Save group 0 counters */
	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);

	/* Save group 1 counters */
	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);

	return 0;
}
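
/*
 * Restore the AMU counter state of the current core. Registered below as
 * the handler for the `psci_suspend_pwrdown_finish` event.
 */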
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	int i;

	if (amu_supported() == 0)
		return (void *)-1;

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);
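
	/*
	 * As in `amu_context_save()`, the group 1 mask must fit within the
	 * implemented group 1 counters.
	 */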
	assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
		<= AMU_GROUP1_NR_COUNTERS);

	/* Restore group 0 counters */
	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
			amu_group0_cnt_write(i, ctx->group0_cnts[i]);

	/* Restore group 1 counters */
	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);

	/* Restore group 0/1 counter configuration */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);

	return 0;
}
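
/*
 * Register the save/restore handlers with the PSCI power-down suspend
 * events published by the pubsub framework.
 */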
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);