xref: /rk3399_ARM-atf/lib/extensions/amu/aarch32/amu.c (revision 2eedba9a5576ad8d76ae896b829e617e962d0e96)
1 /*
2  * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
#include <amu.h>
#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <platform.h>
#include <pubsub_events.h>
#include <stdbool.h>
/* Number of architected group 0 activity monitor counters. */
#define AMU_GROUP0_NR_COUNTERS	4

/* Per-core snapshot of all AMU counters, saved across power-down. */
struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};

/* One context slot per core, indexed by plat_my_core_pos(). */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
23 
24 bool amu_supported(void)
25 {
26 	uint64_t features;
27 
28 	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
29 	return (features & ID_PFR0_AMU_MASK) == 1U;
30 }
31 
32 void amu_enable(bool el2_unused)
33 {
34 	if (!amu_supported())
35 		return;
36 
37 	if (el2_unused) {
38 		uint64_t v;
39 		/*
40 		 * Non-secure access from EL0 or EL1 to the Activity Monitor
41 		 * registers do not trap to EL2.
42 		 */
43 		v = read_hcptr();
44 		v &= ~TAM_BIT;
45 		write_hcptr(v);
46 	}
47 
48 	/* Enable group 0 counters */
49 	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
50 
51 	/* Enable group 1 counters */
52 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
53 }
54 
55 /* Read the group 0 counter identified by the given `idx`. */
56 uint64_t amu_group0_cnt_read(int idx)
57 {
58 	assert(amu_supported());
59 	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
60 
61 	return amu_group0_cnt_read_internal(idx);
62 }
63 
64 /* Write the group 0 counter identified by the given `idx` with `val`. */
65 void amu_group0_cnt_write(int idx, uint64_t val)
66 {
67 	assert(amu_supported());
68 	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
69 
70 	amu_group0_cnt_write_internal(idx, val);
71 	isb();
72 }
73 
74 /* Read the group 1 counter identified by the given `idx`. */
75 uint64_t amu_group1_cnt_read(int idx)
76 {
77 	assert(amu_supported());
78 	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
79 
80 	return amu_group1_cnt_read_internal(idx);
81 }
82 
83 /* Write the group 1 counter identified by the given `idx` with `val`. */
84 void amu_group1_cnt_write(int idx, uint64_t val)
85 {
86 	assert(amu_supported());
87 	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
88 
89 	amu_group1_cnt_write_internal(idx, val);
90 	isb();
91 }
92 
93 void amu_group1_set_evtype(int idx, unsigned int val)
94 {
95 	assert(amu_supported());
96 	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
97 
98 	amu_group1_set_evtype_internal(idx, val);
99 	isb();
100 }
101 
102 static void *amu_context_save(const void *arg)
103 {
104 	struct amu_ctx *ctx;
105 	int i;
106 
107 	if (!amu_supported())
108 		return (void *)-1;
109 
110 	ctx = &amu_ctxs[plat_my_core_pos()];
111 
112 	/* Assert that group 0 counter configuration is what we expect */
113 	assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
114 	       read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);
115 
116 	/*
117 	 * Disable group 0 counters to avoid other observers like SCP sampling
118 	 * counter values from the future via the memory mapped view.
119 	 */
120 	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
121 	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
122 	isb();
123 
124 	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
125 		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
126 
127 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
128 		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
129 
130 	return (void *)0;
131 }
132 
133 static void *amu_context_restore(const void *arg)
134 {
135 	struct amu_ctx *ctx;
136 	int i;
137 
138 	if (!amu_supported())
139 		return (void *)-1;
140 
141 	ctx = &amu_ctxs[plat_my_core_pos()];
142 
143 	/* Counters were disabled in `amu_context_save()` */
144 	assert((read_amcntenset0() == 0U) && (read_amcntenset1() == 0U));
145 
146 	/* Restore group 0 counters */
147 	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
148 		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
149 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
150 		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
151 
152 	/* Enable group 0 counters */
153 	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
154 
155 	/* Enable group 1 counters */
156 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
157 	return (void *)0;
158 }
159 
/* Save/restore the AMU context around a PSCI suspend power-down cycle. */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
162