xref: /rk3399_ARM-atf/lib/extensions/amu/aarch32/amu.c (revision e6e17ee8f17cc0e34bf681cc36e0954b6cf2e319)
1 /*
2  * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
#include <amu.h>
#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <platform.h>
#include <pubsub_events.h>
13 
/* Number of group 0 (architected) activity monitor counters */
#define AMU_GROUP0_NR_COUNTERS	4

/* Per-core saved counter state, captured across a power-down cycle */
struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};

/* One context slot per core, indexed by plat_my_core_pos() */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
22 
23 int amu_supported(void)
24 {
25 	uint64_t features;
26 
27 	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
28 	return (features & ID_PFR0_AMU_MASK) == 1;
29 }
30 
31 void amu_enable(int el2_unused)
32 {
33 	if (!amu_supported())
34 		return;
35 
36 	if (el2_unused) {
37 		uint64_t v;
38 		/*
39 		 * Non-secure access from EL0 or EL1 to the Activity Monitor
40 		 * registers do not trap to EL2.
41 		 */
42 		v = read_hcptr();
43 		v &= ~TAM_BIT;
44 		write_hcptr(v);
45 	}
46 
47 	/* Enable group 0 counters */
48 	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
49 
50 	/* Enable group 1 counters */
51 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
52 }
53 
54 /* Read the group 0 counter identified by the given `idx`. */
55 uint64_t amu_group0_cnt_read(int idx)
56 {
57 	assert(amu_supported());
58 	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
59 
60 	return amu_group0_cnt_read_internal(idx);
61 }
62 
63 /* Write the group 0 counter identified by the given `idx` with `val`. */
64 void amu_group0_cnt_write(int idx, uint64_t val)
65 {
66 	assert(amu_supported());
67 	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
68 
69 	amu_group0_cnt_write_internal(idx, val);
70 	isb();
71 }
72 
73 /* Read the group 1 counter identified by the given `idx`. */
74 uint64_t amu_group1_cnt_read(int idx)
75 {
76 	assert(amu_supported());
77 	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
78 
79 	return amu_group1_cnt_read_internal(idx);
80 }
81 
82 /* Write the group 1 counter identified by the given `idx` with `val`. */
83 void amu_group1_cnt_write(int idx, uint64_t val)
84 {
85 	assert(amu_supported());
86 	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
87 
88 	amu_group1_cnt_write_internal(idx, val);
89 	isb();
90 }
91 
92 void amu_group1_set_evtype(int idx, unsigned int val)
93 {
94 	assert(amu_supported());
95 	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
96 
97 	amu_group1_set_evtype_internal(idx, val);
98 	isb();
99 }
100 
/*
 * PSCI power-down hook: save this core's AMU counters into its context
 * slot before power down.  All counters are disabled first (and the
 * disable is completed with an ISB) so the saved snapshot is stable.
 * Returns 0 on success, (void *)-1 if the AMU is not implemented.
 */
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx;
	int i;

	if (!amu_supported())
		return (void *)-1;

	ctx = &amu_ctxs[plat_my_core_pos()];

	/* Assert that the group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
	       read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);

	/*
	 * Disable group 0/1 counters to avoid other observers like SCP
	 * sampling counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
	isb();

	/* Snapshot both counter groups into this core's context slot */
	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);

	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);

	return 0;
}
131 
132 static void *amu_context_restore(const void *arg)
133 {
134 	struct amu_ctx *ctx;
135 	uint64_t features;
136 	int i;
137 
138 	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
139 	if ((features & ID_PFR0_AMU_MASK) != 1)
140 		return (void *)-1;
141 
142 	ctx = &amu_ctxs[plat_my_core_pos()];
143 
144 	/* Counters were disabled in `amu_context_save()` */
145 	assert(read_amcntenset0() == 0 && read_amcntenset1() == 0);
146 
147 	/* Restore group 0 counters */
148 	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
149 		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
150 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
151 		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
152 
153 	/* Enable group 0 counters */
154 	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
155 
156 	/* Enable group 1 counters */
157 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
158 	return 0;
159 }
160 
/* Hook the context save/restore routines into the PSCI suspend events */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
163