/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
bool amu_supported(void)
{
	uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;

	features &= ID_AA64PFR0_AMU_MASK;
	return ((features == 1U) || (features == 2U));
}

#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
bool amu_group1_supported(void)
{
	uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;

	return (features & AMCFGR_EL0_NCG_MASK) == 1U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	uint64_t v;

	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check the presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check the number of group 1 counters */
	uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
				AMCGCR_EL0_CG1NC_MASK;
	VERBOSE("%s%llu. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%llu is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		v = read_cptr_el2();
		v &= ~CPTR_EL2_TAM_BIT;
		write_cptr_el2(v);
	}

	/*
	 * CPTR_EL3.TAM: Set to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	v = read_cptr_el3();
	v &= ~TAM_BIT;
	write_cptr_el3(v);

	/* Enable group 0 counters */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
}
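/*
 * Illustrative call site (a sketch, not taken from this library): the
 * context management code is expected to call this function on its way
 * out of EL3, along the lines of
 *
 *	amu_enable(el2_unused);
 *
 * where `el2_unused` indicates that no software owns EL2, so CPTR_EL2.TAM
 * must be cleared here rather than by an EL2 image.
 */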
/* Read the group 0 counter identified by the given `idx` */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`
 */
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}
#endif /* AMU_GROUP1_NR_COUNTERS */
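/*
 * The two handlers below save and restore the per-core AMU counter state.
 * They are registered at the bottom of this file as subscribers to the
 * psci_suspend_pwrdown_start and psci_suspend_pwrdown_finish pubsub events,
 * so that counter values are preserved across a power-down suspend cycle.
 */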
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers, like SCP,
	 * sampling counter values from the future via the memory-mapped view.
	 */
	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);