/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

/*
 * Get the AMU version value from ID_PFR0.
 * Return values
 *   ID_PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in Armv8.4)
 *   ID_PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in Armv8.6)
 *   ID_PFR0_AMU_NOT_SUPPORTED: not supported
 */
unsigned int amu_get_version(void)
{
	return (unsigned int)(read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
		ID_PFR0_AMU_MASK;
}

#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
bool amu_group1_supported(void)
{
	uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT;

	return (features & AMCFGR_NCG_MASK) == 1U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check for the presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check the number of group 1 counters */
	uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
				AMCGCR_CG1NC_MASK;
	VERBOSE("%s%u. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%u is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		uint64_t v;
		/*
		 * Non-secure accesses from EL0 or EL1 to the Activity Monitor
		 * registers do not trap to EL2.
		 */
		v = read_hcptr();
		v &= ~TAM_BIT;
		write_hcptr(v);
	}

	/* Enable group 0 counters */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (amu_get_version() < ID_PFR0_AMU_V1P1) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag: when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
#else
	write_amcr(read_amcr() & ~AMCR_CG1RZ_BIT);
#endif
}
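
/*
 * Illustrative call pattern (a sketch only, not code that is built in this
 * file): the context management library is expected to call amu_enable() on
 * each core before exiting EL3, for example:
 *
 *	amu_enable(el2_unused);
 *
 * where `el2_unused` is supplied by the caller to indicate that no software
 * runs at EL2, in which case the HCPTR.TAM trap is cleared above.
 */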

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`
 */
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}
#endif /* AMU_GROUP1_NR_COUNTERS */
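
/*
 * Illustrative use of the group 1 accessors above (a sketch only; the event
 * number 0x1 is a hypothetical placeholder, since valid group 1 event IDs are
 * implementation defined):
 *
 *	amu_group1_set_evtype(0U, 0x1U);
 *	...
 *	uint64_t sample = amu_group1_cnt_read(0U);
 *
 * The counters must already have been enabled via amu_enable(), and these
 * helpers are only compiled in when AMU_GROUP1_NR_COUNTERS is non-zero.
 */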

static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that the group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable the group 0/1 counters so that other observers, such as the
	 * SCP, do not sample counter values from the future via the memory
	 * mapped view.
	 */
	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore the group 0 counter configuration */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore the group 1 counter configuration */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
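
/*
 * Note on the handlers above (descriptive only): both return (void *)0 on
 * success and (void *)-1 when the AMU, or the configured group 1 counters,
 * are not present, in which case there is no context to preserve. The
 * subscriptions hook them into the PSCI suspend-to-power-down path, so the
 * counters are saved just before a core is powered down and restored once it
 * resumes.
 */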