/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

/* Per-core AMU counter context, saved/restored across CPU power down. */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

/*
 * Check if AMUv1 for Armv8.4 or 8.6 is implemented.
 *
 * Reads the AMU field of ID_AA64PFR0_EL1; the two accepted values
 * correspond to the v8.4 (1) and v8.6 (2) flavours of the Activity
 * Monitors Extension.
 */
bool amu_supported(void)
{
	uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;

	features &= ID_AA64PFR0_AMU_MASK;
	return ((features == 1U) || (features == 2U));
}

#if AMU_GROUP1_NR_COUNTERS
/*
 * Check if group 1 counters is implemented.
 *
 * Reads AMCFGR_EL0.NCG; the code treats the value 1 as "counter group 1
 * is implemented in addition to group 0".
 */
bool amu_group1_supported(void)
{
	uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;

	return (features & AMCFGR_EL0_NCG_MASK) == 1U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 *
 * When `el2_unused` is true, CPTR_EL2.TAM is also cleared so that AMU
 * register accesses from lower ELs do not trap to EL2. Traps are
 * cleared before the counters themselves are enabled.
 * Panics if the build-time group 1 configuration exceeds what the
 * hardware implements.
 */
void amu_enable(bool el2_unused)
{
	uint64_t v;

	if (!amu_supported()) {
		INFO("AMU is not implemented\n");
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check and set presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
				AMCGCR_EL0_CG1NC_MASK;
	VERBOSE("%s%llu. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	/* The platform must not request more counters than exist */
	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%llu is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		v = read_cptr_el2();
		v &= ~CPTR_EL2_TAM_BIT;
		write_cptr_el2(v);
	}

	/*
	 * CPTR_EL3.TAM: Set to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	v = read_cptr_el3();
	v &= ~TAM_BIT;
	write_cptr_el3(v);

	/* Enable group 0 counters */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
}

/* Read the group 0 counter identified by the given `idx`.
 */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	/* Ensure the counter write has taken effect before continuing */
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	/* Ensure the counter write has taken effect before continuing */
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`
 */
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	/* Ensure the event type write has taken effect before continuing */
	isb();
}
#endif	/* AMU_GROUP1_NR_COUNTERS */

/*
 * Pubsub hook run on a core about to power down: disables the counters,
 * then snapshots their values into this core's amu_ctx entry.
 * Returns (void *)-1 if the AMU (or the configured group 1) is not
 * implemented, (void *)0 on success.
 */
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
	/* Make sure the disable is visible before the counters are read */
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters; only those selected by the mask are kept */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}

/*
 * Pubsub hook run on a core coming out of power down: writes the saved
 * counter values back, then re-enables the counters. Counter values are
 * restored before the counters are enabled so no samples are lost to an
 * observer. Returns (void *)-1 if the AMU (or the configured group 1)
 * is not implemented, (void *)0 on success.
 */
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters; only those selected by the mask */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

/*
 * Hook the AMU context into the PSCI suspend flow: counters are saved
 * when a core starts powering down and restored when it finishes
 * resuming.
 */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);