/*
 * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif
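
/*
 * The helpers below read or write a single named field of an AMU or
 * trap-control system register, applying the corresponding *_SHIFT/*_MASK
 * (or *_BIT) definitions from arch.h so that callers can work with field
 * values directly rather than raw register contents. They are marked
 * __unused because not every helper is referenced under every build
 * configuration.
 */
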
static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(cpu_context_t *ctx)
{
	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to virtual
		 * offset registers at EL2 do not trap to EL3
		 */
		ctx_write_scr_el3_amvoffen(ctx, 1U);
	}
}

void amu_enable_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	uint64_t cptr_el3 = per_world_ctx->ctx_cptr_el3;

	cptr_el3 &= ~TAM_BIT;
	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}

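/*
 * Initialize the AMU on the current core at EL3: enable every implemented
 * architected (group 0) counter, enable any platform-selected auxiliary
 * (group 1) counters, apply the FEAT_AMUv1p1 access restriction chosen at
 * build time and, when built with ENABLE_MPMM, enable MPMM.
 */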
void amu_init_el3(void)
{
	uint64_t group0_impl_ctr = read_amcgcr_el0_cg0nc();
	uint64_t group0_en_mask = (1 << (group0_impl_ctr)) - 1U;
	uint64_t num_ctr_groups = read_amcfgr_el0_ncg();

	/* Enable all architected counters by default */
	write_amcntenset0_el0_px(group0_en_mask);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (num_ctr_groups > 0U) {
		uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
		const struct amu_topology *topology;

		/*
		 * The platform may opt to enable specific auxiliary counters.
		 * This can be done via the common FCONF getter, or via the
		 * platform-implemented function.
		 */
#if ENABLE_AMU_FCONF
		topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
		topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

		if (topology != NULL) {
			unsigned int core_pos = plat_my_core_pos();

			amcntenset1_el0_px = topology->cores[core_pos].enable;
		} else {
			ERROR("AMU: failed to generate AMU topology\n");
		}

		write_amcntenset1_el0_px(amcntenset1_el0_px);
	}
#else /* ENABLE_AMU_AUXILIARY_COUNTERS */
	if (num_ctr_groups > 0U) {
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field (AMCR_EL0.CG1RZ) to
		 * restrict access to the group 1 counters at all but the
		 * highest implemented EL. This is controlled with the
		 * `AMU_RESTRICT_COUNTERS` compile time flag; when it is set,
		 * system register reads of the group 1 counters at lower ELs
		 * return zero. Reads from the memory-mapped view are
		 * unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}

#if ENABLE_MPMM
	mpmm_enable();
#endif
}

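/*
 * Initialize the EL2 AMU controls when no image is expected to run at EL2:
 * leave Activity Monitor register accesses untrapped by EL2 and keep the
 * FEAT_AMUv1p1 virtual offsets disabled.
 */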
void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2_tam(0U);

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

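/*
 * Accessors for the optional group 1 (auxiliary) counters. They are only
 * built when ENABLE_AMU_AUXILIARY_COUNTERS is set, and each one asserts that
 * the auxiliary group is implemented (AMCFGR_EL0.NCG > 0) and that the index
 * is within the implemented range. The virtual offset accessors additionally
 * assert that AMCG1IDR_EL0 advertises an offset for the requested counter.
 */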
#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

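/*
 * PSCI power-down hook: stop all counters, then save the counter values and,
 * where the FEAT_AMUv1p1 virtual offsets are enabled at EL2, the virtual
 * offsets into this core's amu_ctx entry, so that they can be restored when
 * the core returns from a power-down suspend.
 */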
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

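/*
 * PSCI power-up hook, the mirror of amu_context_save: write the saved counter
 * values and virtual offsets back, then re-enable only those counters that
 * were enabled when the context was saved.
 */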
static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */

	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}

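/*
 * Register the save and restore handlers with the EL3 publish-subscribe
 * framework so that they run on entry to and exit from a power-down suspend.
 */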
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);