/*
 * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif
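
/*
 * The helpers below are single-field accessors for the AMU and related system
 * registers, built on the raw register accessors from <arch_helpers.h> and the
 * *_SHIFT, *_MASK and *_BIT definitions from <arch.h>. They are marked
 * __unused because, depending on the build options above, not every helper
 * ends up being referenced.
 */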
static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(cpu_context_t *ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	u_register_t cptr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	cptr_el3 &= ~TAM_BIT;
	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, cptr_el3);

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to virtual
		 * offset registers at EL2 do not trap to EL3
		 */
		ctx_write_scr_el3_amvoffen(ctx, 1U);
	}
}
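
/*
 * amu_init_el3() below enables every architected (group 0) counter and then
 * asks the platform which auxiliary (group 1) counters to enable. When
 * ENABLE_AMU_FCONF is not used, that request goes through plat_amu_topology().
 * As a rough sketch only (the exact structure layout lives in
 * <lib/extensions/amu.h>, and the name my_plat_amu_topology_ is purely
 * illustrative), a platform might provide something like:
 *
 *	static const struct amu_topology my_plat_amu_topology_ = {
 *		.cores = {
 *			[0] = { .enable = 0x3 },  // counters 0 and 1 on core 0
 *		},
 *	};
 *
 *	const struct amu_topology *plat_amu_topology(void)
 *	{
 *		return &my_plat_amu_topology_;
 *	}
 *
 * Returning NULL is also legal: amu_init_el3() then logs an error and leaves
 * all auxiliary counters disabled.
 */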
void amu_init_el3(void)
{
	uint64_t group0_impl_ctr = read_amcgcr_el0_cg0nc();
	uint64_t group0_en_mask = (1 << (group0_impl_ctr)) - 1U;
	uint64_t num_ctr_groups = read_amcfgr_el0_ncg();

	/* Enable all architected counters by default */
	write_amcntenset0_el0_px(group0_en_mask);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (num_ctr_groups > 0U) {
		uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
		const struct amu_topology *topology;

		/*
		 * The platform may opt to enable specific auxiliary counters.
		 * This can be done via the common FCONF getter, or via the
		 * platform-implemented function.
		 */
#if ENABLE_AMU_FCONF
		topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
		topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

		if (topology != NULL) {
			unsigned int core_pos = plat_my_core_pos();

			amcntenset1_el0_px = topology->cores[core_pos].enable;
		} else {
			ERROR("AMU: failed to generate AMU topology\n");
		}

		write_amcntenset1_el0_px(amcntenset1_el0_px);
	}
#else /* ENABLE_AMU_AUXILIARY_COUNTERS */
	if (num_ctr_groups > 0U) {
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag; when it is set, system register reads at lower ELs
		 * return zero. Reads from the memory-mapped view are
		 * unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}

#if ENABLE_MPMM
	mpmm_enable();
#endif
}

void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2_tam(0U);

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}
}
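
/*
 * The accessors below wrap the low-level counter and offset accessors declared
 * in amu_private.h with sanity-checking assertions. The virtual offset
 * ("voffset") accessors relate to FEAT_AMUv1p1: broadly speaking, once
 * SCR_EL3.AMVOFFEN and HCR_EL2.AMVOFFEN are set, reads of an affected counter
 * from EL0 or EL1 return the physical count minus the corresponding
 * AMEVCNTVOFF<g><n>_EL2 offset (a physical count of 1000 with an offset of 400
 * reads back as 600), while EL2 and EL3 continue to observe the physical
 * count. This is why EL3 saves and restores the offsets alongside the raw
 * counter values further down in this file.
 */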
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2, or
 * 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2, or
 * 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif
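
/*
 * Context save/restore for the AMU. These handlers are hooked onto the PSCI
 * suspend-to-power-down events via the SUBSCRIBE_TO_EVENT() calls at the end
 * of this file: on the way down the counters are stopped and their values,
 * enable masks and virtual offsets are captured in the per-core amu_ctx; on
 * the way back up the saved values are written back and only the counters that
 * were previously enabled are switched back on.
 */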
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */

	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);