/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	uint64_t amcfgr_el0_ncg;		/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;		/* Number of group 0 counters */

	uint64_t amcntenset0_el0_px = 0x0;	/* Group 0 enable mask */
	uint64_t amcntenset1_el0_px = 0x0;	/* Group 1 enable mask */

	if (!is_feat_amu_supported()) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */
		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	ctx_write_cptr_el3_tam(ctx, 0U);

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;

	assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		amcntenset1_el0_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_el0_px(amcntenset0_el0_px);

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(amcntenset1_el0_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		if (el2_unused) {
			/*
			 * Make sure virtual offsets are disabled if EL2 not
			 * used.
			 */
			write_hcr_el2_amvoffen(0U);
		} else {
			/*
			 * Virtual offset registers are only accessible from
			 * EL3 and EL2. When this bit is clear, accesses from
			 * EL2 are trapped, so set it to 1 when EL2 is present.
			 */
			ctx_write_scr_el3_amvoffen(ctx, 1U);
		}

#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag: when set, system register reads at lower ELs return
		 * zero. Reads from the memory-mapped view are unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}

#if ENABLE_MPMM
	mpmm_enable();
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */

	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_el0_px() == 0U);

	if (amcfgr_el0_ncg > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);