/*
 * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(cpu_context_t *ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
	 * registers do not trap to EL3.
	 */
	ctx_write_cptr_el3_tam(ctx, 0U);

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to virtual
		 * offset registers at EL2 do not trap to EL3
		 */
		ctx_write_scr_el3_amvoffen(ctx, 1U);
	}
}

void amu_init_el3(void)
{
	uint64_t group0_impl_ctr = read_amcgcr_el0_cg0nc();
	uint64_t group0_en_mask = (1 << (group0_impl_ctr)) - 1U;
	uint64_t num_ctr_groups = read_amcfgr_el0_ncg();

	/* Enable all architected counters by default */
	write_amcntenset0_el0_px(group0_en_mask);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (num_ctr_groups > 0U) {
		uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
		const struct amu_topology *topology;

		/*
		 * The platform may opt to enable specific auxiliary counters.
		 * This can be done via the common FCONF getter, or via the
		 * platform-implemented function.
		 */
#if ENABLE_AMU_FCONF
		topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
		topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

		if (topology != NULL) {
			unsigned int core_pos = plat_my_core_pos();

			amcntenset1_el0_px = topology->cores[core_pos].enable;
		} else {
			ERROR("AMU: failed to generate AMU topology\n");
		}

		write_amcntenset1_el0_px(amcntenset1_el0_px);
	}
#else /* ENABLE_AMU_AUXILIARY_COUNTERS */
	if (num_ctr_groups > 0U) {
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag; when set, system register reads at lower ELs return
		 * zero. Reads from the memory mapped view are unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}

#if ENABLE_MPMM
	mpmm_enable();
#endif
}

void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2_tam(0U);

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
			"architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2, or
 * 3; the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

/*
 * Save the AMU context when a core is about to be powered down. Counters are
 * stopped before they are read so that the saved values are consistent.
 */
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

/*
 * Restore the AMU context when a core resumes from a power-down state,
 * mirroring amu_context_save().
 */
static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */

	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);