/*
 * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <common/debug.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/smccc.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "gpt_rme_private.h"

#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library"
#endif

/*
 * Lookup T from PPS
 *
 *   PPS    Size    T
 *   0b000  4GB     32
 *   0b001  64GB    36
 *   0b010  1TB     40
 *   0b011  4TB     42
 *   0b100  16TB    44
 *   0b101  256TB   48
 *   0b110  4PB     52
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};

/*
 * Lookup P from PGS
 *
 *   PGS    Size    P
 *   0b00   4KB     12
 *   0b10   16KB    14
 *   0b01   64KB    16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};

static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc);
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc);
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc);

/*
 * This structure contains GPT configuration data
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;
	gpccr_pps_e pps;
	gpt_t_val_e t;
	gpccr_pgs_e pgs;
	gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;

/*
 * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
 * +-------+------------+
 * |  PGS  | L1 entries |
 * +-------+------------+
 * |  4KB  |     32     |
 * +-------+------------+
 * |  16KB |      8     |
 * +-------+------------+
 * |  64KB |      2     |
 * +-------+------------+
 */
static unsigned int gpt_l1_cnt_2mb;

/*
 * Mask for the L1 index field, depending on
 * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
 * +---------+-------------------------------+
 * |         |             PGS               |
 * +---------+----------+----------+---------+
 * | L0GPTSZ |   4KB    |   16KB   |   64KB  |
 * +---------+----------+----------+---------+
 * |   1GB   |  0x3FFF  |   0xFFF  |  0x3FF  |
 * +---------+----------+----------+---------+
 * |  16GB   | 0x3FFFF  |  0xFFFF  | 0x3FFF  |
 * +---------+----------+----------+---------+
 * |  64GB   | 0xFFFFF  | 0x3FFFF  | 0xFFFF  |
 * +---------+----------+----------+---------+
 * |  512GB  | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
 * +---------+----------+----------+---------+
 */
static uint64_t gpt_l1_index_mask;

/* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
#define L1_QWORDS_2MB	(gpt_l1_cnt_2mb / 2U)
#define L1_QWORDS_32MB	(L1_QWORDS_2MB * 16U)
#define L1_QWORDS_512MB	(L1_QWORDS_32MB * 16U)

/* Size in bytes of L1 entries in 2MB, 32MB */
#define L1_BYTES_2MB	(gpt_l1_cnt_2mb * sizeof(uint64_t))
#define L1_BYTES_32MB	(L1_BYTES_2MB * 16U)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_INDEX(_pa)	\
	(((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)
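
/*
 * Worked example (illustrative, derived from the tables above): with PGS=4KB
 * each 64-bit L1 entry holds 16 4-bit GPIs and therefore covers 64KB, which
 * is why a 2MB region spans 32 L1 entries and why the index mask for a 1GB
 * L0GPTSZ region is 0x3FFF (16384 entries x 64KB = 1GB).
 */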

/* This variable is used during initialization of the L1 tables */
static uintptr_t gpt_l1_tbl;

/* These variables are used during runtime */
#if (RME_GPT_BITLOCK_BLOCK == 0)
/*
 * The GPTs are protected by a global spinlock to ensure
 * that multiple CPUs do not attempt to change the descriptors at once.
 */
static spinlock_t gpt_lock;

/* Lock/unlock macros for GPT entries
 *
 * Access to GPT is controlled by a global lock to ensure
 * that no more than one CPU is allowed to make changes at any
 * given time.
 */
#define GPT_LOCK	spin_lock(&gpt_lock)
#define GPT_UNLOCK	spin_unlock(&gpt_lock)
#else

/* Base address of bitlocks array */
static bitlock_t *gpt_bitlock;

/*
 * Access to a block of memory is controlled by a bitlock.
 * Size of block = RME_GPT_BITLOCK_BLOCK * 512MB.
 */
#define GPT_LOCK	bit_lock(gpi_info.lock, gpi_info.mask)
#define GPT_UNLOCK	bit_unlock(gpi_info.lock, gpi_info.mask)
#endif /* RME_GPT_BITLOCK_BLOCK */

static void tlbi_page_dsbosh(uintptr_t base)
{
	/* Look-up table for invalidation TLBs for 4KB, 16KB and 64KB pages */
	static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
		{ tlbirpalos_4k, ~(SZ_4K - 1UL) },
		{ tlbirpalos_64k, ~(SZ_64K - 1UL) },
		{ tlbirpalos_16k, ~(SZ_16K - 1UL) }
	};

	tlbi_page_lookup[gpt_config.pgs].function(
			base & tlbi_page_lookup[gpt_config.pgs].mask);
	dsbosh();
}

/*
 * Helper function to fill out GPI entries in a single L1 table
 * with Granules or Contiguous descriptor.
 *
 * Parameters
 *   l1		Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
 *   l1_desc	GPT Granules or Contiguous descriptor to set this range to
 *   cnt	Number of 128-bit (double L1) entries to fill
 *
 */
static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
{
	uint128_t *l1_quad = (uint128_t *)l1;
	uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);

	VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);

	for (unsigned int i = 0U; i < cnt; i++) {
		*l1_quad++ = l1_quad_desc;
	}
}

static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Convert 2MB Contiguous block to Granules */
	fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
}

static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
	const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get index corresponding to 32MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_32MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 2MB blocks in 32MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		/* Fill with Granules or Contiguous descriptors */
		fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
			  L1_QWORDS_2MB);
		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
	}
}

static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
	const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get index corresponding to 512MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_512MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 32MB blocks in 512MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		if (l1 == l1_32mb) {
			/* Shatter this 32MB block */
			shatter_32mb(base, gpi_info, l1_desc);
		} else {
			/* Fill 32MB with Contiguous descriptors */
			fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
		}

		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
	}
}

/*
 * This function checks to see if a GPI value is valid.
 *
 * These are valid GPI values.
 *   GPT_GPI_NO_ACCESS	U(0x0)
 *   GPT_GPI_SECURE	U(0x8)
 *   GPT_GPI_NS		U(0x9)
 *   GPT_GPI_ROOT	U(0xA)
 *   GPT_GPI_REALM	U(0xB)
 *   GPT_GPI_ANY	U(0xF)
 *
 * Parameters
 *   gpi	GPI to check for validity.
 *
 * Return
 *   true for a valid GPI, false for an invalid one.
 */
static bool is_gpi_valid(unsigned int gpi)
{
	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
		return true;
	}
	return false;
}

/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
		return true;
	}
	return false;
}

/*
 * This helper function checks to see if a PAS region from index 0 to
 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
 *
 * Parameters
 *   l0_idx:      Index of the L0 entry to check
 *   pas_regions: PAS region array
 *   pas_idx:     Upper bound of the PAS array index.
 *
 * Return
 *   True if a PAS region occupies the L0 region in question, false if not.
 */
static bool does_previous_pas_exist_here(unsigned int l0_idx,
					 pas_region_t *pas_regions,
					 unsigned int pas_idx)
{
	/* Iterate over PAS regions up to pas_idx */
	for (unsigned int i = 0U; i < pas_idx; i++) {
		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
		    GPT_L0GPTSZ_ACTUAL_SIZE,
		    pas_regions[i].base_pa, pas_regions[i].size)) {
			return true;
		}
	}
	return false;
}

/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i =
		     (unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
				idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
					idx);
				return -EFAULT;
			}

			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
					idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same L0 region we subtract from pas_l1_cnt and only
			 * the first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}

/*
 * This function validates L0 initialization parameters.
 *
 * Parameters
 *   l0_mem_base	Base address of memory used for L0 table.
 *   l0_mem_size	Size of memory available for L0 table.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
			      size_t l0_mem_size)
{
	size_t l0_alignment;

	/*
	 * Make sure PPS is valid and then store it since macros need this value
	 * to work.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4KB or L0 table size */
	l0_alignment = SZ_4K;
	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	}

	/* Check base address */
	if ((l0_mem_base == 0UL) ||
	    ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

	/* Check memory size for L0 table */
	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		ERROR("GPT: Inadequate L0 memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      GPT_L0_TABLE_SIZE(gpt_config.t), l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}

/*
 * In the event that L1 tables are needed, this function validates
 * the L1 table generation parameters.
 *
 * Parameters
 *   l1_mem_base	Base address of memory used for L1 table allocation.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   l1_gpt_cnt		Number of L1 tables needed.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
			      unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	if (!xlat_arch_is_granule_size_supported(
	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
		return -EPERM;
	}

	/* Make sure L1 tables are aligned to their size */
	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
		      l1_mem_base);
		return -EFAULT;
	}

	/* Get total memory needed for L1 tables */
	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Check for overflow */
	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
		ERROR("GPT: Overflow calculating L1 memory size\n");
		return -ENOMEM;
	}

	/* Make sure enough space was supplied */
	if (l1_mem_size < l1_gpt_mem_sz) {
		ERROR("GPT: Inadequate L1 GPTs memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      l1_gpt_mem_sz, l1_mem_size);
		return -ENOMEM;
	}

	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
	return 0;
}

/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region to
 *		initialize.
 */
static void generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t gpt_desc;
	unsigned long idx, end_idx;
	uint64_t *l0_gpt_arr;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */

	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* Start index of this region in L0 GPTs */
	idx = GPT_L0_IDX(pas->base_pa);

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

	/* Generate the needed block descriptors */
	for (; idx < end_idx; idx++) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
	}
}

/*
 * Helper function to determine if the end physical address lies in the same L0
 * region as the current physical address. If true, the end physical address is
 * returned; otherwise, the start address of the next L0 region is returned.
 *
 * Parameters
 *   cur_pa	Physical address of the current PA in the loop through
 *		the range.
 *   end_pa	Physical address of the end PA in a PAS range.
 *
 * Return
 *   The PA of the end of the current range.
 */
static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t cur_idx;
	uintptr_t end_idx;

	cur_idx = GPT_L0_IDX(cur_pa);
	end_idx = GPT_L0_IDX(end_pa);

	assert(cur_idx <= end_idx);

	if (cur_idx == end_idx) {
		return end_pa;
	}

	return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
}

/*
 * Helper function to fill out GPI entries from 'first' granule address of
 * the specified 'length' in a single L1 table with Contiguous descriptors.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   length	Length of the range in bytes
 *   gpi	GPI to set this range to
 *
 * Return
 *   Address of next granule in range.
 */
__unused static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
					    size_t length, unsigned int gpi)
{
	/*
	 * Look up table for contiguous blocks and descriptors.
	 * Entries should be defined in descending block sizes:
	 * 512MB, 32MB and 2MB.
	 */
	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
#if (RME_GPT_MAX_BLOCK == 512)
		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
#endif
#if (RME_GPT_MAX_BLOCK >= 32)
		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
#endif
#if (RME_GPT_MAX_BLOCK != 0)
		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
#endif
	};

	/*
	 * Iterate through all block sizes (512MB, 32MB and 2MB)
	 * starting with maximum supported.
	 */
	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
		/* Calculate index */
		unsigned long idx = GPT_L1_INDEX(first);

		/* Contiguous block size */
		size_t cont_size = gpt_fill_lookup[i].size;

		if (GPT_REGION_IS_CONT(length, first, cont_size)) {

			/* Generate Contiguous descriptor */
			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
						gpt_fill_lookup[i].desc);

			/* Number of 128-bit L1 entries in block */
			unsigned int cnt;

			switch (cont_size) {
			case SZ_512M:
				cnt = L1_QWORDS_512MB;
				break;
			case SZ_32M:
				cnt = L1_QWORDS_32MB;
				break;
			default:	/* SZ_2MB */
				cnt = L1_QWORDS_2MB;
			}

			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
				first, cont_size / SZ_1M);

			/* Fill Contiguous descriptors */
			fill_desc(&l1[idx], l1_desc, cnt);
			return (first + cont_size);
		}
	}

	return first;
}

/* Build Granules descriptor with the same 'gpi' for every GPI entry */
static uint64_t build_l1_desc(unsigned int gpi)
{
	uint64_t l1_desc = (uint64_t)gpi | ((uint64_t)gpi << 4);

	l1_desc |= (l1_desc << 8);
	l1_desc |= (l1_desc << 16);
	return (l1_desc | (l1_desc << 32));
}

/*
 * Helper function to fill out GPI entries from 'first' to 'last' granule
 * address in a single L1 table with Granules descriptors built from 'gpi'.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI to set this range to
 *
 * Return
 *   Address of next granule in range.
 */
static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
				   uintptr_t last, unsigned int gpi)
{
	uint64_t gpi_mask;
	unsigned long i;

	/* Generate Granules descriptor */
	uint64_t l1_desc = build_l1_desc(gpi);

	/* Shift the mask if we're starting in the middle of an L1 entry */
	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region */
	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {

		/* Account for stopping in the middle of an L1 entry */
		if (i == GPT_L1_INDEX(last)) {
			gpi_mask &= (gpi_mask >> ((15U -
				GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));

		/* Write GPI values */
		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);

		/* Reset mask */
		gpi_mask = ULONG_MAX;
	}

	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
}
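
/*
 * Note on the masking above (illustrative): each 64-bit L1 entry packs 16
 * 4-bit GPIs, one per nibble, so build_l1_desc(0x9), for instance, yields
 * 0x9999999999999999. The gpi_mask selects nibbles [GPI index of 'first'..15]
 * in the first entry and [0..GPI index of 'last'] in the last entry, so a
 * range that starts or ends mid-entry only updates the granules it covers.
 */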

/*
 * Helper function to fill out GPI entries in a single L1 table.
 * This function fills out an entire L1 table with either Granules or Contiguous
 * (RME_GPT_MAX_BLOCK != 0) descriptors depending on region length and alignment.
 * Note: if RME_GPT_MAX_BLOCK == 0, then the L1 tables are filled with regular
 * Granules descriptors.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI to set this range to
 */
static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
			unsigned int gpi)
{
	assert(l1 != NULL);
	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));

#if (RME_GPT_MAX_BLOCK != 0)
	while (first <= last) {
		/* Region length */
		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		if (length < SZ_2M) {
			/*
			 * Fill with Granule descriptors in case of
			 * region length < 2MB.
			 */
			first = fill_l1_gran_desc(l1, first, last, gpi);

		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
			/*
			 * For region length >= 2MB and at least 2MB aligned
			 * call to fill_l1_cont_desc will iterate through
			 * all block sizes (512MB, 32MB and 2MB) supported and
			 * fill corresponding Contiguous descriptors.
			 */
			first = fill_l1_cont_desc(l1, first, length, gpi);
		} else {
			/*
			 * For not aligned region >= 2MB fill with Granules
			 * descriptors up to the next 2MB aligned address.
			 */
			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
					     GPT_PGS_ACTUAL_SIZE(gpt_config.p);

			first = fill_l1_gran_desc(l1, first, new_last, gpi);
		}
	}
#else
	/* Fill with Granule descriptors */
	first = fill_l1_gran_desc(l1, first, last, gpi);
#endif
	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
}

/*
 * This function finds the next available unused L1 table and initializes all
 * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
 * event that a PAS region stops midway through an L1 table, thus guaranteeing
 * that all memory not explicitly assigned is GPI_ANY. This function does not
 * check for overflow conditions; that should be done by the caller.
 *
 * Return
 *   Pointer to the next available L1 table.
 */
static uint64_t *get_new_l1_tbl(void)
{
	/* Retrieve the next L1 table */
	uint64_t *l1 = (uint64_t *)gpt_l1_tbl;

	/* Increment L1 GPT address */
	gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Initialize all GPIs to GPT_GPI_ANY */
	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
		l1[i] = GPT_L1_ANY_DESC;
	}

	return l1;
}

/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region.
 */
static void generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx, gpi;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Get GPI */
	gpi = GPT_PAS_ATTR_GPI(pas->attrs);

	/* Iterate over each L0 region in this memory range */
	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space */
			l1_gpt_arr = get_new_l1_tbl();

			/* Fill out the L0 descriptor and flush it */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);

		/* Advance cur_pa to first granule in next L0 region */
		cur_pa = get_l1_end_pa(cur_pa, end_pa);
	}
}

/*
 * This function flushes a range of L0 descriptors used by a given PAS region
 * array. There is a chance that some unmodified L0 descriptors would be flushed
 * in the case that there are "holes" in an array of PAS regions but overall
 * this should be faster than individually flushing each modified L0 descriptor
 * as they are created.
 *
 * Parameters
 *   *pas	Pointer to an array of PAS regions.
 *   pas_count	Number of entries in the PAS array.
 */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
	unsigned long idx;
	unsigned long start_idx;
	unsigned long end_idx;
	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas != NULL);
	assert(pas_count != 0U);

	/* Initial start and end values */
	start_idx = GPT_L0_IDX(pas[0].base_pa);
	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);

	/* Find lowest and highest L0 indices used in this PAS array */
	for (idx = 1UL; idx < pas_count; idx++) {
		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
			start_idx = GPT_L0_IDX(pas[idx].base_pa);
		}
		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
		}
	}

	/*
	 * Flush all covered L0 descriptors, add 1 because we need to include
	 * the end index value.
	 */
	flush_dcache_range((uintptr_t)&l0[start_idx],
			   ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
}

/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs.
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API to disable granule protection checks.
 */
void gpt_disable(void)
{
	u_register_t gpccr_el3 = read_gpccr_el3();

	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
	dsbsy();
	isb();
}

/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * Parameters
 *   pps		PPS value to use for table generation
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	uint64_t gpt_desc;
	int ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Validate other parameters */
	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Create the descriptor to initialize L0 entries with */
	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

	/* Iterate through all L0 entries */
	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
		((uint64_t *)l0_mem_base)[i] = gpt_desc;
	}

	/* Flush updated L0 table to memory */
	flush_dcache_range((uintptr_t)l0_mem_base, GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Stash the L0 base address once initial setup is complete */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}
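
/*
 * Typical boot-time call sequence (illustrative sketch only; the platform
 * integration code chooses the actual PPS/PGS values, carve-out memory and
 * PAS layout, and must check each return code):
 *
 *   ret = gpt_init_l0_tables(pps, l0_base, l0_size);
 *   ret = gpt_init_pas_l1_tables(pgs, l1_base, l1_size,
 *                                pas_regions, pas_count);
 *   ret = gpt_enable();
 */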

/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory (e.g. you have multiple DDR banks and want to place the L1 tables
 * in the DDR bank that they control).
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int l1_gpt_cnt, ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* PGS is needed for validate_pas_mappings so check it now */
	if (pgs > GPT_PGS_MAX) {
		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/* Check if L1 GPTs are required and how many */
	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters */
	if (l1_gpt_cnt > 0) {
		ret = validate_l1_params(l1_mem_base, l1_mem_size,
					 (unsigned int)l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation */
		gpt_l1_tbl = l1_mem_base;
	}

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	INFO("GPT: Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: %u\n", pas_count);
	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   (size_t)l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service, so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Parameters
 *   l1_bitlocks_base	Base address of memory for L1 tables bitlocks.
 *   l1_bitlocks_size	Total size of memory available for L1 tables bitlocks.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(uintptr_t l1_bitlocks_base, size_t l1_bitlocks_size)
{
	u_register_t reg;
	__unused size_t locks_size;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Ensure GPC are already enabled */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0UL) {
		ERROR("GPT: Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR; we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/*
	 * Size of GPT bitlocks in bytes for the protected address space
	 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
	 */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
		     (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);
	/*
	 * If the protected space size is less than the size covered
	 * by a 'bitlock' structure, fall back to a single bitlock.
	 */
	if (locks_size < LOCK_SIZE) {
		locks_size = LOCK_SIZE;
	/* Check bitlocks array size */
	} else if (locks_size > l1_bitlocks_size) {
		ERROR("GPT: Inadequate GPT bitlocks memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      locks_size, l1_bitlocks_size);
		return -ENOMEM;
	}

	gpt_bitlock = (bitlock_t *)l1_bitlocks_base;

	/* Initialise GPT bitlocks */
	(void)memset((void *)gpt_bitlock, 0, locks_size);

	/* Flush GPT bitlocks to memory */
	flush_dcache_range((uintptr_t)gpt_bitlock, locks_size);
#endif /* RME_GPT_BITLOCK_BLOCK */

	VERBOSE("GPT: Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
#if (RME_GPT_BITLOCK_BLOCK != 0)
	VERBOSE("  Bitlocks:  0x%"PRIxPTR"/0x%lx\n", (uintptr_t)gpt_bitlock,
		locks_size);
#endif
	return 0;
}

/*
 * A helper to write the value (target_pas << gpi_shift) at index 'idx' of
 * gpt_l1_addr.
 */
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
			     unsigned int gpi_shift, unsigned int idx,
			     unsigned int target_pas)
{
	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = *gpt_l1_desc;

	dsboshst();
}

/*
 * Helper to retrieve the gpt_l1_* information for the given base address and
 * return it in gpi_info.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;
	__unused unsigned int block_idx;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from PA */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Block index */
	block_idx = (unsigned int)(base / (RME_GPT_BITLOCK_BLOCK * SZ_512M));

	/* Bitlock address and mask */
	gpi_info->lock = &gpt_bitlock[block_idx / LOCK_BITS];
	gpi_info->mask = 1U << (block_idx & (LOCK_BITS - 1U));
#endif
	return 0;
}
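
/*
 * Note on gpi_shift computed in get_gpi_params() above (illustrative): it
 * selects the 4-bit GPI within the 64-bit L1 descriptor. Each descriptor
 * holds 16 GPIs, so a granule that is the 6th one covered by its descriptor
 * (GPI index 5) gets gpi_shift = 20.
 */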

/*
 * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
 * This function is called with bitlock or spinlock acquired.
 */
static void read_gpi(gpi_info_t *gpi_info)
{
	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];

	if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		/* Read GPI from Contiguous descriptor */
		gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
	} else {
		/* Read GPI from Granules descriptor */
		gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
			GPT_L1_GRAN_DESC_GPI_MASK);
	}
}

static void flush_page_to_popa(uintptr_t addr)
{
	size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);

	if (is_feat_mte2_supported()) {
		flush_dcache_to_popa_range_mte2(addr, size);
	} else {
		flush_dcache_to_popa_range(addr, size);
	}
}

/*
 * Helper function to check if all L1 entries in a 2MB block have
 * the same Granules descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
				    uint64_t l1_desc)
{
	/* Last L1 entry index in 2MB block */
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
			    gpt_l1_cnt_2mb - 1UL;

	/* Number of L1 entries in 2MB block */
	unsigned int cnt = gpt_l1_cnt_2mb;

	/*
	 * Start the check from the last L1 entry and continue until the first
	 * entry that does not match the passed Granules descriptor value is
	 * found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
	}

	return true;
}

__unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
			      uint64_t l1_desc)
{
	/* L1 entry index of the start of 2MB block */
	unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
}

/*
 * Helper function to check if all 1st L1 entries of 2MB blocks
 * in 32MB have the same 2MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
				     uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 2MB block in 32MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
			    (15UL * gpt_l1_cnt_2mb);

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	/* Number of 2MB blocks in 32MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to 2MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;

	/*
	 * Start the check from the 1st L1 entry of the last 2MB block and
	 * continue until the first entry that does not match the 2MB
	 * Contiguous descriptor value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= gpt_l1_cnt_2mb;
	}

	return true;
}

__unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
			       uint64_t l1_desc)
{
	/* L1 entry index of the start of 32MB block */
	unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
}

/*
 * Helper function to check if all 1st L1 entries of 32MB blocks
 * in 512MB have the same 32MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				      uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
			    (15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to 32MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start the check from the 1st L1 entry of the last 32MB block and
	 * continue until the first entry that does not match the 32MB
	 * Contiguous descriptor value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}

__unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* L1 entry index of the start of 512MB block */
	unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));

	/* 512MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
}

/*
 * Helper function to convert GPI entries in a single L1 table
 * from Granules to Contiguous descriptor.
 *
 * Parameters
 *   base	Base address of the region to be written
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with check for 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* Check for 2MB fusing failed */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* Check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* Check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);

#endif /* RME_GPT_MAX_BLOCK == 32 */
#endif /* RME_GPT_MAX_BLOCK == 2 */
}
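
/*
 * Note: fusing and shattering are inverse conversions. The delegate and
 * undelegate services below shatter a Contiguous block so that a single
 * granule can change GPI, and once every granule in the block shares the
 * same GPI again they attempt to fuse it back into a Contiguous descriptor.
 */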

/*
 * Helper function to convert GPI entries in a single L1 table
 * from Contiguous to Granules descriptors. This function updates
 * the descriptor in the passed 'gpi_info_t' structure to the Granules
 * descriptor as the result of shattering.
 *
 * Parameters
 *   base	Base address of the region to be written
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor to set this range to.
 */
__unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
				   uint64_t l1_desc)
{
	/* Look-up table for 2MB, 32MB and 512MB blocks shattering */
	static const gpt_shatter_func gpt_shatter_lookup[] = {
		shatter_2mb,
		shatter_32mb,
		shatter_512mb
	};

	/* Look-up table for invalidation TLBs for 2MB, 32MB and 512MB blocks */
	static const gpt_tlbi_lookup_t tlbi_lookup[] = {
		{ tlbirpalos_2m, ~(SZ_2M - 1UL) },
		{ tlbirpalos_32m, ~(SZ_32M - 1UL) },
		{ tlbirpalos_512m, ~(SZ_512M - 1UL) }
	};

	/* Get shattering level from Contig field of Contiguous descriptor */
	unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;

	/* Shatter contiguous block */
	gpt_shatter_lookup[level](base, gpi_info, l1_desc);

	tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
	dsbosh();

	/*
	 * Update the descriptor in the 'gpi_info_t' structure to Granules to
	 * reflect the shattered GPI back to the caller.
	 */
	gpi_info->gpt_l1_desc = l1_desc;
}

/*
 * This function is the granule transition delegate service. When a granule
 * transition request occurs it is routed to this function to have the request,
 * if valid, fulfilled following section A1.1.1 (Delegate) of the RME supplement.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	unsigned int target_pas;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single granule or a range of granules transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Delegate request can only come from REALM or SECURE */
	if ((src_sec_state != SMC_FROM_REALM) &&
	    (src_sec_state != SMC_FROM_SECURE)) {
		VERBOSE("GPT: Invalid caller security state 0x%x\n",
			src_sec_state);
		return -EINVAL;
	}

	if (src_sec_state == SMC_FROM_REALM) {
		target_pas = GPT_GPI_REALM;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_REALM_DESC;
	} else {
		target_pas = GPT_GPI_SECURE;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_SECURE_DESC;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the current address is in NS state */
	if (gpi_info.gpi != GPT_GPI_NS) {
		VERBOSE("GPT: Only a granule in NS state can be delegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove any data speculatively fetched into the target
	 * physical address space.
	 * Issue DC CIPAPA or DC_CIGDPAPA on implementations with FEAT_MTE2.
	 */
	flush_page_to_popa(base | nse);

	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, target_pas);

	/* Ensure that all agents observe the new configuration */
	tlbi_page_dsbosh(base);

	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == l1_desc) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, l1_desc);
	}
#endif

	/* Unlock the lock to GPT */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, target_pas);

	return 0;
}

/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request is
 * validated then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single granule or a range of granules transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the current address is in the delegated state */
	if ((src_sec_state == SMC_FROM_REALM) &&
	    (gpi_info.gpi == GPT_GPI_REALM)) {
		l1_desc = GPT_L1_REALM_DESC;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	} else if ((src_sec_state == SMC_FROM_SECURE) &&
		   (gpi_info.gpi == GPT_GPI_SECURE)) {
		l1_desc = GPT_L1_SECURE_DESC;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		VERBOSE("GPT: Only a granule in REALM or SECURE state can be undelegated\n");
		VERBOSE("      Caller: %u Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, l1_desc);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now, in order to guarantee that writes
	 * to the currently-accessible physical address space will not
	 * later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);

	/* Ensure that all agents observe the new NO_ACCESS configuration */
	tlbi_page_dsbosh(base);

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

	/*
	 * Remove any data loaded speculatively in NS space from before
	 * the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_page_to_popa(base | nse);

	/* Clear existing GPI encoding and transition granule */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);

	/* Ensure that all agents observe the new NS configuration */
	tlbi_page_dsbosh(base);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/* Unlock the lock to GPT */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}