/*
 * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <common/debug.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/smccc.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "gpt_rme_private.h"

#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library"
#endif

/*
 * Lookup T from PPS
 *
 *   PPS    Size    T
 *   0b000  4GB     32
 *   0b001  64GB    36
 *   0b010  1TB     40
 *   0b011  4TB     42
 *   0b100  16TB    44
 *   0b101  256TB   48
 *   0b110  4PB     52
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};

/*
 * Lookup P from PGS
 *
 *   PGS   Size   P
 *   0b00  4KB    12
 *   0b10  16KB   14
 *   0b01  64KB   16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};

static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc);
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc);
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc);

/*
 * This structure contains GPT configuration data
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;
	gpccr_pps_e pps;
	gpt_t_val_e t;
	gpccr_pgs_e pgs;
	gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;

/*
 * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
 * +-------+------------+
 * |  PGS  | L1 entries |
 * +-------+------------+
 * |  4KB  |     32     |
 * +-------+------------+
 * |  16KB |      8     |
 * +-------+------------+
 * |  64KB |      2     |
 * +-------+------------+
 */
static unsigned int gpt_l1_cnt_2mb;

/*
 * Mask for the L1 index field, depending on
 * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
 * +---------+-------------------------------+
 * |         |             PGS               |
 * +---------+----------+----------+---------+
 * | L0GPTSZ |   4KB    |   16KB   |  64KB   |
 * +---------+----------+----------+---------+
 * |   1GB   |  0x3FFF  |  0xFFF   |  0x3FF  |
 * +---------+----------+----------+---------+
 * |  16GB   | 0x3FFFF  |  0xFFFF  | 0x3FFF  |
 * +---------+----------+----------+---------+
 * |  64GB   | 0xFFFFF  | 0x3FFFF  | 0xFFFF  |
 * +---------+----------+----------+---------+
 * |  512GB  | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
 * +---------+----------+----------+---------+
 */
static uint64_t gpt_l1_index_mask;

/* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
#define L1_QWORDS_2MB	(gpt_l1_cnt_2mb / 2U)
#define L1_QWORDS_32MB	(L1_QWORDS_2MB * 16U)
#define L1_QWORDS_512MB	(L1_QWORDS_32MB * 16U)

/* Size in bytes of L1 entries in 2MB, 32MB */
#define L1_BYTES_2MB	(gpt_l1_cnt_2mb * sizeof(uint64_t))
#define L1_BYTES_32MB	(L1_BYTES_2MB * 16U)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_INDEX(_pa)	\
	(((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)
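
/*
 * Worked example (illustrative only, assuming PGS = 4KB and L0GPTSZ = 1GB):
 * each 64-bit L1 entry then covers 16 x 4KB = 64KB of PA space, so the L1
 * index shift is 16 and the index mask is 0x3FFF (see the table above).
 * For a physical address offset of 2MB within an L0 region:
 *
 *   GPT_L1_INDEX(0x200000) = (0x200000 >> 16) & 0x3FFF = 32
 *
 * which matches the 32 L1 entries per 2MB listed for 4KB PGS.
 */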

/* This variable is used during initialization of the L1 tables */
static uintptr_t gpt_l1_tbl;

/* These variables are used during runtime */
#if (RME_GPT_BITLOCK_BLOCK == 0)
/*
 * The GPTs are protected by a global spinlock to ensure
 * that multiple CPUs do not attempt to change the descriptors at once.
 */
static spinlock_t gpt_lock;

/*
 * Lock/unlock macros for GPT entries.
 *
 * Access to GPT is controlled by a global lock to ensure
 * that no more than one CPU is allowed to make changes at any
 * given time.
 */
#define GPT_LOCK	spin_lock(&gpt_lock)
#define GPT_UNLOCK	spin_unlock(&gpt_lock)
#else

/* Base address of bitlocks array */
static bitlock_t *gpt_bitlock;

/*
 * Access to a block of memory is controlled by a bitlock.
 * Size of block = RME_GPT_BITLOCK_BLOCK * 512MB.
 */
#define GPT_LOCK	bit_lock(gpi_info.lock, gpi_info.mask)
#define GPT_UNLOCK	bit_unlock(gpi_info.lock, gpi_info.mask)
#endif /* RME_GPT_BITLOCK_BLOCK */

static void tlbi_page_dsbosh(uintptr_t base)
{
	/* Look-up table for invalidation TLBs for 4KB, 16KB and 64KB pages */
	static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
		{ tlbirpalos_4k, ~(SZ_4K - 1UL) },
		{ tlbirpalos_64k, ~(SZ_64K - 1UL) },
		{ tlbirpalos_16k, ~(SZ_16K - 1UL) }
	};

	tlbi_page_lookup[gpt_config.pgs].function(
			base & tlbi_page_lookup[gpt_config.pgs].mask);
	dsbosh();
}

/*
 * Helper function to fill out GPI entries in a single L1 table
 * with Granules or Contiguous descriptor.
 *
 * Parameters
 *   l1		Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
 *   l1_desc	GPT Granules or Contiguous descriptor to set this range to
 *   cnt	Number of 128-bit (double) L1 entries to fill
 *
 */
static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
{
	uint128_t *l1_quad = (uint128_t *)l1;
	uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);

	VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);

	for (unsigned int i = 0U; i < cnt; i++) {
		*l1_quad++ = l1_quad_desc;
	}
}
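
/*
 * Illustrative sketch (not normative) of how the shatter_* helpers below
 * decompose a Contiguous block for a granule at 'base':
 *
 *   512MB block -> 15 x 32MB Contiguous + the one 32MB block holding 'base'
 *   32MB block  -> 15 x 2MB Contiguous  + the one 2MB block holding 'base'
 *   2MB block   -> Granules descriptors
 *
 * Only the 2MB region containing 'base' ends up with Granules descriptors;
 * every other region keeps the largest possible Contiguous mapping.
 */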

static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Convert 2MB Contiguous block to Granules */
	fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
}

static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
	const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get index corresponding to 32MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_32MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 2MB blocks in 32MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		/* Fill with Granules or Contiguous descriptors */
		fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
			  L1_QWORDS_2MB);
		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
	}
}

static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
	const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get index corresponding to 512MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_512MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 32MB blocks in 512MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		if (l1 == l1_32mb) {
			/* Shatter this 32MB block */
			shatter_32mb(base, gpi_info, l1_desc);
		} else {
			/* Fill 32MB with Contiguous descriptors */
			fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
		}

		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
	}
}

/*
 * This function checks to see if a GPI value is valid.
 *
 * The valid GPI values are:
 *   GPT_GPI_NO_ACCESS	U(0x0)
 *   GPT_GPI_SECURE	U(0x8)
 *   GPT_GPI_NS		U(0x9)
 *   GPT_GPI_ROOT	U(0xA)
 *   GPT_GPI_REALM	U(0xB)
 *   GPT_GPI_ANY	U(0xF)
 *
 * Parameters
 *   gpi	GPI to check for validity.
 *
 * Return
 *   true for a valid GPI, false for an invalid one.
 */
static bool is_gpi_valid(unsigned int gpi)
{
	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
		return true;
	}
	return false;
}

/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
		return true;
	}
	return false;
}

/*
 * This helper function checks to see if a PAS region from index 0 to
 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
 *
 * Parameters
 *   l0_idx:      Index of the L0 entry to check
 *   pas_regions: PAS region array
 *   pas_idx:     Upper bound of the PAS array index.
 *
 * Return
 *   True if a PAS region occupies the L0 region in question, false if not.
 */
static bool does_previous_pas_exist_here(unsigned int l0_idx,
					 pas_region_t *pas_regions,
					 unsigned int pas_idx)
{
	/* Iterate over PAS regions up to pas_idx */
	for (unsigned int i = 0U; i < pas_idx; i++) {
		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
		    GPT_L0GPTSZ_ACTUAL_SIZE,
		    pas_regions[i].base_pa, pas_regions[i].size)) {
			return true;
		}
	}
	return false;
}
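
/*
 * For illustration, the overlap check above treats regions as half-open
 * intervals [base, base + size):
 *
 *   [0x0000, 0x1000) vs [0x0800, 0x1800) -> overlap
 *   [0x0000, 0x1000) vs [0x1000, 0x2000) -> no overlap (adjacent)
 */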

/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i =
		     (unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
				idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
					idx);
				return -EFAULT;
			}

			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
					idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same L0 region we subtract from pas_l1_cnt and only
			 * the first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}
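
/*
 * Illustrative sizing, assuming L0GPTSZ = 1GB (example numbers only, not a
 * requirement imposed by this library):
 *   - PPS = 1TB gives 1024 L0 entries of 8 bytes each, i.e. an 8KB L0 table,
 *     so the required L0 alignment below is 8KB (the greater of 4KB and the
 *     table size).
 *   - With PGS = 4KB, one L1 table covers a single 1GB L0 region at 4 bits
 *     per granule, i.e. 128KB per L1 table.
 */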

/*
 * This function validates L0 initialization parameters.
 *
 * Parameters
 *   pps		PPS value to use for table generation.
 *   l0_mem_base	Base address of memory used for L0 table.
 *   l0_mem_size	Size of memory available for L0 table.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
			      size_t l0_mem_size)
{
	size_t l0_alignment;

	/*
	 * Make sure PPS is valid and then store it since macros need this value
	 * to work.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4KB or L0 table size */
	l0_alignment = SZ_4K;
	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	}

	/* Check base address */
	if ((l0_mem_base == 0UL) ||
	    ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

	/* Check memory size for L0 table */
	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		ERROR("GPT: Inadequate L0 memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      GPT_L0_TABLE_SIZE(gpt_config.t), l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}

/*
 * In the event that L1 tables are needed, this function validates
 * the L1 table generation parameters.
 *
 * Parameters
 *   l1_mem_base	Base address of memory used for L1 table allocation.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   l1_gpt_cnt		Number of L1 tables needed.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
			      unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	if (!xlat_arch_is_granule_size_supported(
	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
		return -EPERM;
	}

	/* Make sure L1 tables are aligned to their size */
	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
		      l1_mem_base);
		return -EFAULT;
	}

	/* Get total memory needed for L1 tables */
	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Check for overflow */
	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
		ERROR("GPT: Overflow calculating L1 memory size\n");
		return -ENOMEM;
	}

	/* Make sure enough space was supplied */
	if (l1_mem_size < l1_gpt_mem_sz) {
		ERROR("%sL1 GPTs%s", (const char *)"GPT: Inadequate ",
		      (const char *)" memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      l1_gpt_mem_sz, l1_mem_size);
		return -ENOMEM;
	}

	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
	return 0;
}

/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region to
 *		initialize.
 */
static void generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t gpt_desc;
	unsigned long idx, end_idx;
	uint64_t *l0_gpt_arr;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */

	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* Start index of this region in L0 GPTs */
	idx = GPT_L0_IDX(pas->base_pa);

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

	/* Generate the needed block descriptors */
	for (; idx < end_idx; idx++) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
	}
}

/*
 * Helper function to determine if the end physical address lies in the same L0
 * region as the current physical address. If true, the end physical address is
 * returned; otherwise, the start address of the next region is returned.
 *
 * Parameters
 *   cur_pa	Physical address of the current PA in the loop through
 *		the range.
 *   end_pa	Physical address of the end PA in a PAS range.
 *
 * Return
 *   The PA of the end of the current range.
 */
static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t cur_idx;
	uintptr_t end_idx;

	cur_idx = GPT_L0_IDX(cur_pa);
	end_idx = GPT_L0_IDX(end_pa);

	assert(cur_idx <= end_idx);

	if (cur_idx == end_idx) {
		return end_pa;
	}

	return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
}
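
/*
 * For example (assuming L0GPTSZ = 1GB): with cur_pa = 0x3FF00000 and
 * end_pa = 0x50000000 the two addresses fall in different L0 regions, so
 * the helper above returns 0x40000000, the start of the next 1GB region.
 * With end_pa = 0x3FFF0000 both fall in region 0 and end_pa is returned.
 */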

/*
 * Helper function to fill out GPI entries from 'first' granule address of
 * the specified 'length' in a single L1 table with 'l1_desc' Contiguous
 * descriptor.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   length	Length of the range in bytes
 *   gpi	GPI to set this range to
 *
 * Return
 *   Address of next granule in range.
 */
__unused static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
					    size_t length, unsigned int gpi)
{
	/*
	 * Look up table for contiguous blocks and descriptors.
	 * Entries should be defined in descending block sizes:
	 * 512MB, 32MB and 2MB.
	 */
	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
#if (RME_GPT_MAX_BLOCK == 512)
		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
#endif
#if (RME_GPT_MAX_BLOCK >= 32)
		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
#endif
#if (RME_GPT_MAX_BLOCK != 0)
		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
#endif
	};

	/*
	 * Iterate through all block sizes (512MB, 32MB and 2MB)
	 * starting with maximum supported.
	 */
	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
		/* Calculate index */
		unsigned long idx = GPT_L1_INDEX(first);

		/* Contiguous block size */
		size_t cont_size = gpt_fill_lookup[i].size;

		if (GPT_REGION_IS_CONT(length, first, cont_size)) {

			/* Generate Contiguous descriptor */
			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
						gpt_fill_lookup[i].desc);

			/* Number of 128-bit L1 entries in block */
			unsigned int cnt;

			switch (cont_size) {
			case SZ_512M:
				cnt = L1_QWORDS_512MB;
				break;
			case SZ_32M:
				cnt = L1_QWORDS_32MB;
				break;
			default:		/* SZ_2MB */
				cnt = L1_QWORDS_2MB;
			}

			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
				first, cont_size / SZ_1M);

			/* Fill Contiguous descriptors */
			fill_desc(&l1[idx], l1_desc, cnt);
			first += cont_size;
			length -= cont_size;

			if (length == 0UL) {
				break;
			}
		}
	}

	return first;
}

/* Build Granules descriptor with the same 'gpi' for every GPI entry */
static uint64_t build_l1_desc(unsigned int gpi)
{
	uint64_t l1_desc = (uint64_t)gpi | ((uint64_t)gpi << 4);

	l1_desc |= (l1_desc << 8);
	l1_desc |= (l1_desc << 16);
	return (l1_desc | (l1_desc << 32));
}

/*
 * Helper function to fill out GPI entries from 'first' to 'last' granule
 * address in a single L1 table with 'l1_desc' Granules descriptor.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI to set this range to
 *
 * Return
 *   Address of next granule in range.
 */
static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
				   uintptr_t last, unsigned int gpi)
{
	uint64_t gpi_mask;
	unsigned long i;

	/* Generate Granules descriptor */
	uint64_t l1_desc = build_l1_desc(gpi);

	/* Shift the mask if we're starting in the middle of an L1 entry */
	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region */
	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {

		/* Account for stopping in the middle of an L1 entry */
		if (i == GPT_L1_INDEX(last)) {
			gpi_mask &= (gpi_mask >> ((15U -
				GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));

		/* Write GPI values */
		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);

		/* Reset mask */
		gpi_mask = ULONG_MAX;
	}

	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
}
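
/*
 * Worked example for the two helpers above (illustrative values only):
 * build_l1_desc(GPT_GPI_NS) replicates the 4-bit GPI into every nibble,
 * producing 0x9999999999999999. If 'first' starts at GPI index 3 within an
 * L1 entry, the initial gpi_mask is ULONG_MAX << 12 = 0xFFFFFFFFFFFFF000,
 * so the three lower GPI fields of that entry are left untouched.
 */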

/*
 * Helper function to fill out GPI entries in a single L1 table.
 * This function fills out an entire L1 table with either Granules or Contiguous
 * (RME_GPT_MAX_BLOCK != 0) descriptors depending on region length and alignment.
 * Note: if RME_GPT_MAX_BLOCK == 0, then the L1 tables are filled with regular
 * Granules descriptors.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI to set this range to
 */
static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
			unsigned int gpi)
{
	assert(l1 != NULL);
	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));

#if (RME_GPT_MAX_BLOCK != 0)
	while (first <= last) {
		/* Region length */
		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		if (length < SZ_2M) {
			/*
			 * Fill with Granule descriptors in case of
			 * region length < 2MB.
			 */
			first = fill_l1_gran_desc(l1, first, last, gpi);

		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
			/*
			 * For region length >= 2MB and at least 2MB aligned,
			 * the call to fill_l1_cont_desc will iterate through
			 * all supported block sizes (512MB, 32MB and 2MB) and
			 * fill the corresponding Contiguous descriptors.
			 */
			first = fill_l1_cont_desc(l1, first, length, gpi);
		} else {
			/*
			 * For an unaligned region >= 2MB, fill with Granules
			 * descriptors up to the next 2MB aligned address.
			 */
			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
					     GPT_PGS_ACTUAL_SIZE(gpt_config.p);

			first = fill_l1_gran_desc(l1, first, new_last, gpi);
		}
	}
#else
	/* Fill with Granule descriptors */
	first = fill_l1_gran_desc(l1, first, last, gpi);
#endif
	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
}

/*
 * This function finds the next available unused L1 table and initializes all
 * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
 * event that a PAS region stops midway through an L1 table, thus guaranteeing
 * that all memory not explicitly assigned is GPI_ANY. This function does not
 * check for overflow conditions; that should be done by the caller.
 *
 * Return
 *   Pointer to the next available L1 table.
 */
static uint64_t *get_new_l1_tbl(void)
{
	/* Retrieve the next L1 table */
	uint64_t *l1 = (uint64_t *)gpt_l1_tbl;

	/* Increment L1 GPT address */
	gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Initialize all GPIs to GPT_GPI_ANY */
	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
		l1[i] = GPT_L1_ANY_DESC;
	}

	return l1;
}

/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region.
 */
static void generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx, gpi;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Get GPI */
	gpi = GPT_PAS_ATTR_GPI(pas->attrs);

	/* Iterate over each L0 region in this memory range */
	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space */
			l1_gpt_arr = get_new_l1_tbl();

			/* Fill out the L0 descriptor and flush it */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);

		/* Advance cur_pa to first granule in next L0 region */
		cur_pa = get_l1_end_pa(cur_pa, end_pa);
	}
}

/*
 * This function flushes a range of L0 descriptors used by a given PAS region
 * array. There is a chance that some unmodified L0 descriptors would be flushed
 * in the case that there are "holes" in an array of PAS regions, but overall
 * this should be faster than individually flushing each modified L0 descriptor
 * as it is created.
 *
 * Parameters
 *   *pas	Pointer to an array of PAS regions.
 *   pas_count	Number of entries in the PAS array.
 */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
	unsigned long idx;
	unsigned long start_idx;
	unsigned long end_idx;
	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas != NULL);
	assert(pas_count != 0U);

	/* Initial start and end values */
	start_idx = GPT_L0_IDX(pas[0].base_pa);
	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);

	/* Find lowest and highest L0 indices used in this PAS array */
	for (idx = 1UL; idx < pas_count; idx++) {
		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
			start_idx = GPT_L0_IDX(pas[idx].base_pa);
		}
		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
		}
	}

	/*
	 * Flush all covered L0 descriptors, add 1 because we need to include
	 * the end index value.
	 */
	flush_dcache_range((uintptr_t)&l0[start_idx],
			   ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
}
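
/*
 * A minimal cold-boot sketch of how the public APIs below are expected to
 * be sequenced (illustrative only; the PPS/PGS values, table buffers and
 * PAS array are platform-specific):
 *
 *   (void)gpt_init_l0_tables(pps, l0_base, l0_size);
 *   (void)gpt_init_pas_l1_tables(pgs, l1_base, l1_size,
 *                                pas_regions, pas_count);
 *   (void)gpt_enable();
 *
 * On a warm boot only gpt_enable() needs to be called again, and the EL3
 * runtime stage that services granule transitions calls gpt_runtime_init()
 * to pick up the configuration left by a previous stage.
 */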

/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs.
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API to disable granule protection checks.
 */
void gpt_disable(void)
{
	u_register_t gpccr_el3 = read_gpccr_el3();

	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
	dsbsy();
	isb();
}

/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * Parameters
 *   pps		PPS value to use for table generation
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	uint64_t gpt_desc;
	int ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Validate other parameters */
	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Create the descriptor to initialize L0 entries with */
	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

	/* Iterate through all L0 entries */
	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
		((uint64_t *)l0_mem_base)[i] = gpt_desc;
	}

	/* Flush updated L0 table to memory */
	flush_dcache_range((uintptr_t)l0_mem_base, GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Stash the L0 base address once initial setup is complete */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}

/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
 * in the DDR bank that they control).
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int l1_gpt_cnt, ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* PGS is needed for validate_pas_mappings so check it now */
	if (pgs > GPT_PGS_MAX) {
		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/* Check if L1 GPTs are required and how many */
	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters */
	if (l1_gpt_cnt > 0) {
		ret = validate_l1_params(l1_mem_base, l1_mem_size,
					 (unsigned int)l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation */
		gpt_l1_tbl = l1_mem_base;
	}

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	INFO("GPT: Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: %u\n", pas_count);
	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   (size_t)l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service, so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Parameters
 *   l1_bitlocks_base	Base address of memory for L1 tables bitlocks.
 *   l1_bitlocks_size	Total size of memory available for L1 tables bitlocks.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(uintptr_t l1_bitlocks_base, size_t l1_bitlocks_size)
{
	u_register_t reg;
	__unused size_t locks_size;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Ensure GPC are already enabled */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0UL) {
		ERROR("GPT: Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR, we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/*
	 * Size of GPT bitlocks in bytes for the protected address space
	 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
	 */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
		     (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);
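
	/*
	 * For example (illustrative numbers only): with
	 * RME_GPT_BITLOCK_BLOCK = 1 and a 1TB protected space this is
	 * 1TB / (512MB * 8) = 256 bytes, i.e. one bit for each of the
	 * 2048 x 512MB blocks.
	 */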
	/*
	 * If protected space size is less than the size covered
	 * by 'bitlock' structure, check for a single bitlock.
	 */
	if (locks_size < LOCK_SIZE) {
		locks_size = LOCK_SIZE;
	/* Check bitlocks array size */
	} else if (locks_size > l1_bitlocks_size) {
		ERROR("GPT: Inadequate GPT bitlocks memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      locks_size, l1_bitlocks_size);
		return -ENOMEM;
	}

	gpt_bitlock = (bitlock_t *)l1_bitlocks_base;

	/* Initialise GPT bitlocks */
	(void)memset((void *)gpt_bitlock, 0, locks_size);

	/* Flush GPT bitlocks to memory */
	flush_dcache_range((uintptr_t)gpt_bitlock, locks_size);
#endif /* RME_GPT_BITLOCK_BLOCK */

	VERBOSE("GPT: Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
#if (RME_GPT_BITLOCK_BLOCK != 0)
	VERBOSE("  Bitlocks:  0x%"PRIxPTR"/0x%lx\n", (uintptr_t)gpt_bitlock,
		locks_size);
#endif
	return 0;
}

/*
 * A helper to write the value (target_pas << gpi_shift) to the index of
 * the gpt_l1_addr.
 */
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
			     unsigned int gpi_shift, unsigned int idx,
			     unsigned int target_pas)
{
	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = *gpt_l1_desc;

	dsboshst();
}

/*
 * Helper to retrieve the gpt_l1_* information for the given base address
 * and return it in gpi_info.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;
	__unused unsigned int block_idx;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from PA */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Block index */
	block_idx = (unsigned int)(base / (RME_GPT_BITLOCK_BLOCK * SZ_512M));

	/* Bitlock address and mask */
	gpi_info->lock = &gpt_bitlock[block_idx / LOCK_BITS];
	gpi_info->mask = 1U << (block_idx & (LOCK_BITS - 1U));
#endif
	return 0;
}
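
/*
 * For illustration: gpi_shift selects the 4-bit GPI field within the 64-bit
 * Granules descriptor. With 4KB granules, a base address whose granule index
 * within its L1 entry is 5 gives gpi_shift = 20, so the GPI is read back as
 * (gpt_l1_desc >> 20) & GPT_L1_GRAN_DESC_GPI_MASK.
 */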

/*
 * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
 * This function is called with bitlock or spinlock acquired.
 */
static void read_gpi(gpi_info_t *gpi_info)
{
	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];

	if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		/* Read GPI from Contiguous descriptor */
		gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
	} else {
		/* Read GPI from Granules descriptor */
		gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
					      GPT_L1_GRAN_DESC_GPI_MASK);
	}
}

static void flush_page_to_popa(uintptr_t addr)
{
	size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);

	if (is_feat_mte2_supported()) {
		flush_dcache_to_popa_range_mte2(addr, size);
	} else {
		flush_dcache_to_popa_range(addr, size);
	}
}

/*
 * Helper function to check if all L1 entries in 2MB block have
 * the same Granules descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
				    uint64_t l1_desc)
{
	/* Last L1 entry index in 2MB block */
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
			    gpt_l1_cnt_2mb - 1UL;

	/* Number of L1 entries in 2MB block */
	unsigned int cnt = gpt_l1_cnt_2mb;

	/*
	 * Start check from the last L1 entry and continue until the first
	 * non-matching to the passed Granules descriptor value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
	}

	return true;
}

__unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
			      uint64_t l1_desc)
{
	/* L1 entry index of the start of 2MB block */
	unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
}

/*
 * Helper function to check if all 1st L1 entries of 2MB blocks
 * in 32MB have the same 2MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
				     uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 2MB block in 32MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
			    (15UL * gpt_l1_cnt_2mb);

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	/* Number of 2MB blocks in 32MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to 2MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 2MB block and
	 * continue until the first non-matching to 2MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= gpt_l1_cnt_2mb;
	}

	return true;
}

__unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
			       uint64_t l1_desc)
{
	/* L1 entry index of the start of 32MB block */
	unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
}

/*
 * Helper function to check if all 1st L1 entries of 32MB blocks
 * in 512MB have the same 32MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				      uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
			    (15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to 32MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 32MB block and
	 * continue until the first non-matching to 32MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}

__unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* L1 entry index of the start of 512MB block */
	unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));

	/* 512MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
}

/*
 * Helper function to convert GPI entries in a single L1 table
 * from Granules to Contiguous descriptor.
 *
 * Parameters
 *   base	Base address of the region to be written
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with check for 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* Check for 2MB fusing failed */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* Check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* Check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);

#endif /* RME_GPT_MAX_BLOCK == 32 */
#endif /* RME_GPT_MAX_BLOCK == 2 */
}
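
/*
 * Illustrative sketch of the fusing direction (the inverse of shattering):
 * once a granule transition makes all entries of a 2MB region identical,
 * fuse_block() above collapses them into a single 2MB Contiguous run; if the
 * 16 x 2MB runs of the surrounding 32MB then match, it fuses to 32MB, and
 * likewise 16 x 32MB runs fuse to 512MB, up to RME_GPT_MAX_BLOCK.
 */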

/*
 * Helper function to convert GPI entries in a single L1 table
 * from Contiguous to Granules descriptor. This function updates
 * the descriptor to Granules in the passed 'gpi_info_t' structure as
 * the result of shattering.
 *
 * Parameters
 *   base	Base address of the region to be written
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor to set this range to.
 */
__unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
				   uint64_t l1_desc)
{
	/* Look-up table for 2MB, 32MB and 512MB blocks shattering */
	static const gpt_shatter_func gpt_shatter_lookup[] = {
		shatter_2mb,
		shatter_32mb,
		shatter_512mb
	};

	/* Look-up table for invalidation TLBs for 2MB, 32MB and 512MB blocks */
	static const gpt_tlbi_lookup_t tlbi_lookup[] = {
		{ tlbirpalos_2m, ~(SZ_2M - 1UL) },
		{ tlbirpalos_32m, ~(SZ_32M - 1UL) },
		{ tlbirpalos_512m, ~(SZ_512M - 1UL) }
	};

	/* Get shattering level from Contig field of Contiguous descriptor */
	unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;

	/* Shatter contiguous block */
	gpt_shatter_lookup[level](base, gpi_info, l1_desc);

	tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
	dsbosh();

	/*
	 * Update the descriptor in the 'gpi_info_t' structure to Granules to
	 * reflect the shattered GPI back to the caller.
	 */
	gpi_info->gpt_l1_desc = l1_desc;
}

/*
 * This function is the granule transition delegate service. When a granule
 * transition request occurs it is routed to this function to have the request,
 * if valid, fulfilled following A1.1.1 Delegate of RME supplement.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	unsigned int target_pas;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single or a range of granule transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Delegate request can only come from REALM or SECURE */
	if ((src_sec_state != SMC_FROM_REALM) &&
	    (src_sec_state != SMC_FROM_SECURE)) {
		VERBOSE("GPT: Invalid caller security state 0x%x\n",
			src_sec_state);
		return -EINVAL;
	}

	if (src_sec_state == SMC_FROM_REALM) {
		target_pas = GPT_GPI_REALM;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_REALM_DESC;
	} else {
		target_pas = GPT_GPI_SECURE;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_SECURE_DESC;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the current address is in NS state */
	if (gpi_info.gpi != GPT_GPI_NS) {
		VERBOSE("GPT: Only Granule in NS state can be delegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove any data speculatively fetched into the target
	 * physical address space.
	 * Issue DC CIPAPA or DC_CIGDPAPA on implementations with FEAT_MTE2.
	 */
	flush_page_to_popa(base | nse);

	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, target_pas);

	/* Ensure that all agents observe the new configuration */
	tlbi_page_dsbosh(base);

	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == l1_desc) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, l1_desc);
	}
#endif

	/* Unlock the lock to GPT */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, target_pas);

	return 0;
}
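
/*
 * Usage sketch (illustrative only, not a normative call site): the EL3 SMC
 * handler for granule transition requests is expected to pass a granule-
 * aligned address, the configured granule size and the caller's security
 * state, e.g.
 *
 *   ret = gpt_delegate_pas(pa, page_size, SMC_FROM_REALM);
 *
 * with gpt_undelegate_pas() taking the same arguments to reverse the
 * transition. Here 'pa' and 'page_size' are hypothetical caller-provided
 * values.
 */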

/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request is
 * validated and then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single or a range of granule transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the current address is in the delegated state */
	if ((src_sec_state == SMC_FROM_REALM) &&
	    (gpi_info.gpi == GPT_GPI_REALM)) {
		l1_desc = GPT_L1_REALM_DESC;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	} else if ((src_sec_state == SMC_FROM_SECURE) &&
		   (gpi_info.gpi == GPT_GPI_SECURE)) {
		l1_desc = GPT_L1_SECURE_DESC;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		VERBOSE("GPT: Only Granule in REALM or SECURE state can be undelegated\n");
		VERBOSE("      Caller: %u Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, l1_desc);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now, in order to guarantee that writes
	 * to the currently-accessible physical address space will not
	 * later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);

	/* Ensure that all agents observe the new NO_ACCESS configuration */
	tlbi_page_dsbosh(base);

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

	/*
	 * Remove any data loaded speculatively in NS space from before
	 * the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_page_to_popa(base | nse);

	/* Clear existing GPI encoding and transition granule */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);

	/* Ensure that all agents observe the new NS configuration */
	tlbi_page_dsbosh(base);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/* Unlock the lock to GPT */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}