/*
 * Copyright (c) 2022, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include "gpt_rme_private.h"
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library."
#endif

/*
 * Lookup T from PPS
 *
 *   PPS    Size    T
 *   0b000  4GB     32
 *   0b001  64GB    36
 *   0b010  1TB     40
 *   0b011  4TB     42
 *   0b100  16TB    44
 *   0b101  256TB   48
 *   0b110  4PB     52
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
                                           PPS_1TB_T, PPS_4TB_T,
                                           PPS_16TB_T, PPS_256TB_T,
                                           PPS_4PB_T};

/*
 * Lookup P from PGS
 *
 *   PGS   Size   P
 *   0b00  4KB    12
 *   0b10  16KB   14
 *   0b01  64KB   16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};

/*
 * This structure contains GPT configuration data.
 */
typedef struct {
        uintptr_t plat_gpt_l0_base;
        gpccr_pps_e pps;
        gpt_t_val_e t;
        gpccr_pgs_e pgs;
        gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;

/* These variables are used during initialization of the L1 tables. */
static unsigned int gpt_next_l1_tbl_idx;
static uintptr_t gpt_l1_tbl;

/*
 * This function checks to see if a GPI value is valid.
 *
 * These are valid GPI values.
 *   GPT_GPI_NO_ACCESS   U(0x0)
 *   GPT_GPI_SECURE      U(0x8)
 *   GPT_GPI_NS          U(0x9)
 *   GPT_GPI_ROOT        U(0xA)
 *   GPT_GPI_REALM       U(0xB)
 *   GPT_GPI_ANY         U(0xF)
 *
 * Parameters
 *   gpi		GPI to check for validity.
 *
 * Return
 *   true for a valid GPI, false for an invalid one.
 */
static bool gpt_is_gpi_valid(unsigned int gpi)
{
        if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
            ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
                return true;
        }
        return false;
}

/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1		Base address of the first PAS.
 *   size_1		Size of the first PAS.
 *   base_2		Base address of the second PAS.
 *   size_2		Size of the second PAS.
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1,
                                  uintptr_t base_2, size_t size_2)
{
        if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
                return true;
        }
        return false;
}
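
/*
 * Worked example (illustrative): with base_1 = 0x0, size_1 = 0x100000 and
 * base_2 = 0x100000, size_2 = 0x100000 the regions are adjacent, not
 * overlapping: (base_1 + size_1) > base_2 evaluates to 0x100000 > 0x100000,
 * which is false. Shrinking base_2 to 0xFF000 makes both comparisons true,
 * so the regions overlap. The check treats each region as the half-open
 * interval [base, base + size).
 */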

/*
 * This helper function checks to see if a PAS region from index 0 to
 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
 *
 * Parameters
 *   l0_idx		Index of the L0 entry to check.
 *   pas_regions	PAS region array.
 *   pas_idx		Upper bound (exclusive) of the PAS array indices to
 *			check.
 *
 * Return
 *   True if a PAS region occupies the L0 region in question, false if not.
 */
static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx,
                                             pas_region_t *pas_regions,
                                             unsigned int pas_idx)
{
        /* Iterate over PAS regions up to pas_idx. */
        for (unsigned int i = 0U; i < pas_idx; i++) {
                if (gpt_check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
                    GPT_L0GPTSZ_ACTUAL_SIZE,
                    pas_regions[i].base_pa, pas_regions[i].size)) {
                        return true;
                }
        }
        return false;
}

/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int gpt_validate_pas_mappings(pas_region_t *pas_regions,
                                     unsigned int pas_region_cnt)
{
        unsigned int idx;
        unsigned int l1_cnt = 0U;
        unsigned int pas_l1_cnt;
        uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

        assert(pas_regions != NULL);
        assert(pas_region_cnt != 0U);

        for (idx = 0U; idx < pas_region_cnt; idx++) {
                /* Check for arithmetic overflow in region. */
                if ((ULONG_MAX - pas_regions[idx].base_pa) <
                    pas_regions[idx].size) {
                        ERROR("[GPT] Address overflow in PAS[%u]!\n", idx);
                        return -EOVERFLOW;
                }

                /* Initial checks for PAS validity. */
                if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
                    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
                    !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
                        ERROR("[GPT] PAS[%u] is invalid!\n", idx);
                        return -EFAULT;
                }

                /*
                 * Make sure this PAS does not overlap with another one. We
                 * start from idx + 1 instead of 0 since prior PAS mappings
                 * will have already checked themselves against this one.
                 */
                for (unsigned int i = idx + 1; i < pas_region_cnt; i++) {
                        if (gpt_check_pas_overlap(pas_regions[idx].base_pa,
                            pas_regions[idx].size,
                            pas_regions[i].base_pa,
                            pas_regions[i].size)) {
                                ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n",
                                      i, idx);
                                return -EFAULT;
                        }
                }

                /*
                 * Since this function can be called multiple times with
                 * separate L1 tables we need to check the existing L0 mapping
                 * to see if this PAS would fall into one that has already been
                 * initialized.
                 */
                for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
                     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
                                     pas_regions[idx].size - 1);
                     i++) {
                        if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
                            (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
                                /* This descriptor is unused so continue. */
                                continue;
                        }

                        /*
                         * This descriptor has been initialized in a previous
                         * call to this function so cannot be initialized
                         * again.
                         */
                        ERROR("[GPT] PAS[%u] overlaps with previous L0[%u]!\n",
                              idx, i);
                        return -EFAULT;
                }

                /* Check for block mapping (L0) type. */
                if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
                    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
                        /* Make sure base and size are block-aligned. */
                        if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
                            !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
                                ERROR("[GPT] PAS[%u] is not block-aligned!\n",
                                      idx);
                                return -EFAULT;
                        }

                        continue;
                }

                /* Check for granule mapping (L1) type. */
                if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
                    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
                        /* Make sure base and size are granule-aligned. */
                        if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
                            !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
                                ERROR("[GPT] PAS[%u] is not granule-aligned!\n",
                                      idx);
                                return -EFAULT;
                        }

                        /* Find how many L1 tables this PAS occupies. */
                        pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
                                     pas_regions[idx].size - 1) -
                                     GPT_L0_IDX(pas_regions[idx].base_pa) + 1);

                        /*
                         * This creates a situation where, if multiple PAS
                         * regions occupy the same table descriptor, we can get
                         * an artificially high total L1 table count. The way
                         * we handle this is by checking each PAS against those
                         * before it in the array, and if they both occupy the
                         * same L0 region we subtract from pas_l1_cnt so that
                         * only the first PAS in the array gets to count it.
                         */

                        /*
                         * If L1 count is greater than 1 we know the start and
                         * end PAs are in different L0 regions so we must check
                         * both for overlap against other PAS.
                         */
                        if (pas_l1_cnt > 1) {
                                if (gpt_does_previous_pas_exist_here(
                                    GPT_L0_IDX(pas_regions[idx].base_pa +
                                    pas_regions[idx].size - 1),
                                    pas_regions, idx)) {
                                        pas_l1_cnt = pas_l1_cnt - 1;
                                }
                        }

                        if (gpt_does_previous_pas_exist_here(
                            GPT_L0_IDX(pas_regions[idx].base_pa),
                            pas_regions, idx)) {
                                pas_l1_cnt = pas_l1_cnt - 1;
                        }

                        l1_cnt += pas_l1_cnt;
                        continue;
                }

                /* If execution reaches this point, mapping type is invalid. */
                ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx,
                      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
                return -EINVAL;
        }

        return l1_cnt;
}
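
/*
 * Worked example (illustrative, assuming L0GPTSZ is 1GB): two granule mapped
 * PAS regions at 0x40000000 (16MB) and 0x50000000 (16MB) both fall in the L0
 * region at index 1, so each computes pas_l1_cnt = 1. When the second region
 * is validated, gpt_does_previous_pas_exist_here() finds the first region in
 * L0[1] and subtracts 1, so the total L1 table count is 1 rather than 2.
 */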

/*
 * This function validates L0 initialization parameters.
 *
 * Parameters
 *   pps		Protected physical space value to be programmed into
 *			GPCCR_EL3.PPS.
 *   l0_mem_base	Base address of memory used for L0 tables.
 *   l0_mem_size	Size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
                                  size_t l0_mem_size)
{
        size_t l0_alignment;

        /*
         * Make sure PPS is valid and then store it since macros need this
         * value to work.
         */
        if (pps > GPT_PPS_MAX) {
                ERROR("[GPT] Invalid PPS: 0x%x\n", pps);
                return -EINVAL;
        }
        gpt_config.pps = pps;
        gpt_config.t = gpt_t_lookup[pps];

        /* Alignment must be the greater of 4KB or the L0 table size. */
        l0_alignment = PAGE_SIZE_4KB;
        if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
                l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
        }

        /* Check base address. */
        if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) {
                ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base);
                return -EFAULT;
        }

        /* Check size. */
        if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
                ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx\n",
                      GPT_L0_TABLE_SIZE(gpt_config.t),
                      l0_mem_size);
                return -ENOMEM;
        }

        return 0;
}
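
/*
 * Worked example (illustrative, assuming L0GPTSZ is 1GB, i.e. S = 30): for
 * PPS = 0b010 (1TB, T = 40) the L0 table has 2^(40 - 30) = 1024 entries of
 * 8 bytes each, so GPT_L0_TABLE_SIZE is 8KB and the required alignment is
 * max(4KB, 8KB) = 8KB. For PPS = 0b000 (4GB, T = 32) the table is only 4
 * entries (32 bytes) and the 4KB minimum alignment dominates.
 */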

/*
 * In the event that L1 tables are needed, this function validates
 * the L1 table generation parameters.
 *
 * Parameters
 *   l1_mem_base	Base address of memory used for L1 table allocation.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   l1_gpt_cnt		Number of L1 tables needed.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
                                  unsigned int l1_gpt_cnt)
{
        size_t l1_gpt_mem_sz;

        /* Check if the granularity is supported. */
        if (!xlat_arch_is_granule_size_supported(
            GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
                return -EPERM;
        }

        /* Make sure L1 tables are aligned to their size. */
        if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) {
                ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n",
                      l1_mem_base);
                return -EFAULT;
        }

        /* Get total memory needed for L1 tables. */
        l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

        /* Check for overflow. */
        if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
                ERROR("[GPT] Overflow calculating L1 memory size.\n");
                return -ENOMEM;
        }

        /* Make sure enough space was supplied. */
        if (l1_mem_size < l1_gpt_mem_sz) {
                ERROR("[GPT] Inadequate memory for L1 GPTs: need 0x%lx, have 0x%lx\n",
                      l1_gpt_mem_sz, l1_mem_size);
                return -ENOMEM;
        }

        VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
        return 0;
}

/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region to
 *			initialize.
 */
static void gpt_generate_l0_blk_desc(pas_region_t *pas)
{
        uint64_t gpt_desc;
        unsigned int end_idx;
        unsigned int idx;
        uint64_t *l0_gpt_arr;

        assert(gpt_config.plat_gpt_l0_base != 0U);
        assert(pas != NULL);

        /*
         * Checking of PAS parameters has already been done in
         * gpt_validate_pas_mappings so no need to check the same things
         * again.
         */

        l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

        /* Create the GPT Block descriptor for this PAS region. */
        gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

        /* Start index of this region in L0 GPTs. */
        idx = GPT_L0_IDX(pas->base_pa);

        /*
         * Determine number of L0 GPT descriptors covered by
         * this PAS region and use the count to populate these
         * descriptors.
         */
        end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

        /* Generate the needed block descriptors. */
        for (; idx < end_idx; idx++) {
                l0_gpt_arr[idx] = gpt_desc;
                VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
                        idx, &l0_gpt_arr[idx],
                        (gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
                        GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
        }
}
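
/*
 * Worked example (illustrative, assuming L0GPTSZ is 1GB): a block mapped PAS
 * with base_pa = 0x80000000 and size = 0x80000000 (2GB at 2GB) yields
 * idx = GPT_L0_IDX(0x80000000) = 2 and end_idx = GPT_L0_IDX(0x100000000) = 4,
 * so the loop writes the same block descriptor to L0 entries 2 and 3.
 */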

/*
 * Helper function to determine if the end physical address lies in the same
 * L0 region as the current physical address. If so, the end physical address
 * is returned; otherwise, the start address of the next L0 region is
 * returned.
 *
 * Parameters
 *   cur_pa		Physical address of the current PA in the loop through
 *			the range.
 *   end_pa		Physical address of the end PA in a PAS range.
 *
 * Return
 *   The PA of the end of the current range.
 */
static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
        uintptr_t cur_idx;
        uintptr_t end_idx;

        cur_idx = GPT_L0_IDX(cur_pa);
        end_idx = GPT_L0_IDX(end_pa);

        assert(cur_idx <= end_idx);

        if (cur_idx == end_idx) {
                return end_pa;
        }

        return (cur_idx + 1U) << GPT_L0_IDX_SHIFT;
}

/*
 * Helper function to fill out GPI entries in a single L1 table. This function
 * fills out entire L1 descriptors at a time to save memory writes.
 *
 * Parameters
 *   gpi		GPI to set this range to.
 *   l1			Pointer to L1 table to fill out.
 *   first		Address of first granule in range.
 *   last		Address of last granule in range (inclusive).
 */
static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
                            uintptr_t last)
{
        uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
        uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF;

        assert(first <= last);
        assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
        assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
        assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
        assert(l1 != NULL);

        /* Shift the mask if we're starting in the middle of an L1 entry. */
        gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

        /* Fill out each L1 entry for this region. */
        for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
             i <= GPT_L1_IDX(gpt_config.p, last); i++) {
                /* Account for stopping in the middle of an L1 entry. */
                if (i == GPT_L1_IDX(gpt_config.p, last)) {
                        gpi_mask &= (gpi_mask >> ((15 -
                                    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
                }

                /* Write GPI values. */
                assert((l1[i] & gpi_mask) ==
                       (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
                l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);

                /* Reset mask. */
                gpi_mask = 0xFFFFFFFFFFFFFFFF;
        }
}
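
/*
 * Worked example (illustrative, assuming PGS is 4KB, so each 64-bit L1 entry
 * packs 16 4-bit GPIs): a range whose first granule has GPI index 2 produces
 * gpi_mask = ~0ULL << 8, covering GPI fields 2-15. If the last granule lands
 * in the same entry at GPI index 5, the mask is further reduced by
 * gpi_mask >> ((15 - 5) << 2), leaving only fields 2-5 set, so fields 0-1
 * and 6-15 of the descriptor are preserved.
 */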

/*
 * This function finds the next available unused L1 table and initializes all
 * granule descriptor entries to GPI_ANY. This ensures that there are no chunks
 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
 * event that a PAS region stops midway through an L1 table, thus guaranteeing
 * that all memory not explicitly assigned is GPI_ANY. This function does not
 * check for overflow conditions; that should be done by the caller.
 *
 * Return
 *   Pointer to the next available L1 table.
 */
static uint64_t *gpt_get_new_l1_tbl(void)
{
        /* Retrieve the next L1 table. */
        uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
                       (GPT_L1_TABLE_SIZE(gpt_config.p) *
                       gpt_next_l1_tbl_idx));

        /* Increment L1 counter. */
        gpt_next_l1_tbl_idx++;

        /* Initialize all GPIs to GPT_GPI_ANY. */
        for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
                l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY);
        }

        return l1;
}

/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region.
 */
static void gpt_generate_l0_tbl_desc(pas_region_t *pas)
{
        uintptr_t end_pa;
        uintptr_t cur_pa;
        uintptr_t last_gran_pa;
        uint64_t *l0_gpt_base;
        uint64_t *l1_gpt_arr;
        unsigned int l0_idx;

        assert(gpt_config.plat_gpt_l0_base != 0U);
        assert(pas != NULL);

        /*
         * Checking of PAS parameters has already been done in
         * gpt_validate_pas_mappings so no need to check the same things
         * again.
         */

        end_pa = pas->base_pa + pas->size;
        l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

        /* We start working from the granule at base PA. */
        cur_pa = pas->base_pa;

        /* Iterate over each L0 region in this memory range. */
        for (l0_idx = GPT_L0_IDX(pas->base_pa);
             l0_idx <= GPT_L0_IDX(end_pa - 1U);
             l0_idx++) {
                /*
                 * See if the L0 entry is already a table descriptor or if we
                 * need to create one.
                 */
                if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
                        /* Get the L1 array from the L0 entry. */
                        l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
                } else {
                        /* Get a new L1 table from the L1 memory space. */
                        l1_gpt_arr = gpt_get_new_l1_tbl();

                        /* Fill out the L0 descriptor (flushed in bulk later). */
                        l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
                }

                VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%" PRIx64 ")\n",
                        l0_idx, &l0_gpt_base[l0_idx],
                        (unsigned long long)(l1_gpt_arr),
                        l0_gpt_base[l0_idx]);

                /*
                 * Determine the PA of the last granule in this L0 descriptor.
                 */
                last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) -
                               GPT_PGS_ACTUAL_SIZE(gpt_config.p);

                /*
                 * Fill up L1 GPT entries between these two addresses. This
                 * function needs the addresses of the first granule and last
                 * granule in the range.
                 */
                gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
                                cur_pa, last_gran_pa);

                /* Advance cur_pa to the first granule in the next L0 region. */
                cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa);
        }
}
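
/*
 * Worked example (illustrative, assuming PGS is 4KB and L0GPTSZ is 1GB): a
 * granule mapped PAS with base_pa = 0x3FFFF000 and size = 0x2000 spans L0
 * indices 0 and 1. On the first pass gpt_get_l1_end_pa(0x3FFFF000,
 * 0x40001000) returns 0x40000000, so last_gran_pa = 0x3FFFF000 and only the
 * final granule of L0[0] is filled. cur_pa then advances to 0x40000000 and
 * the second pass fills the first granule of L0[1].
 */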

/*
 * This function flushes a range of L0 descriptors used by a given PAS region
 * array. There is a chance that some unmodified L0 descriptors would be
 * flushed in the case that there are "holes" in an array of PAS regions, but
 * overall this should be faster than individually flushing each modified L0
 * descriptor as they are created.
 *
 * Parameters
 *   *pas		Pointer to an array of PAS regions.
 *   pas_count		Number of entries in the PAS array.
 */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
        unsigned int idx;
        unsigned int start_idx;
        unsigned int end_idx;
        uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;

        assert(pas != NULL);
        assert(pas_count > 0);

        /* Initial start and end values. */
        start_idx = GPT_L0_IDX(pas[0].base_pa);
        end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1);

        /* Find lowest and highest L0 indices used in this PAS array. */
        for (idx = 1; idx < pas_count; idx++) {
                if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
                        start_idx = GPT_L0_IDX(pas[idx].base_pa);
                }
                if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) {
                        end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1);
                }
        }

        /*
         * Flush all covered L0 descriptors; add 1 because we need to include
         * the end index value.
         */
        flush_dcache_range((uintptr_t)&l0[start_idx],
                           ((end_idx + 1) - start_idx) * sizeof(uint64_t));
}

/*
 * Public API to enable granule protection checks once the tables have all
 * been initialized. This function is called at first initialization and then
 * again later during warm boots of CPU cores.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
        u_register_t gpccr_el3;

        /*
         * Granule tables must be initialized before enabling
         * granule protection.
         */
        if (gpt_config.plat_gpt_l0_base == 0U) {
                ERROR("[GPT] Tables have not been initialized!\n");
                return -EPERM;
        }

        /* Write the base address of the L0 tables into GPTBR. */
        write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
                        >> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

        /* GPCCR_EL3.PPS */
        gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

        /* GPCCR_EL3.PGS */
        gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

        /*
         * Since EL3 maps the L1 region as Inner shareable, use the same
         * shareability attribute for GPC as well so that
         * GPC fetches are visible to PEs.
         */
        gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

        /* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
        gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
        gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

        /* Prepopulate GPCCR_EL3 but don't enable GPC yet. */
        write_gpccr_el3(gpccr_el3);
        isb();

        /* Invalidate any stale TLB entries and any cached register fields. */
        tlbipaallos();
        dsb();
        isb();

        /* Enable GPT. */
        gpccr_el3 |= GPCCR_GPC_BIT;

        /* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
        write_gpccr_el3(gpccr_el3);
        isb();
        tlbipaallos();
        dsb();
        isb();

        return 0;
}
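
/*
 * Illustrative warm boot hook (a minimal sketch; the function name and call
 * site are hypothetical, not part of this library): secondary cores only
 * need to call gpt_enable() since the tables built during cold boot are
 * already in memory.
 */
#if 0
static void plat_warmboot_gpt_setup(void)
{
        /* Re-enable granule protection checks with the existing tables. */
        if (gpt_enable() < 0) {
                panic();
        }
}
#endif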

/*
 * Public API to disable granule protection checks.
 */
void gpt_disable(void)
{
        u_register_t gpccr_el3 = read_gpccr_el3();

        write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
        dsbsy();
        isb();
}

/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * Parameters
 *   pps		PPS value to use for table generation.
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(unsigned int pps, uintptr_t l0_mem_base,
                       size_t l0_mem_size)
{
        int ret;
        uint64_t gpt_desc;

        /* Ensure that MMU and Data caches are enabled. */
        assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

        /* Validate other parameters. */
        ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
        if (ret != 0) {
                return ret;
        }

        /* Create the descriptor to initialize L0 entries with. */
        gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

        /* Iterate through all L0 entries. */
        for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
                ((uint64_t *)l0_mem_base)[i] = gpt_desc;
        }

        /* Flush updated L0 tables to memory. */
        flush_dcache_range((uintptr_t)l0_mem_base,
                           (size_t)GPT_L0_TABLE_SIZE(gpt_config.t));

        /* Stash the L0 base address once initial setup is complete. */
        gpt_config.plat_gpt_l0_base = l0_mem_base;

        return 0;
}

/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory (e.g. with multiple DDR banks, placing each L1 table in the DDR
 * bank that it controls).
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
                           size_t l1_mem_size, pas_region_t *pas_regions,
                           unsigned int pas_count)
{
        int ret;
        int l1_gpt_cnt;

        /* Ensure that MMU and Data caches are enabled. */
        assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

        /* PGS is needed for gpt_validate_pas_mappings so check it now. */
        if (pgs > GPT_PGS_MAX) {
                ERROR("[GPT] Invalid PGS: 0x%x\n", pgs);
                return -EINVAL;
        }
        gpt_config.pgs = pgs;
        gpt_config.p = gpt_p_lookup[pgs];

        /* Make sure L0 tables have been initialized. */
        if (gpt_config.plat_gpt_l0_base == 0U) {
                ERROR("[GPT] L0 tables must be initialized first!\n");
                return -EPERM;
        }

        /* Check if L1 GPTs are required and how many. */
        l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count);
        if (l1_gpt_cnt < 0) {
                return l1_gpt_cnt;
        }

        VERBOSE("[GPT] %d L1 GPTs requested.\n", l1_gpt_cnt);

        /* If L1 tables are needed then validate the L1 parameters. */
        if (l1_gpt_cnt > 0) {
                ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
                                             l1_gpt_cnt);
                if (ret != 0) {
                        return ret;
                }

                /* Set up parameters for L1 table generation. */
                gpt_l1_tbl = l1_mem_base;
                gpt_next_l1_tbl_idx = 0U;
        }

        INFO("[GPT] Boot Configuration\n");
        INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
        INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
        INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
        INFO("  PAS count: 0x%x\n", pas_count);
        INFO("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);

        /* Generate the tables in memory. */
        for (unsigned int idx = 0U; idx < pas_count; idx++) {
                INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n",
                     idx, pas_regions[idx].base_pa, pas_regions[idx].size,
                     GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
                     GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

                /* Check if a block or table descriptor is required. */
                if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
                    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
                        gpt_generate_l0_blk_desc(&pas_regions[idx]);
                } else {
                        gpt_generate_l0_tbl_desc(&pas_regions[idx]);
                }
        }

        /* Flush modified L0 tables. */
        flush_l0_for_pas_array(pas_regions, pas_count);

        /* Flush L1 tables if needed. */
        if (l1_gpt_cnt > 0) {
                flush_dcache_range(l1_mem_base,
                                   GPT_L1_TABLE_SIZE(gpt_config.p) *
                                   l1_gpt_cnt);
        }

        /* Make sure that all the entries are written to the memory. */
        dsbishst();
        tlbipaallos();
        dsb();
        isb();

        return 0;
}
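
/*
 * Illustrative cold boot flow (a minimal sketch): the PAS layout, the base
 * address and size parameters, and the GPCCR_PPS_1TB/GPCCR_PGS_4K enumerator
 * names are assumptions for the example, not requirements of this library.
 * The layout assumes L0GPTSZ is 1GB so that the block region is properly
 * aligned.
 */
#if 0
static pas_region_t plat_pas_regions[] = {
        /* 1GB of firmware-owned memory mapped at the block level. */
        GPT_MAP_REGION_BLOCK(0x0UL, 0x40000000UL, GPT_GPI_ROOT),
        /* 1GB of NS DRAM that can be transitioned per granule. */
        GPT_MAP_REGION_GRANULE(0x40000000UL, 0x40000000UL, GPT_GPI_NS),
};

static int plat_gpt_setup(uintptr_t l0_base, size_t l0_size,
                          uintptr_t l1_base, size_t l1_size)
{
        int ret;

        /* Initialize the whole protected space to GPT_GPI_ANY first. */
        ret = gpt_init_l0_tables(GPCCR_PPS_1TB, l0_base, l0_size);
        if (ret == 0) {
                /* Carve out the PAS regions and build L1 tables. */
                ret = gpt_init_pas_l1_tables(GPCCR_PGS_4K, l1_base, l1_size,
                                             plat_pas_regions,
                                             ARRAY_SIZE(plat_pas_regions));
        }
        if (ret == 0) {
                /* Turn on granule protection checks. */
                ret = gpt_enable();
        }
        return ret;
}
#endif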

/*
 * Public API to initialize the runtime gpt_config structure based on the
 * values present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service, so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(void)
{
        u_register_t reg;

        /* Ensure that MMU and Data caches are enabled. */
        assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

        /* Ensure granule protection checks are already enabled. */
        if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
                ERROR("[GPT] Granule protection checks are not enabled!\n");
                return -EPERM;
        }

        /*
         * Read the L0 table address from GPTBR; we don't need the L1 base
         * address since those are included in the L0 tables as needed.
         */
        reg = read_gptbr_el3();
        gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
                                      GPTBR_BADDR_MASK) <<
                                      GPTBR_BADDR_VAL_SHIFT;

        /* Read GPCCR to get PGS and PPS values. */
        reg = read_gpccr_el3();
        gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
        gpt_config.t = gpt_t_lookup[gpt_config.pps];
        gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
        gpt_config.p = gpt_p_lookup[gpt_config.pgs];

        VERBOSE("[GPT] Runtime Configuration\n");
        VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
        VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
        VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
        VERBOSE("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);

        return 0;
}

/*
 * The L1 descriptors are protected by a spinlock to ensure that multiple
 * CPUs do not attempt to change the descriptors at once. In the future it
 * would be better to have separate spinlocks for each L1 descriptor.
 */
static spinlock_t gpt_lock;

/*
 * A helper function that writes the GPI field (target_pas << gpi_shift) into
 * the cached L1 descriptor and stores the updated descriptor back to
 * gpt_l1_addr[idx].
 */
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
                             unsigned int gpi_shift, unsigned int idx,
                             unsigned int target_pas)
{
        *gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
        *gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
        gpt_l1_addr[idx] = *gpt_l1_desc;
}

/*
 * Helper to retrieve the gpt_l1_* information for a given base address; the
 * results are returned in gpi_info.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
        uint64_t gpt_l0_desc, *gpt_l0_base;

        gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
        gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
        if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
                VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
                VERBOSE("      Base=0x%" PRIx64 "\n", base);
                return -EINVAL;
        }

        /* Get the table index and GPI shift from PA. */
        gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
        gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
        gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

        gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
        gpi_info->gpi = (gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
                        GPT_L1_GRAN_DESC_GPI_MASK;
        return 0;
}
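
/*
 * Worked example (illustrative, assuming PGS is 4KB and L0GPTSZ is 1GB): for
 * base = 0x40012000, the offset within its L0 region is 0x12000. Each L1
 * entry covers 16 granules (64KB), so idx = 0x12000 >> 16 = 1. The GPI index
 * within that entry is (0x12000 >> 12) & 0xF = 2, giving gpi_shift =
 * 2 << 2 = 8, i.e. the granule's GPI lives in bits [11:8] of L1[1].
 */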

/*
 * This function is the granule transition delegate service. When a granule
 * transition request occurs it is routed to this function to have the
 * request, if valid, fulfilled following section A1.1.1 (Delegate) of the
 * RME supplement.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to
 *			granule size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
        gpi_info_t gpi_info;
        uint64_t nse;
        int res;
        unsigned int target_pas;

        /* Ensure that the tables have been set up before taking requests. */
        assert(gpt_config.plat_gpt_l0_base != 0UL);

        /* Ensure that MMU and data caches are enabled. */
        assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

        /* Delegate requests can only come from REALM or SECURE. */
        assert(src_sec_state == SMC_FROM_REALM ||
               src_sec_state == SMC_FROM_SECURE);

        /* Only a single granule can be transitioned at a time (see TODO). */
        if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
                return -EINVAL;
        }

        /* Check that base and size do not overflow. */
        if ((ULONG_MAX - base) < size) {
                VERBOSE("[GPT] Transition request address overflow!\n");
                VERBOSE("      Base=0x%" PRIx64 "\n", base);
                VERBOSE("      Size=0x%lx\n", size);
                return -EINVAL;
        }

        /* Make sure base and size are aligned and within the protected space. */
        if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
            ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
            (size == 0UL) ||
            ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
                VERBOSE("[GPT] Invalid granule transition address range!\n");
                VERBOSE("      Base=0x%" PRIx64 "\n", base);
                VERBOSE("      Size=0x%lx\n", size);
                return -EINVAL;
        }

        target_pas = GPT_GPI_REALM;
        if (src_sec_state == SMC_FROM_SECURE) {
                target_pas = GPT_GPI_SECURE;
        }

        /*
         * Access to L1 tables is controlled by a global lock to ensure
         * that no more than one CPU is allowed to make changes at any
         * given time.
         */
        spin_lock(&gpt_lock);

        res = get_gpi_params(base, &gpi_info);
        if (res != 0) {
                spin_unlock(&gpt_lock);
                return res;
        }

        /* Check that the current address is in the NS state. */
        if (gpi_info.gpi != GPT_GPI_NS) {
                VERBOSE("[GPT] Only a granule in the NS state can be delegated.\n");
                VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
                        gpi_info.gpi);
                spin_unlock(&gpt_lock);
                return -EPERM;
        }

        if (src_sec_state == SMC_FROM_SECURE) {
                nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
        } else {
                nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
        }

        /*
         * In order to maintain mutual distrust between Realm and Secure
         * states, remove any data speculatively fetched into the target
         * physical address space. Issue DC CIPAPA over the address range.
         */
        flush_dcache_to_popa_range(nse | base,
                                   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

        write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
                  gpi_info.gpi_shift, gpi_info.idx, target_pas);
        dsboshst();

        gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
        dsbosh();

        nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

        flush_dcache_to_popa_range(nse | base,
                                   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

        /* Unlock access to the L1 tables. */
        spin_unlock(&gpt_lock);

        /*
         * The isb() will be done as part of context
         * synchronization when returning to lower EL.
         */
        VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
                base, gpi_info.gpi, target_pas);

        return 0;
}
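
/*
 * Illustrative caller (a minimal sketch): an EL3 dispatcher for the granule
 * transition service might route a delegate SMC as below. The handler name
 * and return-value mapping are hypothetical; only the gpt_delegate_pas()
 * call itself is this library's API.
 */
#if 0
static uintptr_t gts_delegate_handler(uint64_t pa, void *handle,
                                      uint64_t flags)
{
        /* Derive the caller's security state from the SMC flags. */
        unsigned int src = is_caller_secure(flags) ? SMC_FROM_SECURE :
                                                     SMC_FROM_REALM;

        /* Transition exactly one granule into the caller's PAS. */
        int ret = gpt_delegate_pas(pa, GPT_PGS_ACTUAL_SIZE(gpt_config.p),
                                   src);

        SMC_RET1(handle, (ret == 0) ? SMC_OK : SMC_UNK);
}
#endif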

/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request
 * is validated then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to
 *			granule size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
        gpi_info_t gpi_info;
        uint64_t nse;
        int res;

        /* Ensure that the tables have been set up before taking requests. */
        assert(gpt_config.plat_gpt_l0_base != 0UL);

        /* Ensure that MMU and data caches are enabled. */
        assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

        /* Undelegate requests can only come from REALM or SECURE. */
        assert(src_sec_state == SMC_FROM_REALM ||
               src_sec_state == SMC_FROM_SECURE);

        /* Only a single granule can be transitioned at a time (see TODO). */
        if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
                return -EINVAL;
        }

        /* Check that base and size do not overflow. */
        if ((ULONG_MAX - base) < size) {
                VERBOSE("[GPT] Transition request address overflow!\n");
                VERBOSE("      Base=0x%" PRIx64 "\n", base);
                VERBOSE("      Size=0x%lx\n", size);
                return -EINVAL;
        }

        /* Make sure base and size are aligned and within the protected space. */
        if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
            ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
            (size == 0UL) ||
            ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
                VERBOSE("[GPT] Invalid granule transition address range!\n");
                VERBOSE("      Base=0x%" PRIx64 "\n", base);
                VERBOSE("      Size=0x%lx\n", size);
                return -EINVAL;
        }

        /*
         * Access to L1 tables is controlled by a global lock to ensure
         * that no more than one CPU is allowed to make changes at any
         * given time.
         */
        spin_lock(&gpt_lock);

        res = get_gpi_params(base, &gpi_info);
        if (res != 0) {
                spin_unlock(&gpt_lock);
                return res;
        }

        /* Check that the current address is in the delegated state. */
        if ((src_sec_state == SMC_FROM_REALM &&
             gpi_info.gpi != GPT_GPI_REALM) ||
            (src_sec_state == SMC_FROM_SECURE &&
             gpi_info.gpi != GPT_GPI_SECURE)) {
                VERBOSE("[GPT] Only a granule in the REALM or SECURE state can be undelegated.\n");
                VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
                        gpi_info.gpi);
                spin_unlock(&gpt_lock);
                return -EPERM;
        }

        /*
         * In order to maintain mutual distrust between Realm and Secure
         * states, remove access now, in order to guarantee that writes to
         * the currently-accessible physical address space will not later
         * become observable.
         */
        write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
                  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);
        dsboshst();

        gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
        dsbosh();

        if (src_sec_state == SMC_FROM_SECURE) {
                nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
        } else {
                nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
        }

        /* Ensure that the scrubbed data has made it past the PoPA. */
        flush_dcache_to_popa_range(nse | base,
                                   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

        /*
         * Remove any data loaded speculatively in NS space from before
         * the scrubbing.
         */
        nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

        flush_dcache_to_popa_range(nse | base,
                                   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

        /* Clear existing GPI encoding and transition granule. */
        write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
                  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);
        dsboshst();

        /* Ensure that all agents observe the new NS configuration. */
        gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
        dsbosh();

        /* Unlock access to the L1 tables. */
        spin_unlock(&gpt_lock);

        /*
         * The isb() will be done as part of context
         * synchronization when returning to lower EL.
         */
        VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
                base, gpi_info.gpi, GPT_GPI_NS);

        return 0;
}
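
/*
 * Illustrative round trip (a minimal sketch with a hypothetical address): a
 * Realm caller delegates one granule, uses it, then returns it to NS. Both
 * calls take the base PA, a size of exactly one granule, and the caller's
 * security state.
 */
#if 0
static void gpt_transition_example(void)
{
        uint64_t base = 0x880000000UL; /* Hypothetical granule-aligned PA. */
        size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);

        if (gpt_delegate_pas(base, size, SMC_FROM_REALM) == 0) {
                /* ... granule is now in the Realm PAS ... */
                (void)gpt_undelegate_pas(base, size, SMC_FROM_REALM);
        }
}
#endif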