/*
 * Copyright (c) 2022, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include "gpt_rme_private.h"
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library."
#endif

/*
 * Lookup T from PPS
 *
 *   PPS    Size     T
 *   0b000  4GB      32
 *   0b001  64GB     36
 *   0b010  1TB      40
 *   0b011  4TB      42
 *   0b100  16TB     44
 *   0b101  256TB    48
 *   0b110  4PB      52
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};

/*
 * Lookup P from PGS
 *
 *   PGS   Size     P
 *   0b00  4KB      12
 *   0b10  16KB     14
 *   0b01  64KB     16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};

/*
 * This structure contains GPT configuration data.
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;
	gpccr_pps_e pps;
	gpt_t_val_e t;
	gpccr_pgs_e pgs;
	gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;

/* These variables are used during initialization of the L1 tables. */
static unsigned int gpt_next_l1_tbl_idx;
static uintptr_t gpt_l1_tbl;

/*
 * This function checks to see if a GPI value is valid.
 *
 * These are valid GPI values.
 *   GPT_GPI_NO_ACCESS	U(0x0)
 *   GPT_GPI_SECURE	U(0x8)
 *   GPT_GPI_NS		U(0x9)
 *   GPT_GPI_ROOT	U(0xA)
 *   GPT_GPI_REALM	U(0xB)
 *   GPT_GPI_ANY	U(0xF)
 *
 * Parameters
 *   gpi		GPI to check for validity.
 *
 * Return
 *   true for a valid GPI, false for an invalid one.
 */
static bool gpt_is_gpi_valid(unsigned int gpi)
{
	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
		return true;
	}
	return false;
}

/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1,
				  uintptr_t base_2, size_t size_2)
{
	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
		return true;
	}
	return false;
}
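
/*
 * Worked example of the half-open interval check above (addresses are
 * arbitrary illustrative values, not platform defaults): regions
 * [0x0, 0x100000) and [0x80000, 0x180000) overlap because
 * (0x0 + 0x100000) > 0x80000 and (0x80000 + 0x100000) > 0x0, whereas the
 * adjacent regions [0x0, 0x100000) and [0x100000, 0x200000) do not, since
 * (0x0 + 0x100000) > 0x100000 is false.
 */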

/*
 * This helper function checks to see if a PAS region from index 0 to
 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
 *
 * Parameters
 *   l0_idx:      Index of the L0 entry to check
 *   pas_regions: PAS region array
 *   pas_idx:     Upper bound of the PAS array index.
 *
 * Return
 *   True if a PAS region occupies the L0 region in question, false if not.
 */
static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx,
					     pas_region_t *pas_regions,
					     unsigned int pas_idx)
{
	/* Iterate over PAS regions up to pas_idx. */
	for (unsigned int i = 0U; i < pas_idx; i++) {
		if (gpt_check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
		    GPT_L0GPTSZ_ACTUAL_SIZE,
		    pas_regions[i].base_pa, pas_regions[i].size)) {
			return true;
		}
	}
	return false;
}

/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 tables
 *   required when successful.
 */
static int gpt_validate_pas_mappings(pas_region_t *pas_regions,
				     unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region. */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("[GPT] Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity. */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("[GPT] PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1; i < pas_region_cnt; i++) {
			if (gpt_check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n",
				      i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa + pas_regions[idx].size - 1);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue. */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("[GPT] PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type. */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned. */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("[GPT] PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			continue;
		}

		/* Check for granule mapping (L1) type. */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned. */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("[GPT] PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies. */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same L0 region we subtract from pas_l1_cnt so that
			 * only the first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (gpt_does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1),
				    pas_regions, idx)) {
					pas_l1_cnt = pas_l1_cnt - 1;
				}
			}

			if (gpt_does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt = pas_l1_cnt - 1;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid. */
		ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}
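
/*
 * Illustrative L1 table counting example for the function above (addresses
 * are arbitrary and assume an L0GPTSZ of 1GB, which is a common
 * implementation choice): two granule-mapped PAS regions at 0x00100000 and
 * 0x20000000 both fall inside L0 region 0, so each computes pas_l1_cnt = 1;
 * when the second region is processed, gpt_does_previous_pas_exist_here()
 * finds the first one already occupying L0 region 0 and decrements its count
 * to 0, leaving a total of one shared L1 table rather than two.
 */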

/*
 * This function validates L0 initialization parameters.
 *
 * Parameters
 *   pps		Protected physical space size to configure.
 *   l0_mem_base	Base address of memory used for L0 tables.
 *   l0_mem_size	Size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
				  size_t l0_mem_size)
{
	size_t l0_alignment;

	/*
	 * Make sure PPS is valid and then store it since macros need this value
	 * to work.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("[GPT] Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4KB or the L0 table size. */
	l0_alignment = PAGE_SIZE_4KB;
	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	}

	/* Check base address. */
	if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) {
		ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

	/* Check size. */
	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx\n",
		      GPT_L0_TABLE_SIZE(gpt_config.t),
		      l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}
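
/*
 * Sizing example for the checks above (assuming an L0GPTSZ of 1GB, i.e.
 * S = 30, which is a common implementation choice): with PPS = 0b010 (1TB,
 * T = 40) the L0 table holds 2^(40 - 30) = 1024 descriptors of 8 bytes each,
 * so l0_mem_size must be at least 8KB and l0_mem_base must be 8KB-aligned;
 * with PPS = 0b000 (4GB, T = 32) the table is only 32 bytes and the 4KB
 * minimum alignment applies instead.
 */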

/*
 * In the event that L1 tables are needed, this function validates
 * the L1 table generation parameters.
 *
 * Parameters
 *   l1_mem_base	Base address of memory used for L1 table allocation.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   l1_gpt_cnt		Number of L1 tables needed.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
				  unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	if (!xlat_arch_is_granule_size_supported(
	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
		return -EPERM;
	}

	/* Make sure L1 tables are aligned to their size. */
	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) {
		ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n",
		      l1_mem_base);
		return -EFAULT;
	}

	/* Get total memory needed for L1 tables. */
	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Check for overflow. */
	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
		ERROR("[GPT] Overflow calculating L1 memory size.\n");
		return -ENOMEM;
	}

	/* Make sure enough space was supplied. */
	if (l1_mem_size < l1_gpt_mem_sz) {
		ERROR("[GPT] Inadequate memory for L1 GPTs. ");
		ERROR(" Expected 0x%lx bytes. Got 0x%lx bytes\n",
		      l1_gpt_mem_sz, l1_mem_size);
		return -ENOMEM;
	}

	VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
	return 0;
}
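
/*
 * L1 sizing example for the validation above (again assuming a 1GB L0GPTSZ,
 * S = 30): with 4KB granules (P = 12) each L0 region contains
 * 2^(30 - 12) = 262144 granules, and since every 64-bit L1 entry packs 16
 * 4-bit GPIs, an L1 table needs 16384 entries, i.e. 128KB of 128KB-aligned
 * memory; with 64KB granules (P = 16) the same calculation gives an 8KB L1
 * table.
 */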

/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region to
 *			initialize.
 */
static void gpt_generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t gpt_desc;
	unsigned int end_idx;
	unsigned int idx;
	uint64_t *l0_gpt_arr;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * gpt_validate_pas_mappings so no need to check the same things again.
	 */

	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* Start index of this region in L0 GPTs */
	idx = GPT_L0_IDX(pas->base_pa);

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

	/* Generate the needed block descriptors. */
	for (; idx < end_idx; idx++) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
	}
}

/*
 * Helper function to determine if the end physical address lies in the same L0
 * region as the current physical address. If so, the end physical address is
 * returned; otherwise, the start address of the next L0 region is returned.
 *
 * Parameters
 *   cur_pa		Physical address of the current PA in the loop through
 *			the range.
 *   end_pa		Physical address of the end PA in a PAS range.
 *
 * Return
 *   The PA of the end of the current range.
 */
static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t cur_idx;
	uintptr_t end_idx;

	cur_idx = GPT_L0_IDX(cur_pa);
	end_idx = GPT_L0_IDX(end_pa);

	assert(cur_idx <= end_idx);

	if (cur_idx == end_idx) {
		return end_pa;
	}

	return (cur_idx + 1U) << GPT_L0_IDX_SHIFT;
}

/*
 * Helper function to fill out GPI entries in a single L1 table. This function
 * fills out entire L1 descriptors at a time to save memory writes.
 *
 * Parameters
 *   gpi		GPI to set this range to
 *   l1			Pointer to L1 table to fill out
 *   first		Address of first granule in range.
 *   last		Address of last granule in range (inclusive).
 */
static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
			    uintptr_t last)
{
	uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
	uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF;

	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
	assert(l1 != NULL);

	/* Shift the mask if we're starting in the middle of an L1 entry. */
	gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region. */
	for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
	     i <= GPT_L1_IDX(gpt_config.p, last); i++) {
		/* Account for stopping in the middle of an L1 entry. */
		if (i == GPT_L1_IDX(gpt_config.p, last)) {
			gpi_mask &= (gpi_mask >> ((15 -
				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		/* Write GPI values. */
		assert((l1[i] & gpi_mask) ==
		       (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
		l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);

		/* Reset mask. */
		gpi_mask = 0xFFFFFFFFFFFFFFFF;
	}
}
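
/*
 * Mask example for the fill loop above (slot numbers and values are
 * illustrative): if the first granule of a range occupies GPI slot 2 of an
 * L1 entry and the last granule occupies slot 5 of the same entry, the
 * initial left shift produces 0xFFFFFFFFFFFFFF00 and the final-entry right
 * shift of (15 - 5) * 4 = 40 bits narrows the mask to 0x0000000000FFFF00,
 * so only the four 4-bit GPI fields for slots 2 through 5 are rewritten.
 */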

/*
 * This function finds the next available unused L1 table and initializes all
 * granule descriptor entries to GPI_ANY. This ensures that there are no chunks
 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
 * event that a PAS region stops midway through an L1 table, thus guaranteeing
 * that all memory not explicitly assigned is GPI_ANY. This function does not
 * check for overflow conditions; that should be done by the caller.
 *
 * Return
 *   Pointer to the next available L1 table.
 */
static uint64_t *gpt_get_new_l1_tbl(void)
{
	/* Retrieve the next L1 table. */
	uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
		       (GPT_L1_TABLE_SIZE(gpt_config.p) *
		       gpt_next_l1_tbl_idx));

	/* Increment L1 counter. */
	gpt_next_l1_tbl_idx++;

	/* Initialize all GPIs to GPT_GPI_ANY */
	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
		l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY);
	}

	return l1;
}

/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region.
 */
static void gpt_generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * gpt_validate_pas_mappings so no need to check the same things again.
	 */

	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Iterate over each L0 region in this memory range. */
	for (l0_idx = GPT_L0_IDX(pas->base_pa);
	     l0_idx <= GPT_L0_IDX(end_pa - 1U);
	     l0_idx++) {

		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry. */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space. */
			l1_gpt_arr = gpt_get_new_l1_tbl();

			/* Fill out the L0 descriptor and flush it. */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%" PRIx64 ")\n",
			l0_idx, &l0_gpt_base[l0_idx],
			(unsigned long long)(l1_gpt_arr),
			l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
				cur_pa, last_gran_pa);

		/* Advance cur_pa to first granule in next L0 region. */
		cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa);
	}
}
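
/*
 * Span example for the walk above (assuming a 1GB L0GPTSZ and 4KB granules;
 * the addresses are arbitrary): a granule-mapped PAS at 0x3FE00000 with size
 * 0x400000 crosses from L0 region 0 into region 1, so the loop runs twice.
 * The first pass fills L1 entries for granules 0x3FE00000 to 0x3FFFF000
 * (gpt_get_l1_end_pa() returns 0x40000000), and the second pass fills
 * 0x40000000 to 0x401FF000, each pass using its own L0 table descriptor.
 */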

/*
 * This function flushes a range of L0 descriptors used by a given PAS region
 * array. There is a chance that some unmodified L0 descriptors would be flushed
 * in the case that there are "holes" in an array of PAS regions but overall
 * this should be faster than individually flushing each modified L0 descriptor
 * as they are created.
 *
 * Parameters
 *   *pas		Pointer to an array of PAS regions.
 *   pas_count		Number of entries in the PAS array.
 */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
	unsigned int idx;
	unsigned int start_idx;
	unsigned int end_idx;
	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas != NULL);
	assert(pas_count > 0);

	/* Initial start and end values. */
	start_idx = GPT_L0_IDX(pas[0].base_pa);
	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1);

	/* Find lowest and highest L0 indices used in this PAS array. */
	for (idx = 1; idx < pas_count; idx++) {
		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
			start_idx = GPT_L0_IDX(pas[idx].base_pa);
		}
		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) {
			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1);
		}
	}

	/*
	 * Flush all covered L0 descriptors, add 1 because we need to include
	 * the end index value.
	 */
	flush_dcache_range((uintptr_t)&l0[start_idx],
			   ((end_idx + 1) - start_idx) * sizeof(uint64_t));
}

/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0U) {
		ERROR("[GPT] Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Invalidate any stale TLB entries */
	tlbipaallos();
	dsb();

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API to disable granule protection checks.
 */
void gpt_disable(void)
{
	u_register_t gpccr_el3 = read_gpccr_el3();

	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
	dsbsy();
	isb();
}

/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * Parameters
 *   pps		PPS value to use for table generation
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(unsigned int pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	int ret;
	uint64_t gpt_desc;

	/* Ensure that MMU and Data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Validate other parameters. */
	ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Create the descriptor to initialize L0 entries with. */
	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

	/* Iterate through all L0 entries */
	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
		((uint64_t *)l0_mem_base)[i] = gpt_desc;
	}

	/* Flush updated L0 tables to memory. */
	flush_dcache_range((uintptr_t)l0_mem_base,
			   (size_t)GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Stash the L0 base address once initial setup is complete. */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}
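
/*
 * Illustrative cold-boot call sequence for the public API (the PPS/PGS
 * encodings, addresses and region list are hypothetical example values, not
 * defaults):
 *
 *   ret = gpt_init_l0_tables(0x1, l0_base, l0_size);        (PPS 0b001: 64GB)
 *   ret = gpt_init_pas_l1_tables(0x0, l1_base, l1_size,     (PGS 0b00: 4KB)
 *                                pas_regions, pas_count);
 *   ret = gpt_enable();
 *
 * Warm-booted cores only need to call gpt_enable() again, and later BL stages
 * that inherit an already-enabled GPT call gpt_runtime_init() instead.
 */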

/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory. (e.g., you have multiple DDR banks and want to place the L1
 * tables in the DDR bank that they control)
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int ret;
	int l1_gpt_cnt;

	/* Ensure that MMU and Data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* PGS is needed for gpt_validate_pas_mappings so check it now. */
	if (pgs > GPT_PGS_MAX) {
		ERROR("[GPT] Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized. */
	if (gpt_config.plat_gpt_l0_base == 0U) {
		ERROR("[GPT] L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/* Check if L1 GPTs are required and how many. */
	l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("[GPT] %d L1 GPTs requested.\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters. */
	if (l1_gpt_cnt > 0) {
		ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
					     l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation. */
		gpt_l1_tbl = l1_mem_base;
		gpt_next_l1_tbl_idx = 0U;
	}

	INFO("[GPT] Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: 0x%x\n", pas_count);
	INFO("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory. */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n",
		     idx, pas_regions[idx].base_pa, pas_regions[idx].size,
		     GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
		     GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			gpt_generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			gpt_generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables. */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed. */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory. */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
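
/*
 * A minimal sketch of the multi-bank usage described above (bank layout and
 * ordering are hypothetical): a platform with two DDR banks may call
 * gpt_init_pas_l1_tables() once with the PAS regions and an L1 pool located
 * in bank 0, then again with those located in bank 1, so that each bank's L1
 * tables reside in the memory they describe. The same pgs encoding should be
 * passed on each call, since it is stored in the shared gpt_config.
 */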

/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(void)
{
	u_register_t reg;

	/* Ensure that MMU and Data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Ensure GPC are already enabled. */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
		ERROR("[GPT] Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR, we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values. */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	VERBOSE("[GPT] Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);

	return 0;
}

/*
 * The L1 descriptors are protected by a spinlock to ensure that multiple
 * CPUs do not attempt to change the descriptors at once. In the future it
 * would be better to have separate spinlocks for each L1 descriptor.
 */
static spinlock_t gpt_lock;

/*
 * A helper that updates the GPI field at the given shift within the cached L1
 * descriptor and writes the updated descriptor back to gpt_l1_addr[idx].
 */
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
			     unsigned int gpi_shift, unsigned int idx,
			     unsigned int target_pas)
{
	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = *gpt_l1_desc;
}

/*
 * Helper to look up the L1 descriptor information for a given base address and
 * return it in gpi_info.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from PA. */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
	gpi_info->gpi = (gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
			GPT_L1_GRAN_DESC_GPI_MASK;
	return 0;
}
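
/*
 * Address decomposition example for the lookup above (assuming a 1GB L0GPTSZ
 * and 4KB granules; the address is arbitrary): for base = 0x80403000, the L0
 * index is 0x80403000 >> 30 = 2, the offset within that L0 region is
 * 0x403000, which is granule number 0x403, so idx = 0x403 / 16 = 0x40, the
 * GPI slot is 0x403 % 16 = 3 and gpi_shift = 3 * 4 = 12.
 */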

/*
 * This function is the granule transition delegate service. When a granule
 * transition request occurs it is routed to this function to have the request,
 * if valid, fulfilled following section A1.1.1 (Delegate) of the RME
 * supplement.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse;
	int res;
	unsigned int target_pas;

	/* Ensure that the tables have been set up before taking requests. */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Delegate request can only come from REALM or SECURE */
	assert(src_sec_state == SMC_FROM_REALM ||
	       src_sec_state == SMC_FROM_SECURE);

	/* Only a single granule transition is supported at this time. */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check for address overflow. */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("[GPT] Transition request address overflow!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid. */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("[GPT] Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	target_pas = GPT_GPI_REALM;
	if (src_sec_state == SMC_FROM_SECURE) {
		target_pas = GPT_GPI_SECURE;
	}

	/*
	 * Access to L1 tables is controlled by a global lock to ensure
	 * that no more than one CPU is allowed to make changes at any
	 * given time.
	 */
	spin_lock(&gpt_lock);
	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		spin_unlock(&gpt_lock);
		return res;
	}

	/* Check that the target granule is currently in the NS state. */
	if (gpi_info.gpi != GPT_GPI_NS) {
		VERBOSE("[GPT] Only a granule in the NS state can be delegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		spin_unlock(&gpt_lock);
		return -EINVAL;
	}

	if (src_sec_state == SMC_FROM_SECURE) {
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	}

	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove any data speculatively fetched into the target
	 * physical address space. Issue DC CIPAPA over the address range.
	 */
	flush_dcache_to_popa_range(nse | base,
				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, target_pas);
	dsboshst();

	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
	dsbosh();

	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_dcache_to_popa_range(nse | base,
				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

	/* Unlock access to the L1 tables. */
	spin_unlock(&gpt_lock);

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, target_pas);

	return 0;
}

/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request is
 * validated then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse;
	int res;

	/* Ensure that the tables have been set up before taking requests. */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Undelegate request can only come from REALM or SECURE */
	assert(src_sec_state == SMC_FROM_REALM ||
	       src_sec_state == SMC_FROM_SECURE);

	/* Only a single granule transition is supported at this time. */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check for address overflow. */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("[GPT] Transition request address overflow!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid. */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("[GPT] Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/*
	 * Access to L1 tables is controlled by a global lock to ensure
	 * that no more than one CPU is allowed to make changes at any
	 * given time.
	 */
	spin_lock(&gpt_lock);

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		spin_unlock(&gpt_lock);
		return res;
	}

	/* Check that the granule is currently in the caller's delegated state. */
	if ((src_sec_state == SMC_FROM_REALM &&
	     gpi_info.gpi != GPT_GPI_REALM) ||
	    (src_sec_state == SMC_FROM_SECURE &&
	     gpi_info.gpi != GPT_GPI_SECURE)) {
		VERBOSE("[GPT] Only a granule in the REALM or SECURE state can be undelegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		spin_unlock(&gpt_lock);
		return -EINVAL;
	}

	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now to guarantee that writes to the
	 * currently-accessible physical address space will not later become
	 * observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);
	dsboshst();

	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
	dsbosh();

	if (src_sec_state == SMC_FROM_SECURE) {
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	}

	/* Ensure that the scrubbed data has made it past the PoPA */
	flush_dcache_to_popa_range(nse | base,
				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

	/*
	 * Remove any data loaded speculatively
	 * in NS space from before the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_dcache_to_popa_range(nse | base,
				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

	/* Clear existing GPI encoding and transition granule. */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);
	dsboshst();

	/* Ensure that all agents observe the new NS configuration */
	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
	dsbosh();

	/* Unlock access to the L1 tables. */
	spin_unlock(&gpt_lock);

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}