/*
 * Copyright (c) 2016-2017, Linaro Limited. All rights reserved.
 * Copyright (c) 2014-2020, Arm Limited. All rights reserved.
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/cassert.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables.h>

#include "../xlat_tables_private.h"

#ifdef ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING
#error "ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING flag is set. \
	This module is to be used when LPAE is not supported"
#endif

CASSERT(PLAT_VIRT_ADDR_SPACE_SIZE == (1ULL << 32), invalid_vaddr_space_size);
CASSERT(PLAT_PHY_ADDR_SPACE_SIZE == (1ULL << 32), invalid_paddr_space_size);

#define MMU32B_UNSET_DESC	~0UL
#define MMU32B_INVALID_DESC	0UL

#define MT_UNKNOWN	~0U

/*
 * MMU related values
 */

/* Sharable */
#define MMU32B_TTB_S		(1U << 1)

/* Not Outer Sharable */
#define MMU32B_TTB_NOS		(1U << 5)

/* Normal memory, Inner Non-cacheable */
#define MMU32B_TTB_IRGN_NC	0U

/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
#define MMU32B_TTB_IRGN_WBWA	(1U << 6)

/* Normal memory, Inner Write-Through Cacheable */
#define MMU32B_TTB_IRGN_WT	1U

/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
#define MMU32B_TTB_IRGN_WB	(1U | (1U << 6))

/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
#define MMU32B_TTB_RNG_WBWA	(1U << 3)

#define MMU32B_DEFAULT_ATTRS \
		(MMU32B_TTB_S | MMU32B_TTB_NOS | \
		 MMU32B_TTB_IRGN_WBWA | MMU32B_TTB_RNG_WBWA)

/* armv7 memory mapping attributes: section mapping */
#define SECTION_SECURE			(0U << 19)
#define SECTION_NOTSECURE		(1U << 19)
#define SECTION_SHARED			(1U << 16)
#define SECTION_NOTGLOBAL		(1U << 17)
#define SECTION_ACCESS_FLAG		(1U << 10)
#define SECTION_UNPRIV			(1U << 11)
#define SECTION_RO			(1U << 15)
#define SECTION_TEX(tex)		((((tex) >> 2) << 12) | \
					((((tex) >> 1) & 0x1) << 3) | \
					(((tex) & 0x1) << 2))
#define SECTION_DEVICE			SECTION_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SECTION_NORMAL			SECTION_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SECTION_NORMAL_CACHED		\
				SECTION_TEX(MMU32B_ATTR_IWBWA_OWBWA_INDEX)

#define SECTION_XN			(1U << 4)
#define SECTION_PXN			(1U << 0)
#define SECTION_SECTION			(2U << 0)

#define SECTION_PT_NOTSECURE		(1U << 3)
#define SECTION_PT_PT			(1U << 0)

#define SMALL_PAGE_SMALL_PAGE		(1U << 1)
#define SMALL_PAGE_SHARED		(1U << 10)
#define SMALL_PAGE_NOTGLOBAL		(1U << 11)
#define SMALL_PAGE_TEX(tex)		((((tex) >> 2) << 6) | \
					((((tex) >> 1) & 0x1) << 3) | \
					(((tex) & 0x1) << 2))
#define SMALL_PAGE_DEVICE		\
				SMALL_PAGE_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL		\
				SMALL_PAGE_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL_CACHED	\
				SMALL_PAGE_TEX(MMU32B_ATTR_IWBWA_OWBWA_INDEX)
#define SMALL_PAGE_ACCESS_FLAG		(1U << 4)
#define SMALL_PAGE_UNPRIV		(1U << 5)
#define SMALL_PAGE_RO			(1U << 9)
#define SMALL_PAGE_XN			(1U << 0)

/* The TEX, C and B bits concatenated */
#define MMU32B_ATTR_DEVICE_INDEX	0U
#define MMU32B_ATTR_IWBWA_OWBWA_INDEX	1U
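/*
 * Illustrative note: with TEX remap enabled (SCTLR.TRE = 1) the memory
 * attributes of a mapping are taken from PRRR/NMRR, indexed by the
 * concatenation {TEX[0], C, B} of the descriptor. SECTION_TEX() and
 * SMALL_PAGE_TEX() scatter the 3-bit attribute index above into those
 * descriptor bit positions; for instance index 1
 * (MMU32B_ATTR_IWBWA_OWBWA_INDEX) ends up setting only the B bit.
 */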
#define MMU32B_PRRR_IDX(idx, tr, nos)	(((tr) << (2 * (idx))) | \
					((uint32_t)(nos) << ((idx) + 24)))
#define MMU32B_NMRR_IDX(idx, ir, or)	(((ir) << (2 * (idx))) | \
					((uint32_t)(or) << (2 * (idx) + 16)))
#define MMU32B_PRRR_DS0			(1U << 16)
#define MMU32B_PRRR_DS1			(1U << 17)
#define MMU32B_PRRR_NS0			(1U << 18)
#define MMU32B_PRRR_NS1			(1U << 19)

#define DACR_DOMAIN(num, perm)		((perm) << ((num) * 2))
#define DACR_DOMAIN_PERM_NO_ACCESS	0U
#define DACR_DOMAIN_PERM_CLIENT		1U
#define DACR_DOMAIN_PERM_MANAGER	3U

#define NUM_1MB_IN_4GB		(1UL << 12)
#define NUM_4K_IN_1MB		(1UL << 8)

#define ONE_MB_SHIFT		20

/* mmu 32b integration */
#define MMU32B_L1_TABLE_SIZE	(NUM_1MB_IN_4GB * 4)
#define MMU32B_L2_TABLE_SIZE	(NUM_4K_IN_1MB * 4)
#define MMU32B_L1_TABLE_ALIGN	(1U << 14)
#define MMU32B_L2_TABLE_ALIGN	(1U << 10)

static unsigned int next_xlat;
static unsigned long long xlat_max_pa;
static uintptr_t xlat_max_va;

static uint32_t mmu_l1_base[NUM_1MB_IN_4GB]
	__aligned(MMU32B_L1_TABLE_ALIGN) __attribute__((section(".xlat_table")));

static uint32_t mmu_l2_base[MAX_XLAT_TABLES][NUM_4K_IN_1MB]
	__aligned(MMU32B_L2_TABLE_ALIGN) __attribute__((section(".xlat_table")));

/*
 * Array of all memory regions stored in order of ascending base address.
 * The list is terminated by the first entry with size == 0.
 */
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];

void print_mmap(void)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	mmap_region_t *mm = mmap;

	printf("init xlat - l1:%p l2:%p (%d)\n",
		(void *)mmu_l1_base, (void *)mmu_l2_base, MAX_XLAT_TABLES);
	printf("mmap:\n");
	while (mm->size) {
		printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			(void *)mm->base_va, mm->base_pa,
			mm->size, mm->attr);
		++mm;
	}
	printf("\n");
#endif
}

void mmap_add(const mmap_region_t *mm)
{
	const mmap_region_t *mm_cursor = mm;

	while ((mm_cursor->size != 0U) || (mm_cursor->attr != 0U)) {
		mmap_add_region(mm_cursor->base_pa, mm_cursor->base_va,
				mm_cursor->size, mm_cursor->attr);
		mm_cursor++;
	}
}
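/*
 * Illustrative usage sketch (hypothetical platform addresses and array
 * name): callers typically describe their memory layout with a
 * zero-terminated region array and pass it to mmap_add() before calling
 * init_xlat_tables(), e.g.
 *
 *	static const mmap_region_t plat_regions[] = {
 *		MAP_REGION_FLAT(0x00000000, 0x00100000,
 *				MT_MEMORY | MT_RW | MT_SECURE),
 *		MAP_REGION_FLAT(0x10000000, 0x00010000,
 *				MT_DEVICE | MT_RW | MT_SECURE),
 *		{0}
 *	};
 *
 *	mmap_add(plat_regions);
 */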
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
		     size_t size, unsigned int attr)
{
	mmap_region_t *mm = mmap;
	const mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1U;
	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	assert(IS_PAGE_ALIGNED(base_pa));
	assert(IS_PAGE_ALIGNED(base_va));
	assert(IS_PAGE_ALIGNED(size));

	if (size == 0U) {
		return;
	}

	assert(base_pa < end_pa); /* Check for overflows */
	assert(base_va < end_va);

	assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
					(PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert((base_pa + (unsigned long long)size - 1ULL) <=
					(PLAT_PHY_ADDR_SPACE_SIZE - 1U));

#if ENABLE_ASSERTIONS

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 */
		if (fully_overlapped_va) {
			assert((mm->base_va - mm->base_pa) ==
			       (base_va - base_pa));
			assert((base_va != mm->base_va) || (size != mm->size));
		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */

			unsigned long long mm_end_pa =
						mm->base_pa + mm->size - 1;

			bool separated_pa = (end_pa < mm->base_pa) ||
				(base_pa > mm_end_pa);
			bool separated_va = (end_va < mm->base_va) ||
				(base_va > mm_end_va);

			assert(separated_va && separated_pa);
		}
	}

	mm = mmap; /* Restore pointer to the start of the array */

#endif /* ENABLE_ASSERTIONS */

	/* Find correct place in mmap to insert new region */
	while ((mm->base_va < base_va) && (mm->size != 0U)) {
		++mm;
	}

	/*
	 * If a section is contained inside another one with the same base
	 * address, it must be placed after the one it is contained in:
	 *
	 * 1st |-----------------------|
	 * 2nd |------------|
	 * 3rd |------|
	 *
	 * This is required for mmap_region_attr() to get the attributes of the
	 * small region correctly.
	 */
	while ((mm->base_va == base_va) && (mm->size > size)) {
		++mm;
	}

	/* Make room for new region by moving other regions up by one place */
	(void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check we haven't lost the empty sentinel from the end of the array */
	assert(mm_last->size == 0U);

	mm->base_pa = base_pa;
	mm->base_va = base_va;
	mm->size = size;
	mm->attr = attr;

	if (end_pa > xlat_max_pa) {
		xlat_max_pa = end_pa;
	}
	if (end_va > xlat_max_va) {
		xlat_max_va = end_va;
	}
}

/* map all memory as shared/global/domain0/no-usr access */
static uint32_t mmap_desc(unsigned int attr, unsigned int addr_pa,
			  unsigned int level)
{
	uint32_t desc;

	switch (level) {
	case 1U:
		assert((addr_pa & (MMU32B_L1_TABLE_ALIGN - 1)) == 0U);

		desc = SECTION_SECTION | SECTION_SHARED;

		desc |= (attr & MT_NS) != 0U ? SECTION_NOTSECURE : 0U;

		desc |= SECTION_ACCESS_FLAG;
		desc |= (attr & MT_RW) != 0U ? 0U : SECTION_RO;

		desc |= (attr & MT_MEMORY) != 0U ?
			SECTION_NORMAL_CACHED : SECTION_DEVICE;

		if (((attr & MT_RW) != 0U) || ((attr & MT_MEMORY) == 0U)) {
			desc |= SECTION_XN;
		}
		break;
	case 2U:
		assert((addr_pa & (MMU32B_L2_TABLE_ALIGN - 1)) == 0U);

		desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;

		desc |= SMALL_PAGE_ACCESS_FLAG;
		desc |= (attr & MT_RW) != 0U ? 0U : SMALL_PAGE_RO;

		desc |= (attr & MT_MEMORY) != 0U ?
			SMALL_PAGE_NORMAL_CACHED : SMALL_PAGE_DEVICE;

		if (((attr & MT_RW) != 0U) || ((attr & MT_MEMORY) == 0U)) {
			desc |= SMALL_PAGE_XN;
		}
		break;
	default:
		panic();
	}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	/* dump only the non-lpae level 2 tables */
	if (level == 2U) {
		printf(attr & MT_MEMORY ? "MEM" : "dev");
		printf(attr & MT_RW ? "-rw" : "-RO");
		printf(attr & MT_NS ? "-NS" : "-S");
	}
#endif
	return desc | addr_pa;
}
static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
				     size_t size, unsigned int *attr)
{
	/* Don't assume that the area is contained in the first region */
	unsigned int ret = MT_UNKNOWN;

	/*
	 * Get attributes from last (innermost) region that contains the
	 * requested area. Don't stop as soon as one region doesn't contain it
	 * because there may be other internal regions that contain this area:
	 *
	 * |-----------------------------1-----------------------------|
	 * |----2----|     |-------3-------|    |----5----|
	 *                   |--4--|
	 *
	 *                   |---| <- Area we want the attributes of.
	 *
	 * In this example, the area is contained in regions 1, 3 and 4 but not
	 * in region 2. The loop shouldn't stop at region 2 as inner regions
	 * have priority over outer regions, it should stop at region 5.
	 */
	for ( ; ; ++mm) {

		if (mm->size == 0U) {
			return ret; /* Reached end of list */
		}

		if (mm->base_va > (base_va + size - 1U)) {
			return ret; /* Next region is after area so end */
		}

		if ((mm->base_va + mm->size - 1U) < base_va) {
			continue; /* Next region has already been overtaken */
		}

		if ((ret == 0U) && (mm->attr == *attr)) {
			continue; /* Region doesn't override attribs so skip */
		}

		if ((mm->base_va > base_va) ||
			((mm->base_va + mm->size - 1U) <
					(base_va + size - 1U))) {
			return MT_UNKNOWN; /* Region doesn't fully cover area */
		}

		*attr = mm->attr;
		ret = 0U;
	}
}

static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
						unsigned int base_va,
						uint32_t *table,
						unsigned int level)
{
	unsigned int level_size_shift = (level == 1U) ?
					ONE_MB_SHIFT : FOUR_KB_SHIFT;
	unsigned int level_size = 1U << level_size_shift;
	unsigned int level_index_mask = (level == 1U) ?
				(NUM_1MB_IN_4GB - 1) << ONE_MB_SHIFT :
				(NUM_4K_IN_1MB - 1) << FOUR_KB_SHIFT;

	assert((level == 1U) || (level == 2U));

	VERBOSE("init xlat table at %p (level%1u)\n", (void *)table, level);

	do {
		uint32_t desc = MMU32B_UNSET_DESC;

		if (mm->base_va + mm->size <= base_va) {
			/* Area now after the region so skip it */
			++mm;
			continue;
		}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
		/* dump only non-lpae level 2 tables content */
		if (level == 2U) {
			printf("      0x%lx %x " + 6 - 2 * level,
				base_va, level_size);
		}
#endif
		if (mm->base_va >= base_va + level_size) {
			/* Next region is after area so nothing to map yet */
			desc = MMU32B_INVALID_DESC;
		} else if ((mm->base_va <= base_va) &&
				(mm->base_va + mm->size) >=
				(base_va + level_size)) {
			/* Next region covers all of area */
			unsigned int attr = mm->attr;
			unsigned int r = mmap_region_attr(mm, base_va,
							  level_size, &attr);

			if (r == 0U) {
				desc = mmap_desc(attr,
					base_va - mm->base_va + mm->base_pa,
					level);
			}
		}

		if (desc == MMU32B_UNSET_DESC) {
			uintptr_t xlat_table;

			/*
			 * Area not covered by a region so need finer table.
			 * Reuse next level table if any (assert attrib matching).
			 * Otherwise allocate a xlat table.
			 */
			if (*table) {
				assert((*table & 3) == SECTION_PT_PT);
				assert(((*table & SECTION_PT_NOTSECURE) == 0U)
					== ((mm->attr & MT_NS) == 0U));

				xlat_table = (*table) &
						~(MMU32B_L1_TABLE_ALIGN - 1);
				desc = *table;
			} else {
				xlat_table = (uintptr_t)mmu_l2_base +
					next_xlat * MMU32B_L2_TABLE_SIZE;
				next_xlat++;
				assert(next_xlat <= MAX_XLAT_TABLES);
				(void)memset((char *)xlat_table, 0,
					MMU32B_L2_TABLE_SIZE);

				desc = xlat_table | SECTION_PT_PT;
				desc |= (mm->attr & MT_NS) != 0U ?
					SECTION_PT_NOTSECURE : 0;
			}
			/* Recurse to fill in new table */
			mm = init_xlation_table_inner(mm, base_va,
						(uint32_t *)xlat_table,
						level + 1);
		}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
		/* dump only non-lpae level 2 tables content */
		if (level == 2U) {
			printf("\n");
		}
#endif
		*table++ = desc;
		base_va += level_size;
	} while ((mm->size != 0U) && ((base_va & level_index_mask) != 0U));

	return mm;
}

void init_xlat_tables(void)
{
	print_mmap();

	assert(((unsigned int)mmu_l1_base & (MMU32B_L1_TABLE_ALIGN - 1)) == 0U);
	assert(((unsigned int)mmu_l2_base & (MMU32B_L2_TABLE_ALIGN - 1)) == 0U);

	(void)memset(mmu_l1_base, 0, MMU32B_L1_TABLE_SIZE);

	init_xlation_table_inner(mmap, 0, (uint32_t *)mmu_l1_base, 1);

	VERBOSE("init xlat - max_va=%p, max_pa=%llx\n",
		(void *)xlat_max_va, xlat_max_pa);
	assert(xlat_max_pa <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1));
}

/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 ******************************************************************************/
void enable_mmu_svc_mon(unsigned int flags)
{
	unsigned int prrr;
	unsigned int nmrr;
	unsigned int sctlr;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0U);

	/* Enable Access flag (simplified access permissions) and TEX remap */
	write_sctlr(read_sctlr() | SCTLR_AFE_BIT | SCTLR_TRE_BIT);

	prrr = MMU32B_PRRR_IDX(MMU32B_ATTR_DEVICE_INDEX, 1, 0)
			| MMU32B_PRRR_IDX(MMU32B_ATTR_IWBWA_OWBWA_INDEX, 2, 1);
	nmrr = MMU32B_NMRR_IDX(MMU32B_ATTR_DEVICE_INDEX, 0, 0)
			| MMU32B_NMRR_IDX(MMU32B_ATTR_IWBWA_OWBWA_INDEX, 1, 1);

	prrr |= MMU32B_PRRR_NS1 | MMU32B_PRRR_DS1;

	write_prrr(prrr);
	write_nmrr(nmrr);

	/* Program Domain access control register: domain 0 only */
	write_dacr(DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT));

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/* set MMU base xlat table entry (use only TTBR0) */
	write_ttbr0((uint32_t)mmu_l1_base | MMU32B_DEFAULT_ATTRS);
	write_ttbr1(0U);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsb();
	isb();

	sctlr = read_sctlr();
	sctlr |= SCTLR_M_BIT;
#ifdef ARMV7_SUPPORTS_VIRTUALIZATION
	sctlr |= SCTLR_WXN_BIT;
#endif

	if ((flags & DISABLE_DCACHE) != 0U) {
		sctlr &= ~SCTLR_C_BIT;
	} else {
		sctlr |= SCTLR_C_BIT;
	}

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
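/*
 * Reference notes (illustrative values derived from the definitions above):
 *
 * - For secure normal memory mapped read-write (MT_MEMORY | MT_RW |
 *   MT_SECURE), mmap_desc() builds a level 1 section descriptor of
 *   base_pa | 0x10416 (section, shared, AF, cacheable attr index, XN)
 *   and a level 2 small-page descriptor of base_pa | 0x417.
 *
 * - enable_mmu_svc_mon() programs PRRR = 0x020A0009 (attr index 0 =
 *   Device, index 1 = Normal), NMRR = 0x00040004 (index 1 = inner/outer
 *   write-back write-allocate) and TTBR0 = mmu_l1_base ORed with
 *   MMU32B_DEFAULT_ATTRS (0x6A).
 *
 * - A typical boot-time call sequence from a platform port (region array
 *   name is hypothetical) would be:
 *
 *	mmap_add(plat_regions);		zero-terminated region list
 *	init_xlat_tables();		build the level 1/2 tables
 *	enable_mmu_svc_mon(0U);		enable the MMU with data cache on
 */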