/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <types.h>
#include <utils_def.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>

#include "xlat_tables_private.h"

#if LOG_LEVEL < LOG_LEVEL_VERBOSE

void xlat_mmap_print(__unused mmap_region_t *const mmap)
{
	/* Empty */
}

void xlat_tables_print(__unused xlat_ctx_t *ctx)
{
	/* Empty */
}

#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */

void xlat_mmap_print(mmap_region_t *const mmap)
{
	tf_printf("mmap:\n");
	const mmap_region_t *mm = mmap;

	while (mm->size != 0U) {
		tf_printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x "
			  "granularity:0x%zx\n", mm->base_va, mm->base_pa,
			  mm->size, mm->attr, mm->granularity);
		++mm;
	}
	tf_printf("\n");
}

/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
	int mem_type_index = ATTR_INDEX_GET(desc);
	int xlat_regime = ctx->xlat_regime;

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		tf_printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		tf_printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		tf_printf("DEV");
	}

	if (xlat_regime == EL3_REGIME) {
		/* For EL3, only check the AP[2] and XN bits. */
		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
		tf_printf((desc & UPPER_ATTRS(XN)) ? "-XN" : "-EXEC");
	} else {
		assert(xlat_regime == EL1_EL0_REGIME);
		/*
		 * For EL0 and EL1:
		 * - In AArch64 PXN and UXN can be set independently but in
		 *   AArch32 there is no UXN (XN affects both privilege
		 *   levels). For consistency, we set them simultaneously in
		 *   both cases.
		 * - RO and RW permissions must be the same in EL1 and EL0. If
		 *   EL0 can access that memory region, so can EL1, with the
		 *   same permissions.
		 */
#if ENABLE_ASSERTIONS
		uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
		uint64_t xn_perm = desc & xn_mask;

		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
		/* Only check one of PXN and UXN; the other one is the same. */
		tf_printf((desc & UPPER_ATTRS(PXN)) ? "-XN" : "-EXEC");
		/*
		 * Privileged regions can only be accessed from EL1; user
		 * regions can be accessed from EL1 and EL0.
		 */
		tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
			  ? "-USER" : "-PRIV");
	}

	tf_printf((desc & LOWER_ATTRS(NS)) ? "-NS" : "-S");
}
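
/*
 * Worked example (illustrative): a block descriptor whose AttrIndx field
 * selects ATTR_IWBWA_OWBWA_NTR_INDEX, whose AP[2] bit is clear and whose XN
 * bit is set would be printed by xlat_desc_print() as "MEM-RW-XN-S" in the
 * EL3 regime, assuming the NS bit is clear.
 */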

static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_omitted =
	"%s(%d invalid descriptors omitted)\n";

/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
static void xlat_tables_print_internal(xlat_ctx_t *ctx,
		const uintptr_t table_base_va,
		uint64_t *const table_base, const int table_entries,
		const unsigned int level)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	int table_idx = 0;

	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				tf_printf(invalid_descriptors_omitted,
					  level_spacers[level],
					  invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but DESC_PAGE has
			 * the same value as DESC_TABLE, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
			    (level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(ctx, table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1);
			} else {
				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
					  level_spacers[level],
					  (void *)table_idx_va,
					  (unsigned long long)(desc & TABLE_ADDR_MASK),
					  level_size);
				xlat_desc_print(ctx, desc);
				tf_printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1) {
		tf_printf(invalid_descriptors_omitted,
			  level_spacers[level], invalid_row_count - 1);
	}
}
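
/*
 * For reference, the output of the function above looks like the following
 * (all addresses, sizes and counts here are hypothetical). A level 1 table
 * descriptor is followed by the level 2 table it points to, which maps one
 * 2 MiB block and leaves the remaining 511 entries invalid:
 *
 *	  [LV1] VA:0x0 size:0x40000000
 *	    [LV2] VA:0x0 PA:0x0 size:0x200000 MEM-RW-XN-S
 *	    [LV2] VA:0x200000 size:0x200000
 *	    [LV2] (510 invalid descriptors omitted)
 */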

void xlat_tables_print(xlat_ctx_t *ctx)
{
	const char *xlat_regime_str;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		xlat_regime_str = "1&0";
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		xlat_regime_str = "3";
	}
	VERBOSE("Translation tables state:\n");
	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
	VERBOSE("  Max allowed VA:  %p\n", (void *)ctx->va_max_address);
	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
	VERBOSE("  Max mapped VA:   %p\n", (void *)ctx->max_va);

	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
	VERBOSE("  Entries @initial lookup level: %i\n",
		ctx->base_table_entries);

	int used_page_tables;
#if PLAT_XLAT_TABLES_DYNAMIC
	used_page_tables = 0;
	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0)
			++used_page_tables;
	}
#else
	used_page_tables = ctx->next_table;
#endif
	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(ctx, 0, ctx->base_table,
				   ctx->base_table_entries, ctx->base_level);
}

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

/*
 * Do a translation table walk to find the block or page descriptor that maps
 * virtual_addr.
 *
 * On success, return the address of the descriptor within the translation
 * table. Its lookup level is stored in '*out_level'.
 * On error, return NULL.
 *
 * xlat_table_base
 *   Base address for the initial lookup level.
 * xlat_table_base_entries
 *   Number of entries in the translation table for the initial lookup level.
 * virt_addr_space_size
 *   Size in bytes of the virtual address space.
 */
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
				       void *xlat_table_base,
				       int xlat_table_base_entries,
				       unsigned long long virt_addr_space_size,
				       int *out_level)
{
	unsigned int start_level;
	uint64_t *table;
	int entries;

	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);

	table = xlat_table_base;
	entries = xlat_table_base_entries;

	for (unsigned int level = start_level;
	     level <= XLAT_TABLE_LEVEL_MAX;
	     ++level) {
		int idx;
		uint64_t desc;
		uint64_t desc_type;

		idx = XLAT_TABLE_IDX(virtual_addr, level);
		if (idx >= entries) {
			WARN("Missing xlat table entry at address 0x%lx\n",
			     virtual_addr);
			return NULL;
		}

		desc = table[idx];
		desc_type = desc & DESC_MASK;

		if (desc_type == INVALID_DESC) {
			VERBOSE("Invalid entry (memory not mapped)\n");
			return NULL;
		}

		if (level == XLAT_TABLE_LEVEL_MAX) {
			/*
			 * Only page descriptors allowed at the final lookup
			 * level.
			 */
			assert(desc_type == PAGE_DESC);
			*out_level = level;
			return &table[idx];
		}

		if (desc_type == BLOCK_DESC) {
			*out_level = level;
			return &table[idx];
		}

		assert(desc_type == TABLE_DESC);
		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
		entries = XLAT_TABLE_ENTRIES;
	}

	/*
	 * This shouldn't be reached; the translation table walk should end at
	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
	 */
	assert(0);

	return NULL;
}
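
/*
 * Note on the initial lookup level (a worked example, assuming the 4 KiB
 * translation granule used by this library): GET_XLAT_TABLE_LEVEL_BASE()
 * returns the deepest lookup level whose translation table still spans the
 * whole virtual address space. A 4 GiB (1ULL << 32) address space is too big
 * for a level 2 table (which spans at most 1 GiB) but fits in a level 1 table
 * (which spans up to 512 GiB), so the walk above starts at level 1 and visits
 * at most levels 1 to 3.
 */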

static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
		uint32_t *attributes, uint64_t **table_entry,
		unsigned long long *addr_pa, int *table_level)
{
	uint64_t *entry;
	uint64_t desc;
	int level;
	unsigned long long virt_addr_space_size;

	/* Sanity-check arguments. */
	assert(ctx != NULL);
	assert(ctx->initialized);
	assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
	       (ctx->xlat_regime == EL3_REGIME));

	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
	assert(virt_addr_space_size > 0);

	entry = find_xlat_table_entry(base_va,
				      ctx->base_table,
				      ctx->base_table_entries,
				      virt_addr_space_size,
				      &level);
	if (entry == NULL) {
		WARN("Address %p is not mapped.\n", (void *)base_va);
		return -EINVAL;
	}

	if (addr_pa != NULL) {
		*addr_pa = *entry & TABLE_ADDR_MASK;
	}

	if (table_entry != NULL) {
		*table_entry = entry;
	}

	if (table_level != NULL) {
		*table_level = level;
	}

	desc = *entry;

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Attributes: ");
	xlat_desc_print(ctx, desc);
	tf_printf("\n");
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

	assert(attributes != NULL);
	*attributes = 0;

	int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;

	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		*attributes |= MT_MEMORY;
	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
		*attributes |= MT_NON_CACHEABLE;
	} else {
		assert(attr_index == ATTR_DEVICE_INDEX);
		*attributes |= MT_DEVICE;
	}

	int ap2_bit = (desc >> AP2_SHIFT) & 1;

	if (ap2_bit == AP2_RW)
		*attributes |= MT_RW;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		int ap1_bit = (desc >> AP1_SHIFT) & 1;

		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
			*attributes |= MT_USER;
	}

	int ns_bit = (desc >> NS_SHIFT) & 1;

	if (ns_bit == 1)
		*attributes |= MT_NS;

	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	if ((desc & xn_mask) == xn_mask) {
		*attributes |= MT_EXECUTE_NEVER;
	} else {
		assert((desc & xn_mask) == 0);
	}

	return 0;
}

int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
		       uint32_t *attributes)
{
	return get_mem_attributes_internal(ctx, base_va, attributes,
					   NULL, NULL, NULL);
}
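
/*
 * Usage sketch (illustrative): query the attributes of a mapped page and
 * check whether it is writable. Here `ctx' stands for whichever translation
 * context the caller owns, e.g. one initialized with init_xlat_tables_ctx():
 *
 *	uint32_t attr;
 *
 *	if (get_mem_attributes(ctx, base_va, &attr) == 0) {
 *		if ((attr & MT_RW) != 0) {
 *			... base_va is mapped read-write ...
 *		}
 *	}
 */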

int change_mem_attributes(xlat_ctx_t *ctx,
			  uintptr_t base_va,
			  size_t size,
			  uint32_t attr)
{
	/* Note: This implementation isn't optimized. */

	assert(ctx != NULL);
	assert(ctx->initialized);

	unsigned long long virt_addr_space_size =
		(unsigned long long)ctx->va_max_address + 1;
	assert(virt_addr_space_size > 0);

	if (!IS_PAGE_ALIGNED(base_va)) {
		WARN("%s: Address %p is not aligned on a page boundary.\n",
		     __func__, (void *)base_va);
		return -EINVAL;
	}

	if (size == 0) {
		WARN("%s: Size is 0.\n", __func__);
		return -EINVAL;
	}

	if ((size % PAGE_SIZE) != 0) {
		WARN("%s: Size 0x%zx is not a multiple of the page size.\n",
		     __func__, size);
		return -EINVAL;
	}

	if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
		WARN("%s: Mapping memory as read-write and executable not allowed.\n",
		     __func__);
		return -EINVAL;
	}

	int pages_count = size / PAGE_SIZE;

	VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
		pages_count, (void *)base_va);

	uintptr_t base_va_original = base_va;

	/* Sanity checks. */
	for (int i = 0; i < pages_count; ++i) {
		uint64_t *entry;
		uint64_t desc;
		int level;

		entry = find_xlat_table_entry(base_va,
					      ctx->base_table,
					      ctx->base_table_entries,
					      virt_addr_space_size,
					      &level);
		if (entry == NULL) {
			WARN("Address %p is not mapped.\n", (void *)base_va);
			return -EINVAL;
		}

		desc = *entry;

		/*
		 * Check that all the required pages are mapped at page
		 * granularity.
		 */
		if (((desc & DESC_MASK) != PAGE_DESC) ||
		    (level != XLAT_TABLE_LEVEL_MAX)) {
			WARN("Address %p is not mapped at the right granularity.\n",
			     (void *)base_va);
			WARN("Granularity is 0x%llx, should be 0x%x.\n",
			     (unsigned long long)XLAT_BLOCK_SIZE(level),
			     PAGE_SIZE);
			return -EINVAL;
		}

		/*
		 * If the region type is device, it shouldn't be executable.
		 */
		int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;

		if (attr_index == ATTR_DEVICE_INDEX) {
			if ((attr & MT_EXECUTE_NEVER) == 0) {
				WARN("Setting device memory as executable at address %p.\n",
				     (void *)base_va);
				return -EINVAL;
			}
		}

		base_va += PAGE_SIZE;
	}

	/* Restore original value. */
	base_va = base_va_original;

	for (int i = 0; i < pages_count; ++i) {

		uint32_t old_attr, new_attr;
		uint64_t *entry;
		int level;
		unsigned long long addr_pa;

		get_mem_attributes_internal(ctx, base_va, &old_attr,
					    &entry, &addr_pa, &level);

		/*
		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
		 * information is ignored.
		 */

		/* Clean the old attributes so that they can be rebuilt. */
		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * Update attributes, but filter out the ones this function
		 * isn't allowed to change.
		 */
		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * The break-before-make sequence requires writing an invalid
		 * descriptor and making sure that the system sees the change
		 * before writing the new descriptor.
		 */
		*entry = INVALID_DESC;

		/* Invalidate any cached copy of this mapping in the TLBs. */
		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);

		/* Ensure completion of the invalidation. */
		xlat_arch_tlbi_va_sync();

		/* Write the new descriptor. */
		*entry = xlat_desc(ctx, new_attr, addr_pa, level);

		base_va += PAGE_SIZE;
	}

	/* Ensure that the last descriptor written is seen by the system. */
	dsbish();

	return 0;
}
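
/*
 * Usage sketch (illustrative; `ctx' and `code_base' are hypothetical): after
 * copying code into a buffer that was mapped read-write, remap it read-only
 * and executable before jumping to it. The break-before-make sequence and the
 * TLB maintenance are handled internally by change_mem_attributes(); note
 * that only the MT_RW, MT_EXECUTE_NEVER and MT_USER parts of the requested
 * attributes are applied.
 *
 *	int ret = change_mem_attributes(ctx, code_base, 2U * PAGE_SIZE,
 *					MT_CODE);
 *	assert(ret == 0);
 */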