xref: /rk3399_ARM-atf/lib/xlat_tables_v2/xlat_tables_utils.c (revision c54c7fc358428daf69b4b118feb967afa60e998f)
1 /*
2  * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <stdbool.h>
10 #include <stdint.h>
11 #include <stdio.h>
12 
13 #include <platform_def.h>
14 
15 #include <arch_helpers.h>
16 #include <common/debug.h>
17 #include <lib/utils_def.h>
18 #include <lib/xlat_tables/xlat_tables_defs.h>
19 #include <lib/xlat_tables/xlat_tables_v2.h>
20 
21 #include "xlat_tables_private.h"
22 
23 #if LOG_LEVEL < LOG_LEVEL_VERBOSE
24 
25 void xlat_mmap_print(__unused const mmap_region_t *mmap)
26 {
27 	/* Empty */
28 }
29 
30 void xlat_tables_print(__unused xlat_ctx_t *ctx)
31 {
32 	/* Empty */
33 }
34 
35 #else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
36 
37 void xlat_mmap_print(const mmap_region_t *mmap)
38 {
39 	printf("mmap:\n");
40 	const mmap_region_t *mm = mmap;
41 
42 	while (mm->size != 0U) {
43 		printf(" VA:0x%lx  PA:0x%llx  size:0x%zx  attr:0x%x  granularity:0x%zx\n",
44 		       mm->base_va, mm->base_pa, mm->size, mm->attr,
45 		       mm->granularity);
46 		++mm;
47 	}
48 	printf("\n");
49 }
50 
51 /* Print the attributes of the specified block descriptor. */
52 static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
53 {
54 	uint64_t mem_type_index = ATTR_INDEX_GET(desc);
55 	int xlat_regime = ctx->xlat_regime;
56 
57 	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
58 		printf("MEM");
59 	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
60 		printf("NC");
61 	} else {
62 		assert(mem_type_index == ATTR_DEVICE_INDEX);
63 		printf("DEV");
64 	}
65 
66 	if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
67 		/* For EL3 and EL2 only check the AP[2] and XN bits. */
68 		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
69 		printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
70 	} else {
71 		assert(xlat_regime == EL1_EL0_REGIME);
72 		/*
73 		 * For EL0 and EL1:
74 		 * - In AArch64 PXN and UXN can be set independently but in
75 		 *   AArch32 there is no UXN (XN affects both privilege levels).
76 		 *   For consistency, we set them simultaneously in both cases.
77 		 * - RO and RW permissions must be the same in EL1 and EL0. If
78 		 *   EL0 can access that memory region, so can EL1, with the
79 		 *   same permissions.
80 		 */
81 #if ENABLE_ASSERTIONS
82 		uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
83 		uint64_t xn_perm = desc & xn_mask;
84 
85 		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
86 #endif
87 		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
88 		/* Only check one of PXN and UXN, the other one is the same. */
89 		printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
90 		/*
91 		 * Privileged regions can only be accessed from EL1, user
92 		 * regions can be accessed from EL1 and EL0.
93 		 */
94 		printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
95 			  ? "-USER" : "-PRIV");
96 	}
97 
98 	printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
99 }
100 
101 static const char * const level_spacers[] = {
102 	"[LV0] ",
103 	"  [LV1] ",
104 	"    [LV2] ",
105 	"      [LV3] "
106 };
107 
108 static const char *invalid_descriptors_omitted =
109 		"%s(%d invalid descriptors omitted)\n";
110 
111 /*
112  * Function that reads the translation tables passed as an argument
113  * and prints their status.
114  */
115 static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
116 		const uint64_t *table_base, unsigned int table_entries,
117 		unsigned int level)
118 {
119 	assert(level <= XLAT_TABLE_LEVEL_MAX);
120 
121 	/*
122 	 * Data structure to track the TABLE_DESC entry visited at each level
123 	 * before iterating into the subtable of the next translation level.
124 	 * It is restored when returning from the subtable iteration.
125 	 */
126 	struct desc_table {
127 		const uint64_t *table_base;
128 		uintptr_t table_idx_va;
129 		unsigned int idx;
130 	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
131 		{NULL, 0U, XLAT_TABLE_ENTRIES}, };
132 	unsigned int this_level = level;
133 	const uint64_t *this_base = table_base;
134 	unsigned int max_entries = table_entries;
135 	size_t level_size = XLAT_BLOCK_SIZE(this_level);
136 	unsigned int table_idx = 0U;
137 	uintptr_t table_idx_va = table_base_va;
138 
139 	/*
140 	 * Keep track of how many invalid descriptors are counted in a row.
141 	 * Whenever multiple invalid descriptors are found, only the first one
142 	 * is printed, and a line is added to inform about how many descriptors
143 	 * have been omitted.
144 	 */
145 	int invalid_row_count = 0;
146 
147 	while (this_base != NULL) {
148 		/* Finished iterating the current xlat level. */
149 		if (table_idx >= max_entries) {
150 			if (invalid_row_count > 1) {
151 				printf(invalid_descriptors_omitted,
152 					  level_spacers[this_level],
153 					  invalid_row_count - 1);
154 			}
155 			invalid_row_count = 0;
156 
157 			/* No parent level to return to; the walk is complete. */
158 			if (this_level <= level) {
159 				this_base = NULL;
160 				table_idx = max_entries + 1;
161 			} else {
162 				/* Restore the previous TABLE_DESC entry and
163 				 * resume iterating at that level.
164 				 */
165 				this_level--;
166 				level_size = XLAT_BLOCK_SIZE(this_level);
167 				this_base = desc_tables[this_level].table_base;
168 				table_idx = desc_tables[this_level].idx;
169 				table_idx_va =
170 					desc_tables[this_level].table_idx_va;
171 				if (this_level == level) {
172 					max_entries = table_entries;
173 				} else {
174 					max_entries = XLAT_TABLE_ENTRIES;
175 				}
176 
177 				assert(this_base != NULL);
178 			}
179 		} else {
180 			uint64_t desc = this_base[table_idx];
181 
182 			if ((desc & DESC_MASK) == INVALID_DESC) {
183 				if (invalid_row_count == 0) {
184 					printf("%sVA:0x%lx size:0x%zx\n",
185 						  level_spacers[this_level],
186 						  table_idx_va, level_size);
187 				}
188 				invalid_row_count++;
189 				table_idx++;
190 				table_idx_va += level_size;
191 			} else {
192 				if (invalid_row_count > 1) {
193 					printf(invalid_descriptors_omitted,
194 						  level_spacers[this_level],
195 						  invalid_row_count - 1);
196 				}
197 				invalid_row_count = 0;
198 				/*
199 				 * Check if this is a table or a block. Tables
200 				 * are only allowed in levels below the final
201 				 * one, but PAGE_DESC has the same value as
202 				 * TABLE_DESC, so the level must be checked too.
203 				 */
204 
205 				if (((desc & DESC_MASK) == TABLE_DESC) &&
206 				    (this_level < XLAT_TABLE_LEVEL_MAX)) {
207 					uintptr_t addr_inner;
208 
209 					/*
210 					 * Do not print any PA for a table
211 					 * descriptor, as it doesn't directly
212 					 * map physical memory but instead
213 					 * points to the next translation
214 					 * table in the translation table walk.
215 					 */
216 					printf("%sVA:0x%lx size:0x%zx\n",
217 					       level_spacers[this_level],
218 					       table_idx_va, level_size);
219 
220 					addr_inner = desc & TABLE_ADDR_MASK;
221 					/* save current xlat level */
222 					desc_tables[this_level].table_base =
223 						this_base;
224 					desc_tables[this_level].idx =
225 						table_idx + 1;
226 					desc_tables[this_level].table_idx_va =
227 						table_idx_va + level_size;
228 
229 					/* start iterating next level entries */
230 					this_base = (uint64_t *)addr_inner;
231 					max_entries = XLAT_TABLE_ENTRIES;
232 					this_level++;
233 					level_size =
234 						XLAT_BLOCK_SIZE(this_level);
235 					table_idx = 0U;
236 				} else {
237 					printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
238 					       level_spacers[this_level],
239 					       table_idx_va,
240 					       (uint64_t)(desc & TABLE_ADDR_MASK),
241 					       level_size);
242 					xlat_desc_print(ctx, desc);
243 					printf("\n");
244 
245 					table_idx++;
246 					table_idx_va += level_size;
247 
248 				}
249 			}
250 		}
251 	}
252 }
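/*
 * Illustrative example of the output produced by the walk above; the
 * addresses, sizes and attribute strings are made up, and real output
 * depends on the platform memory map:
 *
 *	[LV1] VA:0x0 size:0x40000000
 *	  [LV2] VA:0x0 PA:0x0 size:0x200000 MEM-RW-XN-S
 *	  [LV2] VA:0x200000 size:0x200000
 *	  [LV2] (510 invalid descriptors omitted)
 */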
253 
254 void xlat_tables_print(xlat_ctx_t *ctx)
255 {
256 	const char *xlat_regime_str;
257 	int used_page_tables;
258 
259 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
260 		xlat_regime_str = "1&0";
261 	} else if (ctx->xlat_regime == EL2_REGIME) {
262 		xlat_regime_str = "2";
263 	} else {
264 		assert(ctx->xlat_regime == EL3_REGIME);
265 		xlat_regime_str = "3";
266 	}
267 	VERBOSE("Translation tables state:\n");
268 	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
269 	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
270 	VERBOSE("  Max allowed VA:  0x%lx\n", ctx->va_max_address);
271 	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
272 	VERBOSE("  Max mapped VA:   0x%lx\n", ctx->max_va);
273 
274 	VERBOSE("  Initial lookup level: %u\n", ctx->base_level);
275 	VERBOSE("  Entries @initial lookup level: %u\n",
276 		ctx->base_table_entries);
277 
278 #if PLAT_XLAT_TABLES_DYNAMIC
279 	used_page_tables = 0;
280 	for (int i = 0; i < ctx->tables_num; ++i) {
281 		if (ctx->tables_mapped_regions[i] != 0)
282 			++used_page_tables;
283 	}
284 #else
285 	used_page_tables = ctx->next_table;
286 #endif
287 	VERBOSE("  Used %d sub-tables out of %d (spare: %d)\n",
288 		used_page_tables, ctx->tables_num,
289 		ctx->tables_num - used_page_tables);
290 
291 	xlat_tables_print_internal(ctx, 0U, ctx->base_table,
292 				   ctx->base_table_entries, ctx->base_level);
293 }
294 
295 #endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
296 
297 /*
298  * Do a translation table walk to find the block or page descriptor that maps
299  * virtual_addr.
300  *
301  * On success, return the address of the descriptor within the translation
302  * table. Its lookup level is stored in '*out_level'.
303  * On error, return NULL.
304  *
305  * xlat_table_base
306  *   Base address for the initial lookup level.
307  * xlat_table_base_entries
308  *   Number of entries in the translation table for the initial lookup level.
309  * virt_addr_space_size
310  *   Size in bytes of the virtual address space.
311  */
312 static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
313 				       void *xlat_table_base,
314 				       unsigned int xlat_table_base_entries,
315 				       unsigned long long virt_addr_space_size,
316 				       unsigned int *out_level)
317 {
318 	unsigned int start_level;
319 	uint64_t *table;
320 	unsigned int entries;
321 
322 	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
323 
324 	table = xlat_table_base;
325 	entries = xlat_table_base_entries;
326 
327 	for (unsigned int level = start_level;
328 	     level <= XLAT_TABLE_LEVEL_MAX;
329 	     ++level) {
330 		uint64_t idx, desc, desc_type;
331 
332 		idx = XLAT_TABLE_IDX(virtual_addr, level);
333 		if (idx >= entries) {
334 			WARN("Missing xlat table entry at address 0x%lx\n",
335 			     virtual_addr);
336 			return NULL;
337 		}
338 
339 		desc = table[idx];
340 		desc_type = desc & DESC_MASK;
341 
342 		if (desc_type == INVALID_DESC) {
343 			VERBOSE("Invalid entry (memory not mapped)\n");
344 			return NULL;
345 		}
346 
347 		if (level == XLAT_TABLE_LEVEL_MAX) {
348 			/*
349 			 * Only page descriptors allowed at the final lookup
350 			 * level.
351 			 */
352 			assert(desc_type == PAGE_DESC);
353 			*out_level = level;
354 			return &table[idx];
355 		}
356 
357 		if (desc_type == BLOCK_DESC) {
358 			*out_level = level;
359 			return &table[idx];
360 		}
361 
362 		assert(desc_type == TABLE_DESC);
363 		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
364 		entries = XLAT_TABLE_ENTRIES;
365 	}
366 
367 	/*
368 	 * This shouldn't be reached, the translation table walk should end at
369 	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
370 	 */
371 	assert(false);
372 
373 	return NULL;
374 }
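/*
 * Worked example of the walk above, assuming the 4 KiB translation granule
 * used by this library and a 39-bit virtual address space: the walk starts
 * at level 1, where VA bits [38:30] index the base table; bits [29:21] then
 * index the level 2 table and bits [20:12] the level 3 table. The walk
 * returns at the first block descriptor found, or at the page descriptor
 * reached at level 3.
 */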
375 
376 
377 static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
378 		uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
379 		unsigned long long *addr_pa, unsigned int *table_level)
380 {
381 	uint64_t *entry;
382 	uint64_t desc;
383 	unsigned int level;
384 	unsigned long long virt_addr_space_size;
385 
386 	/*
387 	 * Sanity-check arguments.
388 	 */
389 	assert(ctx != NULL);
390 	assert(ctx->initialized);
391 	assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
392 	       (ctx->xlat_regime == EL2_REGIME) ||
393 	       (ctx->xlat_regime == EL3_REGIME));
394 
395 	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
396 	assert(virt_addr_space_size > 0U);
397 
398 	entry = find_xlat_table_entry(base_va,
399 				ctx->base_table,
400 				ctx->base_table_entries,
401 				virt_addr_space_size,
402 				&level);
403 	if (entry == NULL) {
404 		WARN("Address 0x%lx is not mapped.\n", base_va);
405 		return -EINVAL;
406 	}
407 
408 	if (addr_pa != NULL) {
409 		*addr_pa = *entry & TABLE_ADDR_MASK;
410 	}
411 
412 	if (table_entry != NULL) {
413 		*table_entry = entry;
414 	}
415 
416 	if (table_level != NULL) {
417 		*table_level = level;
418 	}
419 
420 	desc = *entry;
421 
422 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
423 	VERBOSE("Attributes: ");
424 	xlat_desc_print(ctx, desc);
425 	printf("\n");
426 #endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
427 
428 	assert(attributes != NULL);
429 	*attributes = 0U;
430 
431 	uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
432 
433 	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
434 		*attributes |= MT_MEMORY;
435 	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
436 		*attributes |= MT_NON_CACHEABLE;
437 	} else {
438 		assert(attr_index == ATTR_DEVICE_INDEX);
439 		*attributes |= MT_DEVICE;
440 	}
441 
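	/* AP[2] encodes write permission: 0 means read-write, 1 read-only. */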
442 	uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;
443 
444 	if (ap2_bit == AP2_RW)
445 		*attributes |= MT_RW;
446 
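	/*
	 * In the EL1&0 regime, AP[1] == 1 additionally grants unprivileged
	 * (EL0) access to the region.
	 */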
447 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
448 		uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;
449 
450 		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
451 			*attributes |= MT_USER;
452 	}
453 
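	/*
	 * For Secure-state translation regimes, the NS bit selects the
	 * non-secure physical address space.
	 */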
454 	uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;
455 
456 	if (ns_bit == 1U)
457 		*attributes |= MT_NS;
458 
459 	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
460 
461 	if ((desc & xn_mask) == xn_mask) {
462 		*attributes |= MT_EXECUTE_NEVER;
463 	} else {
464 		assert((desc & xn_mask) == 0U);
465 	}
466 
467 	return 0;
468 }
469 
470 
471 int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
472 				uint32_t *attr)
473 {
474 	return xlat_get_mem_attributes_internal(ctx, base_va, attr,
475 				NULL, NULL, NULL);
476 }
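/*
 * Minimal usage sketch (illustrative only; 'my_ctx' and 'va' are
 * hypothetical names for an initialized translation context and a mapped
 * virtual address):
 *
 *	uint32_t attr;
 *
 *	if (xlat_get_mem_attributes_ctx(&my_ctx, va, &attr) == 0) {
 *		if ((attr & MT_RW) != 0U) {
 *			... the region is mapped read-write ...
 *		}
 *	}
 */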
477 
478 
479 int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
480 				   size_t size, uint32_t attr)
481 {
482 	/* Note: This implementation isn't optimized. */
483 
484 	assert(ctx != NULL);
485 	assert(ctx->initialized);
486 
487 	unsigned long long virt_addr_space_size =
488 		(unsigned long long)ctx->va_max_address + 1U;
489 	assert(virt_addr_space_size > 0U);
490 
491 	if (!IS_PAGE_ALIGNED(base_va)) {
492 		WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
493 		     __func__, base_va);
494 		return -EINVAL;
495 	}
496 
497 	if (size == 0U) {
498 		WARN("%s: Size is 0.\n", __func__);
499 		return -EINVAL;
500 	}
501 
502 	if ((size % PAGE_SIZE) != 0U) {
503 		WARN("%s: Size 0x%zx is not a multiple of the page size.\n",
504 		     __func__, size);
505 		return -EINVAL;
506 	}
507 
508 	if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
509 		WARN("%s: Mapping memory as read-write and executable is not allowed.\n",
510 		     __func__);
511 		return -EINVAL;
512 	}
513 
514 	size_t pages_count = size / PAGE_SIZE;
515 
516 	VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
517 		pages_count, base_va);
518 
519 	uintptr_t base_va_original = base_va;
520 
521 	/*
522 	 * Sanity checks.
523 	 */
524 	for (size_t i = 0U; i < pages_count; ++i) {
525 		const uint64_t *entry;
526 		uint64_t desc, attr_index;
527 		unsigned int level;
528 
529 		entry = find_xlat_table_entry(base_va,
530 					      ctx->base_table,
531 					      ctx->base_table_entries,
532 					      virt_addr_space_size,
533 					      &level);
534 		if (entry == NULL) {
535 			WARN("Address 0x%lx is not mapped.\n", base_va);
536 			return -EINVAL;
537 		}
538 
539 		desc = *entry;
540 
541 		/*
542 		 * Check that all the required pages are mapped at page
543 		 * granularity.
544 		 */
545 		if (((desc & DESC_MASK) != PAGE_DESC) ||
546 			(level != XLAT_TABLE_LEVEL_MAX)) {
547 			WARN("Address 0x%lx is not mapped at the right granularity.\n",
548 			     base_va);
549 			WARN("Granularity is 0x%llx, should be 0x%x.\n",
550 			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
551 			return -EINVAL;
552 		}
553 
554 		/*
555 		 * If the region type is device, it shouldn't be executable.
556 		 */
557 		attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
558 		if (attr_index == ATTR_DEVICE_INDEX) {
559 			if ((attr & MT_EXECUTE_NEVER) == 0U) {
560 				WARN("Setting device memory as executable at address 0x%lx.\n",
561 				     base_va);
562 				return -EINVAL;
563 			}
564 		}
565 
566 		base_va += PAGE_SIZE;
567 	}
568 
569 	/* Restore original value. */
570 	base_va = base_va_original;
571 
572 	for (size_t i = 0U; i < pages_count; ++i) {
573 
574 		uint32_t old_attr = 0U, new_attr;
575 		uint64_t *entry = NULL;
576 		unsigned int level = 0U;
577 		unsigned long long addr_pa = 0ULL;
578 
579 		(void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
580 					    &entry, &addr_pa, &level);
581 
582 		/*
583 		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
584 		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
585 		 * information is ignored.
586 		 */
587 
588 		/* Clear the old attributes so that they can be rebuilt. */
589 		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);
590 
591 		/*
592 		 * Update attributes, but filter out the ones this function
593 		 * isn't allowed to change.
594 		 */
595 		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);
596 
597 		/*
598 		 * The break-before-make sequence requires writing an invalid
599 		 * descriptor and making sure that the system sees the change
600 		 * before writing the new descriptor.
601 		 */
602 		*entry = INVALID_DESC;
603 #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
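		/*
		 * On builds where CPUs may access the translation tables with
		 * the data cache disabled, clean the updated descriptor to the
		 * Point of Coherency (DC CVAC) so the change reaches memory.
		 */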
604 		dccvac((uintptr_t)entry);
605 #endif
606 		/* Invalidate any cached copy of this mapping in the TLBs. */
607 		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
608 
609 		/* Ensure completion of the invalidation. */
610 		xlat_arch_tlbi_va_sync();
611 
612 		/* Write new descriptor */
613 		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
614 #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
615 		dccvac((uintptr_t)entry);
616 #endif
617 		base_va += PAGE_SIZE;
618 	}
619 
620 	/* Ensure that the last descriptor written is seen by the system. */
621 	dsbish();
622 
623 	return 0;
624 }
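/*
 * Minimal usage sketch (illustrative only; 'my_ctx' and 'buf' are
 * hypothetical names for an initialized translation context and a
 * page-aligned buffer already mapped at page granularity): remap one page
 * as read-only and never-executable.
 *
 *	int ret = xlat_change_mem_attributes_ctx(&my_ctx, (uintptr_t)buf,
 *						 PAGE_SIZE,
 *						 MT_RO | MT_EXECUTE_NEVER);
 *	if (ret != 0) {
 *		ERROR("Failed to change memory attributes (%d)\n", ret);
 *	}
 */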
625