xref: /optee_os/core/arch/arm/plat-vexpress/main.c (revision abe38974ad2d4cbb72940f322210364fb3a9a490)
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_config.h>
#include <pm_debug.h>

#include <stdint.h>
#include <string.h>

#include <drivers/gic.h>
#include <drivers/pl011.h>
#include <sm/sm.h>
#include <sm/tee_mon.h>

#include <util.h>

#include <arm.h>
#include <kernel/thread.h>
#include <kernel/panic.h>
#include <trace.h>
#include <kernel/misc.h>
#include <kernel/tee_time.h>
#include <mm/tee_pager.h>
#include <mm/core_mmu.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mm.h>
#include <utee_defines.h>
#include <tee/tee_cryp_provider.h>
#include <tee/entry.h>
#include <tee/arch_svc.h>
#include <console.h>
#include <malloc.h>
#include "plat_tee_func.h"

#include <assert.h>

#define PADDR_INVALID		0xffffffff

#ifndef CFG_WITH_LPAE
/* Main MMU L1 table for teecore */
static uint32_t main_mmu_l1_ttb[TEE_MMU_L1_NUM_ENTRIES]
	__attribute__((section(".nozi.mmu.l1"),
		       aligned(TEE_MMU_L1_ALIGNMENT)));
static uint32_t main_mmu_l2_ttb[TEE_MMU_L2_NUM_ENTRIES]
	__attribute__((section(".nozi.mmu.l2"),
		       aligned(TEE_MMU_L2_ALIGNMENT)));

/* MMU L1 table for TAs, one for each thread */
static uint32_t main_mmu_ul1_ttb[CFG_NUM_THREADS][TEE_MMU_UL1_NUM_ENTRIES]
	__attribute__((section(".nozi.mmu.ul1"),
		       aligned(TEE_MMU_UL1_ALIGNMENT)));
#endif

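/*
 * Symbols below are defined in the link script; only their addresses
 * are meaningful (__init_size encodes a size as an address).
 */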
extern uint8_t __text_init_start[];
extern uint8_t __data_start[];
extern uint8_t __data_end[];
extern uint8_t __bss_start[];
extern uint8_t __bss_end[];
extern uint8_t __init_start[];
extern uint8_t __init_size[];
extern uint8_t __heap1_start[];
extern uint8_t __heap1_end[];
extern uint8_t __heap2_start[];
extern uint8_t __heap2_end[];
extern uint8_t __pageable_part_start[];
extern uint8_t __pageable_part_end[];
extern uint8_t __pageable_start[];
extern uint8_t __pageable_end[];

static void main_fiq(void);
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/* Implemented in assembly, referenced in this file only */
uint32_t cpu_on_handler(uint32_t a0, uint32_t a1);

static uint32_t main_cpu_off_handler(uint32_t a0, uint32_t a1);
static uint32_t main_cpu_suspend_handler(uint32_t a0, uint32_t a1);
static uint32_t main_cpu_resume_handler(uint32_t a0, uint32_t a1);
static uint32_t main_system_off_handler(uint32_t a0, uint32_t a1);
static uint32_t main_system_reset_handler(uint32_t a0, uint32_t a1);
#elif defined(CFG_WITH_SEC_MON)
static uint32_t main_default_pm_handler(uint32_t a0, uint32_t a1);
#else
#error Platform must use either ARM_TRUSTED_FW or SEC_MON
#endif

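/*
 * Handlers registered with the thread framework: SMCs are dispatched to
 * plat_tee_entry(), FIQs are handled by main_fiq() below, SVCs go to
 * the TA syscall handler and aborts to the pager. The power management
 * hooks are only meaningful with ARM Trusted Firmware (PSCI); with the
 * internal secure monitor they must never be reached and panic.
 */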
static const struct thread_handlers handlers = {
	.std_smc = plat_tee_entry,
	.fast_smc = plat_tee_entry,
	.fiq = main_fiq,
	.svc = tee_svc_handler,
	.abort = tee_pager_abort_handler,
#if defined(CFG_WITH_ARM_TRUSTED_FW)
	.cpu_on = cpu_on_handler,
	.cpu_off = main_cpu_off_handler,
	.cpu_suspend = main_cpu_suspend_handler,
	.cpu_resume = main_cpu_resume_handler,
	.system_off = main_system_off_handler,
	.system_reset = main_system_reset_handler,
#elif defined(CFG_WITH_SEC_MON)
	.cpu_on = main_default_pm_handler,
	.cpu_off = main_default_pm_handler,
	.cpu_suspend = main_default_pm_handler,
	.cpu_resume = main_default_pm_handler,
	.system_off = main_default_pm_handler,
	.system_reset = main_default_pm_handler,
#endif
};

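/*
 * With ARM Trusted Firmware there is no secure monitor inside OP-TEE,
 * so there is nothing to initialize. With the internal secure monitor
 * the saved non-secure context is primed so that the first monitor
 * return enters the normal world at nsec_entry, in SVC mode with IRQs
 * masked.
 */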
#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void main_init_sec_mon(uint32_t nsec_entry __unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#elif defined(CFG_WITH_SEC_MON)
static void main_init_sec_mon(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void main_init_nsacr(void)
{
}
#else
static void main_init_nsacr(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

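/*
 * NSACR above controls whether the normal world may access CP10/CP11
 * at all; with ARM Trusted Firmware it is assumed to already be
 * configured by the firmware. CPACR below controls from which
 * privilege levels the secure side itself may use the coprocessors.
 */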
#ifdef CFG_WITH_VFP
static void main_init_cpacr(void)
{
	uint32_t cpacr = read_cpacr();

	/* Enable usage of CP10 and CP11 (SIMD/VFP) from PL1 only */
	cpacr &= ~CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_PL1_ONLY);
	cpacr &= ~CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_PL1_ONLY);
	write_cpacr(cpacr);
}
#else
static void main_init_cpacr(void)
{
	/* We're not using VFP/SIMD instructions, leave them disabled */
}
#endif

#if PLATFORM_FLAVOR_IS(fvp) || PLATFORM_FLAVOR_IS(juno)
static void main_init_gic(void)
{
	/*
	 * On ARMv8 the GIC itself is initialized by ARM-TF, here we only
	 * register the base addresses and configure the console UART
	 * interrupt.
	 */
	gic_init_base_addr(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);
	gic_it_add(IT_CONSOLE_UART);
	/* Route FIQ to primary CPU */
	gic_it_set_cpu_mask(IT_CONSOLE_UART, gic_it_get_target(0));
	gic_it_set_prio(IT_CONSOLE_UART, 0x1);
	gic_it_enable(IT_CONSOLE_UART);
}
#elif PLATFORM_FLAVOR_IS(qemu)
static void main_init_gic(void)
{
	/* Initialize GIC */
	gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);
	gic_it_add(IT_CONSOLE_UART);
	gic_it_set_cpu_mask(IT_CONSOLE_UART, 0x1);
	gic_it_set_prio(IT_CONSOLE_UART, 0xff);
	gic_it_enable(IT_CONSOLE_UART);
}
#elif PLATFORM_FLAVOR_IS(qemu_virt)
static void main_init_gic(void)
{
	/* Initialize GIC */
	gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);
}
#endif

#ifdef CFG_WITH_PAGER

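/*
 * Returns the size mapped by one entry in the table one level above
 * the deepest table covering CFG_TEE_RAM_START, that is, the block
 * size the pager's virtual memory pool is aligned to below.
 */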
static size_t get_block_size(void)
{
	struct core_mmu_table_info tbl_info;
	unsigned l;

	if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
		panic();
	l = tbl_info.level - 1;
	if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
		panic();
	return 1 << tbl_info.shift;
}

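/*
 * A sketch of the boot image layout assumed by the code below
 * (reconstructed from how the addresses are used, not from the link
 * script): the init (unpaged) part is __init_size bytes, the SHA-256
 * hashes of all pageable pages are stored at __data_end + init_size,
 * and the rest of the pageable code/data has been placed at
 * pageable_part by earlier boot stages.
 */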
static void main_init_runtime(uint32_t pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;
	uint8_t *tmp_hashes = __init_start + init_size;
	size_t block_size;

	TEE_ASSERT(pageable_size % SMALL_PAGE_SIZE == 0);

	/*
	 * Copy the page hashes to just after the init area, they are
	 * moved into the heap below once malloc has been initialized.
	 */
	memcpy(tmp_hashes, __data_end + init_size, hash_size);

	/*
	 * Zero BSS area. Note that globals which would normally go into
	 * BSS but are used before this point have to be put into .nozi.*
	 * to avoid getting overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	malloc_init(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	DMSG("hash_size %zu", hash_size);
	TEE_ASSERT(hashes);
	memcpy(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	TEE_ASSERT(mm);
	paged_store = (uint8_t *)tee_mm_get_smem(mm);
	/* Copy init part into pageable area */
	memcpy(paged_store, __init_start, init_size);
	/* Copy pageable part after init part into pageable area */
	memcpy(paged_store + init_size, (void *)pageable_part,
		__pageable_part_end - __pageable_part_start);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
				n, page, res);
			panic();
		}
	}

	/*
	 * Copy what's not initialized in the last init page. Needed
	 * because we're not going to fault in the init pages again. We
	 * can't fault in pages until we've switched to the new vector
	 * by calling thread_init_primary() later during boot.
	 */
	if (init_size % SMALL_PAGE_SIZE) {
		uint8_t *p;

		memcpy(__init_start + init_size, paged_store + init_size,
			SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
				~SMALL_PAGE_MASK);

		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
				     SMALL_PAGE_SIZE);
	}

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb,
	 * which is used by the pager below.
	 */
	block_size = get_block_size();
	if (!tee_mm_init(&tee_mm_vcore,
			ROUNDDOWN(CFG_TEE_RAM_START, block_size),
			ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
				block_size),
			SMALL_PAGE_SHIFT, 0))
		panic();

	/*
	 * Claim virtual memory which isn't paged. Note that there might
	 * be a gap between tee_mm_vcore.lo and TEE_RAM_START which is
	 * also claimed, so that later allocations cannot get hold of
	 * that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
			(vaddr_t)(__text_init_start - tee_mm_vcore.lo));
	TEE_ASSERT(mm);

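	/*
	 * Finally hand the pageable area over to the pager. As used
	 * here, the last argument to tee_pager_add_pages() selects
	 * whether the pages start out unmapped and are faulted in on
	 * demand (true), or stay mapped for now (false) because the
	 * init code is still executing from them.
	 */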
	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	TEE_ASSERT(mm);
	tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
			   paged_store, hashes);
	tee_pager_add_pages((vaddr_t)__pageable_start,
		ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start +
				ROUNDUP(init_size, SMALL_PAGE_SIZE),
			(pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
				SMALL_PAGE_SIZE, true);
}
#else
static void main_init_runtime(uint32_t pageable_part __unused)
{
	/*
	 * Zero BSS area. Note that globals which would normally go into
	 * BSS but are used before this point have to be put into .nozi.*
	 * to avoid getting overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	malloc_init(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * Initialized at this stage in the pager version of this
	 * function above.
	 */
	teecore_init_ta_ram();
}
#endif

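/*
 * Boot order below matters: the runtime (BSS, heap and, with the pager,
 * the paged store) has to be set up before anything that allocates or
 * pages, and the thread vector has to be installed before the TEE core
 * itself is initialized.
 */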
static void main_init_primary_helper(uint32_t pageable_part,
				     uint32_t nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switching to the thread
	 * vector as the thread handler requires those to be masked
	 * while executing with the temporary stack. The thread
	 * subsystem also asserts that IRQ is blocked when using most
	 * of its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	main_init_cpacr();

	main_init_runtime(pageable_part);

	DMSG("TEE initializing\n");

	thread_init_primary(&handlers);
	thread_init_per_cpu();
	main_init_sec_mon(nsec_entry);

	main_init_gic();
	main_init_nsacr();

	if (init_teecore() != TEE_SUCCESS)
		panic();
	DMSG("Primary CPU switching to normal world boot\n");
}

static void main_init_secondary_helper(uint32_t nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switching to the thread
	 * vector as the thread handler requires those to be masked
	 * while executing with the temporary stack. The thread
	 * subsystem also asserts that IRQ is blocked when using most
	 * of its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	thread_init_per_cpu();
	main_init_sec_mon(nsec_entry);
	main_init_cpacr();
	main_init_nsacr();

	DMSG("Secondary CPU switching to normal world boot\n");
}

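/*
 * Entry points below are reached from the assembly entry code. With
 * ARM Trusted Firmware the address of thread_vector_table is handed
 * back to the firmware, which is expected to use it for all subsequent
 * entries into OP-TEE.
 */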
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/* called from assembly only */
uint32_t *main_init_primary(uint32_t pageable_part);
uint32_t *main_init_primary(uint32_t pageable_part)
{
	main_init_primary_helper(pageable_part, PADDR_INVALID);
	return thread_vector_table;
}
#elif defined(CFG_WITH_SEC_MON)
/* called from assembly only */
void main_init_primary(uint32_t pageable_part, uint32_t nsec_entry);
void main_init_primary(uint32_t pageable_part, uint32_t nsec_entry)
{
	main_init_primary_helper(pageable_part, nsec_entry);
}

/* called from assembly only */
void main_init_secondary(uint32_t nsec_entry);
void main_init_secondary(uint32_t nsec_entry)
{
	main_init_secondary_helper(nsec_entry);
}
#endif

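/*
 * FIQ handler for the console UART interrupt configured in
 * main_init_gic(): acknowledge the interrupt at the GIC, drain the
 * UART RX FIFO, then signal end of interrupt.
 */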
static void main_fiq(void)
{
	uint32_t iar;

	DMSG("enter");

	iar = gic_read_iar();

	while (pl011_have_rx_data(CONSOLE_UART_BASE)) {
		DMSG("cpu %zu: got 0x%x",
		     get_core_pos(), pl011_getchar(CONSOLE_UART_BASE));
	}

	gic_write_eoir(iar);

	DMSG("return");
}

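/*
 * Power management notification handlers below are invoked through the
 * PSCI implementation in ARM Trusted Firmware. Most of them currently
 * only trace; cpu_on additionally runs the secondary CPU
 * initialization.
 */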
#if defined(CFG_WITH_ARM_TRUSTED_FW)
static uint32_t main_cpu_off_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Could stop generic timer here */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}

static uint32_t main_cpu_suspend_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Could save generic timer here */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}

static uint32_t main_cpu_resume_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Could restore generic timer here */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}

/* called from assembly only */
uint32_t main_cpu_on_handler(uint32_t a0, uint32_t a1);
uint32_t main_cpu_on_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	main_init_secondary_helper(PADDR_INVALID);
	return 0;
}

static uint32_t main_system_off_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}

static uint32_t main_system_reset_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}

#elif defined(CFG_WITH_SEC_MON)
static uint32_t main_default_pm_handler(uint32_t a0, uint32_t a1)
{
	/*
	 * This function is not supported in this configuration, and
	 * should never be called. Panic to catch unintended calls.
	 */
	(void)&a0;
	(void)&a1;
	panic();
	return 1;
}
#endif

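/*
 * The translation table accessors below rely on TEE core being flat
 * mapped, so a table's physical address can be derived directly from
 * its virtual address. The asserts check that no address bits fall
 * outside the TTBR base field, i.e. that the tables are sufficiently
 * aligned to be programmed into TTBR.
 */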
#ifndef CFG_WITH_LPAE
paddr_t core_mmu_get_main_ttb_pa(void)
{
	/* Note that this depends on flat mapping of TEE Core */
	paddr_t pa = (paddr_t)core_mmu_get_main_ttb_va();

	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_L1_MASK));
	return pa;
}

vaddr_t core_mmu_get_main_ttb_va(void)
{
	return (vaddr_t)main_mmu_l1_ttb;
}

paddr_t core_mmu_get_ul1_ttb_pa(void)
{
	/* Note that this depends on flat mapping of TEE Core */
	paddr_t pa = (paddr_t)core_mmu_get_ul1_ttb_va();

	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_UL1_MASK));
	return pa;
}

vaddr_t core_mmu_get_ul1_ttb_va(void)
{
	return (vaddr_t)main_mmu_ul1_ttb[thread_get_id()];
}
#endif

void console_putc(int ch)
{
	pl011_putc(ch, CONSOLE_UART_BASE);
	if (ch == '\n')
		pl011_putc('\r', CONSOLE_UART_BASE);
}

void console_flush(void)
{
	pl011_flush(CONSOLE_UART_BASE);
}

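/*
 * Hands out L2 translation table memory from main_mmu_l2_ttb. Note
 * that only a single allocation is supported: l2_offs effectively acts
 * as an "already allocated" flag, any later call returns NULL.
 */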
#ifndef CFG_WITH_LPAE
void *core_mmu_alloc_l2(struct tee_mmap_region *mm)
{
	/* Can't have this in .bss since it's not initialized yet */
	static size_t l2_offs __attribute__((section(".data")));
	const size_t l2_va_size = TEE_MMU_L2_NUM_ENTRIES * SMALL_PAGE_SIZE;
	size_t l2_va_space = ((sizeof(main_mmu_l2_ttb) - l2_offs) /
			     TEE_MMU_L2_SIZE) * l2_va_size;

	if (l2_offs)
		return NULL;
	if (mm->size > l2_va_space)
		return NULL;
	l2_offs += ROUNDUP(mm->size, l2_va_size) / l2_va_size;
	return main_mmu_l2_ttb;
}
#endif
595