xref: /optee_os/core/arch/arm/plat-vexpress/main.c (revision a38d95ce4287aa71af5aac98e9a5951434fc63ba)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <platform_config.h>
29 #include <pm_debug.h>
30 
31 #include <stdint.h>
32 #include <string.h>
33 
34 #include <drivers/gic.h>
35 #include <drivers/pl011.h>
36 #include <sm/sm.h>
37 #include <sm/tee_mon.h>
38 
39 #include <util.h>
40 
41 #include <arm.h>
42 #include <kernel/thread.h>
43 #include <kernel/panic.h>
44 #include <trace.h>
45 #include <kernel/misc.h>
46 #include <kernel/tee_time.h>
47 #include <mm/tee_pager.h>
48 #include <mm/core_mmu.h>
49 #include <mm/tee_mmu_defs.h>
50 #include <mm/tee_mmu.h>
51 #include <mm/tee_mm.h>
52 #include <utee_defines.h>
53 #include <tee/tee_cryp_provider.h>
54 #include <tee/entry.h>
55 #include <tee/arch_svc.h>
56 #include <console.h>
57 #include <malloc.h>
58 #include "plat_tee_func.h"
59 
60 #include <assert.h>
61 
62 #define PADDR_INVALID		0xffffffff
63 
64 #ifndef CFG_WITH_LPAE
65 /* Main MMU L1 table for teecore */
66 static uint32_t main_mmu_l1_ttb[TEE_MMU_L1_NUM_ENTRIES]
67 	__attribute__((section(".nozi.mmu.l1"),
68 		       aligned(TEE_MMU_L1_ALIGNMENT)));
69 static uint32_t main_mmu_l2_ttb[TEE_MMU_L2_NUM_ENTRIES]
70 	__attribute__((section(".nozi.mmu.l2"),
71 		       aligned(TEE_MMU_L2_ALIGNMENT)));
72 
73 /* MMU L1 table for TAs, one for each Core */
74 static uint32_t main_mmu_ul1_ttb[CFG_NUM_THREADS][TEE_MMU_UL1_NUM_ENTRIES]
75         __attribute__((section(".nozi.mmu.ul1"),
76 		      aligned(TEE_MMU_UL1_ALIGNMENT)));
77 #endif
78 
79 extern uint8_t __text_init_start[];
80 extern uint8_t __data_start[];
81 extern uint8_t __data_end[];
82 extern uint8_t __bss_start[];
83 extern uint8_t __bss_end[];
84 extern uint8_t __init_start[];
85 extern uint8_t __init_size[];
86 extern uint8_t __heap1_start[];
87 extern uint8_t __heap1_end[];
88 extern uint8_t __heap2_start[];
89 extern uint8_t __heap2_end[];
90 extern uint8_t __pageable_part_start[];
91 extern uint8_t __pageable_part_end[];
92 extern uint8_t __pageable_start[];
93 extern uint8_t __pageable_end[];
94 
95 static void main_fiq(void);
96 #if defined(CFG_WITH_ARM_TRUSTED_FW)
97 /* Implemented in assembly, referenced in this file only */
98 uint32_t cpu_on_handler(uint32_t a0, uint32_t a1);
99 
100 static uint32_t main_cpu_off_handler(uint32_t a0, uint32_t a1);
101 static uint32_t main_cpu_suspend_handler(uint32_t a0, uint32_t a1);
102 static uint32_t main_cpu_resume_handler(uint32_t a0, uint32_t a1);
103 static uint32_t main_system_off_handler(uint32_t a0, uint32_t a1);
104 static uint32_t main_system_reset_handler(uint32_t a0, uint32_t a1);
105 #else
106 static uint32_t main_default_pm_handler(uint32_t a0, uint32_t a1);
107 #endif
108 
109 static const struct thread_handlers handlers = {
110 	.std_smc = plat_tee_entry,
111 	.fast_smc = plat_tee_entry,
112 	.fiq = main_fiq,
113 	.svc = tee_svc_handler,
114 	.abort = tee_pager_abort_handler,
115 #if defined(CFG_WITH_ARM_TRUSTED_FW)
116 	.cpu_on = cpu_on_handler,
117 	.cpu_off = main_cpu_off_handler,
118 	.cpu_suspend = main_cpu_suspend_handler,
119 	.cpu_resume = main_cpu_resume_handler,
120 	.system_off = main_system_off_handler,
121 	.system_reset = main_system_reset_handler,
122 #else
123 	.cpu_on = main_default_pm_handler,
124 	.cpu_off = main_default_pm_handler,
125 	.cpu_suspend = main_default_pm_handler,
126 	.cpu_resume = main_default_pm_handler,
127 	.system_off = main_default_pm_handler,
128 	.system_reset = main_default_pm_handler,
129 #endif
130 };
131 
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/*
 * With ARM Trusted Firmware the secure monitor lives in ARM-TF, so
 * there is nothing to initialize here. No non-secure entry point is
 * handed over this way, hence the PADDR_INVALID placeholder check.
 */
static void main_init_sec_mon(uint32_t nsec_entry __unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/*
 * Without ARM-TF, OP-TEE provides its own secure monitor (sm).
 * Record the non-secure entry point and an initial non-secure PSR
 * (SVC mode, IRQ masked) in the monitor's non-secure context so the
 * first world switch enters the normal world there.
 */
static void main_init_sec_mon(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;

}
#endif
152 
153 #if defined(CFG_WITH_ARM_TRUSTED_FW)
154 static void main_init_nsacr(void)
155 {
156 }
157 #else
158 static void main_init_nsacr(void)
159 {
160 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
161 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
162 }
163 #endif
164 
#ifdef CFG_WITH_VFP
/*
 * Restrict CP10/CP11 (SIMD/VFP) access to PL1 only: clear the full
 * access field for each coprocessor, then set the PL1-only value.
 */
static void main_init_cpacr(void)
{
	uint32_t cpacr = read_cpacr();

	cpacr &= ~(CPACR_CP(10, CPACR_CP_ACCESS_FULL) |
		   CPACR_CP(11, CPACR_CP_ACCESS_FULL));
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_PL1_ONLY) |
		 CPACR_CP(11, CPACR_CP_ACCESS_PL1_ONLY);
	write_cpacr(cpacr);
}
#else
/* VFP/SIMD is unused in this configuration; leave access disabled. */
static void main_init_cpacr(void)
{
}
#endif
183 
#if PLATFORM_FLAVOR_IS(fvp) || PLATFORM_FLAVOR_IS(juno)
/*
 * fvp/juno: the GIC distributor/CPU interface is already configured by
 * ARM-TF, so only record the base addresses here and claim the console
 * UART interrupt as a secure interrupt source.
 */
static void main_init_gic(void)
{
	/*
	 * On ARMv8, GIC configuration is initialized in ARM-TF,
	 */
	gic_init_base_addr(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);
	gic_it_add(IT_CONSOLE_UART);
	/* Route FIQ to primary CPU */
	gic_it_set_cpu_mask(IT_CONSOLE_UART, gic_it_get_target(0));
	gic_it_set_prio(IT_CONSOLE_UART, 0x1);
	gic_it_enable(IT_CONSOLE_UART);

}
#elif PLATFORM_FLAVOR_IS(qemu)
/*
 * qemu: no ARM-TF, so perform the full GIC initialization here, then
 * route the console UART interrupt to CPU0 with priority 0xff.
 */
static void main_init_gic(void)
{
	/* Initialize GIC */
	gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);
	gic_it_add(IT_CONSOLE_UART);
	gic_it_set_cpu_mask(IT_CONSOLE_UART, 0x1);
	gic_it_set_prio(IT_CONSOLE_UART, 0xff);
	gic_it_enable(IT_CONSOLE_UART);
}
#elif PLATFORM_FLAVOR_IS(qemu_virt)
/* qemu_virt: basic GIC init only; no console interrupt is claimed. */
static void main_init_gic(void)
{
	/* Initialize GIC */
	gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);
}
#endif
215 
216 #ifdef CFG_WITH_PAGER
217 
218 static size_t get_block_size(void)
219 {
220 	struct core_mmu_table_info tbl_info;
221 	unsigned l;
222 
223 	if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
224 		panic();
225 	l = tbl_info.level - 1;
226 	if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
227 		panic();
228 	return 1 << tbl_info.shift;
229 }
230 
/*
 * Pager configuration: set up the runtime environment during primary
 * CPU boot. Verifies the SHA-256 hashes of the pageable part of the
 * image, copies it into secure DDR (the "paged store"), initializes
 * the heaps and the core virtual memory pool, and hands the pageable
 * virtual range over to the pager.
 *
 * @pageable_part: physical address of the pageable part of the image
 *                 as placed by the boot loader.
 */
static void main_init_runtime(uint32_t pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	/* One SHA-256 hash per small page of the pageable area */
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;
	uint8_t *tmp_hashes = __init_start + init_size;
	size_t block_size;


	TEE_ASSERT(pageable_size % SMALL_PAGE_SIZE == 0);


	/*
	 * Copy it right after the init area. The hashes are read from
	 * __data_end + init_size; presumably that is where the image
	 * layout places them — TODO confirm against the linker script.
	 */
	memcpy(tmp_hashes, __data_end + init_size, hash_size);

	/*
	 * Zero BSS area. Note that globals that would normally go
	 * into BSS which are used before this has to be put into .nozi.*
	 * to avoid getting overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	/* Heap must be up before the malloc(hash_size) below */
	malloc_init(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	/* Move the stashed hashes into heap memory the pager can keep */
	hashes = malloc(hash_size);
	/* NOTE(review): error-level trace looks like leftover debugging */
	EMSG("hash_size %zu", hash_size);
	TEE_ASSERT(hashes);
	memcpy(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	/* Carve out the backing store for the whole pageable area */
	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	TEE_ASSERT(mm);
	paged_store = (uint8_t *)tee_mm_get_smem(mm);
	/* Copy init part into pageable area */
	memcpy(paged_store, __init_start, init_size);
	/* Copy pageable part after init part into pageable area */
	memcpy(paged_store + init_size, (void *)pageable_part,
		__pageable_part_end - __pageable_part_start);

	/* Check that hashes of what's in pageable area is OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
				n, page, res);
			panic();
		}
	}

	/*
	 * Copy what's not initialized in the last init page. Needed
	 * because we're not going to fault in the init pages again. We
	 * can't fault in pages until we've switched to the new vector
	 * by calling thread_init_handlers() below.
	 */
	if (init_size % SMALL_PAGE_SIZE) {
		uint8_t *p;

		memcpy(__init_start + init_size, paged_store + init_size,
			SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

		/* Page-align for the cache maintenance operations */
		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
				~SMALL_PAGE_MASK);

		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
				     SMALL_PAGE_SIZE);
	}

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb
	 * which is supplied to tee_pager_init() below.
	 */
	block_size = get_block_size();
	if (!tee_mm_init(&tee_mm_vcore,
			ROUNDDOWN(CFG_TEE_RAM_START, block_size),
			ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
				block_size),
			SMALL_PAGE_SHIFT, 0))
		panic();

	/*
	 * Claim virtual memory which isn't paged, note that there might
	 * be a gap between tee_mm_vcore.lo and TEE_RAM_START which is
	 * also claimed to avoid later allocations to get that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
			(vaddr_t)(__text_init_start - tee_mm_vcore.lo));
	TEE_ASSERT(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	TEE_ASSERT(mm);
	tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
			   paged_store, hashes);
	/* Init pages are resident and locked (can't be paged out) ... */
	tee_pager_add_pages((vaddr_t)__pageable_start,
		ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
	/* ... while the rest starts out unmapped and is demand-paged */
	tee_pager_add_pages((vaddr_t)__pageable_start +
				ROUNDUP(init_size, SMALL_PAGE_SIZE),
			(pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
				SMALL_PAGE_SIZE, true);

}
355 #else
356 static void main_init_runtime(uint32_t pageable_part __unused)
357 {
358 	/*
359 	 * Zero BSS area. Note that globals that would normally would go
360 	 * into BSS which are used before this has to be put into .nozi.*
361 	 * to avoid getting overwritten.
362 	 */
363 	memset(__bss_start, 0, __bss_end - __bss_start);
364 
365 	malloc_init(__heap1_start, __heap1_end - __heap1_start);
366 
367 	/*
368 	 * Initialized at this stage in the pager version of this function
369 	 * above
370 	 */
371 	teecore_init_ta_ram();
372 }
373 #endif
374 
/*
 * Common primary CPU initialization: runtime/heap setup, thread
 * framework, secure monitor context, GIC and coprocessor access
 * configuration, then TEE core init. The call order below matters;
 * e.g. main_init_runtime() must run before anything that allocates.
 *
 * @pageable_part: physical address of the pageable image part
 *                 (ignored in non-pager builds).
 * @nsec_entry: non-secure entry point, or PADDR_INVALID with ARM-TF.
 */
static void main_init_primary_helper(uint32_t pageable_part,
				     uint32_t nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switch to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that IRQ is blocked when using most if its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	main_init_cpacr();

	main_init_runtime(pageable_part);

	DMSG("TEE initializing\n");

	thread_init_primary(&handlers);
	thread_init_per_cpu();
	main_init_sec_mon(nsec_entry);


	main_init_gic();
	main_init_nsacr();

	/* Bring up the TEE core proper; a failure here is fatal */
	if (init_teecore() != TEE_SUCCESS)
		panic();
	DMSG("Primary CPU switching to normal world boot\n");
}
403 
/*
 * Common secondary CPU initialization: per-CPU thread state, secure
 * monitor context and coprocessor access setup. Unlike the primary
 * path there is no runtime/heap or GIC distributor work to do here.
 *
 * @nsec_entry: non-secure entry point, or PADDR_INVALID with ARM-TF.
 */
static void main_init_secondary_helper(uint32_t nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switch to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that IRQ is blocked when using most if its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	thread_init_per_cpu();
	main_init_sec_mon(nsec_entry);
	main_init_cpacr();
	main_init_nsacr();

	DMSG("Secondary CPU Switching to normal world boot\n");
}
421 
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/* called from assembly only */
uint32_t *main_init_primary(uint32_t pageable_part);
/*
 * Primary CPU entry with ARM-TF: no non-secure entry point is handed
 * over (PADDR_INVALID placeholder) and the thread vector table is
 * returned so ARM-TF can dispatch later SMC/FIQ/PM requests into it.
 */
uint32_t *main_init_primary(uint32_t pageable_part)
{
	main_init_primary_helper(pageable_part, PADDR_INVALID);
	return thread_vector_table;
}
#else
/* called from assembly only */
void main_init_primary(uint32_t pageable_part, uint32_t nsec_entry);
/*
 * Primary CPU entry without ARM-TF: boot assembly supplies the
 * non-secure entry point for the built-in secure monitor.
 */
void main_init_primary(uint32_t pageable_part, uint32_t nsec_entry)
{
	main_init_primary_helper(pageable_part, nsec_entry);
}

/* called from assembly only */
void main_init_secondary(uint32_t nsec_entry);
/* Secondary CPU entry without ARM-TF. */
void main_init_secondary(uint32_t nsec_entry)
{
	main_init_secondary_helper(nsec_entry);
}
#endif
445 
446 static void main_fiq(void)
447 {
448 	uint32_t iar;
449 
450 	DMSG("enter");
451 
452 	iar = gic_read_iar();
453 
454 	while (pl011_have_rx_data(CONSOLE_UART_BASE)) {
455 		DMSG("cpu %zu: got 0x%x",
456 		     get_core_pos(), pl011_getchar(CONSOLE_UART_BASE));
457 	}
458 
459 	gic_write_eoir(iar);
460 
461 	DMSG("return");
462 }
463 
464 #if defined(CFG_WITH_ARM_TRUSTED_FW)
/*
 * PSCI CPU_OFF hook, dispatched through the thread_handlers table.
 * Currently only a trace point; a0/a1 are the raw arguments from the
 * secure monitor. Always returns 0 (success).
 */
static uint32_t main_cpu_off_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Could stop generic timer here */
	/* Fixed trace format: "0%x" was missing the x in the 0x prefix */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}
473 
/*
 * PSCI CPU_SUSPEND hook. Currently only a trace point; a0/a1 are the
 * raw arguments from the secure monitor. Always returns 0 (success).
 */
static uint32_t main_cpu_suspend_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Could save generic timer here */
	/* Fixed trace format: "0%x" was missing the x in the 0x prefix */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}
482 
/*
 * PSCI CPU resume hook. Currently only a trace point; a0/a1 are the
 * raw arguments from the secure monitor. Always returns 0 (success).
 */
static uint32_t main_cpu_resume_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Could restore generic timer here */
	/* Fixed trace format: "0%x" was missing the x in the 0x prefix */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}
491 
492 /* called from assembly only */
493 uint32_t main_cpu_on_handler(uint32_t a0, uint32_t a1);
494 uint32_t main_cpu_on_handler(uint32_t a0, uint32_t a1)
495 {
496 	(void)&a0;
497 	(void)&a1;
498 	PM_DEBUG("cpu %zu: a0 0%x", get_core_pos(), a0);
499 	main_init_secondary_helper(PADDR_INVALID);
500 	return 0;
501 }
502 
/*
 * PSCI SYSTEM_OFF hook. Currently only a trace point; always
 * returns 0 (success).
 */
static uint32_t main_system_off_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Fixed trace format: "0%x" was missing the x in the 0x prefix */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}
510 
/*
 * PSCI SYSTEM_RESET hook. Currently only a trace point; always
 * returns 0 (success).
 */
static uint32_t main_system_reset_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Fixed trace format: "0%x" was missing the x in the 0x prefix */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}
518 
519 #else /* !CFG_WITH_ARM_TRUSTED_FW */
/*
 * Fallback PM handler for the non-ARM-TF configuration where these
 * PSCI callbacks are not supported: any call is a logic error, so
 * panic. The return statement is only there to satisfy the handler
 * signature and is never reached if panic() does not return.
 */
static uint32_t main_default_pm_handler(uint32_t a0, uint32_t a1)
{
	/*
	 * This function is not supported in this configuration, and
	 * should never be called. Panic to catch unintended calls.
	 */
	(void)&a0;
	(void)&a1;
	panic();
	return 1;
}
531 #endif
532 
533 #ifndef CFG_WITH_LPAE
/*
 * Returns the physical address of the main (L1) translation table.
 * Asserts the required TTBR alignment.
 */
paddr_t core_mmu_get_main_ttb_pa(void)
{
	/* Note that this depends on flat mapping of TEE Core */
	paddr_t pa = (paddr_t)core_mmu_get_main_ttb_va();

	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_L1_MASK));
	return pa;
}
542 
/* Returns the virtual address of the main (L1) translation table. */
vaddr_t core_mmu_get_main_ttb_va(void)
{
	return (vaddr_t)main_mmu_l1_ttb;
}
547 
/*
 * Returns the physical address of the current thread's user (TA) L1
 * translation table. Asserts the required TTBR alignment.
 */
paddr_t core_mmu_get_ul1_ttb_pa(void)
{
	/* Note that this depends on flat mapping of TEE Core */
	paddr_t pa = (paddr_t)core_mmu_get_ul1_ttb_va();

	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_UL1_MASK));
	return pa;
}
556 
/*
 * Returns the virtual address of the user (TA) L1 translation table
 * for the calling thread — one table per thread in main_mmu_ul1_ttb.
 */
vaddr_t core_mmu_get_ul1_ttb_va(void)
{
	return (vaddr_t)main_mmu_ul1_ttb[thread_get_id()];
}
561 #endif
562 
563 void console_putc(int ch)
564 {
565 	pl011_putc(ch, CONSOLE_UART_BASE);
566 	if (ch == '\n')
567 		pl011_putc('\r', CONSOLE_UART_BASE);
568 }
569 
/* Block until the PL011 console has drained its transmit FIFO. */
void console_flush(void)
{
	pl011_flush(CONSOLE_UART_BASE);
}
574 
575 #ifndef CFG_WITH_LPAE
/*
 * Hands out the statically allocated L2 translation table
 * (main_mmu_l2_ttb) for the initial core mapping, if the requested
 * region fits in the virtual space the table can cover.
 *
 * Only a single allocation is ever served: once l2_offs is non-zero
 * every later call returns NULL. NOTE(review): the increment adds a
 * table *count* while the l2_va_space computation treats l2_offs as a
 * byte offset — harmless today since any non-zero value blocks
 * further allocations, but confirm before allowing multiple calls.
 */
void *core_mmu_alloc_l2(struct tee_mmap_region *mm)
{
	/* Can't have this in .bss since it's not initialized yet */
	static size_t l2_offs __attribute__((section(".data")));
	const size_t l2_va_size = TEE_MMU_L2_NUM_ENTRIES * SMALL_PAGE_SIZE;
	size_t l2_va_space = ((sizeof(main_mmu_l2_ttb) - l2_offs) /
			     TEE_MMU_L2_SIZE) * l2_va_size;

	if (l2_offs)
		return NULL;
	if (mm->size > l2_va_space)
		return NULL;
	l2_offs += ROUNDUP(mm->size, l2_va_size) / l2_va_size;
	return main_mmu_l2_ttb;
}
591 #endif
592