xref: /OK3568_Linux_fs/u-boot/arch/arm/cpu/armv8/cpu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * (C) Copyright 2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Marius Groeger <mgroeger@sysgo.de>
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <command.h>
#include <asm/system.h>
#include <asm/secure.h>
#include <linux/compiler.h>

/*
 * sdelay() - simple spin loop.
 *
 * Will delay execution by roughly (@loops * 2) cycles.
 * This is needed for delays before the timers are accessible.
 *
 * A value of "0" will result in 2^64 loops.
 */
void sdelay(unsigned long loops)
{
	__asm__ volatile ("1:\n" "subs %0, %0, #1\n"
			  "b.ne 1b" : "=r" (loops) : "0"(loops) : "cc");
}

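/*
 * Usage note (not part of the original file): sdelay() counts CPU cycles,
 * not wall-clock time, so it is only suitable for rough early delays before
 * any timer is programmed. A minimal sketch of a caller, assuming a
 * hypothetical board_early_power_on() helper that does not exist in U-Boot:
 *
 *	board_early_power_on();
 *	sdelay(1000);		// roughly 2000 core cycles; the actual
 *				// time depends on the CPU clock at this point
 */
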
int cleanup_before_linux(void)
{
	/*
	 * This function is called just before we call Linux.
	 * It prepares the processor for Linux:
	 *
	 * disable interrupts and turn off caches etc ...
	 */
	disable_interrupts();

	disable_serror();

	/*
	 * Turn off I-cache and invalidate it
	 */
	icache_disable();
	invalidate_icache_all();

	/*
	 * Turn off D-cache
	 * dcache_disable() in turn flushes the D-cache and disables the MMU
	 */
	dcache_disable();
	invalidate_dcache_all();

	return 0;
}

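/*
 * Caller note (not part of the original file): in the mainline U-Boot boot
 * flow, cleanup_before_linux() is typically invoked from the architecture
 * boot path (e.g. boot_jump_linux() in arch/arm/lib/bootm.c) right before
 * control is handed to the kernel, so that Linux starts with interrupts
 * masked and the caches/MMU disabled, as its entry requirements expect.
 */
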
#ifdef CONFIG_ARMV8_PSCI
static void relocate_secure_section(void)
{
#ifdef CONFIG_ARMV8_SECURE_BASE
	size_t sz = __secure_end - __secure_start;

	memcpy((void *)CONFIG_ARMV8_SECURE_BASE, __secure_start, sz);
	flush_dcache_range(CONFIG_ARMV8_SECURE_BASE,
			   CONFIG_ARMV8_SECURE_BASE + sz + 1);
	invalidate_icache_all();
#endif
}

void armv8_setup_psci(void)
{
	relocate_secure_section();
	secure_ram_addr(psci_setup_vectors)();
	secure_ram_addr(psci_arch_init)();
}
#endif
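
/*
 * Explanatory note (not part of the original file): armv8_setup_psci()
 * copies the linker-defined __secure section to CONFIG_ARMV8_SECURE_BASE
 * (when that base is configured), then calls the relocated
 * psci_setup_vectors() and psci_arch_init() through the secure_ram_addr()
 * helper so the PSCI handlers execute from their relocated secure-RAM copy.
 * A board enabling this path would be expected to select CONFIG_ARMV8_PSCI
 * and, optionally, define CONFIG_ARMV8_SECURE_BASE in its configuration.
 */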