/*
 * (C) Copyright 2013
 * Andre Przywara, Linaro <andre.przywara@linaro.org>
 *
 * Routines to transition ARMv7 processors from secure into non-secure state
 * and from non-secure SVC into HYP mode
 * needed to enable ARMv7 virtualization for current hypervisors
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/armv7.h>
#include <asm/gic.h>
#include <asm/io.h>
#include <asm/secure.h>

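/*
 * Read ID_PFR1 (CP15 c0, c1, 1); its feature fields advertise the Security
 * extensions (bits [7:4]) and the Virtualization extensions (bits [15:12]).
 */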
static unsigned int read_id_pfr1(void)
{
	unsigned int reg;

	asm("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
	return reg;
}
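/*
 * Return the address of the GIC distributor: either taken from the board
 * configuration or derived from the CBAR/PERIPHBASE register plus the
 * distributor offset.
 */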
static unsigned long get_gicd_base_address(void)
{
#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
	return CONFIG_ARM_GIC_BASE_ADDRESS + GIC_DIST_OFFSET;
#else
	unsigned periphbase;

	/* get the GIC base address from the CBAR register */
	asm("mrc p15, 4, %0, c15, c0, 0\n" : "=r" (periphbase));

	/* the PERIPHBASE can be mapped above 4 GB (the lower 8 bits are used
	 * to encode this). Bail out here since we cannot access it without
	 * enabling paging.
	 */
	if ((periphbase & 0xff) != 0) {
		printf("nonsec: PERIPHBASE is above 4 GB, no access.\n");
		return -1;
	}

	return (periphbase & CBAR_MASK) + GIC_DIST_OFFSET;
#endif
}

/* Override this weak function to enable any available hardware
 * protections for the reserved (secure) region */
void __weak protect_secure_section(void) {}

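/*
 * Copy the secure section (everything between __secure_start and
 * __secure_end, i.e. the monitor and, if enabled, PSCI code) to
 * CONFIG_ARMV7_SECURE_BASE and flush the caches so other cores see the
 * copy before they run it.
 */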
static void relocate_secure_section(void)
{
#ifdef CONFIG_ARMV7_SECURE_BASE
	size_t sz = __secure_end - __secure_start;
	unsigned long szflush = ALIGN(sz + 1, CONFIG_SYS_CACHELINE_SIZE);

	memcpy((void *)CONFIG_ARMV7_SECURE_BASE, __secure_start, sz);

	flush_dcache_range(CONFIG_ARMV7_SECURE_BASE,
			   CONFIG_ARMV7_SECURE_BASE + szflush);
	protect_secure_section();
	invalidate_icache_all();
#endif
}
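/*
 * Wake all other cores by sending them SGI 0: writing GICD_SGIR with
 * TargetListFilter (bits [25:24]) set to 0b01 forwards the SGI to every
 * CPU interface except the one that made the request.
 */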
static void kick_secondary_cpus_gic(unsigned long gicdaddr)
{
	/* kick all CPUs (except this one) by writing to GICD_SGIR */
	writel(1U << 24, gicdaddr + GICD_SGIR);
}
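/*
 * Default implementation wakes the secondary cores via a GIC SGI; boards
 * that need a different wake-up mechanism can override this weak function.
 */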
void __weak smp_kick_all_cpus(void)
{
	unsigned long gic_dist_addr;

	gic_dist_addr = get_gicd_base_address();
	if (gic_dist_addr == -1)
		return;

	kick_secondary_cpus_gic(gic_dist_addr);
}

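/* Board-specific PSCI setup hook; the default does nothing. */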
__weak void psci_board_init(void)
{
}

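/*
 * Prepare the switch to non-secure state: check that the Security
 * extensions are implemented, open up the GIC distributor to non-secure
 * accesses, relocate the secure section, kick the secondary cores (unless
 * PSCI is used to start them later) and finally run the non-secure
 * switching code on this CPU as well.
 */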
int armv7_init_nonsec(void)
{
	unsigned int reg;
	unsigned itlinesnr, i;
	unsigned long gic_dist_addr;

	/* check whether the CPU supports the security extensions */
	reg = read_id_pfr1();
	if ((reg & 0xF0) == 0) {
		printf("nonsec: Security extensions not implemented.\n");
		return -1;
	}

	/* The SCR register will be set directly in the monitor mode handler;
	 * according to the spec one should not tinker with it in secure state
	 * while in SVC mode. Do not try to read it once in non-secure state,
	 * as any access to it will trap.
	 */

	gic_dist_addr = get_gicd_base_address();
	if (gic_dist_addr == -1)
		return -1;

	/* enable the GIC distributor (Group 0 and Group 1 interrupts) */
	writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
	       gic_dist_addr + GICD_CTLR);

	/* TYPER[4:0] (ITLinesNumber) encodes the number of available
	 * interrupts as 32 * (N + 1) */
	itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;

	/* set all bits in the GIC group registers to one to allow access
	 * from non-secure state. The first 32 interrupts are private per
	 * CPU and will be set later when enabling the GIC for each core
	 */
	for (i = 1; i <= itlinesnr; i++)
		writel((unsigned)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);

	psci_board_init();

	/*
	 * Relocate the secure section before any CPU runs code in secure RAM.
	 * smp_kick_all_cpus may bring up the other cores, which then execute
	 * from secure RAM, so the secure section must be in place before they
	 * are enabled.
	 */
	relocate_secure_section();
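	/*
	 * Without PSCI, point the secondary cores at the _smp_pen entry in
	 * the relocated secure section and wake them so they also run the
	 * non-secure switching code.
	 */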
#ifndef CONFIG_ARMV7_PSCI
	smp_set_core_boot_addr((unsigned long)secure_ram_addr(_smp_pen), -1);
	smp_kick_all_cpus();
#endif

	/* call the non-sec switching code on this CPU also */
	secure_ram_addr(_nonsec_init)();
	return 0;
}