xref: /rk3399_ARM-atf/plat/hisilicon/hikey960/hikey960_bl2_setup.c (revision 09d40e0e08283a249e7dce0e106c07c5141f9b7e)
/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <drivers/arm/pl011.h>
#include <drivers/delay_timer.h>
#include <drivers/dw_ufs.h>
#include <drivers/generic_delay_timer.h>
#include <drivers/ufs.h>
#include <lib/mmio.h>
#ifdef SPD_opteed
#include <lib/optee_utils.h>
#endif

#include <hi3660.h>
#include "hikey960_def.h"
#include "hikey960_private.h"

/*
 * The next 2 constants identify the extents of the code & RO data region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned.  It is the responsibility of the linker script to ensure that
 * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
 */
#define BL2_RO_BASE (unsigned long)(&__RO_START__)
#define BL2_RO_LIMIT (unsigned long)(&__RO_END__)

#define BL2_RW_BASE		(BL2_RO_LIMIT)

/*
 * The next 2 constants identify the extents of the coherent memory region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned.  It is the responsibility of the linker script to ensure that
 * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
 * page-aligned addresses.
 */
#define BL2_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
#define BL2_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)

static meminfo_t bl2_el3_tzram_layout;
static console_pl011_t console;
extern int load_lpm3(void);

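/*
 * Boot-mode encoding used by the platform code; BOOT_MODE_MASK suggests that
 * only the least-significant bit distinguishes recovery from normal boot.
 */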
enum {
	BOOT_MODE_RECOVERY = 0,
	BOOT_MODE_NORMAL,
	BOOT_MODE_MASK = 1,
};

/*******************************************************************************
 * Transfer SCP_BL2 from Trusted RAM to the SCP (handled by load_lpm3()).
 * Return 0 on success, -1 otherwise.
 ******************************************************************************/
int plat_hikey960_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
	int i;
	int *buf;

	assert(scp_bl2_image_info->image_size < SCP_BL2_SIZE);

	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");

	INFO("BL2: SCP_BL2: 0x%lx@0x%x\n",
	     scp_bl2_image_info->image_base,
	     scp_bl2_image_info->image_size);

	buf = (int *)scp_bl2_image_info->image_base;

	INFO("BL2: SCP_BL2 HEAD:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
			buf[i], buf[i+1], buf[i+2], buf[i+3]);

	buf = (int *)(scp_bl2_image_info->image_base +
		      scp_bl2_image_info->image_size - 256);

	INFO("BL2: SCP_BL2 TAIL:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
			buf[i], buf[i+1], buf[i+2], buf[i+3]);

	INFO("BL2: SCP_BL2 transferred to SCP\n");

	load_lpm3();
	(void)buf;

	return 0;
}

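/*
 * Reset sequence for the UFS controller and PHY: gate and re-enable the
 * reference clock (switching to the ABB clock source), assert the peripheral
 * reset, enable the MTCMOS power switch, configure the PHY clock dividers,
 * drop the isolation bits, toggle the UFS device reset line, and finally
 * release the peripheral reset.
 */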
static void hikey960_ufs_reset(void)
{
	unsigned int data, mask;

	mmio_write_32(CRG_PERDIS7_REG, 1 << 14);
	mmio_clrbits_32(UFS_SYS_PHY_CLK_CTRL_REG, BIT_SYSCTRL_REF_CLOCK_EN);
	do {
		data = mmio_read_32(UFS_SYS_PHY_CLK_CTRL_REG);
	} while (data & BIT_SYSCTRL_REF_CLOCK_EN);
	/* use abb clk */
	mmio_clrbits_32(UFS_SYS_UFS_SYSCTRL_REG, BIT_UFS_REFCLK_SRC_SE1);
	mmio_clrbits_32(UFS_SYS_PHY_ISO_EN_REG, BIT_UFS_REFCLK_ISO_EN);
	mmio_write_32(PCTRL_PERI_CTRL3_REG, (1 << 0) | (1 << 16));
	mdelay(1);
	mmio_write_32(CRG_PEREN7_REG, 1 << 14);
	mmio_setbits_32(UFS_SYS_PHY_CLK_CTRL_REG, BIT_SYSCTRL_REF_CLOCK_EN);

	mmio_write_32(CRG_PERRSTEN3_REG, PERI_UFS_BIT);
	do {
		data = mmio_read_32(CRG_PERRSTSTAT3_REG);
	} while ((data & PERI_UFS_BIT) == 0);
	mmio_setbits_32(UFS_SYS_PSW_POWER_CTRL_REG, BIT_UFS_PSW_MTCMOS_EN);
	mdelay(1);
	mmio_setbits_32(UFS_SYS_HC_LP_CTRL_REG, BIT_SYSCTRL_PWR_READY);
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      MASK_UFS_DEVICE_RESET);
	/* clear SC_DIV_UFS_PERIBUS */
	mask = SC_DIV_UFS_PERIBUS << 16;
	mmio_write_32(CRG_CLKDIV17_REG, mask);
	/* set SC_DIV_UFSPHY_CFG(3) */
	mask = SC_DIV_UFSPHY_CFG_MASK << 16;
	data = SC_DIV_UFSPHY_CFG(3);
	mmio_write_32(CRG_CLKDIV16_REG, mask | data);
	data = mmio_read_32(UFS_SYS_PHY_CLK_CTRL_REG);
	data &= ~MASK_SYSCTRL_CFG_CLOCK_FREQ;
	data |= 0x39;
	mmio_write_32(UFS_SYS_PHY_CLK_CTRL_REG, data);
	mmio_clrbits_32(UFS_SYS_PHY_CLK_CTRL_REG, MASK_SYSCTRL_REF_CLOCK_SEL);
	mmio_setbits_32(UFS_SYS_CLOCK_GATE_BYPASS_REG,
			MASK_UFS_CLK_GATE_BYPASS);
	mmio_setbits_32(UFS_SYS_UFS_SYSCTRL_REG, MASK_UFS_SYSCTRL_BYPASS);

	mmio_setbits_32(UFS_SYS_PSW_CLK_CTRL_REG, BIT_SYSCTRL_PSW_CLK_EN);
	mmio_clrbits_32(UFS_SYS_PSW_POWER_CTRL_REG, BIT_UFS_PSW_ISO_CTRL);
	mmio_clrbits_32(UFS_SYS_PHY_ISO_EN_REG, BIT_UFS_PHY_ISO_CTRL);
	mmio_clrbits_32(UFS_SYS_HC_LP_CTRL_REG, BIT_SYSCTRL_LP_ISOL_EN);
	mmio_write_32(CRG_PERRSTDIS3_REG, PERI_ARST_UFS_BIT);
	mmio_setbits_32(UFS_SYS_RESET_CTRL_EN_REG, BIT_SYSCTRL_LP_RESET_N);
	mdelay(1);
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET);
	mdelay(20);
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      0x03300330);

	mmio_write_32(CRG_PERRSTDIS3_REG, PERI_UFS_BIT);
	do {
		data = mmio_read_32(CRG_PERRSTSTAT3_REG);
	} while (data & PERI_UFS_BIT);
}

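/*
 * Hand the controller to the DesignWare UFS driver, pointing it at the
 * dedicated descriptor area (HIKEY960_UFS_DESC_BASE/SIZE) used for its
 * transfer descriptors.
 */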
static void hikey960_init_ufs(void)
{
	dw_ufs_params_t ufs_params;

	memset(&ufs_params, 0, sizeof(ufs_params));
	ufs_params.reg_base = UFS_REG_BASE;
	ufs_params.desc_base = HIKEY960_UFS_DESC_BASE;
	ufs_params.desc_size = HIKEY960_UFS_DESC_SIZE;
	hikey960_ufs_reset();
	dw_ufs_init(&ufs_params);
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
uint32_t hikey960_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
#ifndef AARCH32
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#else
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int hyp_status, mode, spsr;

	hyp_status = GET_VIRT_EXT(read_id_pfr1());

	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#endif /* AARCH32 */

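/*
 * Per-image post-load hook: fills in the entry-point SPSR (and, for BL33,
 * the primary CPU MPID in arg0), parses the OP-TEE header when BL32 is
 * OP-TEE, and hands SCP_BL2 over to the SCP once it has been loaded.
 */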
int hikey960_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
	assert(bl_mem_params);

	switch (image_id) {
#ifdef AARCH64
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
				&pager_mem_params->image_info,
				&paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl32_entry();
		break;
#endif

	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl33_entry();
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_hikey960_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platform to update/use image
 * information for a given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return hikey960_bl2_handle_post_image_load(image_id);
}

void bl2_el3_early_platform_setup(u_register_t arg1, u_register_t arg2,
				  u_register_t arg3, u_register_t arg4)
{
	unsigned int id, uart_base;

	generic_delay_timer_init();
	hikey960_read_boardid(&id);
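	/* Board ID 5300 routes the debug console to UART5; others use UART6. */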
	if (id == 5300)
		uart_base = PL011_UART5_BASE;
	else
		uart_base = PL011_UART6_BASE;
	/* Initialize the console to provide early debug support */
	console_pl011_register(uart_base, PL011_UART_CLK_IN_HZ,
			       PL011_BAUDRATE, &console);
	/*
	 * Allow BL2 to see the whole Trusted RAM.
	 */
	bl2_el3_tzram_layout.total_base = BL2_RW_BASE;
	bl2_el3_tzram_layout.total_size = BL31_LIMIT - BL2_RW_BASE;
}

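/*
 * Set up the EL3 translation tables covering BL2's RW memory, its code/RO
 * data region and the coherent RAM region defined above.
 */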
void bl2_el3_plat_arch_setup(void)
{
	hikey960_init_mmu_el3(bl2_el3_tzram_layout.total_base,
			      bl2_el3_tzram_layout.total_size,
			      BL2_RO_BASE,
			      BL2_RO_LIMIT,
			      BL2_COHERENT_RAM_BASE,
			      BL2_COHERENT_RAM_LIMIT);
}

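/*
 * Main BL2 platform init: disable the watchdog, then bring up clocks, PMU,
 * regulators, the TrustZone controller, peripherals, pinmux, GPIOs, UFS and
 * the image IO layer, in that order.
 */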
void bl2_platform_setup(void)
{
	/* disable WDT0 */
	if (mmio_read_32(WDT0_REG_BASE + WDT_LOCK_OFFSET) == WDT_LOCKED) {
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, WDT_UNLOCK);
		mmio_write_32(WDT0_REG_BASE + WDT_CONTROL_OFFSET, 0);
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, 0);
	}
	hikey960_clk_init();
	hikey960_pmu_init();
	hikey960_regulator_enable();
	hikey960_tzc_init();
	hikey960_peri_init();
	hikey960_pinmux_init();
	hikey960_gpio_init();
	hikey960_init_ufs();
	hikey960_io_setup();
}
339