xref: /rk3399_ARM-atf/plat/hisilicon/hikey960/hikey960_bl2_setup.c (revision 2de0c5cc4fac47dcc5df295bd1eaf3a6da528424)
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <desc_image_load.h>
#include <errno.h>
#include <generic_delay_timer.h>
#include <hi3660.h>
#include <mmio.h>
#include <platform_def.h>
#include <string.h>
#include <ufs.h>

#include "hikey960_def.h"
#include "hikey960_private.h"

/*
 * The next 2 constants identify the extents of the code & RO data region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned.  It is the responsibility of the linker script to ensure that
 * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
 */
#define BL2_RO_BASE (unsigned long)(&__RO_START__)
#define BL2_RO_LIMIT (unsigned long)(&__RO_END__)

/*
 * The next 2 constants identify the extents of the coherent memory region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned.  It is the responsibility of the linker script to ensure that
 * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
 * page-aligned addresses.
 */
#define BL2_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
#define BL2_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)

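/*
 * BL2's copy of the trusted RAM layout handed over by BL1 (see
 * bl2_early_platform_setup below). It is aligned to the cache writeback
 * granule so that cache maintenance on it does not spill into adjacent data.
 */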
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);

#if !LOAD_IMAGE_V2

/*******************************************************************************
 * This structure holds the superset of information that BL2 passes to BL31
 * when handing over control: the bl31_params structure and the other
 * platform-specific parameters.
 ******************************************************************************/
typedef struct bl2_to_bl31_params_mem {
	bl31_params_t		bl31_params;
	image_info_t		bl31_image_info;
	image_info_t		bl32_image_info;
	image_info_t		bl33_image_info;
	entry_point_info_t	bl33_ep_info;
	entry_point_info_t	bl32_ep_info;
	entry_point_info_t	bl31_ep_info;
} bl2_to_bl31_params_mem_t;

static bl2_to_bl31_params_mem_t bl31_params_mem;

meminfo_t *bl2_plat_sec_mem_layout(void)
{
	return &bl2_tzram_layout;
}

bl31_params_t *bl2_plat_get_bl31_params(void)
{
	bl31_params_t *bl2_to_bl31_params = NULL;

	/*
	 * Initialise the memory for all the arguments that need to
	 * be passed to BL3-1.
	 */
	memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));

	/* Assign memory for TF related information */
	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);

	/* Fill BL3-1 related information */
	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
		VERSION_1, 0);

	/* Fill BL3-2 related information if it exists */
#if BL32_BASE
	bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
		VERSION_1, 0);
	bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
		VERSION_1, 0);
#endif

	/* Fill BL3-3 related information */
	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
		PARAM_EP, VERSION_1, 0);

	/* BL3-3 expects to receive the primary CPU MPID (through x0) */
	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();

	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
		VERSION_1, 0);

	return bl2_to_bl31_params;
}

/*******************************************************************************
 * Populate the extents of memory available for loading SCP_BL2 (if used),
 * i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2.
 ******************************************************************************/
void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)
{
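	/*
	 * In the LOAD_IMAGE_V1 flow this hook runs before SCP_BL2 is loaded,
	 * so the UFS controller and the IO layer are brought up here.
	 */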
	hikey960_init_ufs();
	hikey960_io_setup();

	*scp_bl2_meminfo = bl2_tzram_layout;
}
#endif /* LOAD_IMAGE_V2 */

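/*
 * Defined elsewhere in the platform port and declared here for the SCP_BL2
 * handler below; as the name suggests, it presumably loads and starts the
 * LPM3 MCU firmware.
 */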
extern int load_lpm3(void);

/*******************************************************************************
 * Transfer SCP_BL2 from Trusted RAM to the SCP.
 * Returns 0 on success.
 ******************************************************************************/
#if LOAD_IMAGE_V2
int plat_hikey960_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
#else
int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
#endif
{
	int i;
	int *buf;

	assert(scp_bl2_image_info->image_size < SCP_BL2_SIZE);

	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");

	INFO("BL2: SCP_BL2: 0x%lx@0x%x\n",
	     scp_bl2_image_info->image_base,
	     scp_bl2_image_info->image_size);

	buf = (int *)scp_bl2_image_info->image_base;

	INFO("BL2: SCP_BL2 HEAD:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
			buf[i], buf[i+1], buf[i+2], buf[i+3]);

	buf = (int *)(scp_bl2_image_info->image_base +
		      scp_bl2_image_info->image_size - 256);

	INFO("BL2: SCP_BL2 TAIL:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
			buf[i], buf[i+1], buf[i+2], buf[i+3]);

	INFO("BL2: SCP_BL2 transferred to SCP\n");

	load_lpm3();
	(void)buf;

	return 0;
}

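/*
 * Bring up the UFS controller: the register base and descriptor memory come
 * from the platform definitions, and UFS_FLAGS_SKIPINIT skips the full link
 * start-up, which an earlier boot stage is assumed to have done already.
 */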
void hikey960_init_ufs(void)
{
	ufs_params_t ufs_params;

	memset(&ufs_params, 0, sizeof(ufs_params_t));
	ufs_params.reg_base = UFS_REG_BASE;
	ufs_params.desc_base = HIKEY960_UFS_DESC_BASE;
	ufs_params.desc_size = HIKEY960_UFS_DESC_SIZE;
	ufs_params.flags = UFS_FLAGS_SKIPINIT;
	ufs_init(NULL, &ufs_params);
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
uint32_t hikey960_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
#ifndef AARCH32
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#else
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int hyp_status, mode, spsr;

	hyp_status = GET_VIRT_EXT(read_id_pfr1());

	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#endif /* AARCH32 */

#if LOAD_IMAGE_V2
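/*******************************************************************************
 * Fill in the entry-point information for the images BL2 has just loaded:
 * the SPSR for BL32 and BL33, the primary CPU MPID for BL33, and a hand-off
 * of SCP_BL2 to the platform-specific handler above.
 ******************************************************************************/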
int hikey960_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
	assert(bl_mem_params);

	switch (image_id) {
#ifdef AARCH64
	case BL32_IMAGE_ID:
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl32_entry();
		break;
#endif

	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through x0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl33_entry();
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_hikey960_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platform to update or use the image
 * information for the given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return hikey960_bl2_handle_post_image_load(image_id);
}

#else /* LOAD_IMAGE_V2 */

struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
{
#if DEBUG
	bl31_params_mem.bl31_ep_info.args.arg1 = HIKEY960_BL31_PLAT_PARAM_VAL;
#endif

	return &bl31_params_mem.bl31_ep_info;
}

void bl2_plat_set_bl31_ep_info(image_info_t *image,
			       entry_point_info_t *bl31_ep_info)
{
	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
				       DISABLE_ALL_EXCEPTIONS);
}

/*******************************************************************************
 * Before this function is called, BL32 has been loaded into memory and its
 * entrypoint has been set by load_image. This is a placeholder for the
 * platform to change the entrypoint of BL32 and to set the SPSR and security
 * state. On HiKey960 we only set the security state of the entrypoint.
 ******************************************************************************/
#ifdef BL32_BASE
void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
					entry_point_info_t *bl32_ep_info)
{
	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL32 image.
	 */
	bl32_ep_info->spsr = 0;
}

/*******************************************************************************
 * Populate the extents of memory available for loading BL32
 ******************************************************************************/
void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
{
	/*
	 * Populate the extents of memory available for loading BL32.
	 */
	bl32_meminfo->total_base = BL32_BASE;
	bl32_meminfo->free_base = BL32_BASE;
	bl32_meminfo->total_size =
			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
	bl32_meminfo->free_size =
			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
}
#endif /* BL32_BASE */

void bl2_plat_set_bl33_ep_info(image_info_t *image,
			       entry_point_info_t *bl33_ep_info)
{
	unsigned long el_status;
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
	el_status &= ID_AA64PFR0_ELX_MASK;

	if (el_status)
		mode = MODE_EL2;
	else
		mode = MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	bl33_ep_info->spsr = SPSR_64(mode, MODE_SP_ELX,
				       DISABLE_ALL_EXCEPTIONS);
	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
}

void bl2_plat_flush_bl31_params(void)
{
	flush_dcache_range((unsigned long)&bl31_params_mem,
			   sizeof(bl2_to_bl31_params_mem_t));
}

void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
{
	bl33_meminfo->total_base = DDR_BASE;
	bl33_meminfo->total_size = DDR_SIZE;
	bl33_meminfo->free_base = DDR_BASE;
	bl33_meminfo->free_size = DDR_SIZE;
}
#endif /* LOAD_IMAGE_V2 */

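/*******************************************************************************
 * Perform early BL2 setup: start the generic delay timer, select the debug
 * UART from the board ID (board 5300 uses UART5, all other boards UART6),
 * initialise the console and record the memory layout passed in by BL1.
 ******************************************************************************/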
void bl2_early_platform_setup(meminfo_t *mem_layout)
{
	unsigned int id, uart_base;

	generic_delay_timer_init();
	hikey960_read_boardid(&id);
	if (id == 5300)
		uart_base = PL011_UART5_BASE;
	else
		uart_base = PL011_UART6_BASE;

	/* Initialize the console to provide early debug support */
	console_init(uart_base, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;
}

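/*******************************************************************************
 * Enable the MMU at EL1, mapping the whole of BL2's trusted RAM together with
 * its code/RO region and the coherent memory region.
 ******************************************************************************/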
void bl2_plat_arch_setup(void)
{
	hikey960_init_mmu_el1(bl2_tzram_layout.total_base,
			      bl2_tzram_layout.total_size,
			      BL2_RO_BASE,
			      BL2_RO_LIMIT,
			      BL2_COHERENT_RAM_BASE,
			      BL2_COHERENT_RAM_LIMIT);
}

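/*******************************************************************************
 * Runtime platform setup for BL2: if watchdog WDT0 is currently locked,
 * unlock its registers, clear the control register to disable it, and write
 * the lock register back to 0.
 ******************************************************************************/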
void bl2_platform_setup(void)
{
	/* disable WDT0 */
	if (mmio_read_32(WDT0_REG_BASE + WDT_LOCK_OFFSET) == WDT_LOCKED) {
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, WDT_UNLOCK);
		mmio_write_32(WDT0_REG_BASE + WDT_CONTROL_OFFSET, 0);
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, 0);
	}
}
410