xref: /rk3399_ARM-atf/common/bl_common.c (revision 08c28d5385f8fae3d5c61475a109b86ef11770d0)
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <semihosting.h>
#include <bl_common.h>
#include "io_storage.h"
#include "debug.h"

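/*
 * Round 'value' to a 4KB page boundary: upwards if 'dir' is UP, downwards
 * otherwise. Values that are already page-aligned are returned unchanged.
 */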
unsigned long page_align(unsigned long value, unsigned dir)
{
	unsigned long page_size = 1 << FOUR_KB_SHIFT;

	/* Round up or down to the next page boundary, as requested */
	if (value & (page_size - 1)) {
		value &= ~(page_size - 1);
		if (dir == UP)
			value += page_size;
	}

	return value;
}

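/* Return a non-zero value if 'addr' lies on a 4KB page boundary */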
static inline unsigned int is_page_aligned(unsigned long addr)
{
	const unsigned long page_size = 1 << FOUR_KB_SHIFT;

	return (addr & (page_size - 1)) == 0;
}

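/*
 * Program the SCR_EL3.NS bit so that the lower exception levels run in the
 * requested security state.
 */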
void change_security_state(unsigned int target_security_state)
{
	unsigned long scr = read_scr();

	if (target_security_state == SECURE)
		scr &= ~SCR_NS_BIT;
	else if (target_security_state == NON_SECURE)
		scr |= SCR_NS_BIT;
	else
		assert(0);

	write_scr(scr);
}

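/*
 * Exception return into a lower exception level: program the saved program
 * status register and return address, then issue an ERET with the AAPCS64
 * arguments placed in x0-x7.
 */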
void __dead2 drop_el(aapcs64_params *args,
		     unsigned long spsr,
		     unsigned long entrypoint)
{
	write_spsr(spsr);
	write_elr(entrypoint);
	eret(args->arg0,
	     args->arg1,
	     args->arg2,
	     args->arg3,
	     args->arg4,
	     args->arg5,
	     args->arg6,
	     args->arg7);
}

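/*
 * Ask a higher exception level to act on our behalf by issuing an SMC with
 * the AAPCS64 arguments placed in x0-x7.
 */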
void __dead2 raise_el(aapcs64_params *args)
{
	smc(args->arg0,
	    args->arg1,
	    args->arg2,
	    args->arg3,
	    args->arg4,
	    args->arg5,
	    args->arg6,
	    args->arg7);
}

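/*
 * Generic exception level change: when running in EL3, drop into the level
 * described by 'info' (switching security state first if required);
 * otherwise request the change from a higher exception level via an SMC.
 */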
/*
 * TODO: If we are not in EL3 then currently we only issue an SMC.
 * Add support for dropping into EL0 etc. Consider adding support
 * for switching from S-EL1 to S-EL0/1 etc.
 */
void __dead2 change_el(el_change_info *info)
{
	unsigned long current_el = read_current_el();

	if (GET_EL(current_el) == MODE_EL3) {
		/*
		 * We can go anywhere from EL3. So find where.
		 * TODO: Lots to do if we are going non-secure.
		 * Flip the NS bit. Restore NS registers etc.
		 * Just doing the bare minimum for now.
		 */

		if (info->security_state == NON_SECURE)
			change_security_state(info->security_state);

		drop_el(&info->args, info->spsr, info->entrypoint);
	} else {
		raise_el(&info->args);
	}
}

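/*
 * Build an SPSR value for an exception return to 'target_el' with the stack
 * pointer selection 'target_sp' and register width 'target_rw', and with all
 * DAIF exceptions masked.
 */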
/* TODO: Add a parameter for DAIF. Not needed right now. */
unsigned long make_spsr(unsigned long target_el,
			unsigned long target_sp,
			unsigned long target_rw)
{
	unsigned long spsr;

	/* Disable all exceptions & set up the EL */
	spsr = (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
		<< PSR_DAIF_SHIFT;
	spsr |= PSR_MODE(target_rw, target_el, target_sp);

	return spsr;
}

/*******************************************************************************
 * The next two functions are weak definitions. Platform-specific code can
 * override them if it wishes to.
 ******************************************************************************/

/*******************************************************************************
 * Function that takes a memory layout into which BL31 has been either top or
 * bottom loaded. Using this information, it populates bl31_mem_layout to tell
 * BL31 how much memory it has access to and how much is available for use. It
 * does not need the address where BL31 has been loaded as BL31 will reclaim
 * all the memory used by BL2.
 * TODO: Revisit if this and init_bl2_mem_layout can be replaced by a single
 * routine.
 ******************************************************************************/
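/*
 * Illustrative example (hypothetical addresses): for a bottom-loaded BL31
 * with a BL2 layout of total_base = 0x04000000, total_size = 0x40000 and
 * free_base = 0x04010000, BL31 occupies [0x04000000, 0x04010000), so
 * bl31_mem_layout ends up with free_base = 0x04010000 and
 * free_size = 0x40000 - 0x10000 = 0x30000, i.e. everything above BL31,
 * including the memory currently holding BL2, is usable by BL31.
 */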
void init_bl31_mem_layout(const meminfo *bl2_mem_layout,
			  meminfo *bl31_mem_layout,
			  unsigned int load_type)
{
	if (load_type == BOT_LOAD) {
		/*
		 * ------------                             ^
		 * |   BL2    |                             |
		 * |----------|                 ^           |  BL2
		 * |          |                 | BL2 free  |  total
		 * |          |                 |   size    |  size
		 * |----------| BL2 free base   v           |
		 * |   BL31   |                             |
		 * ------------ BL2 total base              v
		 */
		unsigned long bl31_size;

		bl31_mem_layout->free_base = bl2_mem_layout->free_base;

		bl31_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
		bl31_mem_layout->free_size = bl2_mem_layout->total_size - bl31_size;
	} else {
		/*
		 * ------------                             ^
		 * |   BL31   |                             |
		 * |----------|                 ^           |  BL2
		 * |          |                 | BL2 free  |  total
		 * |          |                 |   size    |  size
		 * |----------| BL2 free base   v           |
		 * |   BL2    |                             |
		 * ------------ BL2 total base              v
		 */
		unsigned long bl2_size;

		bl31_mem_layout->free_base = bl2_mem_layout->total_base;

		bl2_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
		bl31_mem_layout->free_size = bl2_mem_layout->free_size + bl2_size;
	}

	bl31_mem_layout->total_base = bl2_mem_layout->total_base;
	bl31_mem_layout->total_size = bl2_mem_layout->total_size;
	bl31_mem_layout->attr = load_type;

	flush_dcache_range((unsigned long) bl31_mem_layout, sizeof(meminfo));
	return;
}

/*******************************************************************************
 * Function that takes a memory layout into which BL2 has been either top or
 * bottom loaded along with the address where BL2 has been loaded in it. Using
 * this information, it populates bl2_mem_layout to tell BL2 how much memory
 * it has access to and how much is available for use.
 ******************************************************************************/
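/*
 * Illustrative example (hypothetical addresses): for a bottom-loaded BL2 at
 * bl2_base = 0x04006000 with a BL1 layout of total_base = 0x04000000,
 * total_size = 0x40000, free_base = 0x04010000 and free_size = 0x30000,
 * the resulting bl2_mem_layout has total_base = 0x04006000 and
 * total_size = 0x30000 + (0x04010000 - 0x04006000) = 0x3a000, while the
 * free region [0x04010000, 0x04040000) is inherited unchanged from BL1.
 */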
void init_bl2_mem_layout(meminfo *bl1_mem_layout,
			 meminfo *bl2_mem_layout,
			 unsigned int load_type,
			 unsigned long bl2_base)
{
	unsigned tmp;

	if (load_type == BOT_LOAD) {
		bl2_mem_layout->total_base = bl2_base;
		tmp = bl1_mem_layout->free_base - bl2_base;
		bl2_mem_layout->total_size = bl1_mem_layout->free_size + tmp;

	} else {
		bl2_mem_layout->total_base = bl1_mem_layout->free_base;
		tmp = bl1_mem_layout->total_base + bl1_mem_layout->total_size;
		bl2_mem_layout->total_size = tmp - bl1_mem_layout->free_base;
	}

	bl2_mem_layout->free_base = bl1_mem_layout->free_base;
	bl2_mem_layout->free_size = bl1_mem_layout->free_size;
	bl2_mem_layout->attr = load_type;

	flush_dcache_range((unsigned long) bl2_mem_layout, sizeof(meminfo));
	return;
}

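/*
 * Print the requested load address and size of an image together with the
 * extents of the current memory layout. Compiled in only when DEBUG is set.
 */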
static void dump_load_info(unsigned long image_load_addr,
			   unsigned long image_size,
			   const meminfo *mem_layout)
{
#if DEBUG
	printf("Trying to load image at address 0x%lx, size = 0x%lx\r\n",
		image_load_addr, image_size);
	printf("Current memory layout:\r\n");
	printf("  total region = [0x%lx, 0x%lx]\r\n", mem_layout->total_base,
			mem_layout->total_base + mem_layout->total_size);
	printf("  free region = [0x%lx, 0x%lx]\r\n", mem_layout->free_base,
			mem_layout->free_base + mem_layout->free_size);
#endif
}

/*******************************************************************************
 * Generic function to load an image into trusted RAM, given a name, the
 * extents of free memory and whether the image should be loaded at the bottom
 * or top of the free memory. It updates the memory layout if the load is
 * successful.
 ******************************************************************************/
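/*
 * Example usage (illustrative only): a BL1-stage caller might load BL2 at
 * the bottom of its free trusted RAM with something like
 *
 *   bl2_base = load_image(&bl1_tzram_layout, "bl2.bin", BOT_LOAD, 0);
 *
 * where 'bl1_tzram_layout' and the "bl2.bin" name are hypothetical,
 * platform-provided values; a zero 'fixed_addr' requests dynamic placement.
 */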
unsigned long load_image(meminfo *mem_layout,
			 const char *image_name,
			 unsigned int load_type,
			 unsigned long fixed_addr)
{
	io_dev_handle dev_handle;
	io_handle image_handle;
	void *image_spec;
	unsigned long temp_image_base = 0;
	unsigned long image_base = 0;
	long offset = 0;
	size_t image_size = 0;
	size_t bytes_read = 0;
	int io_result = IO_FAIL;

	assert(mem_layout != NULL);
	assert(image_name != NULL);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image '%s' (%i)\n",
			image_name, io_result);
		return 0;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image '%s' (%i)\n",
			image_name, io_result);
		return 0;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image '%s' file (%i)\n",
			image_name, io_result);
		goto fail;
	}

	/* See if we have enough space */
	if (image_size > mem_layout->free_size) {
		WARN("Cannot load '%s' file: Not enough space.\n",
			image_name);
		dump_load_info(0, image_size, mem_layout);
		goto fail;
	}

	switch (load_type) {

	case TOP_LOAD:

		/* Load the image at the top of free memory */
		temp_image_base = mem_layout->free_base + mem_layout->free_size;
		temp_image_base -= image_size;

		/* Page align base address and check whether the image still fits */
		image_base = page_align(temp_image_base, DOWN);
		assert(image_base <= temp_image_base);

		if (image_base < mem_layout->free_base) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			goto fail;
		}

		/* Calculate the amount of extra memory used due to alignment */
		offset = temp_image_base - image_base;

		break;

	case BOT_LOAD:

		/* Load the image at the bottom of free memory */
		temp_image_base = mem_layout->free_base;
		image_base = page_align(temp_image_base, UP);
		assert(image_base >= temp_image_base);

		/* Check whether the page-aligned image still fits */
		if (image_base + image_size >
		    mem_layout->free_base + mem_layout->free_size) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			goto fail;
		}

		/* Calculate the amount of extra memory used due to alignment */
		offset = image_base - temp_image_base;

		break;

	default:
		assert(0);

	}

	/*
	 * Some images must be loaded at a fixed address, not a dynamic one.
	 *
	 * This has been implemented as a hack on top of the existing dynamic
	 * loading mechanism, for the time being. If the 'fixed_addr' function
	 * argument is non-zero, then it will force the load address. So we
	 * still have this principle of top/bottom loading but the code
	 * determining the load address is bypassed and the load address is
	 * forced to the fixed one.
	 *
	 * This can result in quite a lot of wasted space because we still use
	 * a single meminfo structure to represent the extents of free memory,
	 * where we should use some sort of linked list.
	 *
	 * E.g. if we want to load BL2 at address 0x04020000, the resulting
	 *      memory layout should look as follows:
	 * ------------ 0x04040000
	 * |          |  <- Free space (1)
	 * |----------|
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 *
	 * But in the current hacky implementation, we'll need to specify
	 * whether BL2 is loaded at the top or bottom of the free memory.
	 * E.g. if BL2 is considered as top-loaded, the meminfo structure
	 * will give the following view of the memory, hiding the chunk of
	 * free memory above BL2:
	 * ------------ 0x04040000
	 * |          |
	 * |          |
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 */
	if (fixed_addr != 0) {
		/* Load the image at the given address. */
		image_base = fixed_addr;

		/* Check whether the image fits. */
		if ((image_base < mem_layout->free_base) ||
		    (image_base + image_size >
		       mem_layout->free_base + mem_layout->free_size)) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			goto fail;
		}

		/* Check whether the fixed load address is page-aligned. */
		if (!is_page_aligned(image_base)) {
			WARN("Cannot load '%s' file at unaligned address 0x%lx\n",
				image_name, fixed_addr);
			goto fail;
		}

		/*
		 * Calculate the amount of extra memory used due to fixed
		 * loading.
		 */
		if (load_type == TOP_LOAD) {
			unsigned long max_addr, space_used;
			/*
			 * ------------ max_addr
			 * | /wasted/ |                 | offset
			 * |..........|..............................
			 * |  image   |                 | image_size
			 * |----------| fixed_addr
			 * |          |
			 * |          |
			 * ------------ total_base
			 */
			max_addr = mem_layout->total_base + mem_layout->total_size;
			/*
			 * Compute the amount of memory used by the image.
			 * Corresponds to all space above the image load
			 * address.
			 */
			space_used = max_addr - fixed_addr;
			/*
			 * Calculate the amount of wasted memory within the
			 * amount of memory used by the image.
			 */
			offset = space_used - image_size;
		} else { /* BOT_LOAD */
			/*
			 * ------------
			 * |          |
			 * |          |
			 * |----------|
			 * |  image   |
			 * |..........| fixed_addr
			 * | /wasted/ |                 | offset
			 * ------------ total_base
			 */
			offset = fixed_addr - mem_layout->total_base;
		}
	}

	/* We have enough space so load the image now */
	/* TODO: Consider whether to try to recover/retry a partially successful read */
	io_result = io_read(image_handle, (void *)image_base, image_size, &bytes_read);
	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
		WARN("Failed to load '%s' file (%i)\n", image_name, io_result);
		goto fail;
	}

	/*
	 * The file has been successfully loaded. Update the free memory
	 * data structure & flush the contents of the TZRAM so that the
	 * next EL can see it.
	 */
	/* Flush the freshly loaded image out of the data cache */
	flush_dcache_range(image_base, image_size);

	mem_layout->free_size -= image_size + offset;

	/* Update the base of free memory since it has moved up */
	if (load_type == BOT_LOAD)
		mem_layout->free_base += offset + image_size;

exit:
	io_result = io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this bootloader stage */
	io_result = io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return image_base;

fail:
	image_base = 0;
	goto exit;
}


/*******************************************************************************
 * Run a loaded image from the given entry point. This could result in either
 * dropping into a lower exception level or jumping to a higher exception level.
 * The only way of doing the latter is through an SMC. In either case, set up
 * the parameters for the EL change request correctly.
 ******************************************************************************/
void __dead2 run_image(unsigned long entrypoint,
		       unsigned long spsr,
		       unsigned long target_security_state,
		       void *first_arg,
		       void *second_arg)
{
	el_change_info run_image_info;
	unsigned long current_el = read_current_el();

	/* Tell next EL what we want done */
	run_image_info.args.arg0 = RUN_IMAGE;
	run_image_info.entrypoint = entrypoint;
	run_image_info.spsr = spsr;
	run_image_info.security_state = target_security_state;

	/*
	 * If we are in EL3 then only an ERET can take us to the desired
	 * exception level. Else, for the time being, assume that we have
	 * to jump to a higher EL and issue an SMC. Contents of argY
	 * will go into the general-purpose register xY, e.g. arg0->x0.
	 */
	if (GET_EL(current_el) == MODE_EL3) {
		run_image_info.args.arg1 = (unsigned long) first_arg;
		run_image_info.args.arg2 = (unsigned long) second_arg;
	} else {
		run_image_info.args.arg1 = entrypoint;
		run_image_info.args.arg2 = spsr;
		run_image_info.args.arg3 = (unsigned long) first_arg;
		run_image_info.args.arg4 = (unsigned long) second_arg;
	}

	change_el(&run_image_info);
}
539