xref: /rk3399_ARM-atf/common/bl_common.c (revision 23ff9baa7e01eac3a451f2e8ed768c9b90d3567a)
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <io_storage.h>
#include <platform.h>
#include <stdio.h>

unsigned long page_align(unsigned long value, unsigned dir)
{
	unsigned long page_size = 1 << FOUR_KB_SHIFT;

	/* Round up the limit to the next page boundary */
	if (value & (page_size - 1)) {
		value &= ~(page_size - 1);
		if (dir == UP)
			value += page_size;
	}

	return value;
}

static inline unsigned int is_page_aligned(unsigned long addr)
{
	const unsigned long page_size = 1 << FOUR_KB_SHIFT;

	return (addr & (page_size - 1)) == 0;
}
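
/*
 * Illustrative sketch, compiled out: expected behaviour of page_align() and
 * is_page_aligned() for the 4KB page size used above. The addresses are
 * arbitrary values chosen only for this sketch.
 */
#if 0
static void page_align_example(void)
{
	/* An address inside a 4KB page moves to the nearest boundary. */
	assert(page_align(0x80001234, UP) == 0x80002000);
	assert(page_align(0x80001234, DOWN) == 0x80001000);

	/* An already aligned address is returned unchanged. */
	assert(page_align(0x80002000, UP) == 0x80002000);
	assert(is_page_aligned(0x80002000));
	assert(!is_page_aligned(0x80001234));
}
#endif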

void change_security_state(unsigned int target_security_state)
{
	unsigned long scr = read_scr();

	if (target_security_state == SECURE)
		scr &= ~SCR_NS_BIT;
	else if (target_security_state == NON_SECURE)
		scr |= SCR_NS_BIT;
	else
		assert(0);

	write_scr(scr);
}

void __dead2 drop_el(aapcs64_params_t *args,
		     unsigned long spsr,
		     unsigned long entrypoint)
{
	write_spsr_el3(spsr);
	write_elr_el3(entrypoint);
	eret(args->arg0,
	     args->arg1,
	     args->arg2,
	     args->arg3,
	     args->arg4,
	     args->arg5,
	     args->arg6,
	     args->arg7);
}

void __dead2 raise_el(aapcs64_params_t *args)
{
	smc(args->arg0,
	    args->arg1,
	    args->arg2,
	    args->arg3,
	    args->arg4,
	    args->arg5,
	    args->arg6,
	    args->arg7);
}
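
/*
 * Illustrative sketch, compiled out: how a caller running in EL3 might hand
 * over to a lower EL through drop_el(). The SPSR is built from raw
 * architectural bit values rather than any helper macro, and the entry point
 * address is a made-up example.
 */
#if 0
static void __dead2 drop_to_el1_example(void)
{
	aapcs64_params_t args = { .arg0 = 0 };

	/*
	 * AArch64 SPSR: M[3:0] = 0b0101 selects EL1 using SP_EL1 (EL1h);
	 * bits [9:6] mask the D, A, I and F exceptions on entry.
	 */
	unsigned long spsr = 0x5 | (0xf << 6);

	change_security_state(NON_SECURE);
	drop_el(&args, spsr, 0x80000000 /* hypothetical entry point */);
}
#endif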

/*
 * TODO: If we are not in EL3 then currently we only issue an SMC.
 * Add support for dropping into EL0 etc. Consider adding support
 * for switching from S-EL1 to S-EL0/1 etc.
 */
void __dead2 change_el(el_change_info_t *info)
{
	if (IS_IN_EL3()) {
		/*
		 * We can go anywhere from EL3. So find where.
		 * TODO: Lots to do if we are going non-secure.
		 * Flip the NS bit. Restore NS registers etc.
		 * Just doing the bare minimum for now.
		 */

		if (info->security_state == NON_SECURE)
			change_security_state(info->security_state);

		drop_el(&info->args, info->spsr, info->entrypoint);
	} else
		raise_el(&info->args);
}


/*******************************************************************************
 * The next two functions are weak definitions. Platform-specific code can
 * override them if it wishes to.
 ******************************************************************************/

/*******************************************************************************
 * Function that takes a memory layout into which BL31 has been either top or
 * bottom loaded. Using this information, it populates bl31_mem_layout to tell
 * BL31 how much memory it has access to and how much is available for use. It
 * does not need the address where BL31 has been loaded as BL31 will reclaim
 * all the memory used by BL2.
 * TODO: Revisit if this and init_bl2_mem_layout can be replaced by a single
 * routine.
 ******************************************************************************/
void init_bl31_mem_layout(const meminfo_t *bl2_mem_layout,
			  meminfo_t *bl31_mem_layout,
			  unsigned int load_type)
{
	if (load_type == BOT_LOAD) {
		/*
		 * ------------                             ^
		 * |   BL2    |                             |
		 * |----------|                 ^           |  BL2
		 * |          |                 | BL2 free  |  total
		 * |          |                 |   size    |  size
		 * |----------| BL2 free base   v           |
		 * |   BL31   |                             |
		 * ------------ BL2 total base              v
		 */
		unsigned long bl31_size;

		bl31_mem_layout->free_base = bl2_mem_layout->free_base;

		bl31_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
		bl31_mem_layout->free_size = bl2_mem_layout->total_size - bl31_size;
	} else {
		/*
		 * ------------                             ^
		 * |   BL31   |                             |
		 * |----------|                 ^           |  BL2
		 * |          |                 | BL2 free  |  total
		 * |          |                 |   size    |  size
		 * |----------| BL2 free base   v           |
		 * |   BL2    |                             |
		 * ------------ BL2 total base              v
		 */
		unsigned long bl2_size;

		bl31_mem_layout->free_base = bl2_mem_layout->total_base;

		bl2_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
		bl31_mem_layout->free_size = bl2_mem_layout->free_size + bl2_size;
	}

	bl31_mem_layout->total_base = bl2_mem_layout->total_base;
	bl31_mem_layout->total_size = bl2_mem_layout->total_size;
	bl31_mem_layout->attr = load_type;

	flush_dcache_range((unsigned long) bl31_mem_layout, sizeof(meminfo_t));
	return;
}
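
/*
 * Illustrative sketch, compiled out: the BOT_LOAD arithmetic above, with
 * made-up numbers. Assume BL2 owns 0x40000 bytes at 0x04000000 and that a
 * bottom-loaded BL31 of 0x10000 bytes pushed BL2's free base up to
 * 0x04010000; BL31 is then given everything except its own footprint,
 * including the memory BL2 itself occupies, since BL31 will reclaim it.
 */
#if 0
static void init_bl31_mem_layout_example(void)
{
	meminfo_t bl2_layout = {
		.total_base = 0x04000000,
		.total_size = 0x40000,
		.free_base  = 0x04010000,	/* first byte above BL31 */
		.free_size  = 0x28000,		/* free space left for BL2 */
		.attr       = BOT_LOAD,
	};
	meminfo_t bl31_layout;

	init_bl31_mem_layout(&bl2_layout, &bl31_layout, BOT_LOAD);

	/* bl31_size = 0x04010000 - 0x04000000 = 0x10000 */
	assert(bl31_layout.free_base == 0x04010000);
	assert(bl31_layout.free_size == 0x40000 - 0x10000);
	assert(bl31_layout.total_base == 0x04000000);
	assert(bl31_layout.total_size == 0x40000);
}
#endif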

/*******************************************************************************
 * Function that takes a memory layout into which BL2 has been either top or
 * bottom loaded along with the address where BL2 has been loaded in it. Using
 * this information, it populates bl2_mem_layout to tell BL2 how much memory
 * it has access to and how much is available for use.
 ******************************************************************************/
void init_bl2_mem_layout(meminfo_t *bl1_mem_layout,
			 meminfo_t *bl2_mem_layout,
			 unsigned int load_type,
			 unsigned long bl2_base)
{
	unsigned long tmp;

	if (load_type == BOT_LOAD) {
		bl2_mem_layout->total_base = bl2_base;
		tmp = bl1_mem_layout->free_base - bl2_base;
		bl2_mem_layout->total_size = bl1_mem_layout->free_size + tmp;

	} else {
		bl2_mem_layout->total_base = bl1_mem_layout->free_base;
		tmp = bl1_mem_layout->total_base + bl1_mem_layout->total_size;
		bl2_mem_layout->total_size = tmp - bl1_mem_layout->free_base;
	}

	bl2_mem_layout->free_base = bl1_mem_layout->free_base;
	bl2_mem_layout->free_size = bl1_mem_layout->free_size;
	bl2_mem_layout->attr = load_type;

	flush_dcache_range((unsigned long) bl2_mem_layout, sizeof(meminfo_t));
	return;
}
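
/*
 * Illustrative sketch, compiled out: the TOP_LOAD branch above, with made-up
 * numbers. BL1 owns 0x40000 bytes at 0x04000000, keeps the bottom 0x8000 for
 * itself, and a top-loaded BL2 has already shrunk the free region by its own
 * 0x8000 footprint. BL2 is then told it owns everything above BL1.
 */
#if 0
static void init_bl2_mem_layout_example(void)
{
	meminfo_t bl1_layout = {
		.total_base = 0x04000000,
		.total_size = 0x40000,
		.free_base  = 0x04008000,	/* first byte above BL1 */
		.free_size  = 0x30000,		/* free space below BL2 */
		.attr       = TOP_LOAD,
	};
	meminfo_t bl2_layout;

	init_bl2_mem_layout(&bl1_layout, &bl2_layout, TOP_LOAD,
			    0x04038000 /* hypothetical BL2 base, unused for TOP_LOAD */);

	assert(bl2_layout.total_base == 0x04008000);
	assert(bl2_layout.total_size == 0x38000);	/* 0x04040000 - 0x04008000 */
	assert(bl2_layout.free_base == 0x04008000);
	assert(bl2_layout.free_size == 0x30000);
}
#endif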

static void dump_load_info(unsigned long image_load_addr,
			   unsigned long image_size,
			   const meminfo_t *mem_layout)
{
#if DEBUG
	printf("Trying to load image at address 0x%lx, size = 0x%lx\r\n",
		image_load_addr, image_size);
	printf("Current memory layout:\r\n");
	printf("  total region = [0x%lx, 0x%lx]\r\n", mem_layout->total_base,
			mem_layout->total_base + mem_layout->total_size);
	printf("  free region = [0x%lx, 0x%lx]\r\n", mem_layout->free_base,
			mem_layout->free_base + mem_layout->free_size);
#endif
}

/* Generic function to return the size of an image */
unsigned long image_size(const char *image_name)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	size_t image_size = 0;
	int io_result = IO_FAIL;

	assert(image_name != NULL);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image '%s' (%i)\n",
			image_name, io_result);
		return 0;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image '%s' (%i)\n",
			image_name, io_result);
		return 0;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image '%s' file (%i)\n",
			image_name, io_result);
	}
	io_result = io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this
	 * bootloader stage
	 */
	io_result = io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return image_size;
}
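
/*
 * Illustrative sketch, compiled out: a typical caller pattern for
 * image_size(), checking that an image will fit before attempting to load
 * it. The image name string is a made-up example; real callers pass the
 * platform's image name define.
 */
#if 0
static int image_fits_example(const meminfo_t *mem_layout)
{
	unsigned long size = image_size("bl31.bin" /* hypothetical name */);

	/* image_size() returns 0 if the image could not be found or sized. */
	if (size == 0 || size > mem_layout->free_size) {
		WARN("Example image is missing or does not fit\n");
		return 0;
	}

	return 1;
}
#endif
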
/*******************************************************************************
 * Generic function to load an image into the trusted RAM,
 * given a name, extents of free memory & whether the image should be loaded at
 * the bottom or top of the free memory. It updates the memory layout if the
 * load is successful.
 ******************************************************************************/
unsigned long load_image(meminfo_t *mem_layout,
			 const char *image_name,
			 unsigned int load_type,
			 unsigned long fixed_addr)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	unsigned long temp_image_base = 0;
	unsigned long image_base = 0;
	long offset = 0;
	size_t image_size = 0;
	size_t bytes_read = 0;
	int io_result = IO_FAIL;

	assert(mem_layout != NULL);
	assert(image_name != NULL);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image '%s' (%i)\n",
			image_name, io_result);
		return 0;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image '%s' (%i)\n",
			image_name, io_result);
		return 0;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image '%s' file (%i)\n",
			image_name, io_result);
		goto fail;
	}

	/* See if we have enough space */
	if (image_size > mem_layout->free_size) {
		WARN("Cannot load '%s' file: Not enough space.\n",
			image_name);
		dump_load_info(0, image_size, mem_layout);
		goto fail;
	}

	switch (load_type) {

	case TOP_LOAD:

	  /* Load the image in the top of free memory */
	  temp_image_base = mem_layout->free_base + mem_layout->free_size;
	  temp_image_base -= image_size;

	  /* Page align base address and check whether the image still fits */
	  image_base = page_align(temp_image_base, DOWN);
	  assert(image_base <= temp_image_base);

	  if (image_base < mem_layout->free_base) {
		WARN("Cannot load '%s' file: Not enough space.\n",
			image_name);
		dump_load_info(image_base, image_size, mem_layout);
		goto fail;
	  }

	  /* Calculate the amount of extra memory used due to alignment */
	  offset = temp_image_base - image_base;

	  break;

	case BOT_LOAD:

	  /* Load the image in the bottom of free memory */
	  temp_image_base = mem_layout->free_base;

	  /* Page align base address and check whether the image still fits */
	  image_base = page_align(temp_image_base, UP);
	  assert(image_base >= temp_image_base);

	  if (image_base + image_size >
	      mem_layout->free_base + mem_layout->free_size) {
		  WARN("Cannot load '%s' file: Not enough space.\n",
			  image_name);
		  dump_load_info(image_base, image_size, mem_layout);
		  goto fail;
	  }

	  /* Calculate the amount of extra memory used due to alignment */
	  offset = image_base - temp_image_base;

	  break;

	default:
	  assert(0);

	}

	/*
	 * Some images must be loaded at a fixed address, not a dynamic one.
	 *
	 * This has been implemented as a hack on top of the existing dynamic
	 * loading mechanism, for the time being. If the 'fixed_addr' function
	 * argument is different from zero, then it will force the load address.
	 * So we still have this principle of top/bottom loading but the code
	 * determining the load address is bypassed and the load address is
	 * forced to the fixed one.
	 *
	 * This can result in quite a lot of wasted space because we still use
	 * a single meminfo structure to represent the extents of free memory,
	 * where we should use some sort of linked list.
	 *
	 * E.g. if we want to load BL2 at address 0x04020000, the resulting
	 *      memory layout should look as follows:
	 * ------------ 0x04040000
	 * |          |  <- Free space (1)
	 * |----------|
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 *
	 * But in the current hacky implementation, we need to specify
	 * whether BL2 is loaded at the top or bottom of the free memory.
	 * E.g. if BL2 is considered as top-loaded, the meminfo structure
	 * will give the following view of the memory, hiding the chunk of
	 * free memory above BL2:
	 * ------------ 0x04040000
	 * |          |
	 * |          |
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 */
	if (fixed_addr != 0) {
		/* Load the image at the given address. */
		image_base = fixed_addr;

		/* Check whether the image fits. */
		if ((image_base < mem_layout->free_base) ||
		    (image_base + image_size >
		       mem_layout->free_base + mem_layout->free_size)) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			goto fail;
		}

		/* Check whether the fixed load address is page-aligned. */
		if (!is_page_aligned(image_base)) {
			WARN("Cannot load '%s' file at unaligned address 0x%lx\n",
				image_name, fixed_addr);
			goto fail;
		}

		/*
		 * Calculate the amount of extra memory used due to fixed
		 * loading.
		 */
		if (load_type == TOP_LOAD) {
			unsigned long max_addr, space_used;
			/*
			 * ------------ max_addr
			 * | /wasted/ |                 | offset
			 * |..........|..............................
			 * |  image   |                 | image_size
			 * |----------| fixed_addr
			 * |          |
			 * |          |
			 * ------------ total_base
			 */
			max_addr = mem_layout->total_base + mem_layout->total_size;
			/*
			 * Compute the amount of memory used by the image.
			 * Corresponds to all space above the image load
			 * address.
			 */
			space_used = max_addr - fixed_addr;
			/*
			 * Calculate the amount of wasted memory within the
			 * amount of memory used by the image.
			 */
			offset = space_used - image_size;
		} else { /* BOT_LOAD */
			/*
			 * ------------
			 * |          |
			 * |          |
			 * |----------|
			 * |  image   |
			 * |..........| fixed_addr
			 * | /wasted/ |                 | offset
			 * ------------ total_base
			 */
			offset = fixed_addr - mem_layout->total_base;
		}
	}

	/* We have enough space so load the image now */
	/* TODO: Consider whether to try to recover/retry a partially successful read */
	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
		WARN("Failed to load '%s' file (%i)\n", image_name, io_result);
		goto fail;
	}

	/*
	 * The file has been successfully loaded. Update the free memory
	 * data structure & flush the contents of the TZRAM so that
	 * the next EL can see it.
	 */
	/* Update the memory contents */
	flush_dcache_range(image_base, image_size);

	mem_layout->free_size -= image_size + offset;

	/* Update the base of free memory since it has moved up */
	if (load_type == BOT_LOAD)
		mem_layout->free_base += offset + image_size;

exit:
	io_result = io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this bootloader stage */
	io_result = io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return image_base;

fail:	image_base = 0;
	goto exit;
}
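
/*
 * Illustrative sketch, compiled out: how a bootloader stage might call
 * load_image(). The image name is a made-up example; real callers pass the
 * platform's image name define and either 0 (dynamic top/bottom loading) or
 * the platform's fixed load address.
 */
#if 0
static unsigned long load_image_example(meminfo_t *mem_layout)
{
	unsigned long image_base;

	/* Top-load the image into the free region tracked by mem_layout. */
	image_base = load_image(mem_layout, "bl33.bin" /* hypothetical name */,
				TOP_LOAD, 0 /* no fixed address */);

	/* load_image() returns 0 on any failure. */
	if (image_base == 0)
		WARN("Failed to load the example image\n");

	return image_base;
}
#endif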

/*******************************************************************************
 * Run a loaded image from the given entry point. This could result in either
 * dropping into a lower exception level or jumping to a higher exception level.
 * The only way of doing the latter is through an SMC. In either case, set up
 * the parameters for the EL change request correctly.
 ******************************************************************************/
void __dead2 run_image(unsigned long entrypoint,
		       unsigned long spsr,
		       unsigned long target_security_state,
		       void *first_arg,
		       void *second_arg)
{
	el_change_info_t run_image_info;

	/* Tell the next EL what we want done */
	run_image_info.args.arg0 = RUN_IMAGE;
	run_image_info.entrypoint = entrypoint;
	run_image_info.spsr = spsr;
	run_image_info.security_state = target_security_state;

	/*
	 * If we are in EL3 then only an eret can take us to the desired
	 * exception level. Else, for the time being, assume that we have
	 * to jump to a higher EL and issue an SMC. Contents of argY
	 * will go into the general purpose register xY, e.g. arg0->x0
	 */
	if (IS_IN_EL3()) {
		run_image_info.args.arg1 = (unsigned long) first_arg;
		run_image_info.args.arg2 = (unsigned long) second_arg;
	} else {
		run_image_info.args.arg1 = entrypoint;
		run_image_info.args.arg2 = spsr;
		run_image_info.args.arg3 = (unsigned long) first_arg;
		run_image_info.args.arg4 = (unsigned long) second_arg;
	}

	change_el(&run_image_info);
}
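
/*
 * Illustrative sketch, compiled out: handing control to a previously loaded
 * image via run_image(). The SPSR encoding uses raw architectural bit values
 * rather than any helper macro, and the entry point and boot_params pointer
 * are made-up examples; real callers derive the entry point from load_image()
 * and build the SPSR for the exception level the next image runs in.
 */
#if 0
static void __dead2 run_loaded_image_example(unsigned long entrypoint,
					     void *boot_params)
{
	/* AArch64 SPSR: M[3:0] = 0b1001 selects EL2h; D, A, I, F masked. */
	unsigned long spsr = 0x9 | (0xf << 6);

	/*
	 * In EL3, first_arg/second_arg land in x0/x1 of the next image;
	 * below EL3 they are forwarded through an SMC instead.
	 */
	run_image(entrypoint, spsr, NON_SECURE, boot_params, NULL);
}
#endif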
563