xref: /rk3399_ARM-atf/common/bl_common.c (revision 08c28d5385f8fae3d5c61475a109b86ef11770d0)
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <semihosting.h>
#include <bl_common.h>
#include "io_storage.h"
#include "debug.h"

unsigned long page_align(unsigned long value, unsigned dir)
{
	unsigned long page_size = 1 << FOUR_KB_SHIFT;

	/* Round the value to a page boundary, up or down as requested */
	if (value & (page_size - 1)) {
		value &= ~(page_size - 1);
		if (dir == UP)
			value += page_size;
	}

	return value;
}
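
/*
 * Worked example (illustrative values only): with a 4KB page size,
 * page_align(0x1234, UP) returns 0x2000 and page_align(0x1234, DOWN)
 * returns 0x1000; a value that is already page-aligned is returned
 * unchanged in either direction.
 */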

static inline unsigned int is_page_aligned(unsigned long addr)
{
	const unsigned long page_size = 1 << FOUR_KB_SHIFT;

	return (addr & (page_size - 1)) == 0;
}

void change_security_state(unsigned int target_security_state)
{
	unsigned long scr = read_scr();

	if (target_security_state == SECURE)
		scr &= ~SCR_NS_BIT;
	else if (target_security_state == NON_SECURE)
		scr |= SCR_NS_BIT;
	else
		assert(0);

	write_scr(scr);
}

void __dead2 drop_el(aapcs64_params *args,
		     unsigned long spsr,
		     unsigned long entrypoint)
{
	write_spsr(spsr);
	write_elr(entrypoint);
	eret(args->arg0,
	     args->arg1,
	     args->arg2,
	     args->arg3,
	     args->arg4,
	     args->arg5,
	     args->arg6,
	     args->arg7);
}

void __dead2 raise_el(aapcs64_params *args)
{
	smc(args->arg0,
	    args->arg1,
	    args->arg2,
	    args->arg3,
	    args->arg4,
	    args->arg5,
	    args->arg6,
	    args->arg7);
}

/*
 * TODO: If we are not in EL3 then currently we only issue an SMC.
 * Add support for dropping into EL0 etc. Consider adding support
 * for switching from S-EL1 to S-EL0/1 etc.
 */
void __dead2 change_el(el_change_info *info)
{
	unsigned long current_el = read_current_el();

	if (GET_EL(current_el) == MODE_EL3) {
		/*
		 * We can go anywhere from EL3. So find where.
		 * TODO: Lots to do if we are going non-secure.
		 * Flip the NS bit. Restore NS registers etc.
		 * Just doing the bare minimum for now.
		 */

		if (info->security_state == NON_SECURE)
			change_security_state(info->security_state);

		drop_el(&info->args, info->spsr, info->entrypoint);
	} else
		raise_el(&info->args);
}

/* TODO: add a parameter for DAIF. Not needed right now. */
unsigned long make_spsr(unsigned long target_el,
			unsigned long target_sp,
			unsigned long target_rw)
{
	unsigned long spsr;

	/* Disable all exceptions & set up the EL */
	spsr = (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
		<< PSR_DAIF_SHIFT;
	spsr |= PSR_MODE(target_rw, target_el, target_sp);

	return spsr;
}
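
/*
 * Illustrative use of make_spsr(), as a sketch only: the MODE_* names below
 * are assumed to come from the architecture headers and are not defined in
 * this file.
 *
 *	spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64);
 *
 * The resulting SPSR returns to AArch64 EL1 using SP_EL1, with the D, A, I
 * and F exceptions all masked.
 */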

/*******************************************************************************
 * The next two functions are the weak definitions. Platform-specific
 * code can override them if it wishes to.
 ******************************************************************************/

/*******************************************************************************
 * Function that takes a memory layout into which BL31 has been either top or
 * bottom loaded. Using this information, it populates bl31_mem_layout to tell
 * BL31 how much memory it has access to and how much is available for use. It
 * does not need the address where BL31 has been loaded as BL31 will reclaim
 * all the memory used by BL2.
 * TODO: Revisit if this and init_bl2_mem_layout can be replaced by a single
 * routine.
 ******************************************************************************/
void init_bl31_mem_layout(const meminfo *bl2_mem_layout,
			  meminfo *bl31_mem_layout,
			  unsigned int load_type)
{
	if (load_type == BOT_LOAD) {
		/*
		 * ------------                             ^
		 * |   BL2    |                             |
		 * |----------|                 ^           |  BL2
		 * |          |                 | BL2 free  |  total
		 * |          |                 |   size    |  size
		 * |----------| BL2 free base   v           |
		 * |   BL31   |                             |
		 * ------------ BL2 total base              v
		 */
		unsigned long bl31_size;

		bl31_mem_layout->free_base = bl2_mem_layout->free_base;

		bl31_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
		bl31_mem_layout->free_size = bl2_mem_layout->total_size - bl31_size;
	} else {
		/*
		 * ------------                             ^
		 * |   BL31   |                             |
		 * |----------|                 ^           |  BL2
		 * |          |                 | BL2 free  |  total
		 * |          |                 |   size    |  size
		 * |----------| BL2 free base   v           |
		 * |   BL2    |                             |
		 * ------------ BL2 total base              v
		 */
		unsigned long bl2_size;

		bl31_mem_layout->free_base = bl2_mem_layout->total_base;

		bl2_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
		bl31_mem_layout->free_size = bl2_mem_layout->free_size + bl2_size;
	}

	bl31_mem_layout->total_base = bl2_mem_layout->total_base;
	bl31_mem_layout->total_size = bl2_mem_layout->total_size;
	bl31_mem_layout->attr = load_type;

	flush_dcache_range((unsigned long) bl31_mem_layout, sizeof(meminfo));
	return;
}
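
/*
 * Worked example for the BOT_LOAD case above (illustrative addresses only):
 * if BL2 reports total_base = 0x04000000, total_size = 0x40000 and
 * free_base = 0x04010000, then BL31 occupies the bottom 0x10000 bytes, so
 * bl31_mem_layout ends up with free_base = 0x04010000 and
 * free_size = 0x30000, while total_base and total_size are inherited
 * unchanged.
 */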

/*******************************************************************************
 * Function that takes a memory layout into which BL2 has been either top or
 * bottom loaded along with the address where BL2 has been loaded in it. Using
 * this information, it populates bl2_mem_layout to tell BL2 how much memory
 * it has access to and how much is available for use.
 ******************************************************************************/
void init_bl2_mem_layout(meminfo *bl1_mem_layout,
			 meminfo *bl2_mem_layout,
			 unsigned int load_type,
			 unsigned long bl2_base)
{
	unsigned long tmp;

	if (load_type == BOT_LOAD) {
		bl2_mem_layout->total_base = bl2_base;
		tmp = bl1_mem_layout->free_base - bl2_base;
		bl2_mem_layout->total_size = bl1_mem_layout->free_size + tmp;

	} else {
		bl2_mem_layout->total_base = bl1_mem_layout->free_base;
		tmp = bl1_mem_layout->total_base + bl1_mem_layout->total_size;
		bl2_mem_layout->total_size = tmp - bl1_mem_layout->free_base;
	}

	bl2_mem_layout->free_base = bl1_mem_layout->free_base;
	bl2_mem_layout->free_size = bl1_mem_layout->free_size;
	bl2_mem_layout->attr = load_type;

	flush_dcache_range((unsigned long) bl2_mem_layout, sizeof(meminfo));
	return;
}
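
/*
 * Worked example for the TOP_LOAD case above (illustrative addresses only):
 * if BL1 reports total_base = 0x04000000, total_size = 0x40000,
 * free_base = 0x04010000 and free_size = 0x20000 (with BL2 itself occupying
 * the topmost 0x10000 bytes), then BL2 is told total_base = 0x04010000 and
 * total_size = 0x30000, and it inherits the same free region of 0x20000
 * bytes starting at 0x04010000.
 */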

static void dump_load_info(unsigned long image_load_addr,
			   unsigned long image_size,
			   const meminfo *mem_layout)
{
#if DEBUG
	printf("Trying to load image at address 0x%lx, size = 0x%lx\r\n",
		image_load_addr, image_size);
	printf("Current memory layout:\r\n");
	printf("  total region = [0x%lx, 0x%lx]\r\n", mem_layout->total_base,
			mem_layout->total_base + mem_layout->total_size);
	printf("  free region = [0x%lx, 0x%lx]\r\n", mem_layout->free_base,
			mem_layout->free_base + mem_layout->free_size);
#endif
}

/*******************************************************************************
 * Generic function to load an image into trusted RAM, given a name, the
 * extents of free memory and whether the image should be loaded at the bottom
 * or the top of the free memory. It updates the memory layout if the load is
 * successful.
 ******************************************************************************/
unsigned long load_image(meminfo *mem_layout,
			 const char *image_name,
			 unsigned int load_type,
			 unsigned long fixed_addr)
{
	io_dev_handle dev_handle;
	io_handle image_handle;
	void *image_spec;
	unsigned long temp_image_base = 0;
	unsigned long image_base = 0;
	long offset = 0;
	size_t image_size = 0;
	size_t bytes_read = 0;
	int io_result = IO_FAIL;

	assert(mem_layout != NULL);
	assert(image_name != NULL);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image '%s' (%i)\n",
			image_name, io_result);
		return 0;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image '%s' (%i)\n",
			image_name, io_result);
		return 0;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image '%s' file (%i)\n",
			image_name, io_result);
		goto fail;
	}

	/* See if we have enough space */
	if (image_size > mem_layout->free_size) {
		WARN("Cannot load '%s' file: Not enough space.\n",
			image_name);
		dump_load_info(0, image_size, mem_layout);
		goto fail;
	}

	switch (load_type) {

	case TOP_LOAD:

	  /* Load the image at the top of free memory */
	  temp_image_base = mem_layout->free_base + mem_layout->free_size;
	  temp_image_base -= image_size;

	  /* Page align base address and check whether the image still fits */
	  image_base = page_align(temp_image_base, DOWN);
	  assert(image_base <= temp_image_base);

	  if (image_base < mem_layout->free_base) {
		WARN("Cannot load '%s' file: Not enough space.\n",
			image_name);
		dump_load_info(image_base, image_size, mem_layout);
		goto fail;
	  }

	  /* Calculate the amount of extra memory used due to alignment */
	  offset = temp_image_base - image_base;

	  break;

	case BOT_LOAD:

	  /* Load the image at the bottom of free memory */
	  temp_image_base = mem_layout->free_base;
	  image_base = page_align(temp_image_base, UP);
	  assert(image_base >= temp_image_base);

	  /* Page align base address and check whether the image still fits */
	  if (image_base + image_size >
	      mem_layout->free_base + mem_layout->free_size) {
		  WARN("Cannot load '%s' file: Not enough space.\n",
			  image_name);
		  dump_load_info(image_base, image_size, mem_layout);
		  goto fail;
	  }

	  /* Calculate the amount of extra memory used due to alignment */
	  offset = image_base - temp_image_base;

	  break;

	default:
	  assert(0);

	}

	/*
	 * Some images must be loaded at a fixed address, not a dynamic one.
	 *
	 * This has been implemented as a hack on top of the existing dynamic
	 * loading mechanism, for the time being. If the 'fixed_addr' function
	 * argument is non-zero, then it forces the load address. So we still
	 * have this principle of top/bottom loading but the code determining
	 * the load address is bypassed and the load address is forced to the
	 * fixed one.
	 *
	 * This can result in quite a lot of wasted space because we still use
	 * a single meminfo structure to represent the extents of free memory,
	 * where we should use some sort of linked list.
	 *
	 * E.g. if we want to load BL2 at address 0x04020000, the resulting
	 *      memory layout should look as follows:
	 * ------------ 0x04040000
	 * |          |  <- Free space (1)
	 * |----------|
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 *
	 * But in the current hacky implementation, we'll need to specify
	 * whether BL2 is loaded at the top or bottom of the free memory.
	 * E.g. if BL2 is considered as top-loaded, the meminfo structure
	 * will give the following view of the memory, hiding the chunk of
	 * free memory above BL2:
	 * ------------ 0x04040000
	 * |          |
	 * |          |
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 */
	if (fixed_addr != 0) {
		/* Load the image at the given address. */
		image_base = fixed_addr;

		/* Check whether the image fits. */
		if ((image_base < mem_layout->free_base) ||
		    (image_base + image_size >
		       mem_layout->free_base + mem_layout->free_size)) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			goto fail;
		}

		/* Check whether the fixed load address is page-aligned. */
		if (!is_page_aligned(image_base)) {
			WARN("Cannot load '%s' file at unaligned address 0x%lx\n",
				image_name, fixed_addr);
			goto fail;
		}

		/*
		 * Calculate the amount of extra memory used due to fixed
		 * loading.
		 */
		if (load_type == TOP_LOAD) {
			unsigned long max_addr, space_used;
			/*
			 * ------------ max_addr
			 * | /wasted/ |                 | offset
			 * |..........|..............................
			 * |  image   |                 | image_size
			 * |----------| fixed_addr
			 * |          |
			 * |          |
			 * ------------ total_base
			 */
			max_addr = mem_layout->total_base + mem_layout->total_size;
			/*
			 * Compute the amount of memory used by the image.
			 * Corresponds to all space above the image load
			 * address.
			 */
			space_used = max_addr - fixed_addr;
			/*
			 * Calculate the amount of wasted memory within the
			 * amount of memory used by the image.
			 */
			offset = space_used - image_size;
		} else /* BOT_LOAD */
			/*
			 * ------------
			 * |          |
			 * |          |
			 * |----------|
			 * |  image   |
			 * |..........| fixed_addr
			 * | /wasted/ |                 | offset
			 * ------------ total_base
			 */
			offset = fixed_addr - mem_layout->total_base;
	}

	/* We have enough space so load the image now */
	/* TODO: Consider whether to try to recover/retry a partially successful read */
	io_result = io_read(image_handle, (void *)image_base, image_size, &bytes_read);
	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
		WARN("Failed to load '%s' file (%i)\n", image_name, io_result);
		goto fail;
	}

	/*
	 * File has been successfully loaded. Update the free memory
	 * data structure & flush the contents of the TZRAM so that
	 * the next EL can see it.
	 */
	/* Update the memory contents */
	flush_dcache_range(image_base, image_size);

	mem_layout->free_size -= image_size + offset;

	/* Update the base of free memory since it has moved up */
	if (load_type == BOT_LOAD)
		mem_layout->free_base += offset + image_size;

exit:
	io_result = io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this bootloader stage */
	io_result = io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return image_base;

fail:	image_base = 0;
	goto exit;
}

/*******************************************************************************
 * Run a loaded image from the given entry point. This could result in either
 * dropping into a lower exception level or jumping to a higher exception level.
 * The only way of doing the latter is through an SMC. In either case, set up
 * the parameters for the EL change request correctly.
 ******************************************************************************/
void __dead2 run_image(unsigned long entrypoint,
		       unsigned long spsr,
		       unsigned long target_security_state,
		       void *first_arg,
		       void *second_arg)
{
	el_change_info run_image_info;
	unsigned long current_el = read_current_el();

	/* Tell the next EL what we want done */
	run_image_info.args.arg0 = RUN_IMAGE;
	run_image_info.entrypoint = entrypoint;
	run_image_info.spsr = spsr;
	run_image_info.security_state = target_security_state;

	/*
	 * If we are in EL3 then only an eret can take us to the desired
	 * exception level. Else for the time being assume that we have
	 * to jump to a higher EL and issue an SMC. Contents of argY
	 * will go into the general-purpose register xY, e.g. arg0->x0.
	 */
	if (GET_EL(current_el) == MODE_EL3) {
		run_image_info.args.arg1 = (unsigned long) first_arg;
		run_image_info.args.arg2 = (unsigned long) second_arg;
	} else {
		run_image_info.args.arg1 = entrypoint;
		run_image_info.args.arg2 = spsr;
		run_image_info.args.arg3 = (unsigned long) first_arg;
		run_image_info.args.arg4 = (unsigned long) second_arg;
	}

	change_el(&run_image_info);
}
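
/*
 * Minimal usage sketch tying load_image() and run_image() together, loosely
 * modelled on what a BL1-style caller might do. The names BL2_IMAGE_NAME,
 * MODE_EL1, MODE_SP_ELX and MODE_RW_64 are assumptions borrowed from the
 * platform and architecture headers, not definitions made here.
 *
 *	meminfo *layout = ...;	(this stage's view of trusted RAM)
 *	unsigned long ep, spsr;
 *
 *	ep = load_image(layout, BL2_IMAGE_NAME, TOP_LOAD, 0);
 *	assert(ep != 0);
 *	spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64);
 *	run_image(ep, spsr, SECURE, layout, NULL);
 *
 * run_image() never returns: it either performs an exception return into the
 * loaded image or raises an SMC to a higher EL, as described above.
 */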