xref: /rk3399_ARM-atf/common/bl_common.c (revision 16948ae1d9e14190229f0fd8602f8cc0f25d57d2)
/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <errno.h>
#include <io_storage.h>
#include <platform.h>

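/*
 * Align 'value' to a 4KB page boundary: round up when 'dir' is UP, round
 * down otherwise. An already aligned value is returned unchanged.
 * Illustrative examples (assuming the UP/DOWN direction constants defined
 * alongside this function's prototype):
 *   page_align(0x1234, UP)   -> 0x2000
 *   page_align(0x1234, DOWN) -> 0x1000
 *   page_align(0x3000, UP)   -> 0x3000
 */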
unsigned long page_align(unsigned long value, unsigned dir)
{
	unsigned long page_size = 1 << FOUR_KB_SHIFT;

	/* Round the value up or down to the next page boundary, as requested */
	if (value & (page_size - 1)) {
		value &= ~(page_size - 1);
		if (dir == UP)
			value += page_size;
	}

	return value;
}

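/* Return 1 if 'addr' lies on a 4KB page boundary, 0 otherwise */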
static inline unsigned int is_page_aligned(unsigned long addr)
{
	const unsigned long page_size = 1 << FOUR_KB_SHIFT;

	return (addr & (page_size - 1)) == 0;
}

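/*
 * Program the NS bit of SCR_EL3 so that lower exception levels subsequently
 * run in the requested security state.
 */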
void change_security_state(unsigned int target_security_state)
{
	unsigned long scr = read_scr();

	assert(sec_state_is_valid(target_security_state));
	if (target_security_state == SECURE)
		scr &= ~SCR_NS_BIT;
	else
		scr |= SCR_NS_BIT;

	write_scr(scr);
}

/******************************************************************************
 * Determine whether the memory region delimited by 'addr' and 'size' is free,
 * given the extents of free memory.
 * Return 1 if it is free, 0 otherwise.
 *****************************************************************************/
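/*
 * Illustrative example: with free_base = 0x1000 and free_size = 0x1000
 * (free region [0x1000, 0x2000)), a request for addr = 0x1800, size = 0x400
 * is free, whereas addr = 0x1e00, size = 0x400 is not, as it overruns the
 * top of the free region.
 */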
static int is_mem_free(uint64_t free_base, size_t free_size,
		       uint64_t addr, size_t size)
{
	return (addr >= free_base) && (addr + size <= free_base + free_size);
}

/******************************************************************************
 * Inside a given memory region, determine whether a sub-region of memory is
 * closer to the top or the bottom of the encompassing region. Return the
 * size of the smallest chunk of free memory surrounding the sub-region in
 * 'small_chunk_size'.
 *****************************************************************************/
static unsigned int choose_mem_pos(uint64_t mem_start, uint64_t mem_end,
				   uint64_t submem_start, uint64_t submem_end,
				   size_t *small_chunk_size)
{
	size_t top_chunk_size, bottom_chunk_size;

	assert(mem_start <= submem_start);
	assert(submem_start <= submem_end);
	assert(submem_end <= mem_end);
	assert(small_chunk_size != NULL);

	top_chunk_size = mem_end - submem_end;
	bottom_chunk_size = submem_start - mem_start;

	if (top_chunk_size < bottom_chunk_size) {
		*small_chunk_size = top_chunk_size;
		return TOP;
	} else {
		*small_chunk_size = bottom_chunk_size;
		return BOTTOM;
	}
}

/******************************************************************************
 * Reserve the memory region delimited by 'addr' and 'size'. The extents of
 * free memory are passed in 'free_base' and 'free_size' and they will be
 * updated to reflect the memory usage.
 * The caller must ensure the memory to reserve is free.
 *****************************************************************************/
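/*
 * Illustrative example: with a free region of [0x1000, 0x9000)
 * (free_base = 0x1000, free_size = 0x8000), reserving addr = 0x7000,
 * size = 0x1000 discards the 0x1000 bytes above the sub-region (smaller
 * than the 0x6000 bytes below it), leaving a free region of
 * [0x1000, 0x7000) (free_base = 0x1000, free_size = 0x6000).
 */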
void reserve_mem(uint64_t *free_base, size_t *free_size,
		 uint64_t addr, size_t size)
{
	size_t discard_size;
	size_t reserved_size;
	unsigned int pos;

	assert(free_base != NULL);
	assert(free_size != NULL);
	assert(is_mem_free(*free_base, *free_size, addr, size));

	pos = choose_mem_pos(*free_base, *free_base + *free_size,
			     addr, addr + size,
			     &discard_size);

	reserved_size = size + discard_size;
	*free_size -= reserved_size;

	if (pos == BOTTOM)
		*free_base = addr + size;

	VERBOSE("Reserved 0x%lx bytes (discarded 0x%lx bytes %s)\n",
	     reserved_size, discard_size,
	     pos == TOP ? "above" : "below");
}

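/* Log the requested load address/size and the current memory layout */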
static void dump_load_info(unsigned long image_load_addr,
			   unsigned long image_size,
			   const meminfo_t *mem_layout)
{
	INFO("Trying to load image at address 0x%lx, size = 0x%lx\n",
		image_load_addr, image_size);
	INFO("Current memory layout:\n");
	INFO("  total region = [0x%lx, 0x%lx]\n", mem_layout->total_base,
			mem_layout->total_base + mem_layout->total_size);
	INFO("  free region = [0x%lx, 0x%lx]\n", mem_layout->free_base,
			mem_layout->free_base + mem_layout->free_size);
}

/* Generic function to return the size of an image; returns 0 on any failure */
unsigned long image_size(unsigned int image_id)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	size_t image_size = 0;
	int io_result = IO_FAIL;

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image id=%u (%i)\n",
			image_id, io_result);
		return 0;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image id=%u (%i)\n",
			image_id, io_result);
		return 0;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image id=%u (%i)\n",
			image_id, io_result);
	}
	io_result = io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this
	 * bootloader stage
	 */
	io_result = io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return image_size;
}

/*******************************************************************************
 * Generic function to load an image at a specific address given an image ID
 * and the extents of free memory. It updates the memory layout if the load is
 * successful, as well as the image information and the entry point information.
 * The caller might pass a NULL pointer for the entry point if it is not
 * interested in this information, e.g. because the image just needs to be
 * loaded in memory but won't ever be executed.
 * Returns 0 on success, a negative error code otherwise.
 ******************************************************************************/
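/*
 * Typical usage from a BL stage (illustrative sketch only; the image ID,
 * base address and memory layout accessor below are assumed platform/build
 * definitions, not provided by this file):
 *
 *	meminfo_t *layout = bl2_plat_sec_mem_layout();
 *	image_info_t image_info;
 *	entry_point_info_t ep_info;
 *
 *	SET_PARAM_HEAD(&image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
 *	SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, 0);
 *	if (load_image(layout, BL31_IMAGE_ID, BL31_BASE,
 *		       &image_info, &ep_info) != 0)
 *		panic();
 */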
int load_image(meminfo_t *mem_layout,
	       unsigned int image_id,
	       uint64_t image_base,
	       image_info_t *image_data,
	       entry_point_info_t *entry_point_info)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	size_t image_size;
	size_t bytes_read;
	int io_result = IO_FAIL;

	assert(mem_layout != NULL);
	assert(image_data != NULL);
	assert(image_data->h.version >= VERSION_1);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image id=%u (%i)\n",
			image_id, io_result);
		return io_result;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image id=%u (%i)\n",
			image_id, io_result);
		return io_result;
	}

	INFO("Loading image id=%u at address 0x%lx\n", image_id, image_base);

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image id=%u (%i)\n",
			image_id, io_result);
		goto exit;
	}

	/* Check that the memory where the image will be loaded is free */
	if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
			 image_base, image_size)) {
		WARN("Failed to reserve memory: 0x%lx - 0x%lx\n",
			image_base, image_base + image_size);
		dump_load_info(image_base, image_size, mem_layout);
		io_result = -ENOMEM;
		goto exit;
	}

	/* We have enough space so load the image now */
	/* TODO: Consider whether to try to recover/retry a partially successful read */
	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
		goto exit;
	}

	/*
	 * Update the memory usage info.
	 * This is done after the actual loading so that it is not updated when
	 * the load is unsuccessful.
	 * If the caller does not provide an entry point, bypass the memory
	 * reservation.
	 */
	if (entry_point_info != NULL) {
		reserve_mem(&mem_layout->free_base, &mem_layout->free_size,
				image_base, image_size);
	} else {
		INFO("Skip reserving memory: 0x%lx - 0x%lx\n",
				image_base, image_base + image_size);
	}

	image_data->image_base = image_base;
	image_data->image_size = image_size;

	if (entry_point_info != NULL)
		entry_point_info->pc = image_base;

	/*
	 * File has been successfully loaded.
	 * Flush the image in TZRAM so that the next EL can see it.
	 */
	flush_dcache_range(image_base, image_size);

	INFO("Image id=%u loaded: 0x%lx - 0x%lx\n", image_id, image_base,
	     image_base + image_size);

exit:
	io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this bootloader stage */
	io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return io_result;
}