xref: /rk3399_ARM-atf/common/bl_common.c (revision 7d37aa171158422b5ee7ee6c3cdad58f6aa066b4)
/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <errno.h>
#include <io_storage.h>
#include <platform.h>

/* Align 'value' to a 4KB page boundary, rounding up or down as 'dir' requests */
unsigned long page_align(unsigned long value, unsigned dir)
{
	unsigned long page_size = 1 << FOUR_KB_SHIFT;

	/* Round up to the next page boundary or truncate down to the current one */
	if (value & (page_size - 1)) {
		value &= ~(page_size - 1);
		if (dir == UP)
			value += page_size;
	}

	return value;
}
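
/*
 * Illustrative example (not part of the original file), assuming the UP and
 * DOWN direction macros from bl_common.h keep their usual meaning:
 *
 *   page_align(0x1234, UP);    returns 0x2000 (rounded up to the next 4KB page)
 *   page_align(0x1234, DOWN);  returns 0x1000 (truncated to the page base)
 *   page_align(0x3000, UP);    returns 0x3000 (already page-aligned, unchanged)
 */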

/* Return 1 if 'addr' is aligned to a 4KB page boundary, 0 otherwise */
static inline unsigned int is_page_aligned(unsigned long addr)
{
	const unsigned long page_size = 1 << FOUR_KB_SHIFT;

	return (addr & (page_size - 1)) == 0;
}

/*
 * Program the SCR_EL3.NS bit so that lower exception levels run in the
 * requested security state (SECURE or NON_SECURE).
 */
void change_security_state(unsigned int target_security_state)
{
	unsigned long scr = read_scr();

	assert(sec_state_is_valid(target_security_state));
	if (target_security_state == SECURE)
		scr &= ~SCR_NS_BIT;
	else
		scr |= SCR_NS_BIT;

	write_scr(scr);
}
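
/*
 * Illustrative usage (not part of the original file): callers typically flip
 * the security state just before handing control to the next image, along the
 * lines of
 *
 *   change_security_state(GET_SECURITY_STATE(next_ep->h.attr));
 *
 * where 'next_ep' is a hypothetical entry_point_info_t describing the image
 * about to run and GET_SECURITY_STATE() extracts SECURE/NON_SECURE from its
 * header attributes.
 */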

/******************************************************************************
 * Determine whether the memory region delimited by 'addr' and 'size' is free,
 * given the extents of free memory.
 * Return 1 if it is free, 0 otherwise.
 *****************************************************************************/
static int is_mem_free(uint64_t free_base, size_t free_size,
		       uint64_t addr, size_t size)
{
	return (addr >= free_base) && (addr + size <= free_base + free_size);
}

/******************************************************************************
 * Inside a given memory region, determine whether a sub-region of memory is
 * closer to the top or the bottom of the encompassing region. Return the size
 * of the smaller chunk of free memory surrounding the sub-region through
 * 'small_chunk_size'.
 *****************************************************************************/
static unsigned int choose_mem_pos(uint64_t mem_start, uint64_t mem_end,
				   uint64_t submem_start, uint64_t submem_end,
				   size_t *small_chunk_size)
{
	size_t top_chunk_size, bottom_chunk_size;

	assert(mem_start <= submem_start);
	assert(submem_start <= submem_end);
	assert(submem_end <= mem_end);
	assert(small_chunk_size != NULL);

	top_chunk_size = mem_end - submem_end;
	bottom_chunk_size = submem_start - mem_start;

	if (top_chunk_size < bottom_chunk_size) {
		*small_chunk_size = top_chunk_size;
		return TOP;
	} else {
		*small_chunk_size = bottom_chunk_size;
		return BOTTOM;
	}
}

/******************************************************************************
 * Reserve the memory region delimited by 'addr' and 'size'. The extents of
 * free memory are passed in 'free_base' and 'free_size' and they will be
 * updated to reflect the memory usage.
 * The caller must ensure the memory to reserve is free.
 *****************************************************************************/
void reserve_mem(uint64_t *free_base, size_t *free_size,
		 uint64_t addr, size_t size)
{
	size_t discard_size;
	size_t reserved_size;
	unsigned int pos;

	assert(free_base != NULL);
	assert(free_size != NULL);
	assert(is_mem_free(*free_base, *free_size, addr, size));

	pos = choose_mem_pos(*free_base, *free_base + *free_size,
			     addr, addr + size,
			     &discard_size);

	reserved_size = size + discard_size;
	*free_size -= reserved_size;

	if (pos == BOTTOM)
		*free_base = addr + size;

	VERBOSE("Reserved 0x%lx bytes (discarded 0x%lx bytes %s)\n",
		reserved_size, discard_size,
		pos == TOP ? "above" : "below");
}
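
/*
 * Illustrative example (not part of the original file). Suppose the free
 * region is [0x1000, 0x9000), i.e. free_base = 0x1000 and free_size = 0x8000,
 * and an image is reserved at addr = 0x7000 with size = 0x1000:
 *
 *   - The reserved block is closer to the top, so the 0x1000 bytes above it
 *     ([0x8000, 0x9000)) become the discarded chunk.
 *   - free_base stays 0x1000 and free_size shrinks to 0x6000, leaving
 *     [0x1000, 0x7000) available for later loads.
 */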

/* Print the requested load address and size alongside the current memory layout */
static void dump_load_info(unsigned long image_load_addr,
			   unsigned long image_size,
			   const meminfo_t *mem_layout)
{
	INFO("Trying to load image at address 0x%lx, size = 0x%lx\n",
		image_load_addr, image_size);
	INFO("Current memory layout:\n");
	INFO("  total region = [0x%lx, 0x%lx]\n", mem_layout->total_base,
			mem_layout->total_base + mem_layout->total_size);
	INFO("  free region = [0x%lx, 0x%lx]\n", mem_layout->free_base,
			mem_layout->free_base + mem_layout->free_size);
}

/* Generic function to return the size of an image; returns 0 on failure */
unsigned long image_size(unsigned int image_id)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	size_t image_size = 0;
	int io_result = IO_FAIL;

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image id=%u (%i)\n",
			image_id, io_result);
		return 0;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image id=%u (%i)\n",
			image_id, io_result);
		return 0;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image id=%u (%i)\n",
			image_id, io_result);
	}

	io_result = io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this
	 * bootloader stage
	 */
	io_result = io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return image_size;
}
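
/*
 * Illustrative usage (not part of the original file): a BL stage that only
 * needs to size an image before deciding where to place it might do
 *
 *   unsigned long bl33_size = image_size(BL33_IMAGE_ID);
 *   if (bl33_size == 0)
 *       ERROR("BL33 image is missing or unreadable\n");
 *
 * BL33_IMAGE_ID stands in for whichever image identifier the platform port
 * defines; a return value of 0 means the size could not be determined.
 */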

/*******************************************************************************
 * Generic function to load an image at a specific address given a name and
 * extents of free memory. It updates the memory layout if the load is
 * successful, as well as the image information and the entry point information.
 * The caller might pass a NULL pointer for the entry point if it is not
 * interested in this information, e.g. because the image just needs to be
 * loaded in memory but won't ever be executed.
 * Returns 0 on success, a negative error code otherwise.
 ******************************************************************************/
int load_image(meminfo_t *mem_layout,
	       unsigned int image_id,
	       uint64_t image_base,
	       image_info_t *image_data,
	       entry_point_info_t *entry_point_info)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	size_t image_size;
	size_t bytes_read;
	int io_result = IO_FAIL;

	assert(mem_layout != NULL);
	assert(image_data != NULL);
	assert(image_data->h.version >= VERSION_1);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image id=%u (%i)\n",
			image_id, io_result);
		return io_result;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image id=%u (%i)\n",
			image_id, io_result);
		return io_result;
	}

	INFO("Loading image id=%u at address 0x%lx\n", image_id, image_base);

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image id=%u (%i)\n",
			image_id, io_result);
		goto exit;
	}

	/* Check that the memory where the image will be loaded is free */
	if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
			 image_base, image_size)) {
		WARN("Failed to reserve memory: 0x%lx - 0x%lx\n",
			image_base, image_base + image_size);
		dump_load_info(image_base, image_size, mem_layout);
		io_result = -ENOMEM;
		goto exit;
	}

	/* We have enough space so load the image now */
	/* TODO: Consider whether to try to recover/retry a partially successful read */
	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
		goto exit;
	}

	/*
	 * Update the memory usage info.
	 * This is done after the actual loading so that it is not updated when
	 * the load is unsuccessful.
	 * If the caller does not provide an entry point, bypass the memory
	 * reservation.
	 */
	if (entry_point_info != NULL) {
		reserve_mem(&mem_layout->free_base, &mem_layout->free_size,
				image_base, image_size);
	} else {
		INFO("Skip reserving memory: 0x%lx - 0x%lx\n",
				image_base, image_base + image_size);
	}

	image_data->image_base = image_base;
	image_data->image_size = image_size;

	if (entry_point_info != NULL)
		entry_point_info->pc = image_base;

	/*
	 * File has been successfully loaded.
	 * Flush the image in TZRAM so that the next EL can see it.
	 */
	flush_dcache_range(image_base, image_size);

	INFO("Image id=%u loaded: 0x%lx - 0x%lx\n", image_id, image_base,
	     image_base + image_size);

exit:
	io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this bootloader stage */
	io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return io_result;
}
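
/*
 * Illustrative usage (not part of the original file): a BL2-style caller would
 * load an image into the free region tracked by its meminfo_t and let this
 * function fill in the image and entry point information, e.g.
 *
 *   image_info_t image_info;       // header set up so that h.version >= VERSION_1
 *   entry_point_info_t ep_info;    // header set up likewise
 *
 *   int err = load_image(bl2_mem_layout, BL31_IMAGE_ID, BL31_BASE,
 *                        &image_info, &ep_info);
 *   if (err != IO_SUCCESS)
 *       panic();
 *
 * 'bl2_mem_layout', BL31_IMAGE_ID and BL31_BASE stand in for whatever memory
 * layout pointer, image identifier and load address the platform port uses.
 */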