/rk3399_ARM-atf/common/bl_common.c
/*
 * Copyright (c) 2013, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <semihosting.h>
#include <bl_common.h>
#include <bl1.h>

/***********************************************************
 * Memory for sharing data while changing exception levels.
 * Only used by the primary core.
 **********************************************************/
unsigned char bl2_el_change_mem_ptr[EL_CHANGE_MEM_SIZE];

unsigned long *get_el_change_mem_ptr(void)
{
	return (unsigned long *) bl2_el_change_mem_ptr;
}

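/*
 * Round 'value' to a 4KB page boundary: up to the next boundary if 'dir'
 * is UP, down to the previous one if 'dir' is DOWN. For example,
 * page_align(0x80001234, UP) yields 0x80002000 and
 * page_align(0x80001234, DOWN) yields 0x80001000; values that are
 * already page-aligned are returned unchanged.
 */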
unsigned long page_align(unsigned long value, unsigned dir)
{
	unsigned long page_size = 1 << FOUR_KB_SHIFT;

	/* Round the value up or down to the next page boundary */
	if (value & (page_size - 1)) {
		value &= ~(page_size - 1);
		if (dir == UP)
			value += page_size;
	}

	return value;
}

static inline unsigned int is_page_aligned(unsigned long addr)
{
	const unsigned long page_size = 1 << FOUR_KB_SHIFT;

	return (addr & (page_size - 1)) == 0;
}

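/*
 * Switch the security state of the lower exception levels by programming
 * the SCR_EL3.NS bit: cleared for SECURE, set for NON_SECURE. SCR_EL3 is
 * only accessible from EL3, so this must be called while running in EL3.
 */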
void change_security_state(unsigned int target_security_state)
{
	unsigned long scr = read_scr();

	if (target_security_state == SECURE)
		scr &= ~SCR_NS_BIT;
	else if (target_security_state == NON_SECURE)
		scr |= SCR_NS_BIT;
	else
		assert(0);

	write_scr(scr);
}

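/*
 * Drop to a lower exception level: program the saved program status and
 * the entry point into SPSR/ELR, then perform an exception return with
 * the AAPCS64 arguments in x0-x7. eret() does not return on success, so
 * the -EINVAL return value is only reached if the transition fails.
 */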
int drop_el(aapcs64_params *args,
	    unsigned long spsr,
	    unsigned long entrypoint)
{
	write_spsr(spsr);
	write_elr(entrypoint);
	eret(args->arg0,
	     args->arg1,
	     args->arg2,
	     args->arg3,
	     args->arg4,
	     args->arg5,
	     args->arg6,
	     args->arg7);
	return -EINVAL;
}

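/*
 * Jump to a higher exception level by issuing an SMC. The AAPCS64
 * arguments are passed in x0-x7 and the result of the call is returned
 * to the caller.
 */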
long raise_el(aapcs64_params *args)
{
	return smc(args->arg0,
		   args->arg1,
		   args->arg2,
		   args->arg3,
		   args->arg4,
		   args->arg5,
		   args->arg6,
		   args->arg7);
}

/*
 * TODO: If we are not in EL3 then currently we only issue an SMC.
 * Add support for dropping into EL0 etc. Consider adding support
 * for switching from S-EL1 to S-EL0/1 etc.
 */
long change_el(el_change_info *info)
{
	unsigned long current_el = read_current_el();

	if (GET_EL(current_el) == MODE_EL3) {
		/*
		 * We can go anywhere from EL3, so find out where.
		 * TODO: Lots to do if we are going non-secure:
		 * flip the NS bit, restore NS registers etc.
		 * Just doing the bare minimum for now.
		 */

		if (info->security_state == NON_SECURE)
			change_security_state(info->security_state);

		return drop_el(&info->args, info->spsr, info->entrypoint);
	} else {
		return raise_el(&info->args);
	}
}

/* TODO: add a parameter for DAIF. Not needed right now. */
unsigned long make_spsr(unsigned long target_el,
			unsigned long target_sp,
			unsigned long target_rw)
{
	unsigned long spsr;

	/* Disable all exceptions & set up the EL */
	spsr = (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
		<< PSR_DAIF_SHIFT;
	spsr |= PSR_MODE(target_rw, target_el, target_sp);

	return spsr;
}
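
/*
 * Illustrative usage (a sketch only; the MODE_* values stand for whatever
 * the platform headers define for the target EL, stack pointer selection
 * and register width, and 'ep_args'/'bl31_entrypoint' are hypothetical):
 *
 *   spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64);
 *   drop_el(&ep_args, spsr, bl31_entrypoint);
 */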

/*******************************************************************************
 * The next two functions are weak definitions. Platform-specific code may
 * override them if it wishes to.
 ******************************************************************************/

/*******************************************************************************
 * Takes the memory layout into which BL31 has been either top- or bottom-
 * loaded and populates bl31_mem_layout to tell BL31 how much memory it has
 * access to and how much is available for use. It does not need the address
 * at which BL31 has been loaded, as BL31 will reclaim all the memory used
 * by BL2.
 * TODO: Revisit whether this and init_bl2_mem_layout can be replaced by a
 * single routine.
 ******************************************************************************/
void init_bl31_mem_layout(const meminfo *bl2_mem_layout,
			  meminfo *bl31_mem_layout,
			  unsigned int load_type)
{
	if (load_type == BOT_LOAD) {
		/*
		 * ------------                             ^
		 * |   BL2    |                             |
		 * |----------|                 ^           |  BL2
		 * |          |                 | BL2 free  |  total
		 * |          |                 |   size    |  size
		 * |----------| BL2 free base   v           |
		 * |   BL31   |                             |
		 * ------------ BL2 total base              v
		 */
		unsigned long bl31_size;

		bl31_mem_layout->free_base = bl2_mem_layout->free_base;

		bl31_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
		bl31_mem_layout->free_size = bl2_mem_layout->total_size - bl31_size;
	} else {
		/*
		 * ------------                             ^
		 * |   BL31   |                             |
		 * |----------|                 ^           |  BL2
		 * |          |                 | BL2 free  |  total
		 * |          |                 |   size    |  size
		 * |----------| BL2 free base   v           |
		 * |   BL2    |                             |
		 * ------------ BL2 total base              v
		 */
		unsigned long bl2_size;

		bl31_mem_layout->free_base = bl2_mem_layout->total_base;

		bl2_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
		bl31_mem_layout->free_size = bl2_mem_layout->free_size + bl2_size;
	}

	bl31_mem_layout->total_base = bl2_mem_layout->total_base;
	bl31_mem_layout->total_size = bl2_mem_layout->total_size;
	bl31_mem_layout->attr = load_type;

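	/*
	 * Flush the updated layout out of the data cache so that the next
	 * boot stage, which may run with caches disabled, sees a coherent
	 * copy.
	 */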
	flush_dcache_range((unsigned long) bl31_mem_layout, sizeof(meminfo));
	return;
}

/*******************************************************************************
 * Takes the memory layout into which BL2 has been either top- or bottom-
 * loaded, along with the address at which BL2 has been loaded. Using this
 * information, it populates bl2_mem_layout to tell BL2 how much memory it
 * has access to and how much is available for use.
 ******************************************************************************/
void init_bl2_mem_layout(meminfo *bl1_mem_layout,
			 meminfo *bl2_mem_layout,
			 unsigned int load_type,
			 unsigned long bl2_base)
{
	unsigned long tmp;

	if (load_type == BOT_LOAD) {
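		/*
		 * BL2's total memory runs from its own base address up to
		 * the top of BL1's free memory.
		 */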
		bl2_mem_layout->total_base = bl2_base;
		tmp = bl1_mem_layout->free_base - bl2_base;
		bl2_mem_layout->total_size = bl1_mem_layout->free_size + tmp;

	} else {
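		/*
		 * BL2's total memory runs from the base of BL1's free
		 * memory up to the top of BL1's total memory.
		 */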
		bl2_mem_layout->total_base = bl1_mem_layout->free_base;
		tmp = bl1_mem_layout->total_base + bl1_mem_layout->total_size;
		bl2_mem_layout->total_size = tmp - bl1_mem_layout->free_base;
	}

	bl2_mem_layout->free_base = bl1_mem_layout->free_base;
	bl2_mem_layout->free_size = bl1_mem_layout->free_size;
	bl2_mem_layout->attr = load_type;

	flush_dcache_range((unsigned long) bl2_mem_layout, sizeof(meminfo));
	return;
}

static void dump_load_info(unsigned long image_load_addr,
			   unsigned long image_size,
			   const meminfo *mem_layout)
{
#if DEBUG
	printf("Trying to load image at address 0x%lx, size = 0x%lx\r\n",
		image_load_addr, image_size);
	printf("Current memory layout:\r\n");
	printf("  total region = [0x%lx, 0x%lx]\r\n", mem_layout->total_base,
			mem_layout->total_base + mem_layout->total_size);
	printf("  free region = [0x%lx, 0x%lx]\r\n", mem_layout->free_base,
			mem_layout->free_base + mem_layout->free_size);
#endif
}

/*******************************************************************************
 * Generic function to load an image into trusted RAM using semihosting,
 * given a name, the extents of free memory and whether the image should be
 * loaded at the bottom or the top of the free memory. It updates the memory
 * layout if the load is successful. Returns the address at which the image
 * has been loaded, or 0 on failure.
 ******************************************************************************/
unsigned long load_image(meminfo *mem_layout,
			 const char *image_name,
			 unsigned int load_type,
			 unsigned long fixed_addr)
{
	unsigned long temp_image_base, image_base;
	long offset;
	int image_flen;

	/* Find the size of the image */
	image_flen = semihosting_get_flen(image_name);
	if (image_flen < 0) {
		printf("ERROR: Cannot access '%s' file (%i).\r\n",
			image_name, image_flen);
		return 0;
	}

	/* See if we have enough space */
	if (image_flen > mem_layout->free_size) {
		printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
			image_name);
		dump_load_info(0, image_flen, mem_layout);
		return 0;
	}

	switch (load_type) {

	case TOP_LOAD:
		/* Load the image at the top of free memory */
		temp_image_base = mem_layout->free_base + mem_layout->free_size;
		temp_image_base -= image_flen;

		/* Page align the base address and check whether the image still fits */
		image_base = page_align(temp_image_base, DOWN);
		assert(image_base <= temp_image_base);

		if (image_base < mem_layout->free_base) {
			printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
				image_name);
			dump_load_info(image_base, image_flen, mem_layout);
			return 0;
		}

		/* Calculate the amount of extra memory used due to alignment */
		offset = temp_image_base - image_base;

		break;

	case BOT_LOAD:
		/* Load the image at the bottom of free memory */
		temp_image_base = mem_layout->free_base;
		image_base = page_align(temp_image_base, UP);
		assert(image_base >= temp_image_base);

		/* Check whether the page-aligned image still fits */
		if (image_base + image_flen >
		    mem_layout->free_base + mem_layout->free_size) {
			printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
				image_name);
			dump_load_info(image_base, image_flen, mem_layout);
			return 0;
		}

		/* Calculate the amount of extra memory used due to alignment */
		offset = image_base - temp_image_base;

		break;

	default:
		assert(0);
		return 0;
	}

	/*
	 * Some images must be loaded at a fixed address, not a dynamic one.
	 *
	 * This has been implemented as a hack on top of the existing dynamic
	 * loading mechanism, for the time being. If the 'fixed_addr' function
	 * argument is non-zero, it forces the load address. So we still have
	 * the principle of top/bottom loading, but the code determining the
	 * load address is bypassed and the load address is forced to the
	 * fixed one.
	 *
	 * This can result in quite a lot of wasted space, because we still
	 * use a single meminfo structure to represent the extents of free
	 * memory, where we should use some sort of linked list.
	 *
	 * E.g. if we want to load BL2 at address 0x04020000, the resulting
	 *      memory layout should look as follows:
	 * ------------ 0x04040000
	 * |          |  <- Free space (1)
	 * |----------|
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 *
	 * But in the current hacky implementation, we need to specify
	 * whether BL2 is loaded at the top or the bottom of the free memory.
	 * E.g. if BL2 is considered top-loaded, the meminfo structure
	 * will give the following view of the memory, hiding the chunk of
	 * free memory above BL2:
	 * ------------ 0x04040000
	 * |          |
	 * |          |
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 */
	if (fixed_addr != 0) {
		/* Load the image at the given address. */
		image_base = fixed_addr;

		/* Check whether the image fits. */
		if ((image_base < mem_layout->free_base) ||
		    (image_base + image_flen >
		       mem_layout->free_base + mem_layout->free_size)) {
			printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
				image_name);
			dump_load_info(image_base, image_flen, mem_layout);
			return 0;
		}

		/* Check whether the fixed load address is page-aligned. */
		if (!is_page_aligned(image_base)) {
			printf("ERROR: Cannot load '%s' file at unaligned address 0x%lx.\r\n",
				image_name, fixed_addr);
			return 0;
		}

		/*
		 * Calculate the amount of extra memory used due to fixed
		 * loading.
		 */
		if (load_type == TOP_LOAD) {
			unsigned long max_addr, space_used;
			/*
			 * ------------ max_addr
			 * | /wasted/ |                 | offset
			 * |..........|..............................
			 * |  image   |                 | image_flen
			 * |----------| fixed_addr
			 * |          |
			 * |          |
			 * ------------ total_base
			 */
			max_addr = mem_layout->total_base + mem_layout->total_size;
			/*
			 * Compute the amount of memory used by the image.
			 * Corresponds to all space above the image load
			 * address.
			 */
			space_used = max_addr - fixed_addr;
			/*
			 * Calculate the amount of wasted memory within the
			 * amount of memory used by the image.
			 */
			offset = space_used - image_flen;
		} else { /* BOT_LOAD */
			/*
			 * ------------
			 * |          |
			 * |          |
			 * |----------|
			 * |  image   |
			 * |..........| fixed_addr
			 * | /wasted/ |                 | offset
			 * ------------ total_base
			 */
			offset = fixed_addr - mem_layout->total_base;
		}
	}


	/* We have enough space so load the image now */
	image_flen = semihosting_download_file(image_name,
					       image_flen,
					       (void *) image_base);
	if (image_flen <= 0) {
		printf("ERROR: Failed to load '%s' file from semihosting (%i).\r\n",
			image_name, image_flen);
		return 0;
	}

	/*
	 * The file has been loaded successfully. Flush the image out of the
	 * data cache so that the next EL can see it, then update the free
	 * memory data structure to account for the space consumed.
	 */
	flush_dcache_range(image_base, image_flen);

	mem_layout->free_size -= image_flen + offset;

	/* Update the base of free memory since it has moved up */
	if (load_type == BOT_LOAD)
		mem_layout->free_base += offset + image_flen;

	return image_base;
}

/*******************************************************************************
 * Run a loaded image from the given entry point. This could result either in
 * dropping into a lower exception level or in jumping to a higher exception
 * level. The only way of doing the latter is through an SMC. In either case,
 * set up the parameters for the EL change request correctly.
 ******************************************************************************/
int run_image(unsigned long entrypoint,
	      unsigned long spsr,
	      unsigned long target_security_state,
	      meminfo *mem_layout,
	      void *data)
{
	el_change_info run_image_info;
	unsigned long current_el = read_current_el();

	/* Tell the next EL what we want done */
	run_image_info.args.arg0 = RUN_IMAGE;
	run_image_info.entrypoint = entrypoint;
	run_image_info.spsr = spsr;
	run_image_info.security_state = target_security_state;
	run_image_info.next = 0;

	/*
	 * If we are in EL3, only an eret can take us to the desired
	 * exception level. Otherwise, for the time being, assume that we
	 * have to jump to a higher EL and issue an SMC. The contents of
	 * argY go into the general purpose register xY, e.g. arg0 -> x0.
	 */
	if (GET_EL(current_el) == MODE_EL3) {
		run_image_info.args.arg1 = (unsigned long) mem_layout;
		run_image_info.args.arg2 = (unsigned long) data;
	} else {
		run_image_info.args.arg1 = entrypoint;
		run_image_info.args.arg2 = spsr;
		run_image_info.args.arg3 = (unsigned long) mem_layout;
		run_image_info.args.arg4 = (unsigned long) data;
	}

	return change_el(&run_image_info);
}