xref: /rk3399_rockchip-uboot/lib/efi_loader/efi_memory.c (revision edcef3ba1d2d5beb92fcd7df253e196e77ba174d)
/*
 *  EFI application memory management
 *
 *  Copyright (c) 2016 Alexander Graf
 *
 *  SPDX-License-Identifier:     GPL-2.0+
 */

#include <common.h>
#include <efi_loader.h>
#include <malloc.h>
#include <asm/global_data.h>
#include <libfdt_env.h>
#include <linux/list_sort.h>
#include <inttypes.h>
#include <watchdog.h>

DECLARE_GLOBAL_DATA_PTR;

struct efi_mem_list {
	struct list_head link;
	struct efi_mem_desc desc;
};

/* This list contains all memory map items */
LIST_HEAD(efi_mem);

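/*
 * Optional bounce buffer for callers that need memory below 4 GiB;
 * the backing 64 MB region is allocated in efi_memory_init().
 */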
#ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER
void *efi_bounce_buffer;
#endif

/*
 * Sorts the memory list from highest address to lowest address
 *
 * When allocating memory we should always start from the highest
 * address chunk, so sort the memory list such that the first list
 * iterator gets the highest address and goes lower from there.
 */
static int efi_mem_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct efi_mem_list *mema = list_entry(a, struct efi_mem_list, link);
	struct efi_mem_list *memb = list_entry(b, struct efi_mem_list, link);

	if (mema->desc.physical_start == memb->desc.physical_start)
		return 0;
	else if (mema->desc.physical_start < memb->desc.physical_start)
		return 1;
	else
		return -1;
}

static void efi_mem_sort(void)
{
	list_sort(NULL, &efi_mem, efi_mem_cmp);
}

/*
 * Unmaps all memory occupied by the carve_desc region from the
 * list entry pointed to by map.
 *
 * Returns 1 if carving was performed or 0 if the regions don't overlap.
 * Returns -1 if it would affect non-RAM regions but overlap_only_ram is set.
 * Carving is only guaranteed to complete when all regions return 0.
 */
static int efi_mem_carve_out(struct efi_mem_list *map,
			     struct efi_mem_desc *carve_desc,
			     bool overlap_only_ram)
{
	struct efi_mem_list *newmap;
	struct efi_mem_desc *map_desc = &map->desc;
	uint64_t map_start = map_desc->physical_start;
	uint64_t map_end = map_start + (map_desc->num_pages << EFI_PAGE_SHIFT);
	uint64_t carve_start = carve_desc->physical_start;
	uint64_t carve_end = carve_start +
			     (carve_desc->num_pages << EFI_PAGE_SHIFT);

	/* check whether we're overlapping */
	if ((carve_end <= map_start) || (carve_start >= map_end))
		return 0;

	/* We're overlapping with non-RAM, warn the caller if desired */
	if (overlap_only_ram && (map_desc->type != EFI_CONVENTIONAL_MEMORY))
		return -1;

	/* Sanitize carve_start and carve_end to lie within our bounds */
	carve_start = max(carve_start, map_start);
	carve_end = min(carve_end, map_end);

	/* Carving at the beginning of our map? Just move it! */
	if (carve_start == map_start) {
		if (map_end == carve_end) {
			/* Full overlap, just remove map */
			list_del(&map->link);
		}

		map_desc->physical_start = carve_end;
		map_desc->num_pages = (map_end - carve_end) >> EFI_PAGE_SHIFT;
		return 1;
	}

	/*
	 * Overlapping maps, just split the list map at carve_start,
	 * it will get moved or removed in the next iteration.
	 *
	 * [ map_desc |__carve_start__| newmap ]
	 */

	/* Create a new map from [ carve_start ... map_end ] */
	newmap = calloc(1, sizeof(*newmap));
	newmap->desc = map->desc;
	newmap->desc.physical_start = carve_start;
	newmap->desc.num_pages = (map_end - carve_start) >> EFI_PAGE_SHIFT;
	list_add_tail(&newmap->link, &efi_mem);

	/* Shrink the map to [ map_start ... carve_start ] */
	map_desc->num_pages = (carve_start - map_start) >> EFI_PAGE_SHIFT;

	return 1;
}

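/*
 * Adds a new memory region to the memory map, carving it out of any
 * existing entries it overlaps. When overlap_only_ram is set, only
 * EFI_CONVENTIONAL_MEMORY may be carved out; overlapping anything else
 * makes the function return 0 instead of the start address.
 */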
uint64_t efi_add_memory_map(uint64_t start, uint64_t pages, int memory_type,
			    bool overlap_only_ram)
{
	struct list_head *lhandle;
	struct efi_mem_list *newlist;
	bool do_carving;

	if (!pages)
		return start;

	newlist = calloc(1, sizeof(*newlist));
	newlist->desc.type = memory_type;
	newlist->desc.physical_start = start;
	newlist->desc.virtual_start = start;
	newlist->desc.num_pages = pages;

	switch (memory_type) {
	case EFI_RUNTIME_SERVICES_CODE:
	case EFI_RUNTIME_SERVICES_DATA:
		newlist->desc.attribute = (1ULL << EFI_MEMORY_WB_SHIFT) |
					  (1ULL << EFI_MEMORY_RUNTIME_SHIFT);
		break;
	case EFI_MMAP_IO:
		newlist->desc.attribute = 1ULL << EFI_MEMORY_RUNTIME_SHIFT;
		break;
	default:
		newlist->desc.attribute = 1ULL << EFI_MEMORY_WB_SHIFT;
		break;
	}

	/* Carve out any overlap with existing map entries first */
	do {
		do_carving = false;
		list_for_each(lhandle, &efi_mem) {
			struct efi_mem_list *lmem;
			int r;

			lmem = list_entry(lhandle, struct efi_mem_list, link);
			r = efi_mem_carve_out(lmem, &newlist->desc,
					      overlap_only_ram);
			if (r < 0) {
				return 0;
			} else if (r) {
				do_carving = true;
				break;
			}
		}
	} while (do_carving);

	/* Add our new map */
	list_add_tail(&newlist->link, &efi_mem);

	/* And make sure memory is listed in descending order */
	efi_mem_sort();

	return start;
}

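/*
 * Finds a free memory region of at least len bytes that ends at or below
 * max_addr. Because the list is sorted in descending order, the first
 * match is also the highest suitable address. Returns 0 if no free
 * conventional memory satisfies the constraints.
 */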
static uint64_t efi_find_free_memory(uint64_t len, uint64_t max_addr)
{
	struct list_head *lhandle;

	list_for_each(lhandle, &efi_mem) {
		struct efi_mem_list *lmem = list_entry(lhandle,
			struct efi_mem_list, link);
		struct efi_mem_desc *desc = &lmem->desc;
		uint64_t desc_len = desc->num_pages << EFI_PAGE_SHIFT;
		uint64_t desc_end = desc->physical_start + desc_len;
		uint64_t curmax = min(max_addr, desc_end);
		uint64_t ret = curmax - len;

		/* We only take memory from free RAM */
		if (desc->type != EFI_CONVENTIONAL_MEMORY)
			continue;

		/* Out of bounds for max_addr */
		if ((ret + len) > max_addr)
			continue;

		/* Out of bounds for upper map limit */
		if ((ret + len) > desc_end)
			continue;

		/* Out of bounds for lower map limit */
		if (ret < desc->physical_start)
			continue;

		/* Return the highest address in this map within bounds */
		return ret;
	}

	return 0;
}

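/*
 * Allocates pages from the EFI memory map. The allocation type selects
 * the strategy: 0 allocates anywhere below the current stack pointer,
 * 1 allocates below the address passed in *memory, and 2 reserves
 * exactly the address in *memory. On success the chosen address is
 * written back to *memory.
 */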
efi_status_t efi_allocate_pages(int type, int memory_type,
				unsigned long pages, uint64_t *memory)
{
	u64 len = pages << EFI_PAGE_SHIFT;
	efi_status_t r = EFI_SUCCESS;
	uint64_t addr;

	switch (type) {
	case 0:
		/* Any page */
		addr = efi_find_free_memory(len, gd->start_addr_sp);
		if (!addr) {
			r = EFI_NOT_FOUND;
			break;
		}
		break;
	case 1:
		/* Max address */
		addr = efi_find_free_memory(len, *memory);
		if (!addr) {
			r = EFI_NOT_FOUND;
			break;
		}
		break;
	case 2:
		/* Exact address, reserve it. The addr is already in *memory. */
		addr = *memory;
		break;
	default:
		/* UEFI doesn't specify other allocation types */
		r = EFI_INVALID_PARAMETER;
		break;
	}

	if (r == EFI_SUCCESS) {
		uint64_t ret;

		/* Reserve that map in our memory maps */
		ret = efi_add_memory_map(addr, pages, memory_type, true);
		if (ret == addr) {
			*memory = addr;
		} else {
			/* Map would overlap, bail out */
			r = EFI_OUT_OF_RESOURCES;
		}
	}

	return r;
}

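/*
 * Convenience wrapper around efi_allocate_pages(): allocates len bytes
 * (rounded up to whole EFI pages) anywhere in memory and returns a
 * pointer, or NULL on failure.
 */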
void *efi_alloc(uint64_t len, int memory_type)
{
	uint64_t ret = 0;
	uint64_t pages = (len + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
	efi_status_t r;

	r = efi_allocate_pages(0, memory_type, pages, &ret);
	if (r == EFI_SUCCESS)
		return (void *)(uintptr_t)ret;

	return NULL;
}

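/*
 * Counterpart to efi_allocate_pages(); currently a stub that never
 * releases memory (see the comment below).
 */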
efi_status_t efi_free_pages(uint64_t memory, unsigned long pages)
{
	/* We don't free; let's cross our fingers we have plenty of RAM */
	return EFI_SUCCESS;
}

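/*
 * Reports the current memory map. Writes the required size to
 * *memory_map_size, returns EFI_BUFFER_TOO_SMALL if the caller's buffer
 * is smaller than that, and otherwise copies the map into memory_map in
 * ascending address order. map_key and descriptor_version are currently
 * left untouched.
 */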
efi_status_t efi_get_memory_map(unsigned long *memory_map_size,
			       struct efi_mem_desc *memory_map,
			       unsigned long *map_key,
			       unsigned long *descriptor_size,
			       uint32_t *descriptor_version)
{
	ulong provided_map_size = *memory_map_size;
	ulong map_size = 0;
	int map_entries = 0;
	struct list_head *lhandle;

	list_for_each(lhandle, &efi_mem)
		map_entries++;

	map_size = map_entries * sizeof(struct efi_mem_desc);

	*memory_map_size = map_size;

	if (descriptor_size)
		*descriptor_size = sizeof(struct efi_mem_desc);

	/* Compare against the caller-provided size, not the one just written */
	if (provided_map_size < map_size)
		return EFI_BUFFER_TOO_SMALL;

	/* Copy list into array */
	if (memory_map) {
		/* Return the list in ascending order */
		memory_map = &memory_map[map_entries - 1];
		list_for_each(lhandle, &efi_mem) {
			struct efi_mem_list *lmem;

			lmem = list_entry(lhandle, struct efi_mem_list, link);
			*memory_map = lmem->desc;
			memory_map--;
		}
	}

	return EFI_SUCCESS;
}

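/*
 * Builds the initial memory map: each DRAM bank as conventional memory,
 * the region U-Boot occupies below ram_top (including a 16 MB stack
 * reservation) as loader data, the runtime services section as runtime
 * code and, if configured, the bounce buffer below 4 GiB. Returns 0 on
 * success, -1 if the bounce buffer allocation fails.
 */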
int efi_memory_init(void)
{
	unsigned long runtime_start, runtime_end, runtime_pages;
	unsigned long uboot_start, uboot_pages;
	unsigned long uboot_stack_size = 16 * 1024 * 1024;
	int i;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		u64 ram_start = gd->bd->bi_dram[i].start;
		u64 ram_size = gd->bd->bi_dram[i].size;
		u64 start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		u64 pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}

	/* Add U-Boot */
	uboot_start = (gd->start_addr_sp - uboot_stack_size) & ~EFI_PAGE_MASK;
	uboot_pages = (gd->ram_top - uboot_start) >> EFI_PAGE_SHIFT;
	efi_add_memory_map(uboot_start, uboot_pages, EFI_LOADER_DATA, false);

	/* Add Runtime Services */
	runtime_start = (ulong)&__efi_runtime_start & ~EFI_PAGE_MASK;
	runtime_end = (ulong)&__efi_runtime_stop;
	runtime_end = (runtime_end + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
	runtime_pages = (runtime_end - runtime_start) >> EFI_PAGE_SHIFT;
	efi_add_memory_map(runtime_start, runtime_pages,
			   EFI_RUNTIME_SERVICES_CODE, false);

#ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER
	/* Request a 32-bit 64MB bounce buffer region */
	uint64_t efi_bounce_buffer_addr = 0xffffffff;

	if (efi_allocate_pages(1, EFI_LOADER_DATA,
			       (64 * 1024 * 1024) >> EFI_PAGE_SHIFT,
			       &efi_bounce_buffer_addr) != EFI_SUCCESS)
		return -1;

	efi_bounce_buffer = (void *)(uintptr_t)efi_bounce_buffer_addr;
#endif

	return 0;
}