// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <linux/memblock.h>
#include <linux/slab.h>

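/*
 * Backing storage for a new EFI memory map comes from memblock while
 * the page allocator is not yet up, and from whole pages via
 * alloc_pages() once it is. efi_memmap_alloc() below picks the variant
 * based on slab_is_available().
 */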
static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

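/**
 * __efi_memmap_free - Free memory backing an EFI memory map
 * @phys: Physical address of the memory map
 * @size: Size in bytes of the memory map
 * @flags: EFI memory map flags describing how the map was allocated
 *
 * Release the backing store with the allocator that matches @flags:
 * memblock for EFI_MEMMAP_MEMBLOCK, the page allocator for
 * EFI_MEMMAP_SLAB. Maps with neither flag set (e.g. the map handed
 * over by the firmware) are left untouched.
 */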
void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
	if (flags & EFI_MEMMAP_MEMBLOCK) {
		if (slab_is_available())
			memblock_free_late(phys, size);
		else
			memblock_free(phys, size);
	} else if (flags & EFI_MEMMAP_SLAB) {
		struct page *p = pfn_to_page(PHYS_PFN(phys));
		unsigned int order = get_order(size);

		free_pages((unsigned long) page_address(p), order);
	}
}

static void __init efi_memmap_free(void)
{
	__efi_memmap_free(efi.memmap.phys_map,
			efi.memmap.desc_size * efi.memmap.nr_map,
			efi.memmap.flags);
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 * @data: efi memmap installation parameters
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used. The physical
 * address of the allocated map is returned in @data->phys_map.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_alloc(unsigned int num_entries,
		struct efi_memory_map_data *data)
{
	/* Expect allocation parameters are zero initialized */
	WARN_ON(data->phys_map || data->size);

	data->size = num_entries * efi.memmap.desc_size;
	data->desc_version = efi.memmap.desc_version;
	data->desc_size = efi.memmap.desc_size;
	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;

	if (slab_is_available()) {
		data->flags |= EFI_MEMMAP_SLAB;
		data->phys_map = __efi_memmap_alloc_late(data->size);
	} else {
		data->flags |= EFI_MEMMAP_MEMBLOCK;
		data->phys_map = __efi_memmap_alloc_early(data->size);
	}

	if (!data->phys_map)
		return -ENOMEM;
	return 0;
}

/**
 * __efi_memmap_init - Common code for mapping the EFI memory map
 * @data: EFI memory map data
 *
 * This function takes care of figuring out which function to use to
 * map the EFI memory map in efi.memmap based on how far into the boot
 * we are.
 *
 * During bootup, EFI_MEMMAP_LATE in data->flags should be clear since we
 * only have access to the early_memremap*() functions as the vmalloc
 * space isn't set up yet. Once the kernel is fully booted we can fall
 * back to the more robust memremap*() API.
 *
 * Returns zero on success, a negative error code on failure.
 */
static int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
	struct efi_memory_map map;
	phys_addr_t phys_map;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	phys_map = data->phys_map;

	if (data->flags & EFI_MEMMAP_LATE)
		map.map = memremap(phys_map, data->size, MEMREMAP_WB);
	else
		map.map = early_memremap(phys_map, data->size);

	if (!map.map) {
		pr_err("Could not map the memory map!\n");
		return -ENOMEM;
	}

	/* NOP if (data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) == 0 */
	efi_memmap_free();

	map.phys_map = data->phys_map;
	map.nr_map = data->size / data->desc_size;
	map.map_end = map.map + data->size;

	map.desc_version = data->desc_version;
	map.desc_size = data->desc_size;
	map.flags = data->flags;

	set_bit(EFI_MEMMAP, &efi.flags);

	efi.memmap = map;

	return 0;
}

/**
 * efi_memmap_init_early - Map the EFI memory map data structure
 * @data: EFI memory map data
 *
 * Use early_memremap() to map the passed in EFI memory map and assign
 * it to efi.memmap.
 */
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
	/* Cannot go backwards */
	WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);

	data->flags = 0;
	return __efi_memmap_init(data);
}

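/**
 * efi_memmap_unmap - Unmap the currently installed EFI memory map
 *
 * Tear down the virtual mapping of efi.memmap using the API that
 * created it: early_memunmap() for an early mapping, memunmap() for a
 * late one. The EFI_MEMMAP bit in efi.flags is cleared afterwards.
 */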
void __init efi_memmap_unmap(void)
{
	if (!efi_enabled(EFI_MEMMAP))
		return;

	if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
		unsigned long size;

		size = efi.memmap.desc_size * efi.memmap.nr_map;
		early_memunmap(efi.memmap.map, size);
	} else {
		memunmap(efi.memmap.map);
	}

	efi.memmap.map = NULL;
	clear_bit(EFI_MEMMAP, &efi.flags);
}

/**
 * efi_memmap_init_late - Map efi.memmap with memremap()
 * @addr: Physical address of the new EFI memory map
 * @size: Size in bytes of the new EFI memory map
 *
 * Set up a mapping of the EFI memory map using memremap(). This
 * function should only be called once the vmalloc space has been
 * set up and is therefore not suitable for calling during early EFI
 * initialisation, e.g. in efi_init(). Additionally, it expects
 * efi_memmap_init_early() to have already been called.
 *
 * The reason there are two EFI memmap initialisation routines
 * (efi_memmap_init_early() and this late version) is that the
 * early EFI memmap should be explicitly unmapped once EFI
 * initialisation is complete, as the fixmap space used to map the EFI
 * memmap (via early_memremap()) is a scarce resource.
 *
 * This late mapping is intended to persist for the duration of
 * runtime so that things like efi_mem_desc_lookup() and
 * efi_mem_attributes() always work.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
{
	struct efi_memory_map_data data = {
		.phys_map = addr,
		.size = size,
		.flags = EFI_MEMMAP_LATE,
	};

	/* Did we forget to unmap the early EFI memmap? */
	WARN_ON(efi.memmap.map);

	/* Were we already called? */
	WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);

	/*
	 * It makes no sense to allow callers to register different
	 * values for the following fields. Copy them out of the
	 * existing early EFI memmap.
	 */
	data.desc_version = efi.memmap.desc_version;
	data.desc_size = efi.memmap.desc_size;

	return __efi_memmap_init(&data);
}

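/*
 * Illustrative sketch of the intended boot-time sequence; the actual
 * call sites live in architecture code and the local names below are
 * hypothetical:
 *
 *	struct efi_memory_map_data data = {
 *		.phys_map	= efi_memmap_phys,
 *		.size		= map_size,
 *		.desc_size	= desc_size,
 *		.desc_version	= desc_version,
 *	};
 *
 *	efi_memmap_init_early(&data);	// early_memremap() based mapping
 *	// ... parse descriptors, reserve regions, etc. ...
 *	efi_memmap_unmap();		// release the scarce early mapping
 *	// ... once the vmalloc space is up ...
 *	efi_memmap_init_late(efi_memmap_phys, map_size);
 */
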
/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @data: map allocation parameters (address, size, flags)
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
	efi_memmap_unmap();

	return __efi_memmap_init(data);
}

/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* modifying range */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}

	return count;
}

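/*
 * Example: for a descriptor covering [0x0000, 0x4fff] (five EFI pages)
 * and a range of [0x1000, 0x2fff], the descriptor has to be split into
 * three parts ([0x0000, 0x0fff], [0x1000, 0x2fff], [0x3000, 0x4fff]),
 * so two additional memmap entries are needed and 2 is returned.
 */
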
/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}

379