xref: /OK3568_Linux_fs/kernel/arch/arm64/mm/init.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/rk-dma-heap.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory;
 * otherwise it is empty.
 *
 * Memory reservation for the crash kernel is either done early or deferred,
 * depending on the DMA memory zone configs (ZONE_DMA) --
 *
 * In the absence of ZONE_DMA configs, arm64_dma_phys_limit is initialized
 * here instead of in max_zone_phys(). This allows early reservation of
 * crash kernel memory, which has a dependency on arm64_dma_phys_limit.
 * Reserving memory early for the crash kernel allows linear creation of block
 * mappings (greater than page-granularity) for all the memory bank ranges.
 * In this scheme a comparatively quicker boot is observed.
 *
 * If ZONE_DMA configs are defined, crash kernel memory reservation
 * is delayed until the DMA zone memory range sizes have been initialized in
 * zone_sizes_init(). The deferral is necessary to steer clear of the DMA zone
 * memory range and avoid overlapping allocations. As a result, the crash
 * kernel memory boundaries are not known when all the bank memory ranges are
 * mapped, so the crash kernel range cannot be excluded from block mappings;
 * page-granularity mappings are therefore created for the entire memory range.
 * Hence a slightly slower boot is observed.
 *
 * Note: Page-granularity mappings are necessary for the crash kernel memory
 * range so its size can be shrunk via the /sys/kernel/kexec_crash_size
 * interface.
 */
#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
phys_addr_t __ro_after_init arm64_dma_phys_limit;
#else
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif

/*
 * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
 * CONFIG_ZONE_DMA32.
 */
static bool disable_dma32 __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump capture
 * kernel when the primary kernel crashes.
 */
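/*
 * Command-line examples (illustrative values, not taken from this tree):
 * "crashkernel=256M" requests a 2MB-aligned 256MB region allocated below
 * arm64_dma_phys_limit, while "crashkernel=256M@0x60000000" requests a
 * fixed base, which must itself be 2MB aligned.
 */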
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}
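
/*
 * Illustrative /chosen property consumed above (example values, as kexec
 * tooling might write them; not taken from this tree):
 *
 *	linux,elfcorehdr = <0x0 0x9fe00000 0x0 0x10000>;
 */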

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for a zone accessible within the given
 * bits limit. If DRAM starts above the 32-bit boundary, expand the zone to
 * the maximum available memory; otherwise cap it at 32 bits.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
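
/*
 * Worked example (illustrative): with zone_bits == 30 and DRAM starting at
 * physical 0x0, zone_mask is 0x3fffffff and the zone ends at 1GB (or at the
 * end of DRAM, whichever is lower). If DRAM starts between 1GB and 4GB, the
 * cap widens to 4GB; if DRAM starts above 4GB, the zone covers all available
 * memory, so it is never left empty.
 */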

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = disable_dma32 ? 0 : PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}

static int __init early_disable_dma32(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		disable_dma32 = true;

	return 0;
}
early_param("disable_dma32", early_disable_dma32);
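
/*
 * Usage (illustrative): booting with "disable_dma32=on" leaves
 * max_zone_pfns[ZONE_DMA32] at zero in zone_sizes_init(), effectively
 * emptying ZONE_DMA32 without a Kconfig change.
 */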

int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__pfn_to_section(pfn)))
		return 0;

	/*
	 * ZONE_DEVICE memory does not have memblock entries, so a
	 * memblock_is_map_memory() check for ZONE_DEVICE based addresses
	 * will always fail. Normal hotplugged memory, likewise, never has
	 * the MEMBLOCK_NOMAP flag set in its memblock entries. Skip the
	 * memblock search for all non-early memory sections; this covers
	 * all hotplug memory, both normal and ZONE_DEVICE based.
	 */
	if (!early_section(__pfn_to_section(pfn)))
		return pfn_section_valid(__pfn_to_section(pfn), pfn);
#endif
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
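
/*
 * Usage (illustrative): "mem=2G" caps usable memory at 2GB; memparse()
 * accepts the usual K/M/G suffixes, and the result is truncated to a page
 * boundary by the PAGE_MASK above.
 */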

static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}
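
/*
 * Illustrative /chosen property handled above (example values, as a crash
 * dump kernel's bootloader might set them; not taken from this tree):
 *
 *	linux,usable-memory-range = <0x0 0x80000000 0x0 0x10000000>;
 */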

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = BIT(vabits_actual - 1);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
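	/*
	 * A sketch of the arithmetic, assuming the usual definition
	 * _PAGE_OFFSET(va) == -(UL(1) << (va)): the adjustment below works
	 * out to 2^52 - 2^48 == 0x000f000000000000.
	 */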
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if removing it would
		 * make the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}
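
	/*
	 * A sketch of the math above: memstart_offset_seed is a 16-bit
	 * value, so (range * memstart_offset_seed) >> 16 selects one of
	 * roughly `range` slots, and the linear map slides down by that
	 * many ARM64_MEMSTART_ALIGN units.
	 */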

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	reserve_elfcorehdr();

	if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
		reserve_crashkernel();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arm64_numa_init();

	/*
	 * This must be done after arm64_numa_init(), which calls numa_init()
	 * to initialize node_online_map; hugetlb_cma_reserve() uses that map
	 * while allocating the required CMA size across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();
	zone_sizes_init(min, max);

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);
	rk_dma_heap_cma_setup();

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
		reserve_crashkernel();

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}