/* arch/microblaze/mm/init.c */
/*
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Used for both MMU and noMMU builds because of generic PCI code */
int mem_init_done;

#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
#endif /* CONFIG_MMU */

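/* End of the kernel image; setup_memory() below uses it to size the kernel reservation. */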
char *klimit = _end;

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);

static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
}

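/*
 * Hand every page above the lowmem boundary (max_low_pfn..max_pfn) to the
 * page allocator as highmem, skipping ranges that memblock has reserved.
 */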
static void highmem_setup(void)
{
	unsigned long pfn;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* FIXME not sure about */
		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
			free_highmem_page(page);
	}
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
	int idx;

	/* Setup fixmaps */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);
#endif

	/* Clear every zone's size */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We don't have holes in the memory map */
	free_area_init(zones_size);
}

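/*
 * On noMMU, locate the main memory bank and reserve the kernel image; then
 * compute the pfn limits and let paging_init() set up the zones.
 */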
void __init setup_memory(void)
{
#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;
	phys_addr_t start, end;
	u64 i;

	/* Find the main memory bank that contains the kernel */
	for_each_mem_range(i, &start, &end) {
		memory_start = start;
		lowmem_size = end - start;
		if ((memory_start <= (u32)_text) &&
			((u32)_text <= (memory_start + lowmem_size - 1))) {
			memory_size = lowmem_size;
			PAGE_OFFSET = memory_start;
			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
				__func__, (u32) memory_start,
					(u32) memory_size);
			break;
		}
	}

	if (!memory_start || !memory_size) {
		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
			__func__, (u32) memory_start, (u32) memory_size);
	}

	/* Reserve the region occupied by the kernel */
	kernel_align_start = PAGE_DOWN((u32)_text);
	/* The ALIGN could be removed because _end in vmlinux.lds.S is already aligned */
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start, kernel_align_start
			+ kernel_align_size, kernel_align_size);
	memblock_reserve(kernel_align_start, kernel_align_size);
#endif
	/*
	 * Kernel:
	 * start: base phys address of kernel - page aligned
	 * end: end phys address of kernel - page aligned
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn
	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
	 */

	/* memory start is from the kernel end (aligned) to higher addr */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	/* RAM is assumed contiguous */
	max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	paging_init();
}

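/* Release all free memblock memory to the page allocator and finish highmem setup. */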
void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* this will put all memory onto the freelists */
	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_print_info(NULL);
	mem_init_done = 1;
}

#ifndef CONFIG_MMU
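/* Without an MMU, reuse the address range check to decide whether a pfn is backed by RAM. */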
int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what MMU_init will do.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for mem= option on command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB.  For user access, zone 1 is used,
	 * for kernel access, zone 0 is used.  We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE.  This also allows
	 * kernel access as indicated in the PTE.
	 */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}

/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */

/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find main memory where the kernel is */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup(); /* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		unsigned long size;
		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend vmalloc and ioremap area as big as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize the context management stuff */
	mmu_context_init();

	/* Shortly after that, the entire linear mapping will be available */
	/*
	 * This also ensures that the unflattened device tree is allocated
	 * inside the 768MB lowmem limit.
	 */
	memblock_set_current_limit(memory_start + lowmem_size - 1);

	parse_early_param();

	/* CMA initialization */
	dma_contiguous_reserve(memory_start + lowmem_size - 1);
}

/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
	/*
	 * The upper limit is memory_start + kernel_tlb, because that is
	 * all the memory mapped by head.S at this point.
	 */
	return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
				MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb,
				NUMA_NO_NODE);
}

#endif /* CONFIG_MMU */

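/*
 * Zeroed allocation helper usable both before and after mem_init():
 * kzalloc() once the slab allocator is up, memblock during early boot.
 */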
void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done) {
		p = kzalloc(size, mask);
	} else {
		p = memblock_alloc(size, SMP_CACHE_BYTES);
		if (!p)
			panic("%s: Failed to allocate %zu bytes\n",
			      __func__, size);
	}

	return p;
}