// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"

#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
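
/*
 * As an illustration (just the macro above expanded by hand),
 * DEFINE_POPULATE(pud_populate, pud, pmd, init) produces:
 *
 *	static inline void pud_populate_init(struct mm_struct *mm,
 *			pud_t *arg1, pmd_t *arg2, bool init)
 *	{
 *		if (init)
 *			pud_populate_safe(mm, arg1, arg2);
 *		else
 *			pud_populate(mm, arg1, arg2);
 *	}
 *
 * i.e. each *_init() helper selects the *_safe() variant while the early
 * (init) page tables are being built, and the plain variant afterwards.
 */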

#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)
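
/*
 * Likewise, DEFINE_ENTRY(pmd, pmd, init) expands to:
 *
 *	static inline void set_pmd_init(pmd_t *arg1, pmd_t arg2, bool init)
 *	{
 *		if (init)
 *			set_pmd_safe(arg1, arg2);
 *		else
 *			set_pmd(arg1, arg2);
 *	}
 */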


/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical memory, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
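
/*
 * Example: booting with "noexec32=off" on the kernel command line makes
 * PROT_READ imply PROT_EXEC again for 32-bit processes.
 */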

static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, so we need to
		 * handle synchronization at the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d)
				       != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
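
/*
 * Which helper above applies depends on the paging mode: with 5-level
 * paging the kernel mappings are shared at the PGD level, so new entries
 * are copied into every pgd on pgd_list; with 4-level paging the pgd is
 * folded into the p4d (pgd_none() is always false), so the copy happens
 * one level down, at the p4d level.
 */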

/*
 * When memory is added, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD-level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * memblock_alloc(). It's safe to do so ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}
242*4882a593Smuzhiyun 
fill_p4d(pgd_t * pgd,unsigned long vaddr)243*4882a593Smuzhiyun static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
244*4882a593Smuzhiyun {
245*4882a593Smuzhiyun 	if (pgd_none(*pgd)) {
246*4882a593Smuzhiyun 		p4d_t *p4d = (p4d_t *)spp_getpage();
247*4882a593Smuzhiyun 		pgd_populate(&init_mm, pgd, p4d);
248*4882a593Smuzhiyun 		if (p4d != p4d_offset(pgd, 0))
249*4882a593Smuzhiyun 			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
250*4882a593Smuzhiyun 			       p4d, p4d_offset(pgd, 0));
251*4882a593Smuzhiyun 	}
252*4882a593Smuzhiyun 	return p4d_offset(pgd, vaddr);
253*4882a593Smuzhiyun }
254*4882a593Smuzhiyun 
fill_pud(p4d_t * p4d,unsigned long vaddr)255*4882a593Smuzhiyun static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun 	if (p4d_none(*p4d)) {
258*4882a593Smuzhiyun 		pud_t *pud = (pud_t *)spp_getpage();
259*4882a593Smuzhiyun 		p4d_populate(&init_mm, p4d, pud);
260*4882a593Smuzhiyun 		if (pud != pud_offset(p4d, 0))
261*4882a593Smuzhiyun 			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
262*4882a593Smuzhiyun 			       pud, pud_offset(p4d, 0));
263*4882a593Smuzhiyun 	}
264*4882a593Smuzhiyun 	return pud_offset(p4d, vaddr);
265*4882a593Smuzhiyun }
266*4882a593Smuzhiyun 
fill_pmd(pud_t * pud,unsigned long vaddr)267*4882a593Smuzhiyun static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun 	if (pud_none(*pud)) {
270*4882a593Smuzhiyun 		pmd_t *pmd = (pmd_t *) spp_getpage();
271*4882a593Smuzhiyun 		pud_populate(&init_mm, pud, pmd);
272*4882a593Smuzhiyun 		if (pmd != pmd_offset(pud, 0))
273*4882a593Smuzhiyun 			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
274*4882a593Smuzhiyun 			       pmd, pmd_offset(pud, 0));
275*4882a593Smuzhiyun 	}
276*4882a593Smuzhiyun 	return pmd_offset(pud, vaddr);
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
fill_pte(pmd_t * pmd,unsigned long vaddr)279*4882a593Smuzhiyun static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun 	if (pmd_none(*pmd)) {
282*4882a593Smuzhiyun 		pte_t *pte = (pte_t *) spp_getpage();
283*4882a593Smuzhiyun 		pmd_populate_kernel(&init_mm, pmd, pte);
284*4882a593Smuzhiyun 		if (pte != pte_offset_kernel(pmd, 0))
285*4882a593Smuzhiyun 			printk(KERN_ERR "PAGETABLE BUG #03!\n");
286*4882a593Smuzhiyun 	}
287*4882a593Smuzhiyun 	return pte_offset_kernel(pmd, vaddr);
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun 
__set_pte_vaddr(pud_t * pud,unsigned long vaddr,pte_t new_pte)290*4882a593Smuzhiyun static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun 	pmd_t *pmd = fill_pmd(pud, vaddr);
293*4882a593Smuzhiyun 	pte_t *pte = fill_pte(pmd, vaddr);
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	set_pte(pte, new_pte);
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	/*
298*4882a593Smuzhiyun 	 * It's enough to flush this one mapping.
299*4882a593Smuzhiyun 	 * (PGE mappings get flushed as well)
300*4882a593Smuzhiyun 	 */
301*4882a593Smuzhiyun 	flush_tlb_one_kernel(vaddr);
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun 
set_pte_vaddr_p4d(p4d_t * p4d_page,unsigned long vaddr,pte_t new_pte)304*4882a593Smuzhiyun void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun 	p4d_t *p4d = p4d_page + p4d_index(vaddr);
307*4882a593Smuzhiyun 	pud_t *pud = fill_pud(p4d, vaddr);
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	__set_pte_vaddr(pud, vaddr, new_pte);
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun 
set_pte_vaddr_pud(pud_t * pud_page,unsigned long vaddr,pte_t new_pte)312*4882a593Smuzhiyun void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
313*4882a593Smuzhiyun {
314*4882a593Smuzhiyun 	pud_t *pud = pud_page + pud_index(vaddr);
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	__set_pte_vaddr(pud, vaddr, new_pte);
317*4882a593Smuzhiyun }
318*4882a593Smuzhiyun 
set_pte_vaddr(unsigned long vaddr,pte_t pteval)319*4882a593Smuzhiyun void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun 	pgd_t *pgd;
322*4882a593Smuzhiyun 	p4d_t *p4d_page;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	pgd = pgd_offset_k(vaddr);
327*4882a593Smuzhiyun 	if (pgd_none(*pgd)) {
328*4882a593Smuzhiyun 		printk(KERN_ERR
329*4882a593Smuzhiyun 			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
330*4882a593Smuzhiyun 		return;
331*4882a593Smuzhiyun 	}
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	p4d_page = p4d_offset(pgd, 0);
334*4882a593Smuzhiyun 	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun 
populate_extra_pmd(unsigned long vaddr)337*4882a593Smuzhiyun pmd_t * __init populate_extra_pmd(unsigned long vaddr)
338*4882a593Smuzhiyun {
339*4882a593Smuzhiyun 	pgd_t *pgd;
340*4882a593Smuzhiyun 	p4d_t *p4d;
341*4882a593Smuzhiyun 	pud_t *pud;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	pgd = pgd_offset_k(vaddr);
344*4882a593Smuzhiyun 	p4d = fill_p4d(pgd, vaddr);
345*4882a593Smuzhiyun 	pud = fill_pud(p4d, vaddr);
346*4882a593Smuzhiyun 	return fill_pmd(pud, vaddr);
347*4882a593Smuzhiyun }
348*4882a593Smuzhiyun 
populate_extra_pte(unsigned long vaddr)349*4882a593Smuzhiyun pte_t * __init populate_extra_pte(unsigned long vaddr)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun 	pmd_t *pmd;
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	pmd = populate_extra_pmd(vaddr);
354*4882a593Smuzhiyun 	return fill_pte(pmd, vaddr);
355*4882a593Smuzhiyun }

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		protval_4k_2_large(cachemode2protval(cache));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}
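
/*
 * Aside (based on callers elsewhere in the tree, not guaranteed by this
 * file): the SGI/HPE UV platform code is the typical user of these two
 * wrappers, mapping its MMR ranges early with fixed 2MB pages.
 */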

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE level page table mappings for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pte_init(pte, __pte(0), init);
			continue;
		}

		/*
		 * We will reuse the existing mapping.
		 * Xen, for example, has some special requirements, like
		 * mapping pagetable pages as RO. So assume that whoever
		 * pre-set up these mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mappings for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pmd_init(pmd, __pmd(0), init);
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot,
							   init);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are OK with a PG_LEVEL_2M mapping, then we
			 * will use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * keep the same existing protection bits except for
			 * the large-page bit, so that we don't violate Intel's
			 * TLB Application note (317080), which says that while
			 * changing the page sizes, new and old translations
			 * should not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte_init((pte_t *)pmd,
				     pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					     __pgprot(pgprot_val(prot) | _PAGE_PSE)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mappings for physical addresses. The virtual
 * and physical addresses do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = _prot;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pud_init(pud, __pud(0), init);
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot, init);
				continue;
			}
			/*
			 * If we are OK with a PG_LEVEL_1G mapping, then we
			 * will use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but
			 * keep the same existing protection bits except for
			 * the large-page bit, so that we don't violate Intel's
			 * TLB Application note (317080), which says that while
			 * changing the page sizes, new and old translations
			 * should not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);

			prot = __pgprot(pgprot_val(prot) | _PAGE_PSE);

			set_pte_init((pte_t *)pud,
				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					     prot),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_init(&init_mm, pud, pmd, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr);
	vaddr_end = (unsigned long)__va(paddr_end);

	if (!pgtable_l5_enabled())
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
				     page_size_mask, prot, init);

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		p4d_t *p4d = p4d_page + p4d_index(vaddr);
		pud_t *pud;

		vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
		paddr = __pa(vaddr);

		if (paddr >= paddr_end) {
			paddr_next = __pa(vaddr_next);
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_p4d_init(p4d, __p4d(0), init);
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
					page_size_mask, prot, init);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate_init(&init_mm, p4d, pud, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	return paddr_last;
}

static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
			       pgprot_t prot, bool init)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask,
						   prot, init);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled())
			pgd_populate_init(&init_mm, pgd, p4d, init);
		else
			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
					  (pud_t *) p4d, init);

		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	return paddr_last;
}


/*
 * Create page table mappings for physical memory at specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned at PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}
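
/*
 * A typical caller (an illustration, not a contract of this interface):
 * init_memory_mapping() in arch/x86/mm/init.c splits a physical range into
 * naturally aligned chunks, derives page_size_mask from CPU features such
 * as PSE and 1GB page support, and invokes kernel_physical_mapping_init()
 * for each chunk.
 */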

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * functions when updating the mapping. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_init();

	/*
	 * Clear the default setting with node 0.
	 * Note: don't use nodes_clear() here; when NUMA support is not
	 * compiled in it really clears the state, and a later
	 * node_set_state() will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}
862*4882a593Smuzhiyun 
arch_add_memory(int nid,u64 start,u64 size,struct mhp_params * params)863*4882a593Smuzhiyun int arch_add_memory(int nid, u64 start, u64 size,
864*4882a593Smuzhiyun 		    struct mhp_params *params)
865*4882a593Smuzhiyun {
866*4882a593Smuzhiyun 	unsigned long start_pfn = start >> PAGE_SHIFT;
867*4882a593Smuzhiyun 	unsigned long nr_pages = size >> PAGE_SHIFT;
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	init_memory_mapping(start, start + size, params->pgprot);
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	return add_pages(nid, start_pfn, nr_pages, params);
872*4882a593Smuzhiyun }
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun #define PAGE_INUSE 0xFD
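
/*
 * PAGE_INUSE is the poison value written over the unused part of a vmemmap
 * page that cannot be freed yet (see remove_pte_table() below): once the
 * whole page reads back as 0xFD, it no longer backs any live page structs
 * and the page itself can be freed.
 */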

static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}
895*4882a593Smuzhiyun 
free_hugepage_table(struct page * page,struct vmem_altmap * altmap)896*4882a593Smuzhiyun static void __meminit free_hugepage_table(struct page *page,
897*4882a593Smuzhiyun 		struct vmem_altmap *altmap)
898*4882a593Smuzhiyun {
899*4882a593Smuzhiyun 	if (altmap)
900*4882a593Smuzhiyun 		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
901*4882a593Smuzhiyun 	else
902*4882a593Smuzhiyun 		free_pagetable(page, get_order(PMD_SIZE));
903*4882a593Smuzhiyun }
904*4882a593Smuzhiyun 
free_pte_table(pte_t * pte_start,pmd_t * pmd)905*4882a593Smuzhiyun static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
906*4882a593Smuzhiyun {
907*4882a593Smuzhiyun 	pte_t *pte;
908*4882a593Smuzhiyun 	int i;
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PTE; i++) {
911*4882a593Smuzhiyun 		pte = pte_start + i;
912*4882a593Smuzhiyun 		if (!pte_none(*pte))
913*4882a593Smuzhiyun 			return;
914*4882a593Smuzhiyun 	}
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	/* free a pte talbe */
917*4882a593Smuzhiyun 	free_pagetable(pmd_page(*pmd), 0);
918*4882a593Smuzhiyun 	spin_lock(&init_mm.page_table_lock);
919*4882a593Smuzhiyun 	pmd_clear(pmd);
920*4882a593Smuzhiyun 	spin_unlock(&init_mm.page_table_lock);
921*4882a593Smuzhiyun }
922*4882a593Smuzhiyun 
free_pmd_table(pmd_t * pmd_start,pud_t * pud)923*4882a593Smuzhiyun static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
924*4882a593Smuzhiyun {
925*4882a593Smuzhiyun 	pmd_t *pmd;
926*4882a593Smuzhiyun 	int i;
927*4882a593Smuzhiyun 
928*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PMD; i++) {
929*4882a593Smuzhiyun 		pmd = pmd_start + i;
930*4882a593Smuzhiyun 		if (!pmd_none(*pmd))
931*4882a593Smuzhiyun 			return;
932*4882a593Smuzhiyun 	}
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 	/* free a pmd talbe */
935*4882a593Smuzhiyun 	free_pagetable(pud_page(*pud), 0);
936*4882a593Smuzhiyun 	spin_lock(&init_mm.page_table_lock);
937*4882a593Smuzhiyun 	pud_clear(pud);
938*4882a593Smuzhiyun 	spin_unlock(&init_mm.page_table_lock);
939*4882a593Smuzhiyun }
940*4882a593Smuzhiyun 
free_pud_table(pud_t * pud_start,p4d_t * p4d)941*4882a593Smuzhiyun static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
942*4882a593Smuzhiyun {
943*4882a593Smuzhiyun 	pud_t *pud;
944*4882a593Smuzhiyun 	int i;
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PUD; i++) {
947*4882a593Smuzhiyun 		pud = pud_start + i;
948*4882a593Smuzhiyun 		if (!pud_none(*pud))
949*4882a593Smuzhiyun 			return;
950*4882a593Smuzhiyun 	}
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	/* free a pud talbe */
953*4882a593Smuzhiyun 	free_pagetable(p4d_page(*p4d), 0);
954*4882a593Smuzhiyun 	spin_lock(&init_mm.page_table_lock);
955*4882a593Smuzhiyun 	p4d_clear(p4d);
956*4882a593Smuzhiyun 	spin_unlock(&init_mm.page_table_lock);
957*4882a593Smuzhiyun }

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or are simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mappings, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused
			 * page_structs with 0xFD, and remove the page when it
			 * is wholly filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun static void __meminit
1142*4882a593Smuzhiyun remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
1143*4882a593Smuzhiyun 		 struct vmem_altmap *altmap, bool direct)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	unsigned long next, pages = 0;
1146*4882a593Smuzhiyun 	pud_t *pud_base;
1147*4882a593Smuzhiyun 	p4d_t *p4d;
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	p4d = p4d_start + p4d_index(addr);
1150*4882a593Smuzhiyun 	for (; addr < end; addr = next, p4d++) {
1151*4882a593Smuzhiyun 		next = p4d_addr_end(addr, end);
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 		if (!p4d_present(*p4d))
1154*4882a593Smuzhiyun 			continue;
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 		BUILD_BUG_ON(p4d_large(*p4d));
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 		pud_base = pud_offset(p4d, 0);
1159*4882a593Smuzhiyun 		remove_pud_table(pud_base, addr, next, altmap, direct);
1160*4882a593Smuzhiyun 		/*
1161*4882a593Smuzhiyun 		 * For 4-level page tables we do not want to free PUDs, but in the
1162*4882a593Smuzhiyun 		 * 5-level case we should free them. This code will have to change
1163*4882a593Smuzhiyun 		 * to adapt for boot-time switching between 4 and 5 level page tables.
1164*4882a593Smuzhiyun 		 */
1165*4882a593Smuzhiyun 		if (pgtable_l5_enabled())
1166*4882a593Smuzhiyun 			free_pud_table(pud_base, p4d);
1167*4882a593Smuzhiyun 	}
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	if (direct)
1170*4882a593Smuzhiyun 		update_page_count(PG_LEVEL_512G, -pages);
1171*4882a593Smuzhiyun }
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun /* start and end are both virtual addresses. */
1174*4882a593Smuzhiyun static void __meminit
1175*4882a593Smuzhiyun remove_pagetable(unsigned long start, unsigned long end, bool direct,
1176*4882a593Smuzhiyun 		struct vmem_altmap *altmap)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun 	unsigned long next;
1179*4882a593Smuzhiyun 	unsigned long addr;
1180*4882a593Smuzhiyun 	pgd_t *pgd;
1181*4882a593Smuzhiyun 	p4d_t *p4d;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	for (addr = start; addr < end; addr = next) {
1184*4882a593Smuzhiyun 		next = pgd_addr_end(addr, end);
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 		pgd = pgd_offset_k(addr);
1187*4882a593Smuzhiyun 		if (!pgd_present(*pgd))
1188*4882a593Smuzhiyun 			continue;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 		p4d = p4d_offset(pgd, 0);
1191*4882a593Smuzhiyun 		remove_p4d_table(p4d, addr, next, altmap, direct);
1192*4882a593Smuzhiyun 	}
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	flush_tlb_all();
1195*4882a593Smuzhiyun }
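/*
 * Usage note (sketch): the callers below exercise both modes of
 * remove_pagetable(). vmemmap_free() passes direct=false to tear down
 * vmemmap mappings, while kernel_physical_mapping_remove() passes
 * direct=true so the PG_LEVEL_* counts of the direct mapping are
 * adjusted as the tables are removed.
 */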
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun void __ref vmemmap_free(unsigned long start, unsigned long end,
1198*4882a593Smuzhiyun 		struct vmem_altmap *altmap)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun 	remove_pagetable(start, end, false, altmap);
1201*4882a593Smuzhiyun }
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun static void __meminit
1204*4882a593Smuzhiyun kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun 	start = (unsigned long)__va(start);
1207*4882a593Smuzhiyun 	end = (unsigned long)__va(end);
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	remove_pagetable(start, end, true, NULL);
1210*4882a593Smuzhiyun }
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun void __ref arch_remove_memory(int nid, u64 start, u64 size,
1213*4882a593Smuzhiyun 			      struct vmem_altmap *altmap)
1214*4882a593Smuzhiyun {
1215*4882a593Smuzhiyun 	unsigned long start_pfn = start >> PAGE_SHIFT;
1216*4882a593Smuzhiyun 	unsigned long nr_pages = size >> PAGE_SHIFT;
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	__remove_pages(start_pfn, nr_pages, altmap);
1219*4882a593Smuzhiyun 	kernel_physical_mapping_remove(start, start + size);
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun #endif /* CONFIG_MEMORY_HOTPLUG */
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun static struct kcore_list kcore_vsyscall;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun static void __init register_page_bootmem_info(void)
1226*4882a593Smuzhiyun {
1227*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1228*4882a593Smuzhiyun 	int i;
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	for_each_online_node(i)
1231*4882a593Smuzhiyun 		register_page_bootmem_info_node(NODE_DATA(i));
1232*4882a593Smuzhiyun #endif
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun /*
1236*4882a593Smuzhiyun  * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
1237*4882a593Smuzhiyun  * Only the level which needs to be synchronized between all page-tables is
1238*4882a593Smuzhiyun  * allocated because the synchronization can be expensive.
1239*4882a593Smuzhiyun  */
1240*4882a593Smuzhiyun static void __init preallocate_vmalloc_pages(void)
1241*4882a593Smuzhiyun {
1242*4882a593Smuzhiyun 	unsigned long addr;
1243*4882a593Smuzhiyun 	const char *lvl;
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
1246*4882a593Smuzhiyun 		pgd_t *pgd = pgd_offset_k(addr);
1247*4882a593Smuzhiyun 		p4d_t *p4d;
1248*4882a593Smuzhiyun 		pud_t *pud;
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 		lvl = "p4d";
1251*4882a593Smuzhiyun 		p4d = p4d_alloc(&init_mm, pgd, addr);
1252*4882a593Smuzhiyun 		if (!p4d)
1253*4882a593Smuzhiyun 			goto failed;
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 		if (pgtable_l5_enabled())
1256*4882a593Smuzhiyun 			continue;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 		/*
1259*4882a593Smuzhiyun 		 * The goal here is to allocate all possibly required
1260*4882a593Smuzhiyun 		 * hardware page tables pointed to by the top hardware
1261*4882a593Smuzhiyun 		 * level.
1262*4882a593Smuzhiyun 		 *
1263*4882a593Smuzhiyun 		 * On 4-level systems, the P4D layer is folded away and
1264*4882a593Smuzhiyun 		 * the above code does no preallocation.  Below, go down
1265*4882a593Smuzhiyun 		 * to the pud _software_ level to ensure the second
1266*4882a593Smuzhiyun 		 * hardware level is allocated on 4-level systems too.
1267*4882a593Smuzhiyun 		 */
1268*4882a593Smuzhiyun 		lvl = "pud";
1269*4882a593Smuzhiyun 		pud = pud_alloc(&init_mm, p4d, addr);
1270*4882a593Smuzhiyun 		if (!pud)
1271*4882a593Smuzhiyun 			goto failed;
1272*4882a593Smuzhiyun 	}
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	return;
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun failed:
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	/*
1279*4882a593Smuzhiyun 	 * The pages have to be there now or they will be missing in
1280*4882a593Smuzhiyun 	 * process page-tables later.
1281*4882a593Smuzhiyun 	 */
1282*4882a593Smuzhiyun 	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
1283*4882a593Smuzhiyun }
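/*
 * Sketch of what the pre-allocation avoids (illustrative, hypothetical
 * helper name): if a top-level vmalloc entry were created lazily after
 * other page tables had been cloned from init_mm, it would have to be
 * propagated to each of them, e.g.:
 *
 *	for_each_process(p)
 *		sync_top_level_entry(p->mm, addr);	/* hypothetical */
 *
 * Allocating the top level once at boot makes such synchronization
 * unnecessary.
 */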
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun void __init mem_init(void)
1286*4882a593Smuzhiyun {
1287*4882a593Smuzhiyun 	pci_iommu_alloc();
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	/* clear_bss() already cleared the empty_zero_page */
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	/* this will put all memory onto the freelists */
1292*4882a593Smuzhiyun 	memblock_free_all();
1293*4882a593Smuzhiyun 	after_bootmem = 1;
1294*4882a593Smuzhiyun 	x86_init.hyper.init_after_bootmem();
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	/*
1297*4882a593Smuzhiyun 	 * Must be done after boot memory is put on freelist, because here we
1298*4882a593Smuzhiyun 	 * might set fields in deferred struct pages that have not yet been
1299*4882a593Smuzhiyun 	 * initialized, and memblock_free_all() initializes all the reserved
1300*4882a593Smuzhiyun 	 * deferred pages for us.
1301*4882a593Smuzhiyun 	 */
1302*4882a593Smuzhiyun 	register_page_bootmem_info();
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	/* Register memory areas for /proc/kcore */
1305*4882a593Smuzhiyun 	if (get_gate_vma(&init_mm))
1306*4882a593Smuzhiyun 		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	preallocate_vmalloc_pages();
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	mem_init_print_info(NULL);
1311*4882a593Smuzhiyun }
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1314*4882a593Smuzhiyun int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
1315*4882a593Smuzhiyun {
1316*4882a593Smuzhiyun 	/*
1317*4882a593Smuzhiyun 	 * More CPUs always led to greater speedups on tested systems, up to
1318*4882a593Smuzhiyun 	 * all the nodes' CPUs.  Use all since the system is otherwise idle
1319*4882a593Smuzhiyun 	 * now.
1320*4882a593Smuzhiyun 	 */
1321*4882a593Smuzhiyun 	return max_t(int, cpumask_weight(node_cpumask), 1);
1322*4882a593Smuzhiyun }
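/*
 * Worked example (illustrative): for a node whose cpumask has 16
 * online CPUs this returns 16, one deferred-init thread per CPU; for
 * an empty cpumask the max_t() clamp still yields 1, so progress is
 * always possible.
 */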
1323*4882a593Smuzhiyun #endif
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun int kernel_set_to_readonly;
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun void mark_rodata_ro(void)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun 	unsigned long start = PFN_ALIGN(_text);
1330*4882a593Smuzhiyun 	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
1331*4882a593Smuzhiyun 	unsigned long end = (unsigned long)__end_rodata_hpage_align;
1332*4882a593Smuzhiyun 	unsigned long text_end = PFN_ALIGN(_etext);
1333*4882a593Smuzhiyun 	unsigned long rodata_end = PFN_ALIGN(__end_rodata);
1334*4882a593Smuzhiyun 	unsigned long all_end;
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1337*4882a593Smuzhiyun 	       (end - start) >> 10);
1338*4882a593Smuzhiyun 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	kernel_set_to_readonly = 1;
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	/*
1343*4882a593Smuzhiyun 	 * The rodata/data/bss/brk section (but not the kernel text!)
1344*4882a593Smuzhiyun 	 * should also be made non-executable.
1345*4882a593Smuzhiyun 	 *
1346*4882a593Smuzhiyun 	 * We align all_end to PMD_SIZE because the existing mapping
1347*4882a593Smuzhiyun 	 * is a full PMD. If we aligned _brk_end to PAGE_SIZE instead, we
1348*4882a593Smuzhiyun 	 * would split the PMD and the remainder between _brk_end and the
1349*4882a593Smuzhiyun 	 * end of the PMD would remain mapped executable.
1350*4882a593Smuzhiyun 	 *
1351*4882a593Smuzhiyun 	 * Any PMD which was setup after the one which covers _brk_end
1352*4882a593Smuzhiyun 	 * has been zapped already via cleanup_highmem().
1353*4882a593Smuzhiyun 	 */
1354*4882a593Smuzhiyun 	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1355*4882a593Smuzhiyun 	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
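	/*
	 * Worked example (hypothetical address): with PMD_SIZE = 2M and
	 * _brk_end = 0xffffffff82345000, roundup() yields
	 * all_end = 0xffffffff82400000, so the whole tail of the last 2M
	 * mapping is covered by the set_memory_nx() call above.
	 */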
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	set_ftrace_ops_ro();
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun #ifdef CONFIG_CPA_DEBUG
1360*4882a593Smuzhiyun 	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
1361*4882a593Smuzhiyun 	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	printk(KERN_INFO "Testing CPA: again\n");
1364*4882a593Smuzhiyun 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
1365*4882a593Smuzhiyun #endif
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	free_kernel_image_pages("unused kernel image (text/rodata gap)",
1368*4882a593Smuzhiyun 				(void *)text_end, (void *)rodata_start);
1369*4882a593Smuzhiyun 	free_kernel_image_pages("unused kernel image (rodata/data gap)",
1370*4882a593Smuzhiyun 				(void *)rodata_end, (void *)_sdata);
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	debug_checkwx();
1373*4882a593Smuzhiyun }
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun int kern_addr_valid(unsigned long addr)
1376*4882a593Smuzhiyun {
1377*4882a593Smuzhiyun 	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
1378*4882a593Smuzhiyun 	pgd_t *pgd;
1379*4882a593Smuzhiyun 	p4d_t *p4d;
1380*4882a593Smuzhiyun 	pud_t *pud;
1381*4882a593Smuzhiyun 	pmd_t *pmd;
1382*4882a593Smuzhiyun 	pte_t *pte;
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	if (above != 0 && above != -1UL)
1385*4882a593Smuzhiyun 		return 0;
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	pgd = pgd_offset_k(addr);
1388*4882a593Smuzhiyun 	if (pgd_none(*pgd))
1389*4882a593Smuzhiyun 		return 0;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, addr);
1392*4882a593Smuzhiyun 	if (!p4d_present(*p4d))
1393*4882a593Smuzhiyun 		return 0;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	pud = pud_offset(p4d, addr);
1396*4882a593Smuzhiyun 	if (!pud_present(*pud))
1397*4882a593Smuzhiyun 		return 0;
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	if (pud_large(*pud))
1400*4882a593Smuzhiyun 		return pfn_valid(pud_pfn(*pud));
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	pmd = pmd_offset(pud, addr);
1403*4882a593Smuzhiyun 	if (!pmd_present(*pmd))
1404*4882a593Smuzhiyun 		return 0;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	if (pmd_large(*pmd))
1407*4882a593Smuzhiyun 		return pfn_valid(pmd_pfn(*pmd));
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	pte = pte_offset_kernel(pmd, addr);
1410*4882a593Smuzhiyun 	if (pte_none(*pte))
1411*4882a593Smuzhiyun 		return 0;
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	return pfn_valid(pte_pfn(*pte));
1414*4882a593Smuzhiyun }
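/*
 * Usage sketch (hypothetical caller): /proc/kcore-style readers can
 * probe a candidate address before dereferencing it, since the walk
 * above only succeeds for addresses backed by a present leaf entry:
 *
 *	if (kern_addr_valid(addr))
 *		memcpy(buf, (void *)addr, len);	/* hypothetical copy-out */
 */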
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun /*
1417*4882a593Smuzhiyun  * Block size is the minimum amount of memory which can be hotplugged or
1418*4882a593Smuzhiyun  * hotremoved. It must be a power of two and must be equal to or larger than
1419*4882a593Smuzhiyun  * MIN_MEMORY_BLOCK_SIZE.
1420*4882a593Smuzhiyun  */
1421*4882a593Smuzhiyun #define MAX_BLOCK_SIZE (2UL << 30)
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun /* Amount of RAM needed to start using large blocks */
1424*4882a593Smuzhiyun #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun /* Adjustable memory block size */
1427*4882a593Smuzhiyun static unsigned long set_memory_block_size;
1428*4882a593Smuzhiyun int __init set_memory_block_size_order(unsigned int order)
1429*4882a593Smuzhiyun {
1430*4882a593Smuzhiyun 	unsigned long size = 1UL << order;
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
1433*4882a593Smuzhiyun 		return -EINVAL;
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	set_memory_block_size = size;
1436*4882a593Smuzhiyun 	return 0;
1437*4882a593Smuzhiyun }
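/*
 * Worked example (assuming MIN_MEMORY_BLOCK_SIZE is 128M on x86_64):
 * set_memory_block_size_order(30) installs 1G blocks, while order 26
 * (64M) and order 37 (128G) fall outside [MIN_MEMORY_BLOCK_SIZE,
 * MEM_SIZE_FOR_LARGE_BLOCK] and are rejected with -EINVAL.
 */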
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun static unsigned long probe_memory_block_size(void)
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun 	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
1442*4882a593Smuzhiyun 	unsigned long bz;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	/* If memory block size has been set, then use it */
1445*4882a593Smuzhiyun 	bz = set_memory_block_size;
1446*4882a593Smuzhiyun 	if (bz)
1447*4882a593Smuzhiyun 		goto done;
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	/* Use the regular block size if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
1450*4882a593Smuzhiyun 	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
1451*4882a593Smuzhiyun 		bz = MIN_MEMORY_BLOCK_SIZE;
1452*4882a593Smuzhiyun 		goto done;
1453*4882a593Smuzhiyun 	}
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	/*
1456*4882a593Smuzhiyun 	 * Use max block size to minimize overhead on bare metal, where
1457*4882a593Smuzhiyun 	 * alignment for memory hotplug isn't a concern.
1458*4882a593Smuzhiyun 	 */
1459*4882a593Smuzhiyun 	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1460*4882a593Smuzhiyun 		bz = MAX_BLOCK_SIZE;
1461*4882a593Smuzhiyun 		goto done;
1462*4882a593Smuzhiyun 	}
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	/* Find the largest allowed block size that aligns to memory end */
1465*4882a593Smuzhiyun 	for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
1466*4882a593Smuzhiyun 		if (IS_ALIGNED(boot_mem_end, bz))
1467*4882a593Smuzhiyun 			break;
1468*4882a593Smuzhiyun 	}
1469*4882a593Smuzhiyun done:
1470*4882a593Smuzhiyun 	pr_info("x86/mm: Memory block size: %luMB\n", bz >> 20);
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	return bz;
1473*4882a593Smuzhiyun }
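/*
 * Worked example (illustrative): in a guest whose RAM ends at 65G,
 * 65G is not 2G-aligned, so the loop halves bz until IS_ALIGNED()
 * succeeds at 1G. On bare metal the same layout simply gets
 * MAX_BLOCK_SIZE (2G), since hotplug alignment is not a concern there.
 */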
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun static unsigned long memory_block_size_probed;
1476*4882a593Smuzhiyun unsigned long memory_block_size_bytes(void)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun 	if (!memory_block_size_probed)
1479*4882a593Smuzhiyun 		memory_block_size_probed = probe_memory_block_size();
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	return memory_block_size_probed;
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun #ifdef CONFIG_SPARSEMEM_VMEMMAP
1485*4882a593Smuzhiyun /*
1486*4882a593Smuzhiyun  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1487*4882a593Smuzhiyun  */
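/*
 * Sizing note (assuming sizeof(struct page) == 64): one 2M PMD of
 * vmemmap holds 2M / 64 = 32768 struct pages, which describe 128M of
 * physical memory - exactly one sparsemem section - so each section's
 * memmap can be backed by a single huge page.
 */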
1488*4882a593Smuzhiyun static long __meminitdata addr_start, addr_end;
1489*4882a593Smuzhiyun static void __meminitdata *p_start, *p_end;
1490*4882a593Smuzhiyun static int __meminitdata node_start;
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun static int __meminit vmemmap_populate_hugepages(unsigned long start,
1493*4882a593Smuzhiyun 		unsigned long end, int node, struct vmem_altmap *altmap)
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun 	unsigned long addr;
1496*4882a593Smuzhiyun 	unsigned long next;
1497*4882a593Smuzhiyun 	pgd_t *pgd;
1498*4882a593Smuzhiyun 	p4d_t *p4d;
1499*4882a593Smuzhiyun 	pud_t *pud;
1500*4882a593Smuzhiyun 	pmd_t *pmd;
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	for (addr = start; addr < end; addr = next) {
1503*4882a593Smuzhiyun 		next = pmd_addr_end(addr, end);
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 		pgd = vmemmap_pgd_populate(addr, node);
1506*4882a593Smuzhiyun 		if (!pgd)
1507*4882a593Smuzhiyun 			return -ENOMEM;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 		p4d = vmemmap_p4d_populate(pgd, addr, node);
1510*4882a593Smuzhiyun 		if (!p4d)
1511*4882a593Smuzhiyun 			return -ENOMEM;
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 		pud = vmemmap_pud_populate(p4d, addr, node);
1514*4882a593Smuzhiyun 		if (!pud)
1515*4882a593Smuzhiyun 			return -ENOMEM;
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 		pmd = pmd_offset(pud, addr);
1518*4882a593Smuzhiyun 		if (pmd_none(*pmd)) {
1519*4882a593Smuzhiyun 			void *p;
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
1522*4882a593Smuzhiyun 			if (p) {
1523*4882a593Smuzhiyun 				pte_t entry;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1526*4882a593Smuzhiyun 						PAGE_KERNEL_LARGE);
1527*4882a593Smuzhiyun 				set_pmd(pmd, __pmd(pte_val(entry)));
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 				/* check to see if we have contiguous blocks */
1530*4882a593Smuzhiyun 				if (p_end != p || node_start != node) {
1531*4882a593Smuzhiyun 					if (p_start)
1532*4882a593Smuzhiyun 						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1533*4882a593Smuzhiyun 						       addr_start, addr_end-1, p_start, p_end-1, node_start);
1534*4882a593Smuzhiyun 					addr_start = addr;
1535*4882a593Smuzhiyun 					node_start = node;
1536*4882a593Smuzhiyun 					p_start = p;
1537*4882a593Smuzhiyun 				}
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 				addr_end = addr + PMD_SIZE;
1540*4882a593Smuzhiyun 				p_end = p + PMD_SIZE;
1541*4882a593Smuzhiyun 				continue;
1542*4882a593Smuzhiyun 			} else if (altmap)
1543*4882a593Smuzhiyun 				return -ENOMEM; /* no fallback */
1544*4882a593Smuzhiyun 		} else if (pmd_large(*pmd)) {
1545*4882a593Smuzhiyun 			vmemmap_verify((pte_t *)pmd, node, addr, next);
1546*4882a593Smuzhiyun 			continue;
1547*4882a593Smuzhiyun 		}
1548*4882a593Smuzhiyun 		if (vmemmap_populate_basepages(addr, next, node, NULL))
1549*4882a593Smuzhiyun 			return -ENOMEM;
1550*4882a593Smuzhiyun 	}
1551*4882a593Smuzhiyun 	return 0;
1552*4882a593Smuzhiyun }
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1555*4882a593Smuzhiyun 		struct vmem_altmap *altmap)
1556*4882a593Smuzhiyun {
1557*4882a593Smuzhiyun 	int err;
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
1560*4882a593Smuzhiyun 		err = vmemmap_populate_basepages(start, end, node, NULL);
1561*4882a593Smuzhiyun 	else if (boot_cpu_has(X86_FEATURE_PSE))
1562*4882a593Smuzhiyun 		err = vmemmap_populate_hugepages(start, end, node, altmap);
1563*4882a593Smuzhiyun 	else if (altmap) {
1564*4882a593Smuzhiyun 		pr_err_once("%s: no cpu support for altmap allocations\n",
1565*4882a593Smuzhiyun 				__func__);
1566*4882a593Smuzhiyun 		err = -ENOMEM;
1567*4882a593Smuzhiyun 	} else
1568*4882a593Smuzhiyun 		err = vmemmap_populate_basepages(start, end, node, NULL);
1569*4882a593Smuzhiyun 	if (!err)
1570*4882a593Smuzhiyun 		sync_global_pgds(start, end - 1);
1571*4882a593Smuzhiyun 	return err;
1572*4882a593Smuzhiyun }
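/*
 * Worked example (assuming 64-byte struct page and 128M sections):
 * PAGES_PER_SECTION * sizeof(struct page) = 32768 * 64 = 2M, so
 * populating a full section's vmemmap takes the huge-page path when
 * PSE is available, while smaller sub-section hot-adds fall back to
 * base pages.
 */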
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
1575*4882a593Smuzhiyun void register_page_bootmem_memmap(unsigned long section_nr,
1576*4882a593Smuzhiyun 				  struct page *start_page, unsigned long nr_pages)
1577*4882a593Smuzhiyun {
1578*4882a593Smuzhiyun 	unsigned long addr = (unsigned long)start_page;
1579*4882a593Smuzhiyun 	unsigned long end = (unsigned long)(start_page + nr_pages);
1580*4882a593Smuzhiyun 	unsigned long next;
1581*4882a593Smuzhiyun 	pgd_t *pgd;
1582*4882a593Smuzhiyun 	p4d_t *p4d;
1583*4882a593Smuzhiyun 	pud_t *pud;
1584*4882a593Smuzhiyun 	pmd_t *pmd;
1585*4882a593Smuzhiyun 	unsigned int nr_pmd_pages;
1586*4882a593Smuzhiyun 	struct page *page;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	for (; addr < end; addr = next) {
1589*4882a593Smuzhiyun 		pte_t *pte = NULL;
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 		pgd = pgd_offset_k(addr);
1592*4882a593Smuzhiyun 		if (pgd_none(*pgd)) {
1593*4882a593Smuzhiyun 			next = (addr + PAGE_SIZE) & PAGE_MASK;
1594*4882a593Smuzhiyun 			continue;
1595*4882a593Smuzhiyun 		}
1596*4882a593Smuzhiyun 		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 		p4d = p4d_offset(pgd, addr);
1599*4882a593Smuzhiyun 		if (p4d_none(*p4d)) {
1600*4882a593Smuzhiyun 			next = (addr + PAGE_SIZE) & PAGE_MASK;
1601*4882a593Smuzhiyun 			continue;
1602*4882a593Smuzhiyun 		}
1603*4882a593Smuzhiyun 		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 		pud = pud_offset(p4d, addr);
1606*4882a593Smuzhiyun 		if (pud_none(*pud)) {
1607*4882a593Smuzhiyun 			next = (addr + PAGE_SIZE) & PAGE_MASK;
1608*4882a593Smuzhiyun 			continue;
1609*4882a593Smuzhiyun 		}
1610*4882a593Smuzhiyun 		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 		if (!boot_cpu_has(X86_FEATURE_PSE)) {
1613*4882a593Smuzhiyun 			next = (addr + PAGE_SIZE) & PAGE_MASK;
1614*4882a593Smuzhiyun 			pmd = pmd_offset(pud, addr);
1615*4882a593Smuzhiyun 			if (pmd_none(*pmd))
1616*4882a593Smuzhiyun 				continue;
1617*4882a593Smuzhiyun 			get_page_bootmem(section_nr, pmd_page(*pmd),
1618*4882a593Smuzhiyun 					 MIX_SECTION_INFO);
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 			pte = pte_offset_kernel(pmd, addr);
1621*4882a593Smuzhiyun 			if (pte_none(*pte))
1622*4882a593Smuzhiyun 				continue;
1623*4882a593Smuzhiyun 			get_page_bootmem(section_nr, pte_page(*pte),
1624*4882a593Smuzhiyun 					 SECTION_INFO);
1625*4882a593Smuzhiyun 		} else {
1626*4882a593Smuzhiyun 			next = pmd_addr_end(addr, end);
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 			pmd = pmd_offset(pud, addr);
1629*4882a593Smuzhiyun 			if (pmd_none(*pmd))
1630*4882a593Smuzhiyun 				continue;
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 			nr_pmd_pages = 1 << get_order(PMD_SIZE);
1633*4882a593Smuzhiyun 			page = pmd_page(*pmd);
1634*4882a593Smuzhiyun 			while (nr_pmd_pages--)
1635*4882a593Smuzhiyun 				get_page_bootmem(section_nr, page++,
1636*4882a593Smuzhiyun 						 SECTION_INFO);
1637*4882a593Smuzhiyun 		}
1638*4882a593Smuzhiyun 	}
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun #endif
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun void __meminit vmemmap_populate_print_last(void)
1643*4882a593Smuzhiyun {
1644*4882a593Smuzhiyun 	if (p_start) {
1645*4882a593Smuzhiyun 		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1646*4882a593Smuzhiyun 			addr_start, addr_end-1, p_start, p_end-1, node_start);
1647*4882a593Smuzhiyun 		p_start = NULL;
1648*4882a593Smuzhiyun 		p_end = NULL;
1649*4882a593Smuzhiyun 		node_start = 0;
1650*4882a593Smuzhiyun 	}
1651*4882a593Smuzhiyun }
1652*4882a593Smuzhiyun #endif
1653