#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
 * for the details.  Note, __early_ioremap() used during early boot-time
 * takes pgprot_t (pte encoding) and does not use these tables.
 *
 *   Index into __cachemode2pte_tbl[] is the cachemode.
 *
 *   Index into __pte2cachemode_tbl[] are the caching attribute bits of the
 *   pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
 */
static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};

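/*
 * Translate a cache mode into the PTE cache-attribute bits that encode it.
 * _PAGE_CACHE_MODE_WB is 0 and its table entry is 0, so it gets a fast path
 * without a table lookup.
 */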
unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
EXPORT_SYMBOL(cachemode2protval);

static uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};

/*
 * Check that the write-protect PAT entry is set for write-protect.
 * To do this without making assumptions how PAT has been set up (Xen has
 * another layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache
 * mode via the __cachemode2pte_tbl[] into protection bits (those protection
 * bits will select a cache mode of WP or better), and then translate the
 * protection bits back into the cache mode using __pte2cm_idx() and the
 * __pte2cachemode_tbl[] array. This will return the really used cache mode.
 */
bool x86_has_pat_wp(void)
{
	uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];

	return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
}

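/*
 * Reverse translation: recover the cache mode encoded by the cache-attribute
 * bits (_PAGE_PWT/_PAGE_PCD/_PAGE_PAT) of a pgprot_t.
 */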
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
 * CONFIG_ZONE_DMA32.
 */
static bool disable_dma32 __ro_after_init;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret = 0;

		if (min_pfn_mapped < max_pfn_mapped) {
			ret = memblock_find_in_range(
					min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		}
		if (ret)
			memblock_reserve(ret, PAGE_SIZE * num);
		else if (can_use_brk_pgt)
			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));

		if (!ret)
			panic("alloc_low_pages: can not alloc memory");

		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/*
 * By default we need 3 4k pages for the initial PMD_SIZE mapping and 3 4k
 * pages for the 0-ISA_END_ADDRESS range. With KASLR memory randomization,
 * depending on the machine's e820 memory map and the PUD alignment, we may
 * need twice as many pages.
 */
#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT      6
#else
#define INIT_PGD_PAGE_COUNT      12
#endif
#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
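/*
 * Carve the initial page-table buffer out of the brk area reserved above and
 * initialize the pgt_buf_* bookkeeping used by alloc_low_pages().
 */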
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. Invoked on the boot CPU.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static void __init probe_page_size_mask(void)
{
	/*
	 * For pagealloc debugging, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
	else
		direct_gbpages = 0;

	/* Enable PSE if available */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* By default, everything is supported: */
	__default_kernel_pte_mask = __supported_pte_mask;
	/* Except when with PTI where the kernel is mostly non-Global: */
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

static void setup_pcid(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	if (!boot_cpu_has(X86_FEATURE_PCID))
		return;

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		/*
		 * This can't be cr4_set_bits_and_update_boot() -- the
		 * trampoline code can't handle CR4.PCIDE and it wouldn't
		 * do any good anyway.  Despite the name,
		 * cr4_set_bits_and_update_boot() doesn't actually cause
		 * the bits in question to remain set all the way through
		 * the secondary boot asm.
		 *
		 * Instead, we brute-force it and set CR4.PCIDE manually in
		 * start_secondary().
		 */
		cr4_set_bits(X86_CR4_PCIDE);

		/*
		 * INVPCID's single-context modes (2/3) only work if we set
		 * X86_CR4_PCIDE *and* we have INVPCID support.  It's unusable
		 * on systems that have X86_CR4_PCIDE clear, or that have
		 * no INVPCID support at all.
		 */
		if (boot_cpu_has(X86_FEATURE_INVPCID))
			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
	} else {
		/*
		 * flush_tlb_all(), as currently implemented, won't work if
		 * PCID is on but PGE is not.  Since that combination
		 * doesn't exist on real hardware, there's no reason to try
		 * to fully support it, but it's polite to avoid corrupting
		 * data if we're on an improperly configured VM.
		 */
		setup_clear_cpu_cap(X86_FEATURE_PCID);
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

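/*
 * Record a [start_pfn, end_pfn) range and its page size mask in mr[] and
 * return the updated number of ranges.
 */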
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask of small ranges so that they can use a big page
 * size instead of a small one if the surrounding memory is RAM too.
 */
static void __ref adjust_range_page_size_mask(struct map_range *mr,
					      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

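/* Return a human-readable name for the largest page size in a range's mask. */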
static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;
	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

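/*
 * Split [start, end) into ranges that can be mapped with the largest page
 * size allowed by page_size_mask: an unaligned head and tail mapped with 4k
 * pages, 2M-aligned pieces, and (on 64-bit) 1G-aligned pieces.
 */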
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head if not big page alignment ? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask &
				   ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail is not big page (1G) alignment */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail is not big page (2M) alignment */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
			 mr[i].start, mr[i].end - 1,
			 page_size_string(&mr[i]));

	return nr_range;
}

struct range pfn_mapped[E820_MAX_ENTRIES];
int nr_pfn_mapped;

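/*
 * Record that [start_pfn, end_pfn) has been added to the direct mapping and
 * update the max_pfn_mapped/max_low_pfn_mapped watermarks.
 */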
static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

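/* Return true if [start_pfn, end_pfn) lies entirely within one mapped range. */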
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __ref init_memory_mapping(unsigned long start,
					unsigned long end, pgprot_t prot)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		 start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask,
						   prot);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_TYPE_RAM and E820_TYPE_RESERVED_KERN regions. We cannot
 * simply create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range would have holes in the middle or at the ends, and only the
 * RAM parts will be mapped in init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * If it overlaps with the brk pgt, we need to
		 * alloc the pgt buf from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				  min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end, PAGE_KERNEL);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Initial mapped size is PMD_SIZE (2M).
	 * We can not set step_size to be PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
	 * needs to be taken into consideration by the code below.
	 */
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in top-down. That said, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory in top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							     last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in bottom-up. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) in bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}

/*
 * The real mode trampoline, which is required for bootstrapping CPUs,
 * occupies only a small area under the low 1MB. See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled the first PGD entry of the direct mapping is copied
 * to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PUD which covers the low 1MB
 * area. This limits the randomization granularity to 1GB for both 4-level
 * and 5-level paging.
 */
static void __init init_trampoline(void)
{
#ifdef CONFIG_X86_64
	if (!kaslr_memory_enabled())
		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
	else
		init_trampoline_kaslr();
#endif
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	pti_check_boottime_disable();
	probe_page_size_mask();
	setup_pcid();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel be mapped
		 * as soon as possible. And then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	x86_init.hyper.init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * Initialize an mm_struct to be used during poking and a pointer to be used
 * during patching.
 */
void __init poking_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	poking_mm = copy_init_mm();
	BUG_ON(!poking_mm);

	/*
	 * Randomize the poking address, but make sure that the following page
	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
	 * and adjust the address if the PMD ends after the first one.
	 */
	poking_addr = TASK_UNMAPPED_BASE;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
			(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

	if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		poking_addr += PAGE_SIZE;

	/*
	 * We need to trigger the allocation of the page-tables that will be
	 * needed for poking now. Later, poking may be performed in an atomic
	 * section, which might cause allocation to fail.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
	BUG_ON(!ptep);
	pte_unmap_unlock(ptep, ptl);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Since they map the entire memory range, the whole range
 * must be allowed (for mapping), but any areas that would otherwise be
 * disallowed are flagged as being "zero filled" instead of rejected.
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
			      IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
			!= REGION_DISJOINT) {
		/*
		 * For disallowed memory regions in the low 1MB range,
		 * request that the page be shown as all zeros.
		 */
		if (pagenr < 256)
			return 2;

		return 0;
	}

	/*
	 * This must follow RAM test, since System RAM is considered a
	 * restricted resource under CONFIG_STRICT_IOMEM.
	 */
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
		/* Low 1MB bypasses iomem restrictions. */
		if (pagenr < 256)
			return 1;

		return 0;
	}

	return 1;
}

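/*
 * Free the [begin, end) range of init memory: either unmap it (when
 * debug_pagealloc is enabled) or hand it back to the page allocator.
 */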
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		/*
		 * Inform kmemleak about the hole in the memory since the
		 * corresponding pages will be unmapped.
		 */
		kmemleak_free_part((void *)begin, end - begin);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above, now that
		 * we are going to free part of that, we need to make that
		 * writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}

/*
 * begin/end can be in the direct map or the "high kernel mapping"
 * used for the kernel image only. free_init_pages() will do the
 * right thing for either kind of address.
 */
void free_kernel_image_pages(const char *what, void *begin, void *end)
{
	unsigned long begin_ul = (unsigned long)begin;
	unsigned long end_ul = (unsigned long)end;
	unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;

	free_init_pages(what, begin_ul, end_ul);

	/*
	 * PTI maps some of the kernel into userspace. For performance,
	 * this includes some kernel areas that do not contain secrets.
	 * Those areas might be adjacent to the parts of the kernel image
	 * being freed, which may contain secrets. Remove the "high kernel
	 * image mapping" for these freed areas, ensuring they are not even
	 * potentially vulnerable to Meltdown regardless of the specific
	 * optimizations PTI is currently using.
	 *
	 * The "noalias" prevents unmapping the direct map alias which is
	 * needed to access the freed pages.
	 *
	 * This is only valid for 64bit kernels. 32bit has only one mapping
	 * which can't be treated in this way for obvious reasons.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
		set_memory_np_noalias(begin_ul, len_pages);
}

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	mem_encrypt_free_decrypted_mem();

	free_kernel_image_pages("unused kernel image (initmem)",
				&__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be aligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end.
	 * The end partial page was already reserved in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so here we can safely PAGE_ALIGN() to get that partial page freed.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

/*
 * Calculate the precise size of the DMA zone (first 16 MB of RAM),
 * and pass it to the MM layer - to help it set zone watermarks more
 * accurately.
 *
 * Done on 64-bit systems only for the time being, although 32-bit systems
 * might benefit from this as well.
 */
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
	u64 nr_pages = 0, nr_free_pages = 0;
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	int i;
	u64 u;

	/*
	 * Iterate over all memory ranges (free and reserved ones alike),
	 * to calculate the total number of pages in the first 16 MB of RAM:
	 */
	nr_pages = 0;
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min(start_pfn, MAX_DMA_PFN);
		end_pfn   = min(end_pfn,   MAX_DMA_PFN);

		nr_pages += end_pfn - start_pfn;
	}

	/*
	 * Iterate over free memory ranges to calculate the number of free
	 * pages in the DMA zone, while not counting potential partial
	 * pages at the beginning or the end of the range:
	 */
	nr_free_pages = 0;
	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
		end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

		if (start_pfn < end_pfn)
			nr_free_pages += end_pfn - start_pfn;
	}

	set_dma_reserve(nr_pages - nr_free_pages);
#endif
}

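/* Compute the highest PFN of each zone and hand the limits to free_area_init(). */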
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= disable_dma32 ? 0 : min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init(max_zone_pfns);
}

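/* "disable_dma32=on" forces max_zone_pfns[ZONE_DMA32] to 0 in zone_sizes_init(). */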
static int __init early_disable_dma32(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		disable_dma32 = true;

	return 0;
}
early_param("disable_dma32", early_disable_dma32);

__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.loaded_mm = &init_mm,
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};

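/*
 * Install the pte encoding for PAT entry @entry and cache mode @cache in
 * both translation tables.
 */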
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}

#ifdef CONFIG_SWAP
unsigned long max_swapfile_size(void)
{
	unsigned long pages;

	pages = generic_max_swapfile_size();

	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
		unsigned long long l1tf_limit = l1tf_pfn_limit();
		/*
		 * We encode swap offsets also with 3 bits below those for pfn
		 * which makes the usable limit higher.
		 */
#if CONFIG_PGTABLE_LEVELS > 2
		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
		pages = min_t(unsigned long long, l1tf_limit, pages);
	}
	return pages;
}
#endif