xref: /OK3568_Linux_fs/kernel/arch/x86/mm/pat/set_memory.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2002 Andi Kleen, SuSE Labs.
4*4882a593Smuzhiyun  * Thanks to Ben LaHaise for precious feedback.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #include <linux/highmem.h>
7*4882a593Smuzhiyun #include <linux/memblock.h>
8*4882a593Smuzhiyun #include <linux/sched.h>
9*4882a593Smuzhiyun #include <linux/mm.h>
10*4882a593Smuzhiyun #include <linux/interrupt.h>
11*4882a593Smuzhiyun #include <linux/seq_file.h>
12*4882a593Smuzhiyun #include <linux/debugfs.h>
13*4882a593Smuzhiyun #include <linux/pfn.h>
14*4882a593Smuzhiyun #include <linux/percpu.h>
15*4882a593Smuzhiyun #include <linux/gfp.h>
16*4882a593Smuzhiyun #include <linux/pci.h>
17*4882a593Smuzhiyun #include <linux/vmalloc.h>
18*4882a593Smuzhiyun #include <linux/libnvdimm.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include <asm/e820/api.h>
21*4882a593Smuzhiyun #include <asm/processor.h>
22*4882a593Smuzhiyun #include <asm/tlbflush.h>
23*4882a593Smuzhiyun #include <asm/sections.h>
24*4882a593Smuzhiyun #include <asm/setup.h>
25*4882a593Smuzhiyun #include <linux/uaccess.h>
26*4882a593Smuzhiyun #include <asm/pgalloc.h>
27*4882a593Smuzhiyun #include <asm/proto.h>
28*4882a593Smuzhiyun #include <asm/memtype.h>
29*4882a593Smuzhiyun #include <asm/set_memory.h>
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #include "../mm_internal.h"
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun /*
34*4882a593Smuzhiyun  * The current flushing context - we pass it instead of 5 arguments:
35*4882a593Smuzhiyun  */
36*4882a593Smuzhiyun struct cpa_data {
37*4882a593Smuzhiyun 	unsigned long	*vaddr;
38*4882a593Smuzhiyun 	pgd_t		*pgd;
39*4882a593Smuzhiyun 	pgprot_t	mask_set;
40*4882a593Smuzhiyun 	pgprot_t	mask_clr;
41*4882a593Smuzhiyun 	unsigned long	numpages;
42*4882a593Smuzhiyun 	unsigned long	curpage;
43*4882a593Smuzhiyun 	unsigned long	pfn;
44*4882a593Smuzhiyun 	unsigned int	flags;
45*4882a593Smuzhiyun 	unsigned int	force_split		: 1,
46*4882a593Smuzhiyun 			force_static_prot	: 1,
47*4882a593Smuzhiyun 			force_flush_all		: 1;
48*4882a593Smuzhiyun 	struct page	**pages;
49*4882a593Smuzhiyun };
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun enum cpa_warn {
52*4882a593Smuzhiyun 	CPA_CONFLICT,
53*4882a593Smuzhiyun 	CPA_PROTECT,
54*4882a593Smuzhiyun 	CPA_DETECT,
55*4882a593Smuzhiyun };
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun static const int cpa_warn_level = CPA_PROTECT;
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun /*
60*4882a593Smuzhiyun  * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
61*4882a593Smuzhiyun  * using cpa_lock. So that we don't allow any other cpu, with stale large tlb
62*4882a593Smuzhiyun  * entries change the page attribute in parallel to some other cpu
63*4882a593Smuzhiyun  * splitting a large page entry along with changing the attribute.
64*4882a593Smuzhiyun  */
65*4882a593Smuzhiyun static DEFINE_SPINLOCK(cpa_lock);
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun #define CPA_FLUSHTLB 1
68*4882a593Smuzhiyun #define CPA_ARRAY 2
69*4882a593Smuzhiyun #define CPA_PAGES_ARRAY 4
70*4882a593Smuzhiyun #define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */
71*4882a593Smuzhiyun 
cachemode2pgprot(enum page_cache_mode pcm)72*4882a593Smuzhiyun static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
73*4882a593Smuzhiyun {
74*4882a593Smuzhiyun 	return __pgprot(cachemode2protval(pcm));
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
78*4882a593Smuzhiyun static unsigned long direct_pages_count[PG_LEVEL_NUM];
79*4882a593Smuzhiyun 
update_page_count(int level,unsigned long pages)80*4882a593Smuzhiyun void update_page_count(int level, unsigned long pages)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun 	/* Protect against CPA */
83*4882a593Smuzhiyun 	spin_lock(&pgd_lock);
84*4882a593Smuzhiyun 	direct_pages_count[level] += pages;
85*4882a593Smuzhiyun 	spin_unlock(&pgd_lock);
86*4882a593Smuzhiyun }
87*4882a593Smuzhiyun 
split_page_count(int level)88*4882a593Smuzhiyun static void split_page_count(int level)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun 	if (direct_pages_count[level] == 0)
91*4882a593Smuzhiyun 		return;
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	direct_pages_count[level]--;
94*4882a593Smuzhiyun 	direct_pages_count[level - 1] += PTRS_PER_PTE;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun 
arch_report_meminfo(struct seq_file * m)97*4882a593Smuzhiyun void arch_report_meminfo(struct seq_file *m)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun 	seq_printf(m, "DirectMap4k:    %8lu kB\n",
100*4882a593Smuzhiyun 			direct_pages_count[PG_LEVEL_4K] << 2);
101*4882a593Smuzhiyun #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
102*4882a593Smuzhiyun 	seq_printf(m, "DirectMap2M:    %8lu kB\n",
103*4882a593Smuzhiyun 			direct_pages_count[PG_LEVEL_2M] << 11);
104*4882a593Smuzhiyun #else
105*4882a593Smuzhiyun 	seq_printf(m, "DirectMap4M:    %8lu kB\n",
106*4882a593Smuzhiyun 			direct_pages_count[PG_LEVEL_2M] << 12);
107*4882a593Smuzhiyun #endif
108*4882a593Smuzhiyun 	if (direct_gbpages)
109*4882a593Smuzhiyun 		seq_printf(m, "DirectMap1G:    %8lu kB\n",
110*4882a593Smuzhiyun 			direct_pages_count[PG_LEVEL_1G] << 20);
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun #else
split_page_count(int level)113*4882a593Smuzhiyun static inline void split_page_count(int level) { }
114*4882a593Smuzhiyun #endif
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun #ifdef CONFIG_X86_CPA_STATISTICS
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun static unsigned long cpa_1g_checked;
119*4882a593Smuzhiyun static unsigned long cpa_1g_sameprot;
120*4882a593Smuzhiyun static unsigned long cpa_1g_preserved;
121*4882a593Smuzhiyun static unsigned long cpa_2m_checked;
122*4882a593Smuzhiyun static unsigned long cpa_2m_sameprot;
123*4882a593Smuzhiyun static unsigned long cpa_2m_preserved;
124*4882a593Smuzhiyun static unsigned long cpa_4k_install;
125*4882a593Smuzhiyun 
cpa_inc_1g_checked(void)126*4882a593Smuzhiyun static inline void cpa_inc_1g_checked(void)
127*4882a593Smuzhiyun {
128*4882a593Smuzhiyun 	cpa_1g_checked++;
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun 
cpa_inc_2m_checked(void)131*4882a593Smuzhiyun static inline void cpa_inc_2m_checked(void)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun 	cpa_2m_checked++;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun 
cpa_inc_4k_install(void)136*4882a593Smuzhiyun static inline void cpa_inc_4k_install(void)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun 	data_race(cpa_4k_install++);
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun 
cpa_inc_lp_sameprot(int level)141*4882a593Smuzhiyun static inline void cpa_inc_lp_sameprot(int level)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun 	if (level == PG_LEVEL_1G)
144*4882a593Smuzhiyun 		cpa_1g_sameprot++;
145*4882a593Smuzhiyun 	else
146*4882a593Smuzhiyun 		cpa_2m_sameprot++;
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun 
cpa_inc_lp_preserved(int level)149*4882a593Smuzhiyun static inline void cpa_inc_lp_preserved(int level)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	if (level == PG_LEVEL_1G)
152*4882a593Smuzhiyun 		cpa_1g_preserved++;
153*4882a593Smuzhiyun 	else
154*4882a593Smuzhiyun 		cpa_2m_preserved++;
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun 
cpastats_show(struct seq_file * m,void * p)157*4882a593Smuzhiyun static int cpastats_show(struct seq_file *m, void *p)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
160*4882a593Smuzhiyun 	seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
161*4882a593Smuzhiyun 	seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
162*4882a593Smuzhiyun 	seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
163*4882a593Smuzhiyun 	seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
164*4882a593Smuzhiyun 	seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
165*4882a593Smuzhiyun 	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
166*4882a593Smuzhiyun 	return 0;
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun 
cpastats_open(struct inode * inode,struct file * file)169*4882a593Smuzhiyun static int cpastats_open(struct inode *inode, struct file *file)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun 	return single_open(file, cpastats_show, NULL);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun static const struct file_operations cpastats_fops = {
175*4882a593Smuzhiyun 	.open		= cpastats_open,
176*4882a593Smuzhiyun 	.read		= seq_read,
177*4882a593Smuzhiyun 	.llseek		= seq_lseek,
178*4882a593Smuzhiyun 	.release	= single_release,
179*4882a593Smuzhiyun };
180*4882a593Smuzhiyun 
cpa_stats_init(void)181*4882a593Smuzhiyun static int __init cpa_stats_init(void)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun 	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
184*4882a593Smuzhiyun 			    &cpastats_fops);
185*4882a593Smuzhiyun 	return 0;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun late_initcall(cpa_stats_init);
188*4882a593Smuzhiyun #else
cpa_inc_1g_checked(void)189*4882a593Smuzhiyun static inline void cpa_inc_1g_checked(void) { }
cpa_inc_2m_checked(void)190*4882a593Smuzhiyun static inline void cpa_inc_2m_checked(void) { }
cpa_inc_4k_install(void)191*4882a593Smuzhiyun static inline void cpa_inc_4k_install(void) { }
cpa_inc_lp_sameprot(int level)192*4882a593Smuzhiyun static inline void cpa_inc_lp_sameprot(int level) { }
cpa_inc_lp_preserved(int level)193*4882a593Smuzhiyun static inline void cpa_inc_lp_preserved(int level) { }
194*4882a593Smuzhiyun #endif
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun static inline int
within(unsigned long addr,unsigned long start,unsigned long end)198*4882a593Smuzhiyun within(unsigned long addr, unsigned long start, unsigned long end)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun 	return addr >= start && addr < end;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun static inline int
within_inclusive(unsigned long addr,unsigned long start,unsigned long end)204*4882a593Smuzhiyun within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
205*4882a593Smuzhiyun {
206*4882a593Smuzhiyun 	return addr >= start && addr <= end;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun #ifdef CONFIG_X86_64
210*4882a593Smuzhiyun 
highmap_start_pfn(void)211*4882a593Smuzhiyun static inline unsigned long highmap_start_pfn(void)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	return __pa_symbol(_text) >> PAGE_SHIFT;
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun 
highmap_end_pfn(void)216*4882a593Smuzhiyun static inline unsigned long highmap_end_pfn(void)
217*4882a593Smuzhiyun {
218*4882a593Smuzhiyun 	/* Do not reference physical address outside the kernel. */
219*4882a593Smuzhiyun 	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun 
__cpa_pfn_in_highmap(unsigned long pfn)222*4882a593Smuzhiyun static bool __cpa_pfn_in_highmap(unsigned long pfn)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun 	/*
225*4882a593Smuzhiyun 	 * Kernel text has an alias mapping at a high address, known
226*4882a593Smuzhiyun 	 * here as "highmap".
227*4882a593Smuzhiyun 	 */
228*4882a593Smuzhiyun 	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun #else
232*4882a593Smuzhiyun 
__cpa_pfn_in_highmap(unsigned long pfn)233*4882a593Smuzhiyun static bool __cpa_pfn_in_highmap(unsigned long pfn)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun 	/* There is no highmap on 32-bit */
236*4882a593Smuzhiyun 	return false;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun #endif
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun /*
242*4882a593Smuzhiyun  * See set_mce_nospec().
243*4882a593Smuzhiyun  *
244*4882a593Smuzhiyun  * Machine check recovery code needs to change cache mode of poisoned pages to
245*4882a593Smuzhiyun  * UC to avoid speculative access logging another error. But passing the
246*4882a593Smuzhiyun  * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
247*4882a593Smuzhiyun  * speculative access. So we cheat and flip the top bit of the address. This
248*4882a593Smuzhiyun  * works fine for the code that updates the page tables. But at the end of the
249*4882a593Smuzhiyun  * process we need to flush the TLB and cache and the non-canonical address
250*4882a593Smuzhiyun  * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
251*4882a593Smuzhiyun  *
252*4882a593Smuzhiyun  * But in the common case we already have a canonical address. This code
253*4882a593Smuzhiyun  * will fix the top bit if needed and is a no-op otherwise.
254*4882a593Smuzhiyun  */
fix_addr(unsigned long addr)255*4882a593Smuzhiyun static inline unsigned long fix_addr(unsigned long addr)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun #ifdef CONFIG_X86_64
258*4882a593Smuzhiyun 	return (long)(addr << 1) >> 1;
259*4882a593Smuzhiyun #else
260*4882a593Smuzhiyun 	return addr;
261*4882a593Smuzhiyun #endif
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
__cpa_addr(struct cpa_data * cpa,unsigned long idx)264*4882a593Smuzhiyun static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	if (cpa->flags & CPA_PAGES_ARRAY) {
267*4882a593Smuzhiyun 		struct page *page = cpa->pages[idx];
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 		if (unlikely(PageHighMem(page)))
270*4882a593Smuzhiyun 			return 0;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 		return (unsigned long)page_address(page);
273*4882a593Smuzhiyun 	}
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	if (cpa->flags & CPA_ARRAY)
276*4882a593Smuzhiyun 		return cpa->vaddr[idx];
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	return *cpa->vaddr + idx * PAGE_SIZE;
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun /*
282*4882a593Smuzhiyun  * Flushing functions
283*4882a593Smuzhiyun  */
284*4882a593Smuzhiyun 
clflush_cache_range_opt(void * vaddr,unsigned int size)285*4882a593Smuzhiyun static void clflush_cache_range_opt(void *vaddr, unsigned int size)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun 	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
288*4882a593Smuzhiyun 	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
289*4882a593Smuzhiyun 	void *vend = vaddr + size;
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 	if (p >= vend)
292*4882a593Smuzhiyun 		return;
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	for (; p < vend; p += clflush_size)
295*4882a593Smuzhiyun 		clflushopt(p);
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun /**
299*4882a593Smuzhiyun  * clflush_cache_range - flush a cache range with clflush
300*4882a593Smuzhiyun  * @vaddr:	virtual start address
301*4882a593Smuzhiyun  * @size:	number of bytes to flush
302*4882a593Smuzhiyun  *
303*4882a593Smuzhiyun  * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
304*4882a593Smuzhiyun  * SFENCE to avoid ordering issues.
305*4882a593Smuzhiyun  */
clflush_cache_range(void * vaddr,unsigned int size)306*4882a593Smuzhiyun void clflush_cache_range(void *vaddr, unsigned int size)
307*4882a593Smuzhiyun {
308*4882a593Smuzhiyun 	mb();
309*4882a593Smuzhiyun 	clflush_cache_range_opt(vaddr, size);
310*4882a593Smuzhiyun 	mb();
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clflush_cache_range);
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun #ifdef CONFIG_ARCH_HAS_PMEM_API
arch_invalidate_pmem(void * addr,size_t size)315*4882a593Smuzhiyun void arch_invalidate_pmem(void *addr, size_t size)
316*4882a593Smuzhiyun {
317*4882a593Smuzhiyun 	clflush_cache_range(addr, size);
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
320*4882a593Smuzhiyun #endif
321*4882a593Smuzhiyun 
__cpa_flush_all(void * arg)322*4882a593Smuzhiyun static void __cpa_flush_all(void *arg)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun 	unsigned long cache = (unsigned long)arg;
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	/*
327*4882a593Smuzhiyun 	 * Flush all to work around Errata in early athlons regarding
328*4882a593Smuzhiyun 	 * large page flushing.
329*4882a593Smuzhiyun 	 */
330*4882a593Smuzhiyun 	__flush_tlb_all();
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	if (cache && boot_cpu_data.x86 >= 4)
333*4882a593Smuzhiyun 		wbinvd();
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun 
cpa_flush_all(unsigned long cache)336*4882a593Smuzhiyun static void cpa_flush_all(unsigned long cache)
337*4882a593Smuzhiyun {
338*4882a593Smuzhiyun 	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
__cpa_flush_tlb(void * data)343*4882a593Smuzhiyun static void __cpa_flush_tlb(void *data)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun 	struct cpa_data *cpa = data;
346*4882a593Smuzhiyun 	unsigned int i;
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	for (i = 0; i < cpa->numpages; i++)
349*4882a593Smuzhiyun 		flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun 
cpa_flush(struct cpa_data * data,int cache)352*4882a593Smuzhiyun static void cpa_flush(struct cpa_data *data, int cache)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun 	struct cpa_data *cpa = data;
355*4882a593Smuzhiyun 	unsigned int i;
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
360*4882a593Smuzhiyun 		cpa_flush_all(cache);
361*4882a593Smuzhiyun 		return;
362*4882a593Smuzhiyun 	}
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
365*4882a593Smuzhiyun 		flush_tlb_all();
366*4882a593Smuzhiyun 	else
367*4882a593Smuzhiyun 		on_each_cpu(__cpa_flush_tlb, cpa, 1);
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 	if (!cache)
370*4882a593Smuzhiyun 		return;
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	mb();
373*4882a593Smuzhiyun 	for (i = 0; i < cpa->numpages; i++) {
374*4882a593Smuzhiyun 		unsigned long addr = __cpa_addr(cpa, i);
375*4882a593Smuzhiyun 		unsigned int level;
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 		pte_t *pte = lookup_address(addr, &level);
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 		/*
380*4882a593Smuzhiyun 		 * Only flush present addresses:
381*4882a593Smuzhiyun 		 */
382*4882a593Smuzhiyun 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
383*4882a593Smuzhiyun 			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
384*4882a593Smuzhiyun 	}
385*4882a593Smuzhiyun 	mb();
386*4882a593Smuzhiyun }
387*4882a593Smuzhiyun 
overlaps(unsigned long r1_start,unsigned long r1_end,unsigned long r2_start,unsigned long r2_end)388*4882a593Smuzhiyun static bool overlaps(unsigned long r1_start, unsigned long r1_end,
389*4882a593Smuzhiyun 		     unsigned long r2_start, unsigned long r2_end)
390*4882a593Smuzhiyun {
391*4882a593Smuzhiyun 	return (r1_start <= r2_end && r1_end >= r2_start) ||
392*4882a593Smuzhiyun 		(r2_start <= r1_end && r2_end >= r1_start);
393*4882a593Smuzhiyun }
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun #ifdef CONFIG_PCI_BIOS
396*4882a593Smuzhiyun /*
397*4882a593Smuzhiyun  * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
398*4882a593Smuzhiyun  * based config access (CONFIG_PCI_GOBIOS) support.
399*4882a593Smuzhiyun  */
400*4882a593Smuzhiyun #define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
401*4882a593Smuzhiyun #define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)
402*4882a593Smuzhiyun 
protect_pci_bios(unsigned long spfn,unsigned long epfn)403*4882a593Smuzhiyun static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
404*4882a593Smuzhiyun {
405*4882a593Smuzhiyun 	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
406*4882a593Smuzhiyun 		return _PAGE_NX;
407*4882a593Smuzhiyun 	return 0;
408*4882a593Smuzhiyun }
409*4882a593Smuzhiyun #else
protect_pci_bios(unsigned long spfn,unsigned long epfn)410*4882a593Smuzhiyun static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
411*4882a593Smuzhiyun {
412*4882a593Smuzhiyun 	return 0;
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun #endif
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun /*
417*4882a593Smuzhiyun  * The .rodata section needs to be read-only. Using the pfn catches all
418*4882a593Smuzhiyun  * aliases.  This also includes __ro_after_init, so do not enforce until
419*4882a593Smuzhiyun  * kernel_set_to_readonly is true.
420*4882a593Smuzhiyun  */
protect_rodata(unsigned long spfn,unsigned long epfn)421*4882a593Smuzhiyun static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
422*4882a593Smuzhiyun {
423*4882a593Smuzhiyun 	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	/*
426*4882a593Smuzhiyun 	 * Note: __end_rodata is at page aligned and not inclusive, so
427*4882a593Smuzhiyun 	 * subtract 1 to get the last enforced PFN in the rodata area.
428*4882a593Smuzhiyun 	 */
429*4882a593Smuzhiyun 	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun 	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
432*4882a593Smuzhiyun 		return _PAGE_RW;
433*4882a593Smuzhiyun 	return 0;
434*4882a593Smuzhiyun }
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun /*
437*4882a593Smuzhiyun  * Protect kernel text against becoming non executable by forbidding
438*4882a593Smuzhiyun  * _PAGE_NX.  This protects only the high kernel mapping (_text -> _etext)
439*4882a593Smuzhiyun  * out of which the kernel actually executes.  Do not protect the low
440*4882a593Smuzhiyun  * mapping.
441*4882a593Smuzhiyun  *
442*4882a593Smuzhiyun  * This does not cover __inittext since that is gone after boot.
443*4882a593Smuzhiyun  */
protect_kernel_text(unsigned long start,unsigned long end)444*4882a593Smuzhiyun static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
445*4882a593Smuzhiyun {
446*4882a593Smuzhiyun 	unsigned long t_end = (unsigned long)_etext - 1;
447*4882a593Smuzhiyun 	unsigned long t_start = (unsigned long)_text;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	if (overlaps(start, end, t_start, t_end))
450*4882a593Smuzhiyun 		return _PAGE_NX;
451*4882a593Smuzhiyun 	return 0;
452*4882a593Smuzhiyun }
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun #if defined(CONFIG_X86_64)
455*4882a593Smuzhiyun /*
456*4882a593Smuzhiyun  * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
457*4882a593Smuzhiyun  * kernel text mappings for the large page aligned text, rodata sections
458*4882a593Smuzhiyun  * will be always read-only. For the kernel identity mappings covering the
459*4882a593Smuzhiyun  * holes caused by this alignment can be anything that user asks.
460*4882a593Smuzhiyun  *
461*4882a593Smuzhiyun  * This will preserve the large page mappings for kernel text/data at no
462*4882a593Smuzhiyun  * extra cost.
463*4882a593Smuzhiyun  */
protect_kernel_text_ro(unsigned long start,unsigned long end)464*4882a593Smuzhiyun static pgprotval_t protect_kernel_text_ro(unsigned long start,
465*4882a593Smuzhiyun 					  unsigned long end)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun 	unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
468*4882a593Smuzhiyun 	unsigned long t_start = (unsigned long)_text;
469*4882a593Smuzhiyun 	unsigned int level;
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
472*4882a593Smuzhiyun 		return 0;
473*4882a593Smuzhiyun 	/*
474*4882a593Smuzhiyun 	 * Don't enforce the !RW mapping for the kernel text mapping, if
475*4882a593Smuzhiyun 	 * the current mapping is already using small page mapping.  No
476*4882a593Smuzhiyun 	 * need to work hard to preserve large page mappings in this case.
477*4882a593Smuzhiyun 	 *
478*4882a593Smuzhiyun 	 * This also fixes the Linux Xen paravirt guest boot failure caused
479*4882a593Smuzhiyun 	 * by unexpected read-only mappings for kernel identity
480*4882a593Smuzhiyun 	 * mappings. In this paravirt guest case, the kernel text mapping
481*4882a593Smuzhiyun 	 * and the kernel identity mapping share the same page-table pages,
482*4882a593Smuzhiyun 	 * so the protections for kernel text and identity mappings have to
483*4882a593Smuzhiyun 	 * be the same.
484*4882a593Smuzhiyun 	 */
485*4882a593Smuzhiyun 	if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
486*4882a593Smuzhiyun 		return _PAGE_RW;
487*4882a593Smuzhiyun 	return 0;
488*4882a593Smuzhiyun }
489*4882a593Smuzhiyun #else
protect_kernel_text_ro(unsigned long start,unsigned long end)490*4882a593Smuzhiyun static pgprotval_t protect_kernel_text_ro(unsigned long start,
491*4882a593Smuzhiyun 					  unsigned long end)
492*4882a593Smuzhiyun {
493*4882a593Smuzhiyun 	return 0;
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun #endif
496*4882a593Smuzhiyun 
conflicts(pgprot_t prot,pgprotval_t val)497*4882a593Smuzhiyun static inline bool conflicts(pgprot_t prot, pgprotval_t val)
498*4882a593Smuzhiyun {
499*4882a593Smuzhiyun 	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
500*4882a593Smuzhiyun }
501*4882a593Smuzhiyun 
check_conflict(int warnlvl,pgprot_t prot,pgprotval_t val,unsigned long start,unsigned long end,unsigned long pfn,const char * txt)502*4882a593Smuzhiyun static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
503*4882a593Smuzhiyun 				  unsigned long start, unsigned long end,
504*4882a593Smuzhiyun 				  unsigned long pfn, const char *txt)
505*4882a593Smuzhiyun {
506*4882a593Smuzhiyun 	static const char *lvltxt[] = {
507*4882a593Smuzhiyun 		[CPA_CONFLICT]	= "conflict",
508*4882a593Smuzhiyun 		[CPA_PROTECT]	= "protect",
509*4882a593Smuzhiyun 		[CPA_DETECT]	= "detect",
510*4882a593Smuzhiyun 	};
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
513*4882a593Smuzhiyun 		return;
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
516*4882a593Smuzhiyun 		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
517*4882a593Smuzhiyun 		(unsigned long long)val);
518*4882a593Smuzhiyun }
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun /*
521*4882a593Smuzhiyun  * Certain areas of memory on x86 require very specific protection flags,
522*4882a593Smuzhiyun  * for example the BIOS area or kernel text. Callers don't always get this
523*4882a593Smuzhiyun  * right (again, ioremap() on BIOS memory is not uncommon) so this function
524*4882a593Smuzhiyun  * checks and fixes these known static required protection bits.
525*4882a593Smuzhiyun  */
static_protections(pgprot_t prot,unsigned long start,unsigned long pfn,unsigned long npg,unsigned long lpsize,int warnlvl)526*4882a593Smuzhiyun static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
527*4882a593Smuzhiyun 					  unsigned long pfn, unsigned long npg,
528*4882a593Smuzhiyun 					  unsigned long lpsize, int warnlvl)
529*4882a593Smuzhiyun {
530*4882a593Smuzhiyun 	pgprotval_t forbidden, res;
531*4882a593Smuzhiyun 	unsigned long end;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	/*
534*4882a593Smuzhiyun 	 * There is no point in checking RW/NX conflicts when the requested
535*4882a593Smuzhiyun 	 * mapping is setting the page !PRESENT.
536*4882a593Smuzhiyun 	 */
537*4882a593Smuzhiyun 	if (!(pgprot_val(prot) & _PAGE_PRESENT))
538*4882a593Smuzhiyun 		return prot;
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	/* Operate on the virtual address */
541*4882a593Smuzhiyun 	end = start + npg * PAGE_SIZE - 1;
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	res = protect_kernel_text(start, end);
544*4882a593Smuzhiyun 	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
545*4882a593Smuzhiyun 	forbidden = res;
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun 	/*
548*4882a593Smuzhiyun 	 * Special case to preserve a large page. If the change spawns the
549*4882a593Smuzhiyun 	 * full large page mapping then there is no point to split it
550*4882a593Smuzhiyun 	 * up. Happens with ftrace and is going to be removed once ftrace
551*4882a593Smuzhiyun 	 * switched to text_poke().
552*4882a593Smuzhiyun 	 */
553*4882a593Smuzhiyun 	if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
554*4882a593Smuzhiyun 		res = protect_kernel_text_ro(start, end);
555*4882a593Smuzhiyun 		check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
556*4882a593Smuzhiyun 		forbidden |= res;
557*4882a593Smuzhiyun 	}
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 	/* Check the PFN directly */
560*4882a593Smuzhiyun 	res = protect_pci_bios(pfn, pfn + npg - 1);
561*4882a593Smuzhiyun 	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
562*4882a593Smuzhiyun 	forbidden |= res;
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	res = protect_rodata(pfn, pfn + npg - 1);
565*4882a593Smuzhiyun 	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
566*4882a593Smuzhiyun 	forbidden |= res;
567*4882a593Smuzhiyun 
568*4882a593Smuzhiyun 	return __pgprot(pgprot_val(prot) & ~forbidden);
569*4882a593Smuzhiyun }
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun /*
572*4882a593Smuzhiyun  * Lookup the page table entry for a virtual address in a specific pgd.
573*4882a593Smuzhiyun  * Return a pointer to the entry and the level of the mapping.
574*4882a593Smuzhiyun  */
lookup_address_in_pgd(pgd_t * pgd,unsigned long address,unsigned int * level)575*4882a593Smuzhiyun pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
576*4882a593Smuzhiyun 			     unsigned int *level)
577*4882a593Smuzhiyun {
578*4882a593Smuzhiyun 	p4d_t *p4d;
579*4882a593Smuzhiyun 	pud_t *pud;
580*4882a593Smuzhiyun 	pmd_t *pmd;
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	*level = PG_LEVEL_NONE;
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	if (pgd_none(*pgd))
585*4882a593Smuzhiyun 		return NULL;
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, address);
588*4882a593Smuzhiyun 	if (p4d_none(*p4d))
589*4882a593Smuzhiyun 		return NULL;
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	*level = PG_LEVEL_512G;
592*4882a593Smuzhiyun 	if (p4d_large(*p4d) || !p4d_present(*p4d))
593*4882a593Smuzhiyun 		return (pte_t *)p4d;
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 	pud = pud_offset(p4d, address);
596*4882a593Smuzhiyun 	if (pud_none(*pud))
597*4882a593Smuzhiyun 		return NULL;
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	*level = PG_LEVEL_1G;
600*4882a593Smuzhiyun 	if (pud_large(*pud) || !pud_present(*pud))
601*4882a593Smuzhiyun 		return (pte_t *)pud;
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	pmd = pmd_offset(pud, address);
604*4882a593Smuzhiyun 	if (pmd_none(*pmd))
605*4882a593Smuzhiyun 		return NULL;
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	*level = PG_LEVEL_2M;
608*4882a593Smuzhiyun 	if (pmd_large(*pmd) || !pmd_present(*pmd))
609*4882a593Smuzhiyun 		return (pte_t *)pmd;
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	*level = PG_LEVEL_4K;
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun 	return pte_offset_kernel(pmd, address);
614*4882a593Smuzhiyun }
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun /*
617*4882a593Smuzhiyun  * Lookup the page table entry for a virtual address. Return a pointer
618*4882a593Smuzhiyun  * to the entry and the level of the mapping.
619*4882a593Smuzhiyun  *
620*4882a593Smuzhiyun  * Note: We return pud and pmd either when the entry is marked large
621*4882a593Smuzhiyun  * or when the present bit is not set. Otherwise we would return a
622*4882a593Smuzhiyun  * pointer to a nonexisting mapping.
623*4882a593Smuzhiyun  */
lookup_address(unsigned long address,unsigned int * level)624*4882a593Smuzhiyun pte_t *lookup_address(unsigned long address, unsigned int *level)
625*4882a593Smuzhiyun {
626*4882a593Smuzhiyun 	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
627*4882a593Smuzhiyun }
628*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(lookup_address);
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun /*
631*4882a593Smuzhiyun  * Lookup the page table entry for a virtual address in a given mm. Return a
632*4882a593Smuzhiyun  * pointer to the entry and the level of the mapping.
633*4882a593Smuzhiyun  */
lookup_address_in_mm(struct mm_struct * mm,unsigned long address,unsigned int * level)634*4882a593Smuzhiyun pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
635*4882a593Smuzhiyun 			    unsigned int *level)
636*4882a593Smuzhiyun {
637*4882a593Smuzhiyun 	return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
638*4882a593Smuzhiyun }
639*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(lookup_address_in_mm);
640*4882a593Smuzhiyun 
_lookup_address_cpa(struct cpa_data * cpa,unsigned long address,unsigned int * level)641*4882a593Smuzhiyun static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
642*4882a593Smuzhiyun 				  unsigned int *level)
643*4882a593Smuzhiyun {
644*4882a593Smuzhiyun 	if (cpa->pgd)
645*4882a593Smuzhiyun 		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
646*4882a593Smuzhiyun 					       address, level);
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 	return lookup_address(address, level);
649*4882a593Smuzhiyun }
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun /*
652*4882a593Smuzhiyun  * Lookup the PMD entry for a virtual address. Return a pointer to the entry
653*4882a593Smuzhiyun  * or NULL if not present.
654*4882a593Smuzhiyun  */
lookup_pmd_address(unsigned long address)655*4882a593Smuzhiyun pmd_t *lookup_pmd_address(unsigned long address)
656*4882a593Smuzhiyun {
657*4882a593Smuzhiyun 	pgd_t *pgd;
658*4882a593Smuzhiyun 	p4d_t *p4d;
659*4882a593Smuzhiyun 	pud_t *pud;
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun 	pgd = pgd_offset_k(address);
662*4882a593Smuzhiyun 	if (pgd_none(*pgd))
663*4882a593Smuzhiyun 		return NULL;
664*4882a593Smuzhiyun 
665*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, address);
666*4882a593Smuzhiyun 	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
667*4882a593Smuzhiyun 		return NULL;
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 	pud = pud_offset(p4d, address);
670*4882a593Smuzhiyun 	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
671*4882a593Smuzhiyun 		return NULL;
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	return pmd_offset(pud, address);
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun /*
677*4882a593Smuzhiyun  * This is necessary because __pa() does not work on some
678*4882a593Smuzhiyun  * kinds of memory, like vmalloc() or the alloc_remap()
679*4882a593Smuzhiyun  * areas on 32-bit NUMA systems.  The percpu areas can
680*4882a593Smuzhiyun  * end up in this kind of memory, for instance.
681*4882a593Smuzhiyun  *
682*4882a593Smuzhiyun  * This could be optimized, but it is only intended to be
683*4882a593Smuzhiyun  * used at inititalization time, and keeping it
684*4882a593Smuzhiyun  * unoptimized should increase the testing coverage for
685*4882a593Smuzhiyun  * the more obscure platforms.
686*4882a593Smuzhiyun  */
slow_virt_to_phys(void * __virt_addr)687*4882a593Smuzhiyun phys_addr_t slow_virt_to_phys(void *__virt_addr)
688*4882a593Smuzhiyun {
689*4882a593Smuzhiyun 	unsigned long virt_addr = (unsigned long)__virt_addr;
690*4882a593Smuzhiyun 	phys_addr_t phys_addr;
691*4882a593Smuzhiyun 	unsigned long offset;
692*4882a593Smuzhiyun 	enum pg_level level;
693*4882a593Smuzhiyun 	pte_t *pte;
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun 	pte = lookup_address(virt_addr, &level);
696*4882a593Smuzhiyun 	BUG_ON(!pte);
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	/*
699*4882a593Smuzhiyun 	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
700*4882a593Smuzhiyun 	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
701*4882a593Smuzhiyun 	 * make 32-PAE kernel work correctly.
702*4882a593Smuzhiyun 	 */
703*4882a593Smuzhiyun 	switch (level) {
704*4882a593Smuzhiyun 	case PG_LEVEL_1G:
705*4882a593Smuzhiyun 		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
706*4882a593Smuzhiyun 		offset = virt_addr & ~PUD_PAGE_MASK;
707*4882a593Smuzhiyun 		break;
708*4882a593Smuzhiyun 	case PG_LEVEL_2M:
709*4882a593Smuzhiyun 		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
710*4882a593Smuzhiyun 		offset = virt_addr & ~PMD_PAGE_MASK;
711*4882a593Smuzhiyun 		break;
712*4882a593Smuzhiyun 	default:
713*4882a593Smuzhiyun 		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
714*4882a593Smuzhiyun 		offset = virt_addr & ~PAGE_MASK;
715*4882a593Smuzhiyun 	}
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	return (phys_addr_t)(phys_addr | offset);
718*4882a593Smuzhiyun }
719*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(slow_virt_to_phys);
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun /*
722*4882a593Smuzhiyun  * Set the new pmd in all the pgds we know about:
723*4882a593Smuzhiyun  */
__set_pmd_pte(pte_t * kpte,unsigned long address,pte_t pte)724*4882a593Smuzhiyun static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
725*4882a593Smuzhiyun {
726*4882a593Smuzhiyun 	/* change init_mm */
727*4882a593Smuzhiyun 	set_pte_atomic(kpte, pte);
728*4882a593Smuzhiyun #ifdef CONFIG_X86_32
729*4882a593Smuzhiyun 	if (!SHARED_KERNEL_PMD) {
730*4882a593Smuzhiyun 		struct page *page;
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun 		list_for_each_entry(page, &pgd_list, lru) {
733*4882a593Smuzhiyun 			pgd_t *pgd;
734*4882a593Smuzhiyun 			p4d_t *p4d;
735*4882a593Smuzhiyun 			pud_t *pud;
736*4882a593Smuzhiyun 			pmd_t *pmd;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
739*4882a593Smuzhiyun 			p4d = p4d_offset(pgd, address);
740*4882a593Smuzhiyun 			pud = pud_offset(p4d, address);
741*4882a593Smuzhiyun 			pmd = pmd_offset(pud, address);
742*4882a593Smuzhiyun 			set_pte_atomic((pte_t *)pmd, pte);
743*4882a593Smuzhiyun 		}
744*4882a593Smuzhiyun 	}
745*4882a593Smuzhiyun #endif
746*4882a593Smuzhiyun }
747*4882a593Smuzhiyun 
pgprot_clear_protnone_bits(pgprot_t prot)748*4882a593Smuzhiyun static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
749*4882a593Smuzhiyun {
750*4882a593Smuzhiyun 	/*
751*4882a593Smuzhiyun 	 * _PAGE_GLOBAL means "global page" for present PTEs.
752*4882a593Smuzhiyun 	 * But, it is also used to indicate _PAGE_PROTNONE
753*4882a593Smuzhiyun 	 * for non-present PTEs.
754*4882a593Smuzhiyun 	 *
755*4882a593Smuzhiyun 	 * This ensures that a _PAGE_GLOBAL PTE going from
756*4882a593Smuzhiyun 	 * present to non-present is not confused as
757*4882a593Smuzhiyun 	 * _PAGE_PROTNONE.
758*4882a593Smuzhiyun 	 */
759*4882a593Smuzhiyun 	if (!(pgprot_val(prot) & _PAGE_PRESENT))
760*4882a593Smuzhiyun 		pgprot_val(prot) &= ~_PAGE_GLOBAL;
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	return prot;
763*4882a593Smuzhiyun }
764*4882a593Smuzhiyun 
__should_split_large_page(pte_t * kpte,unsigned long address,struct cpa_data * cpa)765*4882a593Smuzhiyun static int __should_split_large_page(pte_t *kpte, unsigned long address,
766*4882a593Smuzhiyun 				     struct cpa_data *cpa)
767*4882a593Smuzhiyun {
768*4882a593Smuzhiyun 	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
769*4882a593Smuzhiyun 	pgprot_t old_prot, new_prot, req_prot, chk_prot;
770*4882a593Smuzhiyun 	pte_t new_pte, *tmp;
771*4882a593Smuzhiyun 	enum pg_level level;
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 	/*
774*4882a593Smuzhiyun 	 * Check for races, another CPU might have split this page
775*4882a593Smuzhiyun 	 * up already:
776*4882a593Smuzhiyun 	 */
777*4882a593Smuzhiyun 	tmp = _lookup_address_cpa(cpa, address, &level);
778*4882a593Smuzhiyun 	if (tmp != kpte)
779*4882a593Smuzhiyun 		return 1;
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun 	switch (level) {
782*4882a593Smuzhiyun 	case PG_LEVEL_2M:
783*4882a593Smuzhiyun 		old_prot = pmd_pgprot(*(pmd_t *)kpte);
784*4882a593Smuzhiyun 		old_pfn = pmd_pfn(*(pmd_t *)kpte);
785*4882a593Smuzhiyun 		cpa_inc_2m_checked();
786*4882a593Smuzhiyun 		break;
787*4882a593Smuzhiyun 	case PG_LEVEL_1G:
788*4882a593Smuzhiyun 		old_prot = pud_pgprot(*(pud_t *)kpte);
789*4882a593Smuzhiyun 		old_pfn = pud_pfn(*(pud_t *)kpte);
790*4882a593Smuzhiyun 		cpa_inc_1g_checked();
791*4882a593Smuzhiyun 		break;
792*4882a593Smuzhiyun 	default:
793*4882a593Smuzhiyun 		return -EINVAL;
794*4882a593Smuzhiyun 	}
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	psize = page_level_size(level);
797*4882a593Smuzhiyun 	pmask = page_level_mask(level);
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 	/*
800*4882a593Smuzhiyun 	 * Calculate the number of pages, which fit into this large
801*4882a593Smuzhiyun 	 * page starting at address:
802*4882a593Smuzhiyun 	 */
803*4882a593Smuzhiyun 	lpaddr = (address + psize) & pmask;
804*4882a593Smuzhiyun 	numpages = (lpaddr - address) >> PAGE_SHIFT;
805*4882a593Smuzhiyun 	if (numpages < cpa->numpages)
806*4882a593Smuzhiyun 		cpa->numpages = numpages;
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	/*
809*4882a593Smuzhiyun 	 * We are safe now. Check whether the new pgprot is the same:
810*4882a593Smuzhiyun 	 * Convert protection attributes to 4k-format, as cpa->mask* are set
811*4882a593Smuzhiyun 	 * up accordingly.
812*4882a593Smuzhiyun 	 */
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
815*4882a593Smuzhiyun 	req_prot = pgprot_large_2_4k(old_prot);
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
818*4882a593Smuzhiyun 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 	/*
821*4882a593Smuzhiyun 	 * req_prot is in format of 4k pages. It must be converted to large
822*4882a593Smuzhiyun 	 * page format: the caching mode includes the PAT bit located at
823*4882a593Smuzhiyun 	 * different bit positions in the two formats.
824*4882a593Smuzhiyun 	 */
825*4882a593Smuzhiyun 	req_prot = pgprot_4k_2_large(req_prot);
826*4882a593Smuzhiyun 	req_prot = pgprot_clear_protnone_bits(req_prot);
827*4882a593Smuzhiyun 	if (pgprot_val(req_prot) & _PAGE_PRESENT)
828*4882a593Smuzhiyun 		pgprot_val(req_prot) |= _PAGE_PSE;
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun 	/*
831*4882a593Smuzhiyun 	 * old_pfn points to the large page base pfn. So we need to add the
832*4882a593Smuzhiyun 	 * offset of the virtual address:
833*4882a593Smuzhiyun 	 */
834*4882a593Smuzhiyun 	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
835*4882a593Smuzhiyun 	cpa->pfn = pfn;
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	/*
838*4882a593Smuzhiyun 	 * Calculate the large page base address and the number of 4K pages
839*4882a593Smuzhiyun 	 * in the large page
840*4882a593Smuzhiyun 	 */
841*4882a593Smuzhiyun 	lpaddr = address & pmask;
842*4882a593Smuzhiyun 	numpages = psize >> PAGE_SHIFT;
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	/*
845*4882a593Smuzhiyun 	 * Sanity check that the existing mapping is correct versus the static
846*4882a593Smuzhiyun 	 * protections. static_protections() guards against !PRESENT, so no
847*4882a593Smuzhiyun 	 * extra conditional required here.
848*4882a593Smuzhiyun 	 */
849*4882a593Smuzhiyun 	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
850*4882a593Smuzhiyun 				      psize, CPA_CONFLICT);
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
853*4882a593Smuzhiyun 		/*
854*4882a593Smuzhiyun 		 * Split the large page and tell the split code to
855*4882a593Smuzhiyun 		 * enforce static protections.
856*4882a593Smuzhiyun 		 */
857*4882a593Smuzhiyun 		cpa->force_static_prot = 1;
858*4882a593Smuzhiyun 		return 1;
859*4882a593Smuzhiyun 	}
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	/*
862*4882a593Smuzhiyun 	 * Optimization: If the requested pgprot is the same as the current
863*4882a593Smuzhiyun 	 * pgprot, then the large page can be preserved and no updates are
864*4882a593Smuzhiyun 	 * required independent of alignment and length of the requested
865*4882a593Smuzhiyun 	 * range. The above already established that the current pgprot is
866*4882a593Smuzhiyun 	 * correct, which in consequence makes the requested pgprot correct
867*4882a593Smuzhiyun 	 * as well if it is the same. The static protection scan below will
868*4882a593Smuzhiyun 	 * not come to a different conclusion.
869*4882a593Smuzhiyun 	 */
870*4882a593Smuzhiyun 	if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
871*4882a593Smuzhiyun 		cpa_inc_lp_sameprot(level);
872*4882a593Smuzhiyun 		return 0;
873*4882a593Smuzhiyun 	}
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	/*
876*4882a593Smuzhiyun 	 * If the requested range does not cover the full page, split it up
877*4882a593Smuzhiyun 	 */
878*4882a593Smuzhiyun 	if (address != lpaddr || cpa->numpages != numpages)
879*4882a593Smuzhiyun 		return 1;
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	/*
882*4882a593Smuzhiyun 	 * Check whether the requested pgprot is conflicting with a static
883*4882a593Smuzhiyun 	 * protection requirement in the large page.
884*4882a593Smuzhiyun 	 */
885*4882a593Smuzhiyun 	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
886*4882a593Smuzhiyun 				      psize, CPA_DETECT);
887*4882a593Smuzhiyun 
888*4882a593Smuzhiyun 	/*
889*4882a593Smuzhiyun 	 * If there is a conflict, split the large page.
890*4882a593Smuzhiyun 	 *
891*4882a593Smuzhiyun 	 * There used to be a 4k wise evaluation trying really hard to
892*4882a593Smuzhiyun 	 * preserve the large pages, but experimentation has shown, that this
893*4882a593Smuzhiyun 	 * does not help at all. There might be corner cases which would
894*4882a593Smuzhiyun 	 * preserve one large page occasionally, but it's really not worth the
895*4882a593Smuzhiyun 	 * extra code and cycles for the common case.
896*4882a593Smuzhiyun 	 */
897*4882a593Smuzhiyun 	if (pgprot_val(req_prot) != pgprot_val(new_prot))
898*4882a593Smuzhiyun 		return 1;
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun 	/* All checks passed. Update the large page mapping. */
901*4882a593Smuzhiyun 	new_pte = pfn_pte(old_pfn, new_prot);
902*4882a593Smuzhiyun 	__set_pmd_pte(kpte, address, new_pte);
903*4882a593Smuzhiyun 	cpa->flags |= CPA_FLUSHTLB;
904*4882a593Smuzhiyun 	cpa_inc_lp_preserved(level);
905*4882a593Smuzhiyun 	return 0;
906*4882a593Smuzhiyun }
907*4882a593Smuzhiyun 
should_split_large_page(pte_t * kpte,unsigned long address,struct cpa_data * cpa)908*4882a593Smuzhiyun static int should_split_large_page(pte_t *kpte, unsigned long address,
909*4882a593Smuzhiyun 				   struct cpa_data *cpa)
910*4882a593Smuzhiyun {
911*4882a593Smuzhiyun 	int do_split;
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun 	if (cpa->force_split)
914*4882a593Smuzhiyun 		return 1;
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	spin_lock(&pgd_lock);
917*4882a593Smuzhiyun 	do_split = __should_split_large_page(kpte, address, cpa);
918*4882a593Smuzhiyun 	spin_unlock(&pgd_lock);
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun 	return do_split;
921*4882a593Smuzhiyun }
922*4882a593Smuzhiyun 
split_set_pte(struct cpa_data * cpa,pte_t * pte,unsigned long pfn,pgprot_t ref_prot,unsigned long address,unsigned long size)923*4882a593Smuzhiyun static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
924*4882a593Smuzhiyun 			  pgprot_t ref_prot, unsigned long address,
925*4882a593Smuzhiyun 			  unsigned long size)
926*4882a593Smuzhiyun {
927*4882a593Smuzhiyun 	unsigned int npg = PFN_DOWN(size);
928*4882a593Smuzhiyun 	pgprot_t prot;
929*4882a593Smuzhiyun 
930*4882a593Smuzhiyun 	/*
931*4882a593Smuzhiyun 	 * If should_split_large_page() discovered an inconsistent mapping,
932*4882a593Smuzhiyun 	 * remove the invalid protection in the split mapping.
933*4882a593Smuzhiyun 	 */
934*4882a593Smuzhiyun 	if (!cpa->force_static_prot)
935*4882a593Smuzhiyun 		goto set;
936*4882a593Smuzhiyun 
937*4882a593Smuzhiyun 	/* Hand in lpsize = 0 to enforce the protection mechanism */
938*4882a593Smuzhiyun 	prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun 	if (pgprot_val(prot) == pgprot_val(ref_prot))
941*4882a593Smuzhiyun 		goto set;
942*4882a593Smuzhiyun 
943*4882a593Smuzhiyun 	/*
944*4882a593Smuzhiyun 	 * If this is splitting a PMD, fix it up. PUD splits cannot be
945*4882a593Smuzhiyun 	 * fixed trivially as that would require to rescan the newly
946*4882a593Smuzhiyun 	 * installed PMD mappings after returning from split_large_page()
947*4882a593Smuzhiyun 	 * so an eventual further split can allocate the necessary PTE
948*4882a593Smuzhiyun 	 * pages. Warn for now and revisit it in case this actually
949*4882a593Smuzhiyun 	 * happens.
950*4882a593Smuzhiyun 	 */
951*4882a593Smuzhiyun 	if (size == PAGE_SIZE)
952*4882a593Smuzhiyun 		ref_prot = prot;
953*4882a593Smuzhiyun 	else
954*4882a593Smuzhiyun 		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
955*4882a593Smuzhiyun set:
956*4882a593Smuzhiyun 	set_pte(pte, pfn_pte(pfn, ref_prot));
957*4882a593Smuzhiyun }
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun static int
__split_large_page(struct cpa_data * cpa,pte_t * kpte,unsigned long address,struct page * base)960*4882a593Smuzhiyun __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
961*4882a593Smuzhiyun 		   struct page *base)
962*4882a593Smuzhiyun {
963*4882a593Smuzhiyun 	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
964*4882a593Smuzhiyun 	pte_t *pbase = (pte_t *)page_address(base);
965*4882a593Smuzhiyun 	unsigned int i, level;
966*4882a593Smuzhiyun 	pgprot_t ref_prot;
967*4882a593Smuzhiyun 	pte_t *tmp;
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 	spin_lock(&pgd_lock);
970*4882a593Smuzhiyun 	/*
971*4882a593Smuzhiyun 	 * Check for races, another CPU might have split this page
972*4882a593Smuzhiyun 	 * up for us already:
973*4882a593Smuzhiyun 	 */
974*4882a593Smuzhiyun 	tmp = _lookup_address_cpa(cpa, address, &level);
975*4882a593Smuzhiyun 	if (tmp != kpte) {
976*4882a593Smuzhiyun 		spin_unlock(&pgd_lock);
977*4882a593Smuzhiyun 		return 1;
978*4882a593Smuzhiyun 	}
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun 	switch (level) {
983*4882a593Smuzhiyun 	case PG_LEVEL_2M:
984*4882a593Smuzhiyun 		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
985*4882a593Smuzhiyun 		/*
986*4882a593Smuzhiyun 		 * Clear PSE (aka _PAGE_PAT) and move
987*4882a593Smuzhiyun 		 * PAT bit to correct position.
988*4882a593Smuzhiyun 		 */
989*4882a593Smuzhiyun 		ref_prot = pgprot_large_2_4k(ref_prot);
990*4882a593Smuzhiyun 		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
991*4882a593Smuzhiyun 		lpaddr = address & PMD_MASK;
992*4882a593Smuzhiyun 		lpinc = PAGE_SIZE;
993*4882a593Smuzhiyun 		break;
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun 	case PG_LEVEL_1G:
996*4882a593Smuzhiyun 		ref_prot = pud_pgprot(*(pud_t *)kpte);
997*4882a593Smuzhiyun 		ref_pfn = pud_pfn(*(pud_t *)kpte);
998*4882a593Smuzhiyun 		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
999*4882a593Smuzhiyun 		lpaddr = address & PUD_MASK;
1000*4882a593Smuzhiyun 		lpinc = PMD_SIZE;
1001*4882a593Smuzhiyun 		/*
1002*4882a593Smuzhiyun 		 * Clear the PSE flags if the PRESENT flag is not set
1003*4882a593Smuzhiyun 		 * otherwise pmd_present/pmd_huge will return true
1004*4882a593Smuzhiyun 		 * even on a non present pmd.
1005*4882a593Smuzhiyun 		 */
1006*4882a593Smuzhiyun 		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
1007*4882a593Smuzhiyun 			pgprot_val(ref_prot) &= ~_PAGE_PSE;
1008*4882a593Smuzhiyun 		break;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	default:
1011*4882a593Smuzhiyun 		spin_unlock(&pgd_lock);
1012*4882a593Smuzhiyun 		return 1;
1013*4882a593Smuzhiyun 	}
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	ref_prot = pgprot_clear_protnone_bits(ref_prot);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	/*
1018*4882a593Smuzhiyun 	 * Get the target pfn from the original entry:
1019*4882a593Smuzhiyun 	 */
1020*4882a593Smuzhiyun 	pfn = ref_pfn;
1021*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
1022*4882a593Smuzhiyun 		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun 	if (virt_addr_valid(address)) {
1025*4882a593Smuzhiyun 		unsigned long pfn = PFN_DOWN(__pa(address));
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 		if (pfn_range_is_mapped(pfn, pfn + 1))
1028*4882a593Smuzhiyun 			split_page_count(level);
1029*4882a593Smuzhiyun 	}
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 	/*
1032*4882a593Smuzhiyun 	 * Install the new, split up pagetable.
1033*4882a593Smuzhiyun 	 *
1034*4882a593Smuzhiyun 	 * We use the standard kernel pagetable protections for the new
1035*4882a593Smuzhiyun 	 * pagetable protections, the actual ptes set above control the
1036*4882a593Smuzhiyun 	 * primary protection behavior:
1037*4882a593Smuzhiyun 	 */
1038*4882a593Smuzhiyun 	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	/*
1041*4882a593Smuzhiyun 	 * Do a global flush tlb after splitting the large page
1042*4882a593Smuzhiyun 	 * and before we do the actual change page attribute in the PTE.
1043*4882a593Smuzhiyun 	 *
1044*4882a593Smuzhiyun 	 * Without this, we violate the TLB application note, that says:
1045*4882a593Smuzhiyun 	 * "The TLBs may contain both ordinary and large-page
1046*4882a593Smuzhiyun 	 *  translations for a 4-KByte range of linear addresses. This
1047*4882a593Smuzhiyun 	 *  may occur if software modifies the paging structures so that
1048*4882a593Smuzhiyun 	 *  the page size used for the address range changes. If the two
1049*4882a593Smuzhiyun 	 *  translations differ with respect to page frame or attributes
1050*4882a593Smuzhiyun 	 *  (e.g., permissions), processor behavior is undefined and may
1051*4882a593Smuzhiyun 	 *  be implementation-specific."
1052*4882a593Smuzhiyun 	 *
1053*4882a593Smuzhiyun 	 * We do this global tlb flush inside the cpa_lock, so that we
1054*4882a593Smuzhiyun 	 * don't allow any other cpu, with stale tlb entries change the
1055*4882a593Smuzhiyun 	 * page attribute in parallel, that also falls into the
1056*4882a593Smuzhiyun 	 * just split large page entry.
1057*4882a593Smuzhiyun 	 */
1058*4882a593Smuzhiyun 	flush_tlb_all();
1059*4882a593Smuzhiyun 	spin_unlock(&pgd_lock);
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	return 0;
1062*4882a593Smuzhiyun }
1063*4882a593Smuzhiyun 
split_large_page(struct cpa_data * cpa,pte_t * kpte,unsigned long address)1064*4882a593Smuzhiyun static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
1065*4882a593Smuzhiyun 			    unsigned long address)
1066*4882a593Smuzhiyun {
1067*4882a593Smuzhiyun 	struct page *base;
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	if (!debug_pagealloc_enabled())
1070*4882a593Smuzhiyun 		spin_unlock(&cpa_lock);
1071*4882a593Smuzhiyun 	base = alloc_pages(GFP_KERNEL, 0);
1072*4882a593Smuzhiyun 	if (!debug_pagealloc_enabled())
1073*4882a593Smuzhiyun 		spin_lock(&cpa_lock);
1074*4882a593Smuzhiyun 	if (!base)
1075*4882a593Smuzhiyun 		return -ENOMEM;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	if (__split_large_page(cpa, kpte, address, base))
1078*4882a593Smuzhiyun 		__free_page(base);
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	return 0;
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun 
try_to_free_pte_page(pte_t * pte)1083*4882a593Smuzhiyun static bool try_to_free_pte_page(pte_t *pte)
1084*4882a593Smuzhiyun {
1085*4882a593Smuzhiyun 	int i;
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PTE; i++)
1088*4882a593Smuzhiyun 		if (!pte_none(pte[i]))
1089*4882a593Smuzhiyun 			return false;
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	free_page((unsigned long)pte);
1092*4882a593Smuzhiyun 	return true;
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun 
try_to_free_pmd_page(pmd_t * pmd)1095*4882a593Smuzhiyun static bool try_to_free_pmd_page(pmd_t *pmd)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun 	int i;
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PMD; i++)
1100*4882a593Smuzhiyun 		if (!pmd_none(pmd[i]))
1101*4882a593Smuzhiyun 			return false;
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	free_page((unsigned long)pmd);
1104*4882a593Smuzhiyun 	return true;
1105*4882a593Smuzhiyun }
1106*4882a593Smuzhiyun 
unmap_pte_range(pmd_t * pmd,unsigned long start,unsigned long end)1107*4882a593Smuzhiyun static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun 	pte_t *pte = pte_offset_kernel(pmd, start);
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	while (start < end) {
1112*4882a593Smuzhiyun 		set_pte(pte, __pte(0));
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 		start += PAGE_SIZE;
1115*4882a593Smuzhiyun 		pte++;
1116*4882a593Smuzhiyun 	}
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
1119*4882a593Smuzhiyun 		pmd_clear(pmd);
1120*4882a593Smuzhiyun 		return true;
1121*4882a593Smuzhiyun 	}
1122*4882a593Smuzhiyun 	return false;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
__unmap_pmd_range(pud_t * pud,pmd_t * pmd,unsigned long start,unsigned long end)1125*4882a593Smuzhiyun static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
1126*4882a593Smuzhiyun 			      unsigned long start, unsigned long end)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun 	if (unmap_pte_range(pmd, start, end))
1129*4882a593Smuzhiyun 		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
1130*4882a593Smuzhiyun 			pud_clear(pud);
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun 
unmap_pmd_range(pud_t * pud,unsigned long start,unsigned long end)1133*4882a593Smuzhiyun static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
1134*4882a593Smuzhiyun {
1135*4882a593Smuzhiyun 	pmd_t *pmd = pmd_offset(pud, start);
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	/*
1138*4882a593Smuzhiyun 	 * Not on a 2MB page boundary?
1139*4882a593Smuzhiyun 	 */
1140*4882a593Smuzhiyun 	if (start & (PMD_SIZE - 1)) {
1141*4882a593Smuzhiyun 		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
1142*4882a593Smuzhiyun 		unsigned long pre_end = min_t(unsigned long, end, next_page);
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun 		__unmap_pmd_range(pud, pmd, start, pre_end);
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 		start = pre_end;
1147*4882a593Smuzhiyun 		pmd++;
1148*4882a593Smuzhiyun 	}
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	/*
1151*4882a593Smuzhiyun 	 * Try to unmap in 2M chunks.
1152*4882a593Smuzhiyun 	 */
1153*4882a593Smuzhiyun 	while (end - start >= PMD_SIZE) {
1154*4882a593Smuzhiyun 		if (pmd_large(*pmd))
1155*4882a593Smuzhiyun 			pmd_clear(pmd);
1156*4882a593Smuzhiyun 		else
1157*4882a593Smuzhiyun 			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 		start += PMD_SIZE;
1160*4882a593Smuzhiyun 		pmd++;
1161*4882a593Smuzhiyun 	}
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	/*
1164*4882a593Smuzhiyun 	 * 4K leftovers?
1165*4882a593Smuzhiyun 	 */
1166*4882a593Smuzhiyun 	if (start < end)
1167*4882a593Smuzhiyun 		return __unmap_pmd_range(pud, pmd, start, end);
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	/*
1170*4882a593Smuzhiyun 	 * Try again to free the PMD page if haven't succeeded above.
1171*4882a593Smuzhiyun 	 */
1172*4882a593Smuzhiyun 	if (!pud_none(*pud))
1173*4882a593Smuzhiyun 		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
1174*4882a593Smuzhiyun 			pud_clear(pud);
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun 
unmap_pud_range(p4d_t * p4d,unsigned long start,unsigned long end)1177*4882a593Smuzhiyun static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun 	pud_t *pud = pud_offset(p4d, start);
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	/*
1182*4882a593Smuzhiyun 	 * Not on a GB page boundary?
1183*4882a593Smuzhiyun 	 */
1184*4882a593Smuzhiyun 	if (start & (PUD_SIZE - 1)) {
1185*4882a593Smuzhiyun 		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
1186*4882a593Smuzhiyun 		unsigned long pre_end	= min_t(unsigned long, end, next_page);
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 		unmap_pmd_range(pud, start, pre_end);
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 		start = pre_end;
1191*4882a593Smuzhiyun 		pud++;
1192*4882a593Smuzhiyun 	}
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	/*
1195*4882a593Smuzhiyun 	 * Try to unmap in 1G chunks?
1196*4882a593Smuzhiyun 	 */
1197*4882a593Smuzhiyun 	while (end - start >= PUD_SIZE) {
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 		if (pud_large(*pud))
1200*4882a593Smuzhiyun 			pud_clear(pud);
1201*4882a593Smuzhiyun 		else
1202*4882a593Smuzhiyun 			unmap_pmd_range(pud, start, start + PUD_SIZE);
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 		start += PUD_SIZE;
1205*4882a593Smuzhiyun 		pud++;
1206*4882a593Smuzhiyun 	}
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	/*
1209*4882a593Smuzhiyun 	 * 2M leftovers?
1210*4882a593Smuzhiyun 	 */
1211*4882a593Smuzhiyun 	if (start < end)
1212*4882a593Smuzhiyun 		unmap_pmd_range(pud, start, end);
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	/*
1215*4882a593Smuzhiyun 	 * No need to try to free the PUD page because we'll free it in
1216*4882a593Smuzhiyun 	 * populate_pgd's error path
1217*4882a593Smuzhiyun 	 */
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun 
alloc_pte_page(pmd_t * pmd)1220*4882a593Smuzhiyun static int alloc_pte_page(pmd_t *pmd)
1221*4882a593Smuzhiyun {
1222*4882a593Smuzhiyun 	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
1223*4882a593Smuzhiyun 	if (!pte)
1224*4882a593Smuzhiyun 		return -1;
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
1227*4882a593Smuzhiyun 	return 0;
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun 
alloc_pmd_page(pud_t * pud)1230*4882a593Smuzhiyun static int alloc_pmd_page(pud_t *pud)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun 	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
1233*4882a593Smuzhiyun 	if (!pmd)
1234*4882a593Smuzhiyun 		return -1;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
1237*4882a593Smuzhiyun 	return 0;
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun 
populate_pte(struct cpa_data * cpa,unsigned long start,unsigned long end,unsigned num_pages,pmd_t * pmd,pgprot_t pgprot)1240*4882a593Smuzhiyun static void populate_pte(struct cpa_data *cpa,
1241*4882a593Smuzhiyun 			 unsigned long start, unsigned long end,
1242*4882a593Smuzhiyun 			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun 	pte_t *pte;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	pte = pte_offset_kernel(pmd, start);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	pgprot = pgprot_clear_protnone_bits(pgprot);
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	while (num_pages-- && start < end) {
1251*4882a593Smuzhiyun 		set_pte(pte, pfn_pte(cpa->pfn, pgprot));
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 		start	 += PAGE_SIZE;
1254*4882a593Smuzhiyun 		cpa->pfn++;
1255*4882a593Smuzhiyun 		pte++;
1256*4882a593Smuzhiyun 	}
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun 
populate_pmd(struct cpa_data * cpa,unsigned long start,unsigned long end,unsigned num_pages,pud_t * pud,pgprot_t pgprot)1259*4882a593Smuzhiyun static long populate_pmd(struct cpa_data *cpa,
1260*4882a593Smuzhiyun 			 unsigned long start, unsigned long end,
1261*4882a593Smuzhiyun 			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun 	long cur_pages = 0;
1264*4882a593Smuzhiyun 	pmd_t *pmd;
1265*4882a593Smuzhiyun 	pgprot_t pmd_pgprot;
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	/*
1268*4882a593Smuzhiyun 	 * Not on a 2M boundary?
1269*4882a593Smuzhiyun 	 */
1270*4882a593Smuzhiyun 	if (start & (PMD_SIZE - 1)) {
1271*4882a593Smuzhiyun 		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
1272*4882a593Smuzhiyun 		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 		pre_end   = min_t(unsigned long, pre_end, next_page);
1275*4882a593Smuzhiyun 		cur_pages = (pre_end - start) >> PAGE_SHIFT;
1276*4882a593Smuzhiyun 		cur_pages = min_t(unsigned int, num_pages, cur_pages);
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 		/*
1279*4882a593Smuzhiyun 		 * Need a PTE page?
1280*4882a593Smuzhiyun 		 */
1281*4882a593Smuzhiyun 		pmd = pmd_offset(pud, start);
1282*4882a593Smuzhiyun 		if (pmd_none(*pmd))
1283*4882a593Smuzhiyun 			if (alloc_pte_page(pmd))
1284*4882a593Smuzhiyun 				return -1;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 		start = pre_end;
1289*4882a593Smuzhiyun 	}
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	/*
1292*4882a593Smuzhiyun 	 * We mapped them all?
1293*4882a593Smuzhiyun 	 */
1294*4882a593Smuzhiyun 	if (num_pages == cur_pages)
1295*4882a593Smuzhiyun 		return cur_pages;
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	pmd_pgprot = pgprot_4k_2_large(pgprot);
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	while (end - start >= PMD_SIZE) {
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 		/*
1302*4882a593Smuzhiyun 		 * We cannot use a 1G page so allocate a PMD page if needed.
1303*4882a593Smuzhiyun 		 */
1304*4882a593Smuzhiyun 		if (pud_none(*pud))
1305*4882a593Smuzhiyun 			if (alloc_pmd_page(pud))
1306*4882a593Smuzhiyun 				return -1;
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 		pmd = pmd_offset(pud, start);
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
1311*4882a593Smuzhiyun 					canon_pgprot(pmd_pgprot))));
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 		start	  += PMD_SIZE;
1314*4882a593Smuzhiyun 		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
1315*4882a593Smuzhiyun 		cur_pages += PMD_SIZE >> PAGE_SHIFT;
1316*4882a593Smuzhiyun 	}
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	/*
1319*4882a593Smuzhiyun 	 * Map trailing 4K pages.
1320*4882a593Smuzhiyun 	 */
1321*4882a593Smuzhiyun 	if (start < end) {
1322*4882a593Smuzhiyun 		pmd = pmd_offset(pud, start);
1323*4882a593Smuzhiyun 		if (pmd_none(*pmd))
1324*4882a593Smuzhiyun 			if (alloc_pte_page(pmd))
1325*4882a593Smuzhiyun 				return -1;
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 		populate_pte(cpa, start, end, num_pages - cur_pages,
1328*4882a593Smuzhiyun 			     pmd, pgprot);
1329*4882a593Smuzhiyun 	}
1330*4882a593Smuzhiyun 	return num_pages;
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun 
populate_pud(struct cpa_data * cpa,unsigned long start,p4d_t * p4d,pgprot_t pgprot)1333*4882a593Smuzhiyun static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
1334*4882a593Smuzhiyun 			pgprot_t pgprot)
1335*4882a593Smuzhiyun {
1336*4882a593Smuzhiyun 	pud_t *pud;
1337*4882a593Smuzhiyun 	unsigned long end;
1338*4882a593Smuzhiyun 	long cur_pages = 0;
1339*4882a593Smuzhiyun 	pgprot_t pud_pgprot;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	end = start + (cpa->numpages << PAGE_SHIFT);
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	/*
1344*4882a593Smuzhiyun 	 * Not on a Gb page boundary? => map everything up to it with
1345*4882a593Smuzhiyun 	 * smaller pages.
1346*4882a593Smuzhiyun 	 */
1347*4882a593Smuzhiyun 	if (start & (PUD_SIZE - 1)) {
1348*4882a593Smuzhiyun 		unsigned long pre_end;
1349*4882a593Smuzhiyun 		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 		pre_end   = min_t(unsigned long, end, next_page);
1352*4882a593Smuzhiyun 		cur_pages = (pre_end - start) >> PAGE_SHIFT;
1353*4882a593Smuzhiyun 		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 		pud = pud_offset(p4d, start);
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 		/*
1358*4882a593Smuzhiyun 		 * Need a PMD page?
1359*4882a593Smuzhiyun 		 */
1360*4882a593Smuzhiyun 		if (pud_none(*pud))
1361*4882a593Smuzhiyun 			if (alloc_pmd_page(pud))
1362*4882a593Smuzhiyun 				return -1;
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
1365*4882a593Smuzhiyun 					 pud, pgprot);
1366*4882a593Smuzhiyun 		if (cur_pages < 0)
1367*4882a593Smuzhiyun 			return cur_pages;
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 		start = pre_end;
1370*4882a593Smuzhiyun 	}
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	/* We mapped them all? */
1373*4882a593Smuzhiyun 	if (cpa->numpages == cur_pages)
1374*4882a593Smuzhiyun 		return cur_pages;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	pud = pud_offset(p4d, start);
1377*4882a593Smuzhiyun 	pud_pgprot = pgprot_4k_2_large(pgprot);
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	/*
1380*4882a593Smuzhiyun 	 * Map everything starting from the Gb boundary, possibly with 1G pages
1381*4882a593Smuzhiyun 	 */
1382*4882a593Smuzhiyun 	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
1383*4882a593Smuzhiyun 		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
1384*4882a593Smuzhiyun 				   canon_pgprot(pud_pgprot))));
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 		start	  += PUD_SIZE;
1387*4882a593Smuzhiyun 		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
1388*4882a593Smuzhiyun 		cur_pages += PUD_SIZE >> PAGE_SHIFT;
1389*4882a593Smuzhiyun 		pud++;
1390*4882a593Smuzhiyun 	}
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 	/* Map trailing leftover */
1393*4882a593Smuzhiyun 	if (start < end) {
1394*4882a593Smuzhiyun 		long tmp;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 		pud = pud_offset(p4d, start);
1397*4882a593Smuzhiyun 		if (pud_none(*pud))
1398*4882a593Smuzhiyun 			if (alloc_pmd_page(pud))
1399*4882a593Smuzhiyun 				return -1;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
1402*4882a593Smuzhiyun 				   pud, pgprot);
1403*4882a593Smuzhiyun 		if (tmp < 0)
1404*4882a593Smuzhiyun 			return cur_pages;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 		cur_pages += tmp;
1407*4882a593Smuzhiyun 	}
1408*4882a593Smuzhiyun 	return cur_pages;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun /*
1412*4882a593Smuzhiyun  * Restrictions for kernel page table do not necessarily apply when mapping in
1413*4882a593Smuzhiyun  * an alternate PGD.
1414*4882a593Smuzhiyun  */
populate_pgd(struct cpa_data * cpa,unsigned long addr)1415*4882a593Smuzhiyun static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
1416*4882a593Smuzhiyun {
1417*4882a593Smuzhiyun 	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
1418*4882a593Smuzhiyun 	pud_t *pud = NULL;	/* shut up gcc */
1419*4882a593Smuzhiyun 	p4d_t *p4d;
1420*4882a593Smuzhiyun 	pgd_t *pgd_entry;
1421*4882a593Smuzhiyun 	long ret;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	pgd_entry = cpa->pgd + pgd_index(addr);
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	if (pgd_none(*pgd_entry)) {
1426*4882a593Smuzhiyun 		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
1427*4882a593Smuzhiyun 		if (!p4d)
1428*4882a593Smuzhiyun 			return -1;
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 		set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
1431*4882a593Smuzhiyun 	}
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	/*
1434*4882a593Smuzhiyun 	 * Allocate a PUD page and hand it down for mapping.
1435*4882a593Smuzhiyun 	 */
1436*4882a593Smuzhiyun 	p4d = p4d_offset(pgd_entry, addr);
1437*4882a593Smuzhiyun 	if (p4d_none(*p4d)) {
1438*4882a593Smuzhiyun 		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
1439*4882a593Smuzhiyun 		if (!pud)
1440*4882a593Smuzhiyun 			return -1;
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
1443*4882a593Smuzhiyun 	}
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
1446*4882a593Smuzhiyun 	pgprot_val(pgprot) |=  pgprot_val(cpa->mask_set);
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	ret = populate_pud(cpa, addr, p4d, pgprot);
1449*4882a593Smuzhiyun 	if (ret < 0) {
1450*4882a593Smuzhiyun 		/*
1451*4882a593Smuzhiyun 		 * Leave the PUD page in place in case some other CPU or thread
1452*4882a593Smuzhiyun 		 * already found it, but remove any useless entries we just
1453*4882a593Smuzhiyun 		 * added to it.
1454*4882a593Smuzhiyun 		 */
1455*4882a593Smuzhiyun 		unmap_pud_range(p4d, addr,
1456*4882a593Smuzhiyun 				addr + (cpa->numpages << PAGE_SHIFT));
1457*4882a593Smuzhiyun 		return ret;
1458*4882a593Smuzhiyun 	}
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun 	cpa->numpages = ret;
1461*4882a593Smuzhiyun 	return 0;
1462*4882a593Smuzhiyun }
1463*4882a593Smuzhiyun 
__cpa_process_fault(struct cpa_data * cpa,unsigned long vaddr,int primary)1464*4882a593Smuzhiyun static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1465*4882a593Smuzhiyun 			       int primary)
1466*4882a593Smuzhiyun {
1467*4882a593Smuzhiyun 	if (cpa->pgd) {
1468*4882a593Smuzhiyun 		/*
1469*4882a593Smuzhiyun 		 * Right now, we only execute this code path when mapping
1470*4882a593Smuzhiyun 		 * the EFI virtual memory map regions, no other users
1471*4882a593Smuzhiyun 		 * provide a ->pgd value. This may change in the future.
1472*4882a593Smuzhiyun 		 */
1473*4882a593Smuzhiyun 		return populate_pgd(cpa, vaddr);
1474*4882a593Smuzhiyun 	}
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	/*
1477*4882a593Smuzhiyun 	 * Ignore all non primary paths.
1478*4882a593Smuzhiyun 	 */
1479*4882a593Smuzhiyun 	if (!primary) {
1480*4882a593Smuzhiyun 		cpa->numpages = 1;
1481*4882a593Smuzhiyun 		return 0;
1482*4882a593Smuzhiyun 	}
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	/*
1485*4882a593Smuzhiyun 	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
1486*4882a593Smuzhiyun 	 * to have holes.
1487*4882a593Smuzhiyun 	 * Also set numpages to '1' indicating that we processed cpa req for
1488*4882a593Smuzhiyun 	 * one virtual address page and its pfn. TBD: numpages can be set based
1489*4882a593Smuzhiyun 	 * on the initial value and the level returned by lookup_address().
1490*4882a593Smuzhiyun 	 */
1491*4882a593Smuzhiyun 	if (within(vaddr, PAGE_OFFSET,
1492*4882a593Smuzhiyun 		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
1493*4882a593Smuzhiyun 		cpa->numpages = 1;
1494*4882a593Smuzhiyun 		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1495*4882a593Smuzhiyun 		return 0;
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
1498*4882a593Smuzhiyun 		/* Faults in the highmap are OK, so do not warn: */
1499*4882a593Smuzhiyun 		return -EFAULT;
1500*4882a593Smuzhiyun 	} else {
1501*4882a593Smuzhiyun 		WARN(1, KERN_WARNING "CPA: called for zero pte. "
1502*4882a593Smuzhiyun 			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
1503*4882a593Smuzhiyun 			*cpa->vaddr);
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 		return -EFAULT;
1506*4882a593Smuzhiyun 	}
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun 
__change_page_attr(struct cpa_data * cpa,int primary)1509*4882a593Smuzhiyun static int __change_page_attr(struct cpa_data *cpa, int primary)
1510*4882a593Smuzhiyun {
1511*4882a593Smuzhiyun 	unsigned long address;
1512*4882a593Smuzhiyun 	int do_split, err;
1513*4882a593Smuzhiyun 	unsigned int level;
1514*4882a593Smuzhiyun 	pte_t *kpte, old_pte;
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	address = __cpa_addr(cpa, cpa->curpage);
1517*4882a593Smuzhiyun repeat:
1518*4882a593Smuzhiyun 	kpte = _lookup_address_cpa(cpa, address, &level);
1519*4882a593Smuzhiyun 	if (!kpte)
1520*4882a593Smuzhiyun 		return __cpa_process_fault(cpa, address, primary);
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	old_pte = *kpte;
1523*4882a593Smuzhiyun 	if (pte_none(old_pte))
1524*4882a593Smuzhiyun 		return __cpa_process_fault(cpa, address, primary);
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun 	if (level == PG_LEVEL_4K) {
1527*4882a593Smuzhiyun 		pte_t new_pte;
1528*4882a593Smuzhiyun 		pgprot_t new_prot = pte_pgprot(old_pte);
1529*4882a593Smuzhiyun 		unsigned long pfn = pte_pfn(old_pte);
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
1532*4882a593Smuzhiyun 		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 		cpa_inc_4k_install();
1535*4882a593Smuzhiyun 		/* Hand in lpsize = 0 to enforce the protection mechanism */
1536*4882a593Smuzhiyun 		new_prot = static_protections(new_prot, address, pfn, 1, 0,
1537*4882a593Smuzhiyun 					      CPA_PROTECT);
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 		new_prot = pgprot_clear_protnone_bits(new_prot);
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 		/*
1542*4882a593Smuzhiyun 		 * We need to keep the pfn from the existing PTE,
1543*4882a593Smuzhiyun 		 * after all we're only going to change it's attributes
1544*4882a593Smuzhiyun 		 * not the memory it points to
1545*4882a593Smuzhiyun 		 */
1546*4882a593Smuzhiyun 		new_pte = pfn_pte(pfn, new_prot);
1547*4882a593Smuzhiyun 		cpa->pfn = pfn;
1548*4882a593Smuzhiyun 		/*
1549*4882a593Smuzhiyun 		 * Do we really change anything ?
1550*4882a593Smuzhiyun 		 */
1551*4882a593Smuzhiyun 		if (pte_val(old_pte) != pte_val(new_pte)) {
1552*4882a593Smuzhiyun 			set_pte_atomic(kpte, new_pte);
1553*4882a593Smuzhiyun 			cpa->flags |= CPA_FLUSHTLB;
1554*4882a593Smuzhiyun 		}
1555*4882a593Smuzhiyun 		cpa->numpages = 1;
1556*4882a593Smuzhiyun 		return 0;
1557*4882a593Smuzhiyun 	}
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	/*
1560*4882a593Smuzhiyun 	 * Check, whether we can keep the large page intact
1561*4882a593Smuzhiyun 	 * and just change the pte:
1562*4882a593Smuzhiyun 	 */
1563*4882a593Smuzhiyun 	do_split = should_split_large_page(kpte, address, cpa);
1564*4882a593Smuzhiyun 	/*
1565*4882a593Smuzhiyun 	 * When the range fits into the existing large page,
1566*4882a593Smuzhiyun 	 * return. cp->numpages and cpa->tlbflush have been updated in
1567*4882a593Smuzhiyun 	 * try_large_page:
1568*4882a593Smuzhiyun 	 */
1569*4882a593Smuzhiyun 	if (do_split <= 0)
1570*4882a593Smuzhiyun 		return do_split;
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 	/*
1573*4882a593Smuzhiyun 	 * We have to split the large page:
1574*4882a593Smuzhiyun 	 */
1575*4882a593Smuzhiyun 	err = split_large_page(cpa, kpte, address);
1576*4882a593Smuzhiyun 	if (!err)
1577*4882a593Smuzhiyun 		goto repeat;
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	return err;
1580*4882a593Smuzhiyun }
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
1583*4882a593Smuzhiyun 
cpa_process_alias(struct cpa_data * cpa)1584*4882a593Smuzhiyun static int cpa_process_alias(struct cpa_data *cpa)
1585*4882a593Smuzhiyun {
1586*4882a593Smuzhiyun 	struct cpa_data alias_cpa;
1587*4882a593Smuzhiyun 	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
1588*4882a593Smuzhiyun 	unsigned long vaddr;
1589*4882a593Smuzhiyun 	int ret;
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
1592*4882a593Smuzhiyun 		return 0;
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	/*
1595*4882a593Smuzhiyun 	 * No need to redo, when the primary call touched the direct
1596*4882a593Smuzhiyun 	 * mapping already:
1597*4882a593Smuzhiyun 	 */
1598*4882a593Smuzhiyun 	vaddr = __cpa_addr(cpa, cpa->curpage);
1599*4882a593Smuzhiyun 	if (!(within(vaddr, PAGE_OFFSET,
1600*4882a593Smuzhiyun 		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 		alias_cpa = *cpa;
1603*4882a593Smuzhiyun 		alias_cpa.vaddr = &laddr;
1604*4882a593Smuzhiyun 		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1605*4882a593Smuzhiyun 		alias_cpa.curpage = 0;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 		cpa->force_flush_all = 1;
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 		ret = __change_page_attr_set_clr(&alias_cpa, 0);
1610*4882a593Smuzhiyun 		if (ret)
1611*4882a593Smuzhiyun 			return ret;
1612*4882a593Smuzhiyun 	}
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun #ifdef CONFIG_X86_64
1615*4882a593Smuzhiyun 	/*
1616*4882a593Smuzhiyun 	 * If the primary call didn't touch the high mapping already
1617*4882a593Smuzhiyun 	 * and the physical address is inside the kernel map, we need
1618*4882a593Smuzhiyun 	 * to touch the high mapped kernel as well:
1619*4882a593Smuzhiyun 	 */
1620*4882a593Smuzhiyun 	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1621*4882a593Smuzhiyun 	    __cpa_pfn_in_highmap(cpa->pfn)) {
1622*4882a593Smuzhiyun 		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1623*4882a593Smuzhiyun 					       __START_KERNEL_map - phys_base;
1624*4882a593Smuzhiyun 		alias_cpa = *cpa;
1625*4882a593Smuzhiyun 		alias_cpa.vaddr = &temp_cpa_vaddr;
1626*4882a593Smuzhiyun 		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1627*4882a593Smuzhiyun 		alias_cpa.curpage = 0;
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 		cpa->force_flush_all = 1;
1630*4882a593Smuzhiyun 		/*
1631*4882a593Smuzhiyun 		 * The high mapping range is imprecise, so ignore the
1632*4882a593Smuzhiyun 		 * return value.
1633*4882a593Smuzhiyun 		 */
1634*4882a593Smuzhiyun 		__change_page_attr_set_clr(&alias_cpa, 0);
1635*4882a593Smuzhiyun 	}
1636*4882a593Smuzhiyun #endif
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	return 0;
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun 
__change_page_attr_set_clr(struct cpa_data * cpa,int checkalias)1641*4882a593Smuzhiyun static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1642*4882a593Smuzhiyun {
1643*4882a593Smuzhiyun 	unsigned long numpages = cpa->numpages;
1644*4882a593Smuzhiyun 	unsigned long rempages = numpages;
1645*4882a593Smuzhiyun 	int ret = 0;
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 	while (rempages) {
1648*4882a593Smuzhiyun 		/*
1649*4882a593Smuzhiyun 		 * Store the remaining nr of pages for the large page
1650*4882a593Smuzhiyun 		 * preservation check.
1651*4882a593Smuzhiyun 		 */
1652*4882a593Smuzhiyun 		cpa->numpages = rempages;
1653*4882a593Smuzhiyun 		/* for array changes, we can't use large page */
1654*4882a593Smuzhiyun 		if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
1655*4882a593Smuzhiyun 			cpa->numpages = 1;
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 		if (!debug_pagealloc_enabled())
1658*4882a593Smuzhiyun 			spin_lock(&cpa_lock);
1659*4882a593Smuzhiyun 		ret = __change_page_attr(cpa, checkalias);
1660*4882a593Smuzhiyun 		if (!debug_pagealloc_enabled())
1661*4882a593Smuzhiyun 			spin_unlock(&cpa_lock);
1662*4882a593Smuzhiyun 		if (ret)
1663*4882a593Smuzhiyun 			goto out;
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 		if (checkalias) {
1666*4882a593Smuzhiyun 			ret = cpa_process_alias(cpa);
1667*4882a593Smuzhiyun 			if (ret)
1668*4882a593Smuzhiyun 				goto out;
1669*4882a593Smuzhiyun 		}
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun 		/*
1672*4882a593Smuzhiyun 		 * Adjust the number of pages with the result of the
1673*4882a593Smuzhiyun 		 * CPA operation. Either a large page has been
1674*4882a593Smuzhiyun 		 * preserved or a single page update happened.
1675*4882a593Smuzhiyun 		 */
1676*4882a593Smuzhiyun 		BUG_ON(cpa->numpages > rempages || !cpa->numpages);
1677*4882a593Smuzhiyun 		rempages -= cpa->numpages;
1678*4882a593Smuzhiyun 		cpa->curpage += cpa->numpages;
1679*4882a593Smuzhiyun 	}
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun out:
1682*4882a593Smuzhiyun 	/* Restore the original numpages */
1683*4882a593Smuzhiyun 	cpa->numpages = numpages;
1684*4882a593Smuzhiyun 	return ret;
1685*4882a593Smuzhiyun }
1686*4882a593Smuzhiyun 
change_page_attr_set_clr(unsigned long * addr,int numpages,pgprot_t mask_set,pgprot_t mask_clr,int force_split,int in_flag,struct page ** pages)1687*4882a593Smuzhiyun static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1688*4882a593Smuzhiyun 				    pgprot_t mask_set, pgprot_t mask_clr,
1689*4882a593Smuzhiyun 				    int force_split, int in_flag,
1690*4882a593Smuzhiyun 				    struct page **pages)
1691*4882a593Smuzhiyun {
1692*4882a593Smuzhiyun 	struct cpa_data cpa;
1693*4882a593Smuzhiyun 	int ret, cache, checkalias;
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 	memset(&cpa, 0, sizeof(cpa));
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	/*
1698*4882a593Smuzhiyun 	 * Check, if we are requested to set a not supported
1699*4882a593Smuzhiyun 	 * feature.  Clearing non-supported features is OK.
1700*4882a593Smuzhiyun 	 */
1701*4882a593Smuzhiyun 	mask_set = canon_pgprot(mask_set);
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
1704*4882a593Smuzhiyun 		return 0;
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	/* Ensure we are PAGE_SIZE aligned */
1707*4882a593Smuzhiyun 	if (in_flag & CPA_ARRAY) {
1708*4882a593Smuzhiyun 		int i;
1709*4882a593Smuzhiyun 		for (i = 0; i < numpages; i++) {
1710*4882a593Smuzhiyun 			if (addr[i] & ~PAGE_MASK) {
1711*4882a593Smuzhiyun 				addr[i] &= PAGE_MASK;
1712*4882a593Smuzhiyun 				WARN_ON_ONCE(1);
1713*4882a593Smuzhiyun 			}
1714*4882a593Smuzhiyun 		}
1715*4882a593Smuzhiyun 	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
1716*4882a593Smuzhiyun 		/*
1717*4882a593Smuzhiyun 		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
1718*4882a593Smuzhiyun 		 * No need to check in that case
1719*4882a593Smuzhiyun 		 */
1720*4882a593Smuzhiyun 		if (*addr & ~PAGE_MASK) {
1721*4882a593Smuzhiyun 			*addr &= PAGE_MASK;
1722*4882a593Smuzhiyun 			/*
1723*4882a593Smuzhiyun 			 * People should not be passing in unaligned addresses:
1724*4882a593Smuzhiyun 			 */
1725*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
1726*4882a593Smuzhiyun 		}
1727*4882a593Smuzhiyun 	}
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	/* Must avoid aliasing mappings in the highmem code */
1730*4882a593Smuzhiyun 	kmap_flush_unused();
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	vm_unmap_aliases();
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	cpa.vaddr = addr;
1735*4882a593Smuzhiyun 	cpa.pages = pages;
1736*4882a593Smuzhiyun 	cpa.numpages = numpages;
1737*4882a593Smuzhiyun 	cpa.mask_set = mask_set;
1738*4882a593Smuzhiyun 	cpa.mask_clr = mask_clr;
1739*4882a593Smuzhiyun 	cpa.flags = 0;
1740*4882a593Smuzhiyun 	cpa.curpage = 0;
1741*4882a593Smuzhiyun 	cpa.force_split = force_split;
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
1744*4882a593Smuzhiyun 		cpa.flags |= in_flag;
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 	/* No alias checking for _NX bit modifications */
1747*4882a593Smuzhiyun 	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
1748*4882a593Smuzhiyun 	/* Has caller explicitly disabled alias checking? */
1749*4882a593Smuzhiyun 	if (in_flag & CPA_NO_CHECK_ALIAS)
1750*4882a593Smuzhiyun 		checkalias = 0;
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 	ret = __change_page_attr_set_clr(&cpa, checkalias);
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	/*
1755*4882a593Smuzhiyun 	 * Check whether we really changed something:
1756*4882a593Smuzhiyun 	 */
1757*4882a593Smuzhiyun 	if (!(cpa.flags & CPA_FLUSHTLB))
1758*4882a593Smuzhiyun 		goto out;
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	/*
1761*4882a593Smuzhiyun 	 * No need to flush, when we did not set any of the caching
1762*4882a593Smuzhiyun 	 * attributes:
1763*4882a593Smuzhiyun 	 */
1764*4882a593Smuzhiyun 	cache = !!pgprot2cachemode(mask_set);
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	/*
1767*4882a593Smuzhiyun 	 * On error; flush everything to be sure.
1768*4882a593Smuzhiyun 	 */
1769*4882a593Smuzhiyun 	if (ret) {
1770*4882a593Smuzhiyun 		cpa_flush_all(cache);
1771*4882a593Smuzhiyun 		goto out;
1772*4882a593Smuzhiyun 	}
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 	cpa_flush(&cpa, cache);
1775*4882a593Smuzhiyun out:
1776*4882a593Smuzhiyun 	return ret;
1777*4882a593Smuzhiyun }
1778*4882a593Smuzhiyun 
change_page_attr_set(unsigned long * addr,int numpages,pgprot_t mask,int array)1779*4882a593Smuzhiyun static inline int change_page_attr_set(unsigned long *addr, int numpages,
1780*4882a593Smuzhiyun 				       pgprot_t mask, int array)
1781*4882a593Smuzhiyun {
1782*4882a593Smuzhiyun 	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
1783*4882a593Smuzhiyun 		(array ? CPA_ARRAY : 0), NULL);
1784*4882a593Smuzhiyun }
1785*4882a593Smuzhiyun 
change_page_attr_clear(unsigned long * addr,int numpages,pgprot_t mask,int array)1786*4882a593Smuzhiyun static inline int change_page_attr_clear(unsigned long *addr, int numpages,
1787*4882a593Smuzhiyun 					 pgprot_t mask, int array)
1788*4882a593Smuzhiyun {
1789*4882a593Smuzhiyun 	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
1790*4882a593Smuzhiyun 		(array ? CPA_ARRAY : 0), NULL);
1791*4882a593Smuzhiyun }
1792*4882a593Smuzhiyun 
cpa_set_pages_array(struct page ** pages,int numpages,pgprot_t mask)1793*4882a593Smuzhiyun static inline int cpa_set_pages_array(struct page **pages, int numpages,
1794*4882a593Smuzhiyun 				       pgprot_t mask)
1795*4882a593Smuzhiyun {
1796*4882a593Smuzhiyun 	return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
1797*4882a593Smuzhiyun 		CPA_PAGES_ARRAY, pages);
1798*4882a593Smuzhiyun }
1799*4882a593Smuzhiyun 
cpa_clear_pages_array(struct page ** pages,int numpages,pgprot_t mask)1800*4882a593Smuzhiyun static inline int cpa_clear_pages_array(struct page **pages, int numpages,
1801*4882a593Smuzhiyun 					 pgprot_t mask)
1802*4882a593Smuzhiyun {
1803*4882a593Smuzhiyun 	return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
1804*4882a593Smuzhiyun 		CPA_PAGES_ARRAY, pages);
1805*4882a593Smuzhiyun }
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun /*
1808*4882a593Smuzhiyun  * _set_memory_prot is an internal helper for callers that have been passed
1809*4882a593Smuzhiyun  * a pgprot_t value from upper layers and a reservation has already been taken.
1810*4882a593Smuzhiyun  * If you want to set the pgprot to a specific page protocol, use the
1811*4882a593Smuzhiyun  * set_memory_xx() functions.
1812*4882a593Smuzhiyun  */
__set_memory_prot(unsigned long addr,int numpages,pgprot_t prot)1813*4882a593Smuzhiyun int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot)
1814*4882a593Smuzhiyun {
1815*4882a593Smuzhiyun 	return change_page_attr_set_clr(&addr, numpages, prot,
1816*4882a593Smuzhiyun 					__pgprot(~pgprot_val(prot)), 0, 0,
1817*4882a593Smuzhiyun 					NULL);
1818*4882a593Smuzhiyun }
1819*4882a593Smuzhiyun 
_set_memory_uc(unsigned long addr,int numpages)1820*4882a593Smuzhiyun int _set_memory_uc(unsigned long addr, int numpages)
1821*4882a593Smuzhiyun {
1822*4882a593Smuzhiyun 	/*
1823*4882a593Smuzhiyun 	 * for now UC MINUS. see comments in ioremap()
1824*4882a593Smuzhiyun 	 * If you really need strong UC use ioremap_uc(), but note
1825*4882a593Smuzhiyun 	 * that you cannot override IO areas with set_memory_*() as
1826*4882a593Smuzhiyun 	 * these helpers cannot work with IO memory.
1827*4882a593Smuzhiyun 	 */
1828*4882a593Smuzhiyun 	return change_page_attr_set(&addr, numpages,
1829*4882a593Smuzhiyun 				    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1830*4882a593Smuzhiyun 				    0);
1831*4882a593Smuzhiyun }
1832*4882a593Smuzhiyun 
set_memory_uc(unsigned long addr,int numpages)1833*4882a593Smuzhiyun int set_memory_uc(unsigned long addr, int numpages)
1834*4882a593Smuzhiyun {
1835*4882a593Smuzhiyun 	int ret;
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun 	/*
1838*4882a593Smuzhiyun 	 * for now UC MINUS. see comments in ioremap()
1839*4882a593Smuzhiyun 	 */
1840*4882a593Smuzhiyun 	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1841*4882a593Smuzhiyun 			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
1842*4882a593Smuzhiyun 	if (ret)
1843*4882a593Smuzhiyun 		goto out_err;
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun 	ret = _set_memory_uc(addr, numpages);
1846*4882a593Smuzhiyun 	if (ret)
1847*4882a593Smuzhiyun 		goto out_free;
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 	return 0;
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun out_free:
1852*4882a593Smuzhiyun 	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1853*4882a593Smuzhiyun out_err:
1854*4882a593Smuzhiyun 	return ret;
1855*4882a593Smuzhiyun }
1856*4882a593Smuzhiyun EXPORT_SYMBOL(set_memory_uc);
1857*4882a593Smuzhiyun 
_set_memory_wc(unsigned long addr,int numpages)1858*4882a593Smuzhiyun int _set_memory_wc(unsigned long addr, int numpages)
1859*4882a593Smuzhiyun {
1860*4882a593Smuzhiyun 	int ret;
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	ret = change_page_attr_set(&addr, numpages,
1863*4882a593Smuzhiyun 				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1864*4882a593Smuzhiyun 				   0);
1865*4882a593Smuzhiyun 	if (!ret) {
1866*4882a593Smuzhiyun 		ret = change_page_attr_set_clr(&addr, numpages,
1867*4882a593Smuzhiyun 					       cachemode2pgprot(_PAGE_CACHE_MODE_WC),
1868*4882a593Smuzhiyun 					       __pgprot(_PAGE_CACHE_MASK),
1869*4882a593Smuzhiyun 					       0, 0, NULL);
1870*4882a593Smuzhiyun 	}
1871*4882a593Smuzhiyun 	return ret;
1872*4882a593Smuzhiyun }
1873*4882a593Smuzhiyun 
set_memory_wc(unsigned long addr,int numpages)1874*4882a593Smuzhiyun int set_memory_wc(unsigned long addr, int numpages)
1875*4882a593Smuzhiyun {
1876*4882a593Smuzhiyun 	int ret;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1879*4882a593Smuzhiyun 		_PAGE_CACHE_MODE_WC, NULL);
1880*4882a593Smuzhiyun 	if (ret)
1881*4882a593Smuzhiyun 		return ret;
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	ret = _set_memory_wc(addr, numpages);
1884*4882a593Smuzhiyun 	if (ret)
1885*4882a593Smuzhiyun 		memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	return ret;
1888*4882a593Smuzhiyun }
1889*4882a593Smuzhiyun EXPORT_SYMBOL(set_memory_wc);
1890*4882a593Smuzhiyun 
_set_memory_wt(unsigned long addr,int numpages)1891*4882a593Smuzhiyun int _set_memory_wt(unsigned long addr, int numpages)
1892*4882a593Smuzhiyun {
1893*4882a593Smuzhiyun 	return change_page_attr_set(&addr, numpages,
1894*4882a593Smuzhiyun 				    cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
1895*4882a593Smuzhiyun }
1896*4882a593Smuzhiyun 
_set_memory_wb(unsigned long addr,int numpages)1897*4882a593Smuzhiyun int _set_memory_wb(unsigned long addr, int numpages)
1898*4882a593Smuzhiyun {
1899*4882a593Smuzhiyun 	/* WB cache mode is hard wired to all cache attribute bits being 0 */
1900*4882a593Smuzhiyun 	return change_page_attr_clear(&addr, numpages,
1901*4882a593Smuzhiyun 				      __pgprot(_PAGE_CACHE_MASK), 0);
1902*4882a593Smuzhiyun }
1903*4882a593Smuzhiyun 
set_memory_wb(unsigned long addr,int numpages)1904*4882a593Smuzhiyun int set_memory_wb(unsigned long addr, int numpages)
1905*4882a593Smuzhiyun {
1906*4882a593Smuzhiyun 	int ret;
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	ret = _set_memory_wb(addr, numpages);
1909*4882a593Smuzhiyun 	if (ret)
1910*4882a593Smuzhiyun 		return ret;
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1913*4882a593Smuzhiyun 	return 0;
1914*4882a593Smuzhiyun }
1915*4882a593Smuzhiyun EXPORT_SYMBOL(set_memory_wb);
1916*4882a593Smuzhiyun 
set_memory_x(unsigned long addr,int numpages)1917*4882a593Smuzhiyun int set_memory_x(unsigned long addr, int numpages)
1918*4882a593Smuzhiyun {
1919*4882a593Smuzhiyun 	if (!(__supported_pte_mask & _PAGE_NX))
1920*4882a593Smuzhiyun 		return 0;
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
1923*4882a593Smuzhiyun }
1924*4882a593Smuzhiyun 
set_memory_nx(unsigned long addr,int numpages)1925*4882a593Smuzhiyun int set_memory_nx(unsigned long addr, int numpages)
1926*4882a593Smuzhiyun {
1927*4882a593Smuzhiyun 	if (!(__supported_pte_mask & _PAGE_NX))
1928*4882a593Smuzhiyun 		return 0;
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
1931*4882a593Smuzhiyun }
1932*4882a593Smuzhiyun 
set_memory_ro(unsigned long addr,int numpages)1933*4882a593Smuzhiyun int set_memory_ro(unsigned long addr, int numpages)
1934*4882a593Smuzhiyun {
1935*4882a593Smuzhiyun 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
1936*4882a593Smuzhiyun }
1937*4882a593Smuzhiyun 
set_memory_rw(unsigned long addr,int numpages)1938*4882a593Smuzhiyun int set_memory_rw(unsigned long addr, int numpages)
1939*4882a593Smuzhiyun {
1940*4882a593Smuzhiyun 	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
1941*4882a593Smuzhiyun }
1942*4882a593Smuzhiyun 
set_memory_np(unsigned long addr,int numpages)1943*4882a593Smuzhiyun int set_memory_np(unsigned long addr, int numpages)
1944*4882a593Smuzhiyun {
1945*4882a593Smuzhiyun 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun 
set_memory_np_noalias(unsigned long addr,int numpages)1948*4882a593Smuzhiyun int set_memory_np_noalias(unsigned long addr, int numpages)
1949*4882a593Smuzhiyun {
1950*4882a593Smuzhiyun 	int cpa_flags = CPA_NO_CHECK_ALIAS;
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
1953*4882a593Smuzhiyun 					__pgprot(_PAGE_PRESENT), 0,
1954*4882a593Smuzhiyun 					cpa_flags, NULL);
1955*4882a593Smuzhiyun }
1956*4882a593Smuzhiyun 
set_memory_4k(unsigned long addr,int numpages)1957*4882a593Smuzhiyun int set_memory_4k(unsigned long addr, int numpages)
1958*4882a593Smuzhiyun {
1959*4882a593Smuzhiyun 	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
1960*4882a593Smuzhiyun 					__pgprot(0), 1, 0, NULL);
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun 
set_memory_nonglobal(unsigned long addr,int numpages)1963*4882a593Smuzhiyun int set_memory_nonglobal(unsigned long addr, int numpages)
1964*4882a593Smuzhiyun {
1965*4882a593Smuzhiyun 	return change_page_attr_clear(&addr, numpages,
1966*4882a593Smuzhiyun 				      __pgprot(_PAGE_GLOBAL), 0);
1967*4882a593Smuzhiyun }
1968*4882a593Smuzhiyun 
set_memory_global(unsigned long addr,int numpages)1969*4882a593Smuzhiyun int set_memory_global(unsigned long addr, int numpages)
1970*4882a593Smuzhiyun {
1971*4882a593Smuzhiyun 	return change_page_attr_set(&addr, numpages,
1972*4882a593Smuzhiyun 				    __pgprot(_PAGE_GLOBAL), 0);
1973*4882a593Smuzhiyun }
1974*4882a593Smuzhiyun 
__set_memory_enc_dec(unsigned long addr,int numpages,bool enc)1975*4882a593Smuzhiyun static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
1976*4882a593Smuzhiyun {
1977*4882a593Smuzhiyun 	struct cpa_data cpa;
1978*4882a593Smuzhiyun 	int ret;
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 	/* Nothing to do if memory encryption is not active */
1981*4882a593Smuzhiyun 	if (!mem_encrypt_active())
1982*4882a593Smuzhiyun 		return 0;
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	/* Should not be working on unaligned addresses */
1985*4882a593Smuzhiyun 	if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
1986*4882a593Smuzhiyun 		addr &= PAGE_MASK;
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	memset(&cpa, 0, sizeof(cpa));
1989*4882a593Smuzhiyun 	cpa.vaddr = &addr;
1990*4882a593Smuzhiyun 	cpa.numpages = numpages;
1991*4882a593Smuzhiyun 	cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
1992*4882a593Smuzhiyun 	cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
1993*4882a593Smuzhiyun 	cpa.pgd = init_mm.pgd;
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 	/* Must avoid aliasing mappings in the highmem code */
1996*4882a593Smuzhiyun 	kmap_flush_unused();
1997*4882a593Smuzhiyun 	vm_unmap_aliases();
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun 	/*
2000*4882a593Smuzhiyun 	 * Before changing the encryption attribute, we need to flush caches.
2001*4882a593Smuzhiyun 	 */
2002*4882a593Smuzhiyun 	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	ret = __change_page_attr_set_clr(&cpa, 1);
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun 	/*
2007*4882a593Smuzhiyun 	 * After changing the encryption attribute, we need to flush TLBs again
2008*4882a593Smuzhiyun 	 * in case any speculative TLB caching occurred (but no need to flush
2009*4882a593Smuzhiyun 	 * caches again).  We could just use cpa_flush_all(), but in case TLB
2010*4882a593Smuzhiyun 	 * flushing gets optimized in the cpa_flush() path use the same logic
2011*4882a593Smuzhiyun 	 * as above.
2012*4882a593Smuzhiyun 	 */
2013*4882a593Smuzhiyun 	cpa_flush(&cpa, 0);
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 	return ret;
2016*4882a593Smuzhiyun }
2017*4882a593Smuzhiyun 
set_memory_encrypted(unsigned long addr,int numpages)2018*4882a593Smuzhiyun int set_memory_encrypted(unsigned long addr, int numpages)
2019*4882a593Smuzhiyun {
2020*4882a593Smuzhiyun 	return __set_memory_enc_dec(addr, numpages, true);
2021*4882a593Smuzhiyun }
2022*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(set_memory_encrypted);
2023*4882a593Smuzhiyun 
set_memory_decrypted(unsigned long addr,int numpages)2024*4882a593Smuzhiyun int set_memory_decrypted(unsigned long addr, int numpages)
2025*4882a593Smuzhiyun {
2026*4882a593Smuzhiyun 	return __set_memory_enc_dec(addr, numpages, false);
2027*4882a593Smuzhiyun }
2028*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(set_memory_decrypted);
2029*4882a593Smuzhiyun 
set_pages_uc(struct page * page,int numpages)2030*4882a593Smuzhiyun int set_pages_uc(struct page *page, int numpages)
2031*4882a593Smuzhiyun {
2032*4882a593Smuzhiyun 	unsigned long addr = (unsigned long)page_address(page);
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 	return set_memory_uc(addr, numpages);
2035*4882a593Smuzhiyun }
2036*4882a593Smuzhiyun EXPORT_SYMBOL(set_pages_uc);
2037*4882a593Smuzhiyun 
_set_pages_array(struct page ** pages,int numpages,enum page_cache_mode new_type)2038*4882a593Smuzhiyun static int _set_pages_array(struct page **pages, int numpages,
2039*4882a593Smuzhiyun 		enum page_cache_mode new_type)
2040*4882a593Smuzhiyun {
2041*4882a593Smuzhiyun 	unsigned long start;
2042*4882a593Smuzhiyun 	unsigned long end;
2043*4882a593Smuzhiyun 	enum page_cache_mode set_type;
2044*4882a593Smuzhiyun 	int i;
2045*4882a593Smuzhiyun 	int free_idx;
2046*4882a593Smuzhiyun 	int ret;
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 	for (i = 0; i < numpages; i++) {
2049*4882a593Smuzhiyun 		if (PageHighMem(pages[i]))
2050*4882a593Smuzhiyun 			continue;
2051*4882a593Smuzhiyun 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2052*4882a593Smuzhiyun 		end = start + PAGE_SIZE;
2053*4882a593Smuzhiyun 		if (memtype_reserve(start, end, new_type, NULL))
2054*4882a593Smuzhiyun 			goto err_out;
2055*4882a593Smuzhiyun 	}
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	/* If WC, set to UC- first and then WC */
2058*4882a593Smuzhiyun 	set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
2059*4882a593Smuzhiyun 				_PAGE_CACHE_MODE_UC_MINUS : new_type;
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	ret = cpa_set_pages_array(pages, numpages,
2062*4882a593Smuzhiyun 				  cachemode2pgprot(set_type));
2063*4882a593Smuzhiyun 	if (!ret && new_type == _PAGE_CACHE_MODE_WC)
2064*4882a593Smuzhiyun 		ret = change_page_attr_set_clr(NULL, numpages,
2065*4882a593Smuzhiyun 					       cachemode2pgprot(
2066*4882a593Smuzhiyun 						_PAGE_CACHE_MODE_WC),
2067*4882a593Smuzhiyun 					       __pgprot(_PAGE_CACHE_MASK),
2068*4882a593Smuzhiyun 					       0, CPA_PAGES_ARRAY, pages);
2069*4882a593Smuzhiyun 	if (ret)
2070*4882a593Smuzhiyun 		goto err_out;
2071*4882a593Smuzhiyun 	return 0; /* Success */
2072*4882a593Smuzhiyun err_out:
2073*4882a593Smuzhiyun 	free_idx = i;
2074*4882a593Smuzhiyun 	for (i = 0; i < free_idx; i++) {
2075*4882a593Smuzhiyun 		if (PageHighMem(pages[i]))
2076*4882a593Smuzhiyun 			continue;
2077*4882a593Smuzhiyun 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2078*4882a593Smuzhiyun 		end = start + PAGE_SIZE;
2079*4882a593Smuzhiyun 		memtype_free(start, end);
2080*4882a593Smuzhiyun 	}
2081*4882a593Smuzhiyun 	return -EINVAL;
2082*4882a593Smuzhiyun }
2083*4882a593Smuzhiyun 
set_pages_array_uc(struct page ** pages,int numpages)2084*4882a593Smuzhiyun int set_pages_array_uc(struct page **pages, int numpages)
2085*4882a593Smuzhiyun {
2086*4882a593Smuzhiyun 	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
2087*4882a593Smuzhiyun }
2088*4882a593Smuzhiyun EXPORT_SYMBOL(set_pages_array_uc);
2089*4882a593Smuzhiyun 
set_pages_array_wc(struct page ** pages,int numpages)2090*4882a593Smuzhiyun int set_pages_array_wc(struct page **pages, int numpages)
2091*4882a593Smuzhiyun {
2092*4882a593Smuzhiyun 	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun EXPORT_SYMBOL(set_pages_array_wc);
2095*4882a593Smuzhiyun 
set_pages_array_wt(struct page ** pages,int numpages)2096*4882a593Smuzhiyun int set_pages_array_wt(struct page **pages, int numpages)
2097*4882a593Smuzhiyun {
2098*4882a593Smuzhiyun 	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT);
2099*4882a593Smuzhiyun }
2100*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(set_pages_array_wt);
2101*4882a593Smuzhiyun 
set_pages_wb(struct page * page,int numpages)2102*4882a593Smuzhiyun int set_pages_wb(struct page *page, int numpages)
2103*4882a593Smuzhiyun {
2104*4882a593Smuzhiyun 	unsigned long addr = (unsigned long)page_address(page);
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	return set_memory_wb(addr, numpages);
2107*4882a593Smuzhiyun }
2108*4882a593Smuzhiyun EXPORT_SYMBOL(set_pages_wb);
2109*4882a593Smuzhiyun 
set_pages_array_wb(struct page ** pages,int numpages)2110*4882a593Smuzhiyun int set_pages_array_wb(struct page **pages, int numpages)
2111*4882a593Smuzhiyun {
2112*4882a593Smuzhiyun 	int retval;
2113*4882a593Smuzhiyun 	unsigned long start;
2114*4882a593Smuzhiyun 	unsigned long end;
2115*4882a593Smuzhiyun 	int i;
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 	/* WB cache mode is hard wired to all cache attribute bits being 0 */
2118*4882a593Smuzhiyun 	retval = cpa_clear_pages_array(pages, numpages,
2119*4882a593Smuzhiyun 			__pgprot(_PAGE_CACHE_MASK));
2120*4882a593Smuzhiyun 	if (retval)
2121*4882a593Smuzhiyun 		return retval;
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	for (i = 0; i < numpages; i++) {
2124*4882a593Smuzhiyun 		if (PageHighMem(pages[i]))
2125*4882a593Smuzhiyun 			continue;
2126*4882a593Smuzhiyun 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2127*4882a593Smuzhiyun 		end = start + PAGE_SIZE;
2128*4882a593Smuzhiyun 		memtype_free(start, end);
2129*4882a593Smuzhiyun 	}
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	return 0;
2132*4882a593Smuzhiyun }
2133*4882a593Smuzhiyun EXPORT_SYMBOL(set_pages_array_wb);
2134*4882a593Smuzhiyun 
set_pages_ro(struct page * page,int numpages)2135*4882a593Smuzhiyun int set_pages_ro(struct page *page, int numpages)
2136*4882a593Smuzhiyun {
2137*4882a593Smuzhiyun 	unsigned long addr = (unsigned long)page_address(page);
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	return set_memory_ro(addr, numpages);
2140*4882a593Smuzhiyun }
2141*4882a593Smuzhiyun 
set_pages_rw(struct page * page,int numpages)2142*4882a593Smuzhiyun int set_pages_rw(struct page *page, int numpages)
2143*4882a593Smuzhiyun {
2144*4882a593Smuzhiyun 	unsigned long addr = (unsigned long)page_address(page);
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	return set_memory_rw(addr, numpages);
2147*4882a593Smuzhiyun }
2148*4882a593Smuzhiyun 
__set_pages_p(struct page * page,int numpages)2149*4882a593Smuzhiyun static int __set_pages_p(struct page *page, int numpages)
2150*4882a593Smuzhiyun {
2151*4882a593Smuzhiyun 	unsigned long tempaddr = (unsigned long) page_address(page);
2152*4882a593Smuzhiyun 	struct cpa_data cpa = { .vaddr = &tempaddr,
2153*4882a593Smuzhiyun 				.pgd = NULL,
2154*4882a593Smuzhiyun 				.numpages = numpages,
2155*4882a593Smuzhiyun 				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2156*4882a593Smuzhiyun 				.mask_clr = __pgprot(0),
2157*4882a593Smuzhiyun 				.flags = 0};
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun 	/*
2160*4882a593Smuzhiyun 	 * No alias checking needed for setting present flag. otherwise,
2161*4882a593Smuzhiyun 	 * we may need to break large pages for 64-bit kernel text
2162*4882a593Smuzhiyun 	 * mappings (this adds to complexity if we want to do this from
2163*4882a593Smuzhiyun 	 * atomic context especially). Let's keep it simple!
2164*4882a593Smuzhiyun 	 */
2165*4882a593Smuzhiyun 	return __change_page_attr_set_clr(&cpa, 0);
2166*4882a593Smuzhiyun }
2167*4882a593Smuzhiyun 
__set_pages_np(struct page * page,int numpages)2168*4882a593Smuzhiyun static int __set_pages_np(struct page *page, int numpages)
2169*4882a593Smuzhiyun {
2170*4882a593Smuzhiyun 	unsigned long tempaddr = (unsigned long) page_address(page);
2171*4882a593Smuzhiyun 	struct cpa_data cpa = { .vaddr = &tempaddr,
2172*4882a593Smuzhiyun 				.pgd = NULL,
2173*4882a593Smuzhiyun 				.numpages = numpages,
2174*4882a593Smuzhiyun 				.mask_set = __pgprot(0),
2175*4882a593Smuzhiyun 				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2176*4882a593Smuzhiyun 				.flags = 0};
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 	/*
2179*4882a593Smuzhiyun 	 * No alias checking needed for setting not present flag. otherwise,
2180*4882a593Smuzhiyun 	 * we may need to break large pages for 64-bit kernel text
2181*4882a593Smuzhiyun 	 * mappings (this adds to complexity if we want to do this from
2182*4882a593Smuzhiyun 	 * atomic context especially). Let's keep it simple!
2183*4882a593Smuzhiyun 	 */
2184*4882a593Smuzhiyun 	return __change_page_attr_set_clr(&cpa, 0);
2185*4882a593Smuzhiyun }
2186*4882a593Smuzhiyun 
set_direct_map_invalid_noflush(struct page * page)2187*4882a593Smuzhiyun int set_direct_map_invalid_noflush(struct page *page)
2188*4882a593Smuzhiyun {
2189*4882a593Smuzhiyun 	return __set_pages_np(page, 1);
2190*4882a593Smuzhiyun }
2191*4882a593Smuzhiyun 
set_direct_map_default_noflush(struct page * page)2192*4882a593Smuzhiyun int set_direct_map_default_noflush(struct page *page)
2193*4882a593Smuzhiyun {
2194*4882a593Smuzhiyun 	return __set_pages_p(page, 1);
2195*4882a593Smuzhiyun }
2196*4882a593Smuzhiyun 
__kernel_map_pages(struct page * page,int numpages,int enable)2197*4882a593Smuzhiyun void __kernel_map_pages(struct page *page, int numpages, int enable)
2198*4882a593Smuzhiyun {
2199*4882a593Smuzhiyun 	if (PageHighMem(page))
2200*4882a593Smuzhiyun 		return;
2201*4882a593Smuzhiyun 	if (!enable) {
2202*4882a593Smuzhiyun 		debug_check_no_locks_freed(page_address(page),
2203*4882a593Smuzhiyun 					   numpages * PAGE_SIZE);
2204*4882a593Smuzhiyun 	}
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 	/*
2207*4882a593Smuzhiyun 	 * The return value is ignored as the calls cannot fail.
2208*4882a593Smuzhiyun 	 * Large pages for identity mappings are not used at boot time
2209*4882a593Smuzhiyun 	 * and hence no memory allocations during large page split.
2210*4882a593Smuzhiyun 	 */
2211*4882a593Smuzhiyun 	if (enable)
2212*4882a593Smuzhiyun 		__set_pages_p(page, numpages);
2213*4882a593Smuzhiyun 	else
2214*4882a593Smuzhiyun 		__set_pages_np(page, numpages);
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	/*
2217*4882a593Smuzhiyun 	 * We should perform an IPI and flush all tlbs,
2218*4882a593Smuzhiyun 	 * but that can deadlock->flush only current cpu.
2219*4882a593Smuzhiyun 	 * Preemption needs to be disabled around __flush_tlb_all() due to
2220*4882a593Smuzhiyun 	 * CR3 reload in __native_flush_tlb().
2221*4882a593Smuzhiyun 	 */
2222*4882a593Smuzhiyun 	preempt_disable();
2223*4882a593Smuzhiyun 	__flush_tlb_all();
2224*4882a593Smuzhiyun 	preempt_enable();
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	arch_flush_lazy_mmu_mode();
2227*4882a593Smuzhiyun }
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun #ifdef CONFIG_HIBERNATION
kernel_page_present(struct page * page)2230*4882a593Smuzhiyun bool kernel_page_present(struct page *page)
2231*4882a593Smuzhiyun {
2232*4882a593Smuzhiyun 	unsigned int level;
2233*4882a593Smuzhiyun 	pte_t *pte;
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 	if (PageHighMem(page))
2236*4882a593Smuzhiyun 		return false;
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 	pte = lookup_address((unsigned long)page_address(page), &level);
2239*4882a593Smuzhiyun 	return (pte_val(*pte) & _PAGE_PRESENT);
2240*4882a593Smuzhiyun }
2241*4882a593Smuzhiyun #endif /* CONFIG_HIBERNATION */
2242*4882a593Smuzhiyun 
kernel_map_pages_in_pgd(pgd_t * pgd,u64 pfn,unsigned long address,unsigned numpages,unsigned long page_flags)2243*4882a593Smuzhiyun int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
2244*4882a593Smuzhiyun 				   unsigned numpages, unsigned long page_flags)
2245*4882a593Smuzhiyun {
2246*4882a593Smuzhiyun 	int retval = -EINVAL;
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	struct cpa_data cpa = {
2249*4882a593Smuzhiyun 		.vaddr = &address,
2250*4882a593Smuzhiyun 		.pfn = pfn,
2251*4882a593Smuzhiyun 		.pgd = pgd,
2252*4882a593Smuzhiyun 		.numpages = numpages,
2253*4882a593Smuzhiyun 		.mask_set = __pgprot(0),
2254*4882a593Smuzhiyun 		.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
2255*4882a593Smuzhiyun 		.flags = 0,
2256*4882a593Smuzhiyun 	};
2257*4882a593Smuzhiyun 
2258*4882a593Smuzhiyun 	WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 	if (!(__supported_pte_mask & _PAGE_NX))
2261*4882a593Smuzhiyun 		goto out;
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun 	if (!(page_flags & _PAGE_ENC))
2264*4882a593Smuzhiyun 		cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 	retval = __change_page_attr_set_clr(&cpa, 0);
2269*4882a593Smuzhiyun 	__flush_tlb_all();
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun out:
2272*4882a593Smuzhiyun 	return retval;
2273*4882a593Smuzhiyun }
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun /*
2276*4882a593Smuzhiyun  * __flush_tlb_all() flushes mappings only on current CPU and hence this
2277*4882a593Smuzhiyun  * function shouldn't be used in an SMP environment. Presently, it's used only
2278*4882a593Smuzhiyun  * during boot (way before smp_init()) by EFI subsystem and hence is ok.
2279*4882a593Smuzhiyun  */
kernel_unmap_pages_in_pgd(pgd_t * pgd,unsigned long address,unsigned long numpages)2280*4882a593Smuzhiyun int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
2281*4882a593Smuzhiyun 				     unsigned long numpages)
2282*4882a593Smuzhiyun {
2283*4882a593Smuzhiyun 	int retval;
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	/*
2286*4882a593Smuzhiyun 	 * The typical sequence for unmapping is to find a pte through
2287*4882a593Smuzhiyun 	 * lookup_address_in_pgd() (ideally, it should never return NULL because
2288*4882a593Smuzhiyun 	 * the address is already mapped) and change it's protections. As pfn is
2289*4882a593Smuzhiyun 	 * the *target* of a mapping, it's not useful while unmapping.
2290*4882a593Smuzhiyun 	 */
2291*4882a593Smuzhiyun 	struct cpa_data cpa = {
2292*4882a593Smuzhiyun 		.vaddr		= &address,
2293*4882a593Smuzhiyun 		.pfn		= 0,
2294*4882a593Smuzhiyun 		.pgd		= pgd,
2295*4882a593Smuzhiyun 		.numpages	= numpages,
2296*4882a593Smuzhiyun 		.mask_set	= __pgprot(0),
2297*4882a593Smuzhiyun 		.mask_clr	= __pgprot(_PAGE_PRESENT | _PAGE_RW),
2298*4882a593Smuzhiyun 		.flags		= 0,
2299*4882a593Smuzhiyun 	};
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun 	WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	retval = __change_page_attr_set_clr(&cpa, 0);
2304*4882a593Smuzhiyun 	__flush_tlb_all();
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun 	return retval;
2307*4882a593Smuzhiyun }
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun /*
2310*4882a593Smuzhiyun  * The testcases use internal knowledge of the implementation that shouldn't
2311*4882a593Smuzhiyun  * be exposed to the rest of the kernel. Include these directly here.
2312*4882a593Smuzhiyun  */
2313*4882a593Smuzhiyun #ifdef CONFIG_CPA_DEBUG
2314*4882a593Smuzhiyun #include "cpa-test.c"
2315*4882a593Smuzhiyun #endif
2316