// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

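/*
 * Memory type used for user-space aliases of the same page: bufferable
 * but uncached by default.  check_writebuffer_bugs() downgrades this to
 * fully uncached at boot if the write buffer turns out not to be
 * coherent between such aliases.
 */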
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
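/*
 * If the PTE at *ptep is present and not already using the shared
 * (uncacheable) memory type, flush the page out of the caches and
 * switch it over.  Returns non-zero when the PTE is present, i.e. when
 * this VMA really maps the page; make_coherent() counts these to decide
 * whether the faulting mapping itself must be adjusted as well.
 */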
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
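		/*
		 * Clean and invalidate any cached data for the page,
		 * including the outer (L2) cache, before rewriting the
		 * PTE's memory type and dropping the stale TLB entry.
		 */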
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

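	/*
	 * Walk the page tables for this address; if any level is missing
	 * or bad there is no PTE to adjust, so report no alias.
	 */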
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

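/*
 * Walk every other shared mapping of the same page within this mm and
 * convert any that currently map the page to the shared (uncacheable)
 * memory type; if at least one such alias was found, the faulting
 * mapping is converted too, so no two VIVT cache aliases can hold
 * conflicting data for the page.
 */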
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

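	/*
	 * Page offset of the faulting address within the mapped object;
	 * used below to find other VMAs that map the same page.
	 */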
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
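	/*
	 * If any other alias in this mm had the page mapped, make the
	 * faulting mapping uncacheable as well so all views stay coherent.
	 */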
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

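	/*
	 * Write back the kernel-side cache lines for the page if it has
	 * not yet been marked clean, then, for file-backed pages, either
	 * fix up user-space aliases (VIVT caches) or invalidate the
	 * I-cache for executable mappings.
	 */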
	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

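	/*
	 * p1 and p2 are two virtual aliases of the same physical word.
	 * Write 1 through one alias, 0 through the other, then read back
	 * through the first: a non-zero result means the stale value was
	 * observed, i.e. the write buffer mishandles aliased mappings.
	 */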
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

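/*
 * Boot-time probe: map one page at two virtual addresses with the
 * bufferable memory type and check whether a write through one alias is
 * visible through the other.  If not, shared aliased mappings fall back
 * to the fully uncached memory type.
 */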
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

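		/*
		 * Create two kernel mappings of the same page using the
		 * same bufferable memory type that shared user mappings
		 * get by default, so the test exercises that configuration.
		 */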
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}