// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

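	/*
	 * Walk the kernel page tables one level at a time (pgd -> p4d ->
	 * pud -> pmd -> pte) and bail out as soon as a level is absent
	 * or bad.
	 */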
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

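/*
 * Bit positions of VM_READ, VM_WRITE and VM_EXEC within vm_flags; the
 * preprocessor check below verifies that these stay in sync with
 * <linux/mm.h>.
 */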
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

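	/*
	 * Translate the execute and write bits of the ISR into the
	 * corresponding VM_EXEC/VM_WRITE positions so that the mask can be
	 * compared directly against vma->vm_flags below.
	 */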
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_lock (pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * Handle kprobes set on user-space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

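	/* Record whether this is a user-mode fault and whether it is a write. */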
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
	 *
	 * May find no vma, but could be that the last vm area is the
	 * register backing store that needs to expand upwards, in
	 * this case vma will be null, but prev_vma will be non-null
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

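	/*
	 * A read access is refused only if the vma is neither readable nor
	 * writable; reads from write-only mappings are tolerated.  Execute
	 * and write accesses must be covered by the mask built from the ISR
	 * above.
	 */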
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

  check_expansion:
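	/*
	 * Two ways to get here: either the address lies below a
	 * downward-growing stack vma, or it sits right at the end of
	 * prev_vma, an upward-growing area (the register backing store).
	 */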
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

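	/*
	 * If the faulting instruction has an exception-table fixup (e.g. a
	 * user-copy helper), ia64_done_with_exception() redirects execution
	 * to the fixup and we are done.
	 */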
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
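	/*
	 * Out of memory: kernel-mode faults fall back to the no_context
	 * path; for user-mode faults, let the OOM machinery decide.
	 */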
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}