xref: /OK3568_Linux_fs/kernel/arch/sparc/mm/fault_32.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

int show_unhandled_signals = 1;

static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

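/*
 * lookup_fault() is reached from the low-level fault handling code when a
 * fault hits one of the kernel copy routines (note the _to_/_from_ macro
 * comments below).  search_extables_range() classifies the faulting region,
 * and the return value tells the caller whether the exception-table fixup
 * can recover the access; otherwise we fall through to unhandled_fault().
 */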
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr, 0);
}

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

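/*
 * Main page fault entry point, called from the low-level trap handler:
 * text_fault is set for instruction-access faults (the faulting address is
 * then taken from regs->pc) and write is set for faulting stores.
 */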
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned int __memset_start[];
			extern const unsigned int __memset_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	mmap_read_unlock(mm);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);

		p4d_k = p4d_offset(pgd_k, address);
		pud_k = pud_offset(p4d_k, address);
		pmd_k = pmd_offset(pud_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

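/*
 * Simplified fault path used by the register window fault helpers below.
 * It runs at trap time on behalf of the current task (siginfo is delivered
 * against tsk->thread.kregs) and, unlike do_sparc_fault(), does not use the
 * retry machinery of handle_mm_fault().
 */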
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags, NULL)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	mmap_read_unlock(mm);
	return;
bad_area:
	mmap_read_unlock(mm);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	mmap_read_unlock(mm);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

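/*
 * The SPARC ABI requires the stack pointer to be doubleword (8-byte)
 * aligned; a user %sp that violates this gets SIGILL.
 */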
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL);
}

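/*
 * The window_*_fault() helpers are invoked from the register window
 * overflow/underflow trap paths when the user stack is not mapped, so the
 * relevant stack pages get faulted in before the window is spilled or
 * filled.  A window save area is 16 words (0x40 bytes); probing both sp and
 * sp + 0x38 (the start of its last doubleword) covers both pages when the
 * save area straddles a page boundary.
 */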
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
455