/*
 * arch/microblaze/mm/fault.c
 *
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from "arch/ppc/mm/fault.c"
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>

static unsigned long pte_misses;	/* updated by do_page_fault() */
static unsigned long pte_errors;	/* updated by do_page_fault() */

/*
 * Check whether the instruction at regs->pc is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	if (get_user(inst, (unsigned int __user *)regs->pc))
		return 0;
	/* check for 1 in the rD field */
	if (((inst >> 21) & 0x1f) != 1)
		return 0;
	/* check for store opcodes */
	if ((inst & 0xd0000000) == 0xd0000000)
		return 1;
	return 0;
}
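
/*
 * A rough sketch of what the mask above matches (assuming the standard
 * MicroBlaze encoding, major opcode in bits 31:26): the store opcodes
 * sb/sh/sw (0x34-0x36) and sbi/shi/swi (0x3c-0x3e) all have the bits
 * 0b110100 set, which is exactly what (inst & 0xd0000000) == 0xd0000000
 * tests (along with a couple of reserved encodings).  For example,
 * "swi r1, r1, -4" should encode to 0xf821fffc: the rD field is 1 and
 * the opcode mask matches, so store_updates_sp() returns 1 for it.
 */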


/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from do_page_fault below and from some of the procedures
 * in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *fixup;
	/* MS: no context */
	/* Are we prepared to handle this fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

	/* kernel has accessed a bad area */
	die("kernel access of bad area", regs, sig);
}

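/*
 * The fixup path above is what lets get_user()/copy_from_user() survive a
 * bad user pointer: the uaccess code records each potentially-faulting
 * instruction together with a recovery address in the __ex_table section,
 * which search_exception_tables() consults.  A minimal sketch of the idea
 * (illustrative only, not the actual microblaze uaccess assembly):
 *
 *	1:	lwi	r3, r5, 0	// may fault on a bad user pointer
 *	.section __ex_table, "a"
 *		.word	1b, 2f		// faulting pc 1b -> resume at 2f
 *	.previous
 *	2:	...			// fixup: arrange to return -EFAULT
 */
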
/*
 * The error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int code = SEGV_MAPERR;
	int is_write = error_code & ESR_S;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	regs->ear = address;
	regs->esr = error_code;

	/* A kernel-mode access above TASK_SIZE is always an error */
	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
		pr_warn("kernel access above TASK_SIZE\n");
		_exception(SIGSEGV, regs, code, address);
	}

	/* for instr TLB miss and instr storage exception ESR_S is undefined */
	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
		is_write = 0;

	if (unlikely(faulthandler_disabled() || !mm)) {
		if (kernel_mode(regs))
			goto bad_area_nosemaphore;

		/*
		 * faulthandler_disabled() in user mode is really bad,
		 * as is current->mm == NULL.
		 */
		pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
			 mm);
		pr_emerg("r15 = %lx MSR = %lx\n",
			 regs->r15, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}
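
	/*
	 * Note: faulthandler_disabled() is true in atomic context and inside
	 * pagefault_disable() sections, where taking mmap_lock and sleeping
	 * in the fault path would be illegal.  Hitting that state from user
	 * mode (or with no mm) indicates corrupted task state, hence the
	 * die() above.
	 */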

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exception tables.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, we then validate
	 * the source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
			goto bad_area_nosemaphore;

retry:
		mmap_read_lock(mm);
	}
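
	/*
	 * The retry label sits inside the trylock-failure branch on purpose:
	 * when handle_mm_fault() returns VM_FAULT_RETRY further down, the
	 * core fault code has already dropped mmap_lock, so the retry path
	 * only needs to re-take the lock; the exception-table check need
	 * not be repeated.
	 */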

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(!is_write))
		goto bad_area;

	/*
	 * N.B. The ABI allows programs to access up to
	 * a few hundred bytes below the stack pointer (TBD).
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1. Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (unlikely(address + 0x100000 < vma->vm_end)) {

		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;

		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed, i.e.
		 * a store which also updates r1 (see store_updates_sp()
		 * above; the ppc original this was derived from checked
		 * stwu rs,n(r1) and stwux rs,r1,rb and their byte,
		 * halfword, float and double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->r1
		    && (kernel_mode(regs) || !store_updates_sp(regs)))
			goto bad_area;
	}
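
	/*
	 * Worked example of the checks above (addresses illustrative): with
	 * uregs->r1 == 0x7fff0000, a fault at 0x7ffef800 (2kB below r1)
	 * always allows expansion, covering signal-frame writes; a fault at
	 * 0x7ffe0000 (64kB below r1) allows it only when the faulting
	 * instruction is itself a store that moves r1 down to that address.
	 */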
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;

	/* a write */
	if (unlikely(is_write)) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	/* a read */
	} else {
		/* protection fault */
		if (unlikely(error_code & 0x08000000))
			goto bad_area;
		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
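
	/*
	 * Dispatch above, roughly: VM_FAULT_OOM means the allocator failed,
	 * VM_FAULT_SIGSEGV covers faults the core MM refuses outright (e.g.
	 * a guard-gap refusal), and VM_FAULT_SIGBUS covers e.g. an access
	 * beyond the end of a file-backed mapping; any other VM_FAULT_ERROR
	 * bit would be a core MM bug, hence BUG().
	 */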

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);

	/*
	 * keep track of tlb+htab misses that are good addrs but
	 * just need pte's created via handle_mm_fault()
	 * -- Cort
	 */
	pte_misses++;
	return;

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	pte_errors++;

	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
		return;
	}

	bad_page_fault(regs, address, SIGSEGV);
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}