// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC fault.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/sched/signal.h>
#include <linux/perf_event.h>

#include <linux/uaccess.h>
#include <asm/siginfo.h>
#include <asm/signal.h>

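/*
 * TLB geometry helpers: TLB_OFFSET() gives the set index of an address in a
 * direct-mapped, 64-entry TLB way (presumably matching the DTLB/ITLB
 * configuration this port targets).
 */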
#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))

unsigned long pte_misses;	/* updated by do_page_fault() */
unsigned long pte_errors;	/* updated by do_page_fault() */

/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
 *            - also look into include/asm-or32/mmu_context.h
 */
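
/*
 * Active page directory for each CPU; kept as a plain array, presumably so
 * the low-level TLB miss handlers can load it without walking task/mm
 * structures.
 */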
volatile pgd_t *current_pgd[NR_CPUS];

extern void die(char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * Bad accesses are reported by signalling the offending task or, for
 * unfixable kernel faults, by dying; the handler itself returns void.
 */

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
                              unsigned long vector, int write_acc)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int si_code;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        tsk = current;

        /*
         * We fault-in kernel-space virtual memory on-demand.  The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case.  We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * NOTE2: This is done so that, when updating the vmalloc
         * mappings we don't have to walk all processes' pgdirs and
         * add the high mappings all at once.  Instead we do it as they
         * are used.  However, vmalloc'ed page entries have the PAGE_GLOBAL
         * bit set so sometimes the TLB can use a lingering entry.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was not a protection error.
         */

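        /*
         * Vector 0x300 is the data page-fault exception and 0x400 the
         * instruction page-fault exception; kernel-mode faults on vmalloc
         * addresses arriving through any other vector (the TLB-miss paths)
         * only need the kernel page table synced from init_mm.
         */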
        if (address >= VMALLOC_START &&
            (vector != 0x300 && vector != 0x400) &&
            !user_mode(regs))
                goto vmalloc_fault;

        /* If exceptions were enabled, we can reenable them here */
        if (user_mode(regs)) {
                /* Exception was in userspace: reenable interrupts */
                local_irq_enable();
                flags |= FAULT_FLAG_USER;
        } else {
                /* If exception was in a syscall, then IRQ's may have
                 * been enabled or disabled.  If they were enabled,
                 * reenable them.
                 */
                if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
                        local_irq_enable();
        }

        mm = tsk->mm;
        si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */

        if (in_interrupt() || !mm)
                goto no_context;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);

        if (!vma)
                goto bad_area;

        if (vma->vm_start <= address)
                goto good_area;

        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if (user_mode(regs)) {
                /*
                 * Accessing the stack below usp is always a bug.
                 * We get page-aligned addresses so we can only check
                 * if we're within a page from usp, but that might be
                 * enough to catch brutal errors at least.
                 */
                if (address + PAGE_SIZE < regs->sp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */

good_area:
        si_code = SEGV_ACCERR;

        /* first do some preliminary protection checks */

        if (write_acc) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else {
                /* not present */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        /* are we trying to execute from a non-executable area? */
        if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */

        fault = handle_mm_fault(vma, address, flags, regs);

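        /*
         * If a signal (fatal for kernel mode) arrived while we were blocked
         * in handle_mm_fault(), the mmap lock has already been dropped on
         * the retry path; just return and let signal delivery run.
         */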
        if (fault_signal_pending(fault, regs))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                /* RGD modeled on Cris */
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        mmap_read_unlock(mm);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */

bad_area:
        mmap_read_unlock(mm);

bad_area_nosemaphore:

        /* User mode accesses just cause a SIGSEGV */

        if (user_mode(regs)) {
                force_sig_fault(SIGSEGV, si_code, (void __user *)address);
                return;
        }

no_context:

        /* Are we prepared to handle this kernel fault?
         *
         * (The kernel has valid exception-points in the source
         * when it accesses user-memory.  When it fails in one
         * of those points, we find it in a table and do a jump
         * to some fixup code that loads an appropriate error
         * code.)
         */

        {
                const struct exception_table_entry *entry;

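                /*
                 * Marker nop; l.nop with a non-zero operand is commonly used
                 * as a debug/trace marker on OpenRISC tools and simulators.
                 */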
                __asm__ __volatile__("l.nop 42");

                if ((entry = search_exception_tables(regs->pc)) != NULL) {
                        /* Adjust the instruction pointer in the stackframe */
                        regs->pc = entry->fixup;
                        return;
                }
        }

        /*
         * Oops.  The kernel tried to access some bad page.  We'll have to
         * terminate things with extreme prejudice.
         */

        if ((unsigned long)(address) < PAGE_SIZE)
                printk(KERN_ALERT
                       "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel access");
        printk(" at virtual address 0x%08lx\n", address);

        die("Oops", regs, write_acc);

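        /*
         * die() does not normally return; if it somehow does, make sure the
         * current task does not keep running.
         */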
        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */

out_of_memory:
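        /* Marker nops, presumably left in as debug/trace aids. */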
        __asm__ __volatile__("l.nop 42");
        __asm__ __volatile__("l.nop 1");

        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        mmap_read_unlock(mm);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Use current_pgd instead of tsk->active_mm->pgd
                 * since the latter might be unavailable if this
                 * code is executed in an inconveniently timed irq
                 * (like inside schedule() between switch_mm and
                 * switch_to...).
                 */

                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                p4d_t *p4d, *p4d_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                /*
                phx_warn("do_page_fault(): vmalloc_fault will not work, "
                         "since current_pgd assign a proper value somewhere\n"
                         "anyhow we don't need this at the moment\n");

                phx_mmu("vmalloc_fault");
                */
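                /*
                 * Index both this CPU's active pgd and the kernel reference
                 * pgd (init_mm.pgd) at the slot covering the faulting
                 * address.
                 */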
                pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
                pgd_k = init_mm.pgd + offset;

                /* Since we're two-level, we don't need to do both
                 * set_pgd and set_pmd (they do the same thing).  If
                 * we go three-level at some point, do the right thing
                 * with pgd_present and set_pgd here.
                 *
                 * Also, since the vmalloc area is global, we don't
                 * need to copy individual PTE's, it is enough to
                 * copy the pgd pointer into the pte page of the
                 * root task.  If that is there, we'll find our pte if
                 * it exists.
                 */

                p4d = p4d_offset(pgd, address);
                p4d_k = p4d_offset(pgd_k, address);
                if (!p4d_present(*p4d_k))
                        goto no_context;

                pud = pud_offset(p4d, address);
                pud_k = pud_offset(p4d_k, address);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);

                if (!pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;

                set_pmd(pmd, *pmd_k);

                /* Make sure the actual PTE exists as well to
                 * catch kernel vmalloc-area accesses to non-mapped
                 * addresses.  If we don't do this, this will just
                 * silently loop forever.
                 */

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;

                return;
        }
}