/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 *
 * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
 * Copyright 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright 1999 Hewlett Packard Co.
 *
 */

#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/perf_event.h>

#include <asm/traps.h>

/* Bit masks used to decode the faulting memory access instruction */
#define bit22set(x)		(x & 0x00000200)
#define bits23_25set(x)		(x & 0x000001c0)
#define isGraphicsFlushRead(x)	((x & 0xfc003fdf) == 0x04001a80)
				/* extended opcode is 0x6a */

#define BITSSET		0x1c0	/* for identifying LDCW */
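
/*
 * Note on the masks above (a rough sketch of the derivation): PA-RISC
 * numbers instruction bits from the most-significant end, i.e. bit 0 is
 * the MSB of the 32-bit word, so
 *
 *	bit 22      -> 1 << (31 - 22)   == 0x00000200  (bit22set)
 *	bits 23..25 -> 0x7 << (31 - 25) == 0x000001c0  (bits23_25set, BITSSET)
 */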


int show_unhandled_signals = 1;

/*
 * parisc_acctyp(unsigned long code, unsigned int inst) --
 *	Given a PA-RISC memory access instruction, determine whether the
 *	instruction would perform a memory read or memory write
 *	operation.
 *
 *	This function assumes that the given instruction is a memory access
 *	instruction (i.e. you should really only call it if you know that
 *	the instruction has generated some sort of a memory access fault).
 *
 * Returns:
 *   VM_READ  if read operation
 *   VM_WRITE if write operation
 *   VM_EXEC  if execute operation
 */
static unsigned long
parisc_acctyp(unsigned long code, unsigned int inst)
{
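	/*
	 * Instruction-side faults: in the trap list later in this file,
	 * code 6 is the instruction TLB miss fault and code 16 the
	 * non-access ITLB miss fault, so both are execute accesses.
	 */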
	if (code == 6 || code == 16)
		return VM_EXEC;

	switch (inst & 0xf0000000) {
	case 0x40000000: /* load */
	case 0x50000000: /* new load */
		return VM_READ;

	case 0x60000000: /* store */
	case 0x70000000: /* new store */
		return VM_WRITE;

	case 0x20000000: /* coproc */
	case 0x30000000: /* coproc2 */
		if (bit22set(inst))
			return VM_WRITE;
		fallthrough;

	case 0x0: /* indexed/memory management */
		if (bit22set(inst)) {
			/*
			 * Check for the 'Graphics Flush Read' instruction.
			 * It resembles an FDC instruction, except for bits
			 * 20 and 21. Any combination other than zero will
			 * utilize the block mover functionality on some
			 * older PA-RISC platforms. The case where a block
			 * move is performed from VM to graphics IO space
			 * should be treated as a READ.
			 *
			 * The significance of bits 20,21 in the FDC
			 * instruction is:
			 *
			 *   00  Flush data cache (normal instruction behavior)
			 *   01  Graphics flush write  (IO space -> VM)
			 *   10  Graphics flush read   (VM -> IO space)
			 *   11  Graphics flush read/write (VM <-> IO space)
			 */
			if (isGraphicsFlushRead(inst))
				return VM_READ;
			return VM_WRITE;
		} else {
			/*
			 * Check for LDCWX and LDCWS (semaphore instructions).
			 * If bits 23 through 25 are all 1's it is one of
			 * the above two instructions and is a write.
			 *
			 * Note: With the limited bits we are looking at,
			 * this will also catch PROBEW and PROBEWI. However,
			 * these should never get in here because they don't
			 * generate exceptions of the type:
			 *   Data TLB miss fault/data page fault
			 *   Data memory protection trap
			 */
			if (bits23_25set(inst) == BITSSET)
				return VM_WRITE;
		}
		return VM_READ; /* Default */
	}
	return VM_READ; /* Default */
}

#undef bit22set
#undef bits23_25set
#undef isGraphicsFlushRead
#undef BITSSET
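
/*
 * Worked example (a sketch using the standard PA-RISC 1.1 opcode
 * assignments): an LDW has major opcode 0x12, so the top four bits of the
 * instruction word are 0x4 and parisc_acctyp() classifies it as VM_READ
 * via the "load" case above; an STW (major opcode 0x1a) has top four bits
 * 0x6 and is classified as VM_WRITE.
 */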


#if 0
/* This is the treewalk to find a vma which is the highest that has
 * a start < addr.  We're using find_vma_prev instead right now, but
 * we might want to use this at some point in the future.  Probably
 * not, but I want it committed to CVS so I don't lose it :-)
 */
	while (tree != vm_avl_empty) {
		if (tree->vm_start > addr) {
			tree = tree->vm_avl_left;
		} else {
			prev = tree;
			if (prev->vm_next == NULL)
				break;
			if (prev->vm_next->vm_start > addr)
				break;
			tree = tree->vm_avl_right;
		}
	}
#endif

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fix;

	fix = search_exception_tables(regs->iaoq[0]);
	if (fix) {
		/*
		 * Fix up get_user() and put_user().
		 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
		 * bit in the relative address of the fixup routine to indicate
		 * that %r8 should be loaded with -EFAULT to report a userspace
		 * access error.
		 */
		if (fix->fixup & 1) {
			regs->gr[8] = -EFAULT;

			/* zero target register for get_user() */
			if (parisc_acctyp(0, regs->iir) == VM_READ) {
				int treg = regs->iir & 0x1f;
				BUG_ON(treg == 0);
				regs->gr[treg] = 0;
			}
		}

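		/*
		 * The fixup field is a self-relative offset: adding it to
		 * its own address gives the absolute address of the fixup
		 * code.  Bit 0 is the -EFAULT flag handled above, so the
		 * low bits are masked off before jumping there.
		 */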
		regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
		regs->iaoq[0] &= ~3;
		/*
		 * NOTE: In some cases the faulting instruction
		 * may be in the delay slot of a branch. We
		 * don't want to take the branch, so we don't
		 * increment iaoq[1], instead we set it to be
		 * iaoq[0]+4, and clear the B bit in the PSW
		 */
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */

		return 1;
	}

	return 0;
}

/*
 * parisc hardware trap list
 *
 * Documented in section 3 "Addressing and Access Control" of the
 * "PA-RISC 1.1 Architecture and Instruction Set Reference Manual"
 * https://parisc.wiki.kernel.org/index.php/File:Pa11_acd.pdf
 *
 * For implementation see handle_interruption() in traps.c
 */
static const char * const trap_description[] = {
	[1]  = "High-priority machine check (HPMC)",
	[2]  = "Power failure interrupt",
	[3]  = "Recovery counter trap",
	[5]  = "Low-priority machine check",
	[6]  = "Instruction TLB miss fault",
	[7]  = "Instruction access rights / protection trap",
	[8]  = "Illegal instruction trap",
	[9]  = "Break instruction trap",
	[10] = "Privileged operation trap",
	[11] = "Privileged register trap",
	[12] = "Overflow trap",
	[13] = "Conditional trap",
	[14] = "FP Assist Exception trap",
	[15] = "Data TLB miss fault",
	[16] = "Non-access ITLB miss fault",
	[17] = "Non-access DTLB miss fault",
	[18] = "Data memory protection/unaligned access trap",
	[19] = "Data memory break trap",
	[20] = "TLB dirty bit trap",
	[21] = "Page reference trap",
	[22] = "Assist emulation trap",
	[25] = "Taken branch trap",
	[26] = "Data memory access rights trap",
	[27] = "Data memory protection ID trap",
	[28] = "Unaligned data reference trap",
};

const char *trap_name(unsigned long code)
{
	const char *t = NULL;

	if (code < ARRAY_SIZE(trap_description))
		t = trap_description[code];

	return t ? t : "Unknown trap";
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long code,
		unsigned long address, struct task_struct *tsk,
		struct vm_area_struct *vma)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	pr_warn("\n");
	pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
		tsk->comm, code, address);
	print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);

	pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
		vma ? ',' : '\n');

	if (vma)
		pr_cont(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
			vma->vm_start, vma->vm_end);

	show_regs(regs);
}

void do_page_fault(struct pt_regs *regs, unsigned long code,
		   unsigned long address)
{
	struct vm_area_struct *vma, *prev_vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long acc_type;
	vm_fault_t fault = 0;
	unsigned int flags;

	if (faulthandler_disabled())
		goto no_context;

	tsk = current;
	mm = tsk->mm;
	if (!mm)
		goto no_context;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	acc_type = parisc_acctyp(code, regs->iir);
	if (acc_type & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);
	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma || address < vma->vm_start)
		goto check_expansion;
	/*
	 * Ok, we have a good vm_area for this memory access. We still need to
	 * check the access permissions.
	 */

good_area:

	if ((vma->vm_flags & acc_type) != acc_type)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */

	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We hit a shared mapping outside of the file, or some
		 * other thing happened to us that made us unable to
		 * handle the page fault gracefully.
		 */
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
				  VM_FAULT_HWPOISON_LARGE))
			goto bad_area;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}
	mmap_read_unlock(mm);
	return;

check_expansion:
	vma = prev_vma;
	if (vma && (expand_stack(vma, address) == 0))
		goto good_area;

/*
 * Something tried to access memory that isn't in our memory map..
 */
bad_area:
	mmap_read_unlock(mm);

	if (user_mode(regs)) {
		int signo, si_code;

		switch (code) {
		case 15:	/* Data TLB miss fault/Data page fault */
			/* send SIGSEGV when outside of vma */
			if (!vma ||
			    address < vma->vm_start || address >= vma->vm_end) {
				signo = SIGSEGV;
				si_code = SEGV_MAPERR;
				break;
			}

			/* send SIGSEGV for wrong permissions */
			if ((vma->vm_flags & acc_type) != acc_type) {
				signo = SIGSEGV;
				si_code = SEGV_ACCERR;
				break;
			}

			/* probably address is outside of mapped file */
			fallthrough;
		case 17:	/* NA data TLB miss / page fault */
		case 18:	/* Unaligned access - PCXS only */
			signo = SIGBUS;
			si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
			break;
		case 16:	/* Non-access instruction TLB miss fault */
		case 26:	/* PCXL: Data memory access rights trap */
		default:
			signo = SIGSEGV;
			si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
			break;
		}
#ifdef CONFIG_MEMORY_FAILURE
		if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
			unsigned int lsb = 0;
			printk(KERN_ERR
			       "MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n",
			       tsk->comm, tsk->pid, address);
			/*
			 * Either small page or large page may be poisoned.
			 * In other words, VM_FAULT_HWPOISON_LARGE and
			 * VM_FAULT_HWPOISON are mutually exclusive.
			 */
			if (fault & VM_FAULT_HWPOISON_LARGE)
				lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
			else if (fault & VM_FAULT_HWPOISON)
				lsb = PAGE_SHIFT;

			force_sig_mceerr(BUS_MCEERR_AR, (void __user *) address,
					 lsb);
			return;
		}
#endif
		show_signal_msg(regs, code, address, tsk, vma);

		force_sig_fault(signo, si_code, (void __user *) address);
		return;
	}

no_context:

	if (!user_mode(regs) && fixup_exception(regs)) {
		return;
	}

	parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);

out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}

/* Handle non-access data TLB miss faults.
 *
 * For probe instructions, accesses to userspace are considered allowed
 * if they lie in a valid VMA and the access type matches.  We are not
 * allowed to handle MM faults here so there may be situations where an
 * actual access would fail even though a probe was successful.
 */
int
handle_nadtlb_fault(struct pt_regs *regs)
{
	unsigned long insn = regs->iir;
	int breg, treg, xreg, val = 0;
	struct vm_area_struct *vma, *prev_vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long address;
	unsigned long acc_type;

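	/*
	 * The 0x380 mask below picks out the extended-opcode bits that
	 * distinguish the instructions this handler cares about: cache
	 * flush/purge (FDC, PDC, FIC), PROBE and LPA.  (Field placement
	 * as used here; see the PA-RISC 1.1 reference for the full
	 * instruction encodings.)
	 */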
	switch (insn & 0x380) {
	case 0x280:
		/* FDC instruction */
		fallthrough;
	case 0x380:
		/* PDC and FIC instructions */
		if (printk_ratelimit()) {
			pr_warn("BUG: nullifying cache flush/purge instruction\n");
			show_regs(regs);
		}
		if (insn & 0x20) {
			/* Base modification */
			breg = (insn >> 21) & 0x1f;
			xreg = (insn >> 16) & 0x1f;
			if (breg && xreg)
				regs->gr[breg] += regs->gr[xreg];
		}
		regs->gr[0] |= PSW_N;
		return 1;

	case 0x180:
		/* PROBE instruction */
		treg = insn & 0x1f;
		if (regs->isr) {
			tsk = current;
			mm = tsk->mm;
			if (mm) {
				/* Search for VMA */
				address = regs->ior;
				mmap_read_lock(mm);
				vma = find_vma_prev(mm, address, &prev_vma);
				mmap_read_unlock(mm);

				/*
				 * Check if access to the VMA is okay.
				 * We don't allow for stack expansion.
				 */
				acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
				if (vma
				    && address >= vma->vm_start
				    && (vma->vm_flags & acc_type) == acc_type)
					val = 1;
			}
		}
		if (treg)
			regs->gr[treg] = val;
		regs->gr[0] |= PSW_N;
		return 1;

	case 0x300:
		/* LPA instruction */
		if (insn & 0x20) {
			/* Base modification */
			breg = (insn >> 21) & 0x1f;
			xreg = (insn >> 16) & 0x1f;
			if (breg && xreg)
				regs->gr[breg] += regs->gr[xreg];
		}
		treg = insn & 0x1f;
		if (treg)
			regs->gr[treg] = 0;
		regs->gr[0] |= PSW_N;
		return 1;

	default:
		break;
	}

	return 0;
}