// SPDX-License-Identifier: GPL-2.0
/*
 * Architecture-specific trap handling.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/delay.h>		/* for ssleep() */
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/fpswa.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/exception.h>
#include <asm/setup.h>

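/*
 * FPSWA (Floating-Point Software Assist) is EFI firmware that completes
 * floating-point operations the hardware cannot finish on its own.  The
 * boot loader is assumed to pass its entry point in ia64_boot_param->fpswa,
 * which trap_init() below turns into a kernel virtual address.
 */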
fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);

void __init
trap_init (void)
{
	if (ia64_boot_param->fpswa)
		/* FPSWA fixup: make the interface pointer a kernel virtual address: */
		fpswa_interface = __va(ia64_boot_param->fpswa);
}

int
die (const char *str, struct pt_regs *regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =	__SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
	static int die_counter;
	int cpu = get_cpu();

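	/*
	 * Serialize oopses across CPUs, but let the CPU that already holds
	 * the lock re-enter (an oops taken while handling an oops);
	 * lock_owner_depth below caps how much such recursion may print.
	 */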
	if (die.lock_owner != cpu) {
		console_verbose();
		spin_lock_irq(&die.lock);
		die.lock_owner = cpu;
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}
	put_cpu();

	if (++die.lock_owner_depth < 3) {
		printk("%s[%d]: %s %ld [%d]\n",
		       current->comm, task_pid_nr(current), str, err, ++die_counter);
		if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
		    != NOTIFY_STOP)
			show_regs(regs);
		else
			regs = NULL;
	} else
		printk(KERN_ERR "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die.lock);

	if (!regs)
		return 1;

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(SIGSEGV);
	return 0;
}

int
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		return die(str, regs, err);
	return 0;
}

void
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
	int sig, code;

	switch (break_num) {
	      case 0: /* unknown error (used by GCC for __builtin_abort()) */
		if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
				== NOTIFY_STOP)
			return;
		if (die_if_kernel("bugcheck!", regs, break_num))
			return;
		sig = SIGILL; code = ILL_ILLOPC;
		break;

	      case 1: /* integer divide by zero */
		sig = SIGFPE; code = FPE_INTDIV;
		break;

	      case 2: /* integer overflow */
		sig = SIGFPE; code = FPE_INTOVF;
		break;

	      case 3: /* range check/bounds check */
		sig = SIGFPE; code = FPE_FLTSUB;
		break;

	      case 4: /* null pointer dereference */
		sig = SIGSEGV; code = SEGV_MAPERR;
		break;

	      case 5: /* misaligned data */
		sig = SIGSEGV; code = BUS_ADRALN;
		break;

	      case 6: /* decimal overflow */
		sig = SIGFPE; code = __FPE_DECOVF;
		break;

	      case 7: /* decimal divide by zero */
		sig = SIGFPE; code = __FPE_DECDIV;
		break;

	      case 8: /* packed decimal error */
		sig = SIGFPE; code = __FPE_DECERR;
		break;

	      case 9: /* invalid ASCII digit */
		sig = SIGFPE; code = __FPE_INVASC;
		break;

	      case 10: /* invalid decimal digit */
		sig = SIGFPE; code = __FPE_INVDEC;
		break;

	      case 11: /* paragraph stack overflow */
		sig = SIGSEGV; code = __SEGV_PSTKOVF;
		break;

	      case 0x3f000 ... 0x3ffff:	/* bundle-update in progress */
		sig = SIGILL; code = __ILL_BNDMOD;
		break;

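	      /*
	       * Anything else: a break from kernel mode dies unless its
	       * immediate falls in the 0x40000-0x100000 window; breaks that
	       * survive raise SIGILL for immediates below 0x80000 and are
	       * otherwise treated as debugger breakpoints (SIGTRAP).
	       */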
	      default:
		if ((break_num < 0x40000 || break_num > 0x100000)
		    && die_if_kernel("Bad break", regs, break_num))
			return;

		if (break_num < 0x80000) {
			sig = SIGILL; code = __ILL_BREAK;
		} else {
			if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
					== NOTIFY_STOP)
				return;
			sig = SIGTRAP; code = TRAP_BRKPT;
		}
	}
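	/*
	 * Deliver the signal against the faulting slot: cr_iip is the
	 * bundle address and psr.ri is the slot number within it.
	 */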
	force_sig_fault(sig, code,
			(void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
			break_num, 0 /* clear __ISR_VALID */, 0);
}

/*
 * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
 * and it doesn't own the fp-high register partition.  When this happens, we save the
 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
 * the fp-high partition of the current task (if necessary).  Note that the kernel has
 * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
 * care of clearing psr.dfh.
 */
static inline void
disabled_fph_fault (struct pt_regs *regs)
{
	struct ia64_psr *psr = ia64_psr(regs);

	/* first, grant user-level access to fph partition: */
	psr->dfh = 0;

	/*
	 * Make sure that no other task gets in on this processor
	 * while we're claiming the FPU
	 */
	preempt_disable();
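	/*
	 * On UP the fph partition may still hold another task's registers;
	 * flush them out to that task's thread.fph before claiming the
	 * partition for current.
	 */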
#ifndef CONFIG_SMP
	{
		struct task_struct *fpu_owner
			= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

		if (ia64_is_local_fpu_owner(current)) {
			preempt_enable_no_resched();
			return;
		}

		if (fpu_owner)
			ia64_flush_fph(fpu_owner);
	}
#endif /* !CONFIG_SMP */
	ia64_set_local_fpu_owner(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
		__ia64_load_fpu(current->thread.fph);
		psr->mfh = 0;
	} else {
		__ia64_init_fpu();
		/*
		 * Set mfh because the state in thread.fph does not match the state in
		 * the fph partition.
		 */
		psr->mfh = 1;
	}
	preempt_enable_no_resched();
}

static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
	    struct pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;

	if (!fpswa_interface)
		return -1;

	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * compute fp_state.  only FP registers f6 - f11 are used by the
	 * kernel, so set those bits in the mask and set the low volatile
	 * pointer to point to these registers.
	 */
	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
	/*
	 * unsigned long (*EFI_FPSWA) (
	 *      unsigned long    trap_type,
	 *	void             *Bundle,
	 *	unsigned long    *pipsr,
	 *	unsigned long    *pfsr,
	 *	unsigned long    *pisr,
	 *	unsigned long    *ppreds,
	 *	unsigned long    *pifs,
	 *	void             *fp_state);
	 */
	ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
					(unsigned long *) ipsr, (unsigned long *) fpsr,
					(unsigned long *) isr, (unsigned long *) pr,
					(unsigned long *) ifs, &fp_state);

	return ret.status;
}

struct fpu_swa_msg {
	unsigned long count;
	unsigned long time;
};
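/*
 * State for rate-limiting the "floating-point assist fault" warning:
 * cpulast throttles each CPU separately, while 'last' throttles globally
 * (its low 4 bits count messages, the upper bits form a sequence number).
 */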
static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
static struct fpu_swa_msg last __cacheline_aligned;


/*
 * Handle floating-point assist faults and traps.
 */
static int
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
	long exception, bundle[2];
	unsigned long fault_ip;

	fault_ip = regs->cr_iip;
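	/*
	 * An fp trap (unlike an fp fault) leaves cr_iip pointing past the
	 * excepting instruction; when that advance crossed into the next
	 * bundle (slot 0), step back one 16-byte bundle so we fetch the
	 * bundle that actually trapped.
	 */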
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;
	if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
		return -1;

	if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
		unsigned long count, current_jiffies = jiffies;
		struct fpu_swa_msg *cp = this_cpu_ptr(&cpulast);

		if (unlikely(current_jiffies > cp->time))
			cp->count = 0;
		if (unlikely(cp->count < 5)) {
			cp->count++;
			cp->time = current_jiffies + 5 * HZ;

			/* minimize races by grabbing a copy of count BEFORE checking last.time. */
			count = last.count;
			barrier();

			/*
			 * Lower 4 bits are used as a count. Upper bits are a sequence
			 * number that is updated when count is reset. The cmpxchg will
			 * fail if seqno has changed. This minimizes multiple CPUs
			 * resetting the count.
			 */
			if (current_jiffies > last.time)
				(void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));

			/* use fetchadd to atomically update the count */
			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
				last.time = current_jiffies + 5 * HZ;
				printk(KERN_WARNING
				       "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
				       current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
			}
		}
	}

	exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
			       &regs->cr_ifs, regs);
	if (fp_fault) {
		if (exception == 0) {
			/* emulation was successful */
			ia64_increment_ip(regs);
		} else if (exception == -1) {
			printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
			return -1;
		} else {
			/* is next instruction a trap? */
			int si_code;

			if (exception & 2) {
				ia64_increment_ip(regs);
			}
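			/*
			 * Map the ISR exception code onto a SIGFPE si_code;
			 * each mask below appears to cover the pair of ISR
			 * code bits that report the same IEEE exception.
			 */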
			si_code = FPE_FLTUNK;	/* default code */
			if (isr & 0x11) {
				si_code = FPE_FLTINV;
			} else if (isr & 0x22) {
				/* denormal operand gets the same si_code as underflow;
				 * see arch/i386/kernel/traps.c:math_error() */
				si_code = FPE_FLTUND;
			} else if (isr & 0x44) {
				si_code = FPE_FLTDIV;
			}
			force_sig_fault(SIGFPE, si_code,
					(void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
					0, __ISR_VALID, isr);
		}
	} else {
		if (exception == -1) {
			printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
			return -1;
		} else if (exception != 0) {
			/* raise exception */
			int si_code;

			si_code = FPE_FLTUNK;	/* default code */
			if (isr & 0x880) {
				si_code = FPE_FLTOVF;
			} else if (isr & 0x1100) {
				si_code = FPE_FLTUND;
			} else if (isr & 0x2200) {
				si_code = FPE_FLTRES;
			}
			force_sig_fault(SIGFPE, si_code,
					(void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
					0, __ISR_VALID, isr);
		}
	}
	return 0;
}

struct illegal_op_return {
	unsigned long fkt, arg1, arg2, arg3;
};

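/*
 * Note that the trap frame is passed by value (struct pt_regs regs): the
 * low-level stubs lay pt_regs out on the kernel stack, so &regs below
 * addresses the live frame rather than a copy.
 */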
struct illegal_op_return
ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
		       long arg4, long arg5, long arg6, long arg7,
		       struct pt_regs regs)
{
	struct illegal_op_return rv;
	char buf[128];

#ifdef CONFIG_IA64_BRL_EMU
	{
		extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);

		rv = ia64_emulate_brl(&regs, ec);
		if (rv.fkt != (unsigned long) -1)
			return rv;
	}
#endif

	sprintf(buf, "IA-64 Illegal operation fault");
	rv.fkt = 0;
	if (die_if_kernel(buf, &regs, 0))
		return rv;

	force_sig_fault(SIGILL, ILL_ILLOPC,
			(void __user *) (regs.cr_iip + ia64_psr(&regs)->ri),
			0, 0, 0);
	return rv;
}

void __kprobes
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
	    unsigned long iim, unsigned long itir, long arg5, long arg6,
	    long arg7, struct pt_regs regs)
{
	unsigned long code, error = isr, iip;
	char buf[128];
	int result, sig, si_code;
	static const char *reason[] = {
		"IA-64 Illegal Operation fault",
		"IA-64 Privileged Operation fault",
		"IA-64 Privileged Register fault",
		"IA-64 Reserved Register/Field fault",
		"Disabled Instruction Set Transition fault",
		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
	};

	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
		 * the lfetch.
		 */
		ia64_psr(&regs)->ed = 1;
		return;
	}

	iip = regs.cr_iip + ia64_psr(&regs)->ri;

	switch (vector) {
	      case 24: /* General Exception */
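		/* ISR bits 7:4 encode the subtype, used to index reason[] above. */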
		code = (isr >> 4) & 0xf;
		sprintf(buf, "General Exception: %s%s", reason[code],
			(code == 3) ? ((isr & (1UL << 37))
				       ? " (RSE access)" : " (data access)") : "");
		if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
			       current->comm, task_pid_nr(current),
			       regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
# endif
			return;
		}
		break;

	      case 25: /* Disabled FP-Register */
		if (isr & 2) {
			disabled_fph_fault(&regs);
			return;
		}
		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
		break;

	      case 26: /* NaT Consumption */
		if (user_mode(&regs)) {
			void __user *addr;

			if (((isr >> 4) & 0xf) == 2) {
				/* NaT page consumption */
				sig = SIGSEGV;
				code = SEGV_ACCERR;
				addr = (void __user *) ifa;
			} else {
				/* register NaT consumption */
				sig = SIGILL;
				code = ILL_ILLOPN;
				addr = (void __user *) (regs.cr_iip
							+ ia64_psr(&regs)->ri);
			}
			force_sig_fault(sig, code, addr,
					vector, __ISR_VALID, isr);
			return;
		} else if (ia64_done_with_exception(&regs))
			return;
		sprintf(buf, "NaT consumption");
		break;

	      case 31: /* Unsupported Data Reference */
		if (user_mode(&regs)) {
			force_sig_fault(SIGILL, ILL_ILLOPN, (void __user *) iip,
					vector, __ISR_VALID, isr);
			return;
		}
		sprintf(buf, "Unsupported data reference");
		break;

	      case 29: /* Debug */
	      case 35: /* Taken Branch Trap */
	      case 36: /* Single Step Trap */
		if (fsys_mode(current, &regs)) {
			extern char __kernel_syscall_via_break[];
			/*
			 * Got a trap in fsys-mode: Taken Branch Trap
			 * and Single Step trap need special handling;
			 * Debug trap is ignored (we disable it here
			 * and re-enable it in the lower-privilege trap).
			 */
			if (unlikely(vector == 29)) {
				set_thread_flag(TIF_DB_DISABLED);
				ia64_psr(&regs)->db = 0;
				ia64_psr(&regs)->lp = 1;
				return;
			}
			/* re-do the system call via break 0x100000: */
			regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
			ia64_psr(&regs)->ri = 0;
			ia64_psr(&regs)->cpl = 3;
			return;
		}
		switch (vector) {
		      default:
		      case 29:
			si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
			/*
			 * Erratum 10 (IFA may contain incorrect address) now has
			 * "NoFix" status.  There are no plans for fixing this.
			 */
			if (ia64_psr(&regs)->is == 0)
				ifa = regs.cr_iip;
#endif
			break;
		      case 35: si_code = TRAP_BRANCH; ifa = 0; break;
		      case 36: si_code = TRAP_TRACE; ifa = 0; break;
		}
		if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, si_code, SIGTRAP)
				== NOTIFY_STOP)
			return;
		force_sig_fault(SIGTRAP, si_code, (void __user *) ifa,
				0, __ISR_VALID, isr);
		return;

	      case 32: /* fp fault */
	      case 33: /* fp trap */
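		/*
		 * Let FPSWA emulate first; a negative result (emulation
		 * failed or no fpswa_interface) or an explicit
		 * IA64_THREAD_FPEMU_SIGFPE request falls back to SIGFPE.
		 */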
		result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
		if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
			force_sig_fault(SIGFPE, FPE_FLTINV, (void __user *) iip,
					0, __ISR_VALID, isr);
		}
		return;

	      case 34:
		if (isr & 0x2) {
			/* Lower-Privilege Transfer Trap */

			/* If we disabled debug traps during an fsyscall,
			 * re-enable them here.
			 */
			if (test_thread_flag(TIF_DB_DISABLED)) {
				clear_thread_flag(TIF_DB_DISABLED);
				ia64_psr(&regs)->db = 1;
			}

			/*
			 * Just clear PSR.lp and then return immediately:
			 * all the interesting work (e.g., signal delivery)
			 * is done in the kernel exit path.
			 */
			ia64_psr(&regs)->lp = 0;
			return;
		} else {
			/* Unimplemented Instr. Address Trap */
			if (user_mode(&regs)) {
				force_sig_fault(SIGILL, ILL_BADIADDR,
						(void __user *) iip,
						0, 0, 0);
				return;
			}
			sprintf(buf, "Unimplemented Instruction Address fault");
		}
		break;

	      case 45:
		printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
		       iip, ifa, isr);
		force_sig(SIGSEGV);
		return;

	      case 46:
		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
		       iip, ifa, isr, iim);
		force_sig(SIGSEGV);
		return;

	      case 47:
		sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
		break;

	      default:
		sprintf(buf, "Fault %lu", vector);
		break;
	}
	if (!die_if_kernel(buf, &regs, error))
		force_sig(SIGILL);
}