// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/x86_64/ia32/ia32_signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
 *  2000-12-*   x86-64 compatibility mode signal handling by Andi Kleen
 */

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/ptrace.h>
#include <asm/ia32_unistd.h>
#include <asm/user32.h>
#include <uapi/asm/sigcontext.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/sigframe.h>
#include <asm/sighandling.h>
#include <asm/smap.h>

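/*
 * Reload the data segment selectors saved in the 32-bit sigcontext.
 * ORing in 0x03 forces RPL 3 so userspace cannot hand us a privileged
 * selector, and each segment register is rewritten only when the
 * requested value differs from the one currently loaded, since segment
 * loads are comparatively expensive.
 */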
static inline void reload_segments(struct sigcontext_32 *sc)
{
	unsigned int cur;

	savesegment(gs, cur);
	if ((sc->gs | 0x03) != cur)
		load_gs_index(sc->gs | 0x03);
	savesegment(fs, cur);
	if ((sc->fs | 0x03) != cur)
		loadsegment(fs, sc->fs | 0x03);
	savesegment(ds, cur);
	if ((sc->ds | 0x03) != cur)
		loadsegment(ds, sc->ds | 0x03);
	savesegment(es, cur);
	if ((sc->es | 0x03) != cur)
		loadsegment(es, sc->es | 0x03);
}

/*
 * Do a signal return; undo the signal stack.
 */
static int ia32_restore_sigcontext(struct pt_regs *regs,
				   struct sigcontext_32 __user *usc)
{
	struct sigcontext_32 sc;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (unlikely(copy_from_user(&sc, usc, sizeof(sc))))
		return -EFAULT;

	/* Get only the ia32 registers. */
	regs->bx = sc.bx;
	regs->cx = sc.cx;
	regs->dx = sc.dx;
	regs->si = sc.si;
	regs->di = sc.di;
	regs->bp = sc.bp;
	regs->ax = sc.ax;
	regs->sp = sc.sp;
	regs->ip = sc.ip;

	/* Get CS/SS and force CPL3 */
	regs->cs = sc.cs | 0x03;
	regs->ss = sc.ss | 0x03;

	regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
	/* disable syscall checks */
	regs->orig_ax = -1;

	/*
	 * Reload fs and gs if they have changed in the signal
	 * handler.  This does not handle long fs/gs base changes in
	 * the handler, but does not clobber them at least in the
	 * normal case.
	 */
	reload_segments(&sc);
	return fpu__restore_sig(compat_ptr(sc.fpstate), 1);
}

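/*
 * By the time sigreturn runs, the handler's ret has consumed the 4-byte
 * pretcode and the 32-bit sigreturn trampoline has popped the 4-byte
 * signal number, so the user stack pointer sits 8 bytes past the start
 * of the frame; hence the regs->sp - 8 below.
 */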
COMPAT_SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp - 8);
	sigset_t set;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || __get_user(((__u32 *)&set)[1], &frame->extramask[0]))
		goto badframe;

	set_current_blocked(&set);

	if (ia32_restore_sigcontext(regs, &frame->sc))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "32bit sigreturn");
	return 0;
}

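/*
 * For rt_sigreturn only the handler's ret has consumed the 4-byte
 * pretcode (the rt trampoline does not pop the signal number), so the
 * frame starts 4 bytes below the current user stack pointer.
 */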
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe_ia32 __user *frame;
	sigset_t set;

	frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;

	set_current_blocked(&set);

	if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "32bit rt sigreturn");
	return 0;
}

/*
 * Set up a signal frame.
 */

#define get_user_seg(seg)	({ unsigned int v; savesegment(seg, v); v; })

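/*
 * Fill in a 32-bit sigcontext from the current register state.  The
 * "unsafe_" naming means the caller must bracket this with
 * user_access_begin()/user_access_end(); any faulting unsafe_put_user()
 * branches to the Efault label.
 */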
static __always_inline int
__unsafe_setup_sigcontext32(struct sigcontext_32 __user *sc,
			    void __user *fpstate,
			    struct pt_regs *regs, unsigned int mask)
{
	unsafe_put_user(get_user_seg(gs), (unsigned int __user *)&sc->gs, Efault);
	unsafe_put_user(get_user_seg(fs), (unsigned int __user *)&sc->fs, Efault);
	unsafe_put_user(get_user_seg(ds), (unsigned int __user *)&sc->ds, Efault);
	unsafe_put_user(get_user_seg(es), (unsigned int __user *)&sc->es, Efault);

	unsafe_put_user(regs->di, &sc->di, Efault);
	unsafe_put_user(regs->si, &sc->si, Efault);
	unsafe_put_user(regs->bp, &sc->bp, Efault);
	unsafe_put_user(regs->sp, &sc->sp, Efault);
	unsafe_put_user(regs->bx, &sc->bx, Efault);
	unsafe_put_user(regs->dx, &sc->dx, Efault);
	unsafe_put_user(regs->cx, &sc->cx, Efault);
	unsafe_put_user(regs->ax, &sc->ax, Efault);
	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
	unsafe_put_user(regs->ip, &sc->ip, Efault);
	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);

	unsafe_put_user(ptr_to_compat(fpstate), &sc->fpstate, Efault);

	/* non-iBCS2 extensions.. */
	unsafe_put_user(mask, &sc->oldmask, Efault);
	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
	return 0;

Efault:
	return -EFAULT;
}

#define unsafe_put_sigcontext32(sc, fp, regs, set, label)		\
do {									\
	if (__unsafe_setup_sigcontext32(sc, fp, regs, set->sig[0]))	\
		goto label;						\
} while(0)

/*
 * Determine which stack to use..
 */
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
				 size_t frame_size,
				 void __user **fpstate)
{
	unsigned long sp, fx_aligned, math_size;

	/* Default to using normal stack */
	sp = regs->sp;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ksig->ka.sa.sa_flags & SA_ONSTACK)
		sp = sigsp(sp, ksig);
	/* This is the legacy signal stack switching. */
	else if (regs->ss != __USER32_DS &&
		 !(ksig->ka.sa.sa_flags & SA_RESTORER) &&
		 ksig->ka.sa.sa_restorer)
		sp = (unsigned long) ksig->ka.sa.sa_restorer;

	sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
	*fpstate = (struct _fpstate_32 __user *) sp;
	if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
				     math_size) < 0)
		return (void __user *) -1L;

	sp -= frame_size;
	/*
	 * Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
	 */
	sp = ((sp + 4) & -16ul) - 4;
	return (void __user *) sp;
}

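/*
 * Build a classic (non-RT) 32-bit signal frame on the user stack and
 * redirect the task to the handler.  All user-space stores are done
 * inside one user_access_begin()/user_access_end() section, so a single
 * Efault label covers any faulting access.
 */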
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs)
{
	struct sigframe_ia32 __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	/* copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u16 poplmovl;
		u32 val;
		u16 int80;
	} __attribute__((packed)) code = {
		0xb858,		 /* popl %eax ; movl $...,%eax */
		__NR_ia32_sigreturn,
		0x80cd,		/* int $0x80 */
	};

	frame = get_sigframe(ksig, regs, sizeof(*frame), &fp);

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = ksig->ka.sa.sa_restorer;
	} else {
		/* Return stub is in 32bit vsyscall page */
		if (current->mm->context.vdso)
			restorer = current->mm->context.vdso +
				vdso_image_32.sym___kernel_sigreturn;
		else
			restorer = &frame->retcode;
	}

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_sigcontext32(&frame->sc, fp, regs, set, Efault);
	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
	unsafe_put_user(ptr_to_compat(restorer), &frame->pretcode, Efault);
	/*
	 * These are actually not used anymore, but left because some
	 * gdb versions depend on them as a marker.
	 */
	unsafe_put_user(*((u64 *)&code), (u64 __user *)frame->retcode, Efault);
	user_access_end();

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = 0;
	regs->cx = 0;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}

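/*
 * Build a 32-bit RT signal frame: besides the sigcontext this carries a
 * compat siginfo and a ucontext (including alternate signal stack
 * state), and the handler receives pointers to both via %edx and %ecx.
 */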
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe_ia32 __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	/* unsafe_put_user optimizes that into a single 8 byte store */
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u8  pad;
	} __attribute__((packed)) code = {
		0xb8,
		__NR_ia32_rt_sigreturn,
		0x80cd,
		0,
	};

	frame = get_sigframe(ksig, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_user(ptr_to_compat(&frame->info), &frame->pinfo, Efault);
	unsafe_put_user(ptr_to_compat(&frame->uc), &frame->puc, Efault);

	/* Create the ucontext.  */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		unsafe_put_user(UC_FP_XSTATE, &frame->uc.uc_flags, Efault);
	else
		unsafe_put_user(0, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	else
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_rt_sigreturn;
	unsafe_put_user(ptr_to_compat(restorer), &frame->pretcode, Efault);

	/*
	 * Not actually used anymore, but left because some gdb
	 * versions need it.
	 */
	unsafe_put_user(*((u64 *)&code), (u64 __user *)frame->retcode, Efault);
	unsafe_put_sigcontext32(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_user(*(__u64 *)set, (__u64 *)&frame->uc.uc_sigmask, Efault);
	user_access_end();

	if (__copy_siginfo_to_user32(&frame->info, &ksig->info))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}