1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Ptrace user space interface.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright IBM Corp. 1999, 2010
6*4882a593Smuzhiyun * Author(s): Denis Joseph Barrow
7*4882a593Smuzhiyun * Martin Schwidefsky (schwidefsky@de.ibm.com)
8*4882a593Smuzhiyun */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/sched.h>
12*4882a593Smuzhiyun #include <linux/sched/task_stack.h>
13*4882a593Smuzhiyun #include <linux/mm.h>
14*4882a593Smuzhiyun #include <linux/smp.h>
15*4882a593Smuzhiyun #include <linux/errno.h>
16*4882a593Smuzhiyun #include <linux/ptrace.h>
17*4882a593Smuzhiyun #include <linux/user.h>
18*4882a593Smuzhiyun #include <linux/security.h>
19*4882a593Smuzhiyun #include <linux/audit.h>
20*4882a593Smuzhiyun #include <linux/signal.h>
21*4882a593Smuzhiyun #include <linux/elf.h>
22*4882a593Smuzhiyun #include <linux/regset.h>
23*4882a593Smuzhiyun #include <linux/tracehook.h>
24*4882a593Smuzhiyun #include <linux/seccomp.h>
25*4882a593Smuzhiyun #include <linux/compat.h>
26*4882a593Smuzhiyun #include <trace/syscall.h>
27*4882a593Smuzhiyun #include <asm/page.h>
28*4882a593Smuzhiyun #include <linux/uaccess.h>
29*4882a593Smuzhiyun #include <asm/unistd.h>
30*4882a593Smuzhiyun #include <asm/switch_to.h>
31*4882a593Smuzhiyun #include <asm/runtime_instr.h>
32*4882a593Smuzhiyun #include <asm/facility.h>
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #include "entry.h"
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
37*4882a593Smuzhiyun #include "compat_ptrace.h"
38*4882a593Smuzhiyun #endif
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun #define CREATE_TRACE_POINTS
41*4882a593Smuzhiyun #include <trace/events/syscalls.h>
42*4882a593Smuzhiyun
/*
 * Synchronize the CPU control registers with the ptrace/debug state of
 * @task: transactional-execution enablement (cr0/cr2), guarded-storage
 * enablement (cr2), and the PER (program event recording) control set
 * (cr9-cr11) plus the PER enable bit in the PSW.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		/* Single stepping covers the entire address space. */
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No PER event requested: disable PER and skip cr9-11. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
114*4882a593Smuzhiyun
/*
 * Request per-instruction stepping: with TIF_BLOCK_STEP cleared,
 * update_cr_regs() selects PER_EVENT_IFETCH for this task.
 */
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
120*4882a593Smuzhiyun
/* Clear both stepping flags, turning off single/block stepping. */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
126*4882a593Smuzhiyun
/*
 * Request block (branch) stepping: with both flags set,
 * update_cr_regs() selects PER_EVENT_BRANCH for this task.
 */
void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun /*
134*4882a593Smuzhiyun * Called by kernel/ptrace.c when detaching..
135*4882a593Smuzhiyun *
136*4882a593Smuzhiyun * Clear all debugging related fields.
137*4882a593Smuzhiyun */
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields: the user supplied PER set, the
 * recorded PER event, the single-step flag, any pending PER trap and
 * the transactional-execution override flags.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun #define __ADDR_MASK 7
148*4882a593Smuzhiyun
/*
 * Read one word from the PER info part of the user area.
 * @addr is the byte offset into struct per_struct_kernel. While the
 * task is single stepped (TIF_SINGLE_STEP) the "active" PER set is
 * faked to show instruction-fetch events over the whole address space.
 * Offsets that do not match a readable field return 0.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	if (addr == offsetof(struct per_struct_kernel, cr9)) {
		/* Control bits of the active per set. */
		if (test_thread_flag(TIF_SINGLE_STEP))
			return PER_EVENT_IFETCH;
		return child->thread.per_user.control;
	}
	if (addr == offsetof(struct per_struct_kernel, cr10)) {
		/* Start address of the active per set. */
		if (test_thread_flag(TIF_SINGLE_STEP))
			return 0;
		return child->thread.per_user.start;
	}
	if (addr == offsetof(struct per_struct_kernel, cr11)) {
		/* End address of the active per set. */
		if (test_thread_flag(TIF_SINGLE_STEP))
			return -1UL;
		return child->thread.per_user.end;
	}
	if (addr == offsetof(struct per_struct_kernel, bits)) {
		/* Single-step bit. */
		if (test_thread_flag(TIF_SINGLE_STEP))
			return 1UL << (BITS_PER_LONG - 1);
		return 0;
	}
	if (addr == offsetof(struct per_struct_kernel, starting_addr))
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	if (addr == offsetof(struct per_struct_kernel, ending_addr))
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	if (addr == offsetof(struct per_struct_kernel, perc_atmid))
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	if (addr == offsetof(struct per_struct_kernel, address))
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	if (addr == offsetof(struct per_struct_kernel, access_id))
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun /*
191*4882a593Smuzhiyun * Read the word at offset addr from the user area of a process. The
192*4882a593Smuzhiyun * trouble here is that the information is littered over different
193*4882a593Smuzhiyun * locations. The process registers are found on the kernel stack,
194*4882a593Smuzhiyun * the floating point stuff and the trace settings are stored in
195*4882a593Smuzhiyun * the task structure. In addition the different structures in
196*4882a593Smuzhiyun * struct user contain pad bytes that should be read as zeroes.
197*4882a593Smuzhiyun * Lovely...
198*4882a593Smuzhiyun */
/*
 * Return the word at byte offset @addr of struct user for @child,
 * gathering it from the kernel stack (psw/gprs/orig_gpr2), the thread
 * structure (acrs, fpu) or the PER info, as described above.
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	/* Only used to compute field offsets inside struct user. */
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask: only user-visible and
			 * RI bits, with the canonical user bits forced on. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		/* fpc is 32 bit; present it in the upper half of the word. */
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of the 16-byte vxrs,
			 * hence the doubled offset. */
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun static int
peek_user(struct task_struct * child,addr_t addr,addr_t data)277*4882a593Smuzhiyun peek_user(struct task_struct *child, addr_t addr, addr_t data)
278*4882a593Smuzhiyun {
279*4882a593Smuzhiyun addr_t tmp, mask;
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun /*
282*4882a593Smuzhiyun * Stupid gdb peeks/pokes the access registers in 64 bit with
283*4882a593Smuzhiyun * an alignment of 4. Programmers from hell...
284*4882a593Smuzhiyun */
285*4882a593Smuzhiyun mask = __ADDR_MASK;
286*4882a593Smuzhiyun if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
287*4882a593Smuzhiyun addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
288*4882a593Smuzhiyun mask = 3;
289*4882a593Smuzhiyun if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
290*4882a593Smuzhiyun return -EIO;
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun tmp = __peek_user(child, addr);
293*4882a593Smuzhiyun return put_user(tmp, (addr_t __user *) data);
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun
/*
 * Write one word into the PER info part of the user area.
 *
 * There are only three fields in the per_info struct that the
 * debugger user can write to.
 * 1) cr9: the debugger wants to set a new PER event mask
 * 2) starting_addr: the debugger wants to set a new starting
 *    address to use with the PER event mask.
 * 3) ending_addr: the debugger wants to set a new ending
 *    address to use with the PER event mask.
 * The user specified PER event mask and the start and end
 * addresses are used only if single stepping is not in effect.
 * Writes to any other field in per_info are ignored.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	if (addr == offsetof(struct per_struct_kernel, cr9)) {
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	} else if (addr == offsetof(struct per_struct_kernel, starting_addr)) {
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	} else if (addr == offsetof(struct per_struct_kernel, ending_addr)) {
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
	}
}
324*4882a593Smuzhiyun
/*
 * The tracer rewrote gpr2 of a tracee stopped inside a system call:
 * patch the low half of int_code so the (possibly changed) system call
 * number is picked up again. Bails out silently if the instruction
 * length is implausible, the instruction cannot be read from the
 * tracee, or the tracee did not stop on an svc (opcode 0x0a).
 */
static void fixup_int_code(struct task_struct *child, addr_t data)
{
	struct pt_regs *regs = task_pt_regs(child);
	int ilc = regs->int_code >> 16;
	u16 insn;

	if (ilc > 6)
		return;

	/* Reuse ilc instead of recomputing regs->int_code >> 16. */
	if (ptrace_access_vm(child, regs->psw.addr - ilc,
			&insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
		return;

	/* double check that tracee stopped on svc instruction */
	if ((insn >> 8) != 0xa)
		return;

	regs->int_code = 0x20000 | (data & 0xffff);
}
344*4882a593Smuzhiyun /*
345*4882a593Smuzhiyun * Write a word to the user area of a process at location addr. This
346*4882a593Smuzhiyun * operation does have an additional problem compared to peek_user.
347*4882a593Smuzhiyun * Stores to the program status word and on the floating point
348*4882a593Smuzhiyun * control register needs to get checked for validity.
349*4882a593Smuzhiyun */
/*
 * Store @data at byte offset @addr of struct user for @child,
 * validating psw mask and fpc values as described above.
 * Returns 0 on success or -EINVAL for an invalid psw mask / fpc.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	/* Only used to compute field offsets inside struct user. */
	struct user *dummy = NULL;
	addr_t offset;


	if (addr < (addr_t) &dummy->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}

		/* A write to gpr2 while stopped in a system call changes
		 * the system call number; keep int_code in sync. */
		if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
			addr == offsetof(struct user, regs.gprs[2]))
			fixup_int_code(child, data);
		*(addr_t *)((addr_t) &regs->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of the 16-byte vxrs,
			 * hence the doubled offset. */
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}
443*4882a593Smuzhiyun
/*
 * PTRACE_POKEUSR: validate the offset and write @data into the user
 * area of @child. Returns -EIO for a misaligned or out-of-range
 * offset, otherwise whatever __poke_user() returns.
 */
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t align_mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	if (addr >= offsetof(struct user, regs.acrs) &&
	    addr < offsetof(struct user, regs.orig_gpr2))
		align_mask = 3;
	else
		align_mask = __ADDR_MASK;
	if ((addr & align_mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
461*4882a593Smuzhiyun
arch_ptrace(struct task_struct * child,long request,unsigned long addr,unsigned long data)462*4882a593Smuzhiyun long arch_ptrace(struct task_struct *child, long request,
463*4882a593Smuzhiyun unsigned long addr, unsigned long data)
464*4882a593Smuzhiyun {
465*4882a593Smuzhiyun ptrace_area parea;
466*4882a593Smuzhiyun int copied, ret;
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun switch (request) {
469*4882a593Smuzhiyun case PTRACE_PEEKUSR:
470*4882a593Smuzhiyun /* read the word at location addr in the USER area. */
471*4882a593Smuzhiyun return peek_user(child, addr, data);
472*4882a593Smuzhiyun
473*4882a593Smuzhiyun case PTRACE_POKEUSR:
474*4882a593Smuzhiyun /* write the word at location addr in the USER area */
475*4882a593Smuzhiyun return poke_user(child, addr, data);
476*4882a593Smuzhiyun
477*4882a593Smuzhiyun case PTRACE_PEEKUSR_AREA:
478*4882a593Smuzhiyun case PTRACE_POKEUSR_AREA:
479*4882a593Smuzhiyun if (copy_from_user(&parea, (void __force __user *) addr,
480*4882a593Smuzhiyun sizeof(parea)))
481*4882a593Smuzhiyun return -EFAULT;
482*4882a593Smuzhiyun addr = parea.kernel_addr;
483*4882a593Smuzhiyun data = parea.process_addr;
484*4882a593Smuzhiyun copied = 0;
485*4882a593Smuzhiyun while (copied < parea.len) {
486*4882a593Smuzhiyun if (request == PTRACE_PEEKUSR_AREA)
487*4882a593Smuzhiyun ret = peek_user(child, addr, data);
488*4882a593Smuzhiyun else {
489*4882a593Smuzhiyun addr_t utmp;
490*4882a593Smuzhiyun if (get_user(utmp,
491*4882a593Smuzhiyun (addr_t __force __user *) data))
492*4882a593Smuzhiyun return -EFAULT;
493*4882a593Smuzhiyun ret = poke_user(child, addr, utmp);
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun if (ret)
496*4882a593Smuzhiyun return ret;
497*4882a593Smuzhiyun addr += sizeof(unsigned long);
498*4882a593Smuzhiyun data += sizeof(unsigned long);
499*4882a593Smuzhiyun copied += sizeof(unsigned long);
500*4882a593Smuzhiyun }
501*4882a593Smuzhiyun return 0;
502*4882a593Smuzhiyun case PTRACE_GET_LAST_BREAK:
503*4882a593Smuzhiyun put_user(child->thread.last_break,
504*4882a593Smuzhiyun (unsigned long __user *) data);
505*4882a593Smuzhiyun return 0;
506*4882a593Smuzhiyun case PTRACE_ENABLE_TE:
507*4882a593Smuzhiyun if (!MACHINE_HAS_TE)
508*4882a593Smuzhiyun return -EIO;
509*4882a593Smuzhiyun child->thread.per_flags &= ~PER_FLAG_NO_TE;
510*4882a593Smuzhiyun return 0;
511*4882a593Smuzhiyun case PTRACE_DISABLE_TE:
512*4882a593Smuzhiyun if (!MACHINE_HAS_TE)
513*4882a593Smuzhiyun return -EIO;
514*4882a593Smuzhiyun child->thread.per_flags |= PER_FLAG_NO_TE;
515*4882a593Smuzhiyun child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
516*4882a593Smuzhiyun return 0;
517*4882a593Smuzhiyun case PTRACE_TE_ABORT_RAND:
518*4882a593Smuzhiyun if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
519*4882a593Smuzhiyun return -EIO;
520*4882a593Smuzhiyun switch (data) {
521*4882a593Smuzhiyun case 0UL:
522*4882a593Smuzhiyun child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
523*4882a593Smuzhiyun break;
524*4882a593Smuzhiyun case 1UL:
525*4882a593Smuzhiyun child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
526*4882a593Smuzhiyun child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
527*4882a593Smuzhiyun break;
528*4882a593Smuzhiyun case 2UL:
529*4882a593Smuzhiyun child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
530*4882a593Smuzhiyun child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
531*4882a593Smuzhiyun break;
532*4882a593Smuzhiyun default:
533*4882a593Smuzhiyun return -EINVAL;
534*4882a593Smuzhiyun }
535*4882a593Smuzhiyun return 0;
536*4882a593Smuzhiyun default:
537*4882a593Smuzhiyun return ptrace_request(child, request, addr, data);
538*4882a593Smuzhiyun }
539*4882a593Smuzhiyun }
540*4882a593Smuzhiyun
541*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
542*4882a593Smuzhiyun /*
543*4882a593Smuzhiyun * Now the fun part starts... a 31 bit program running in the
544*4882a593Smuzhiyun * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
545*4882a593Smuzhiyun * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
546*4882a593Smuzhiyun * to handle, the difference to the 64 bit versions of the requests
547*4882a593Smuzhiyun * is that the access is done in multiples of 4 byte instead of
548*4882a593Smuzhiyun * 8 bytes (sizeof(unsigned long) on 31/64 bit).
549*4882a593Smuzhiyun * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
550*4882a593Smuzhiyun * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
551*4882a593Smuzhiyun * is a 31 bit program too, the content of struct user can be
552*4882a593Smuzhiyun * emulated. A 31 bit program peeking into the struct user of
553*4882a593Smuzhiyun * a 64 bit program is a no-no.
554*4882a593Smuzhiyun */
555*4882a593Smuzhiyun
556*4882a593Smuzhiyun /*
557*4882a593Smuzhiyun * Same as peek_user_per but for a 31 bit program.
558*4882a593Smuzhiyun */
/*
 * Same as peek_user_per but for a 31 bit program: @addr is an offset
 * into compat_per_struct_kernel and the result is a 32 bit word.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	/* Only used to compute field offsets. */
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
597*4882a593Smuzhiyun
598*4882a593Smuzhiyun /*
599*4882a593Smuzhiyun * Same as peek_user but for a 31 bit program.
600*4882a593Smuzhiyun */
__peek_user_compat(struct task_struct * child,addr_t addr)601*4882a593Smuzhiyun static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
602*4882a593Smuzhiyun {
603*4882a593Smuzhiyun struct compat_user *dummy32 = NULL;
604*4882a593Smuzhiyun addr_t offset;
605*4882a593Smuzhiyun __u32 tmp;
606*4882a593Smuzhiyun
607*4882a593Smuzhiyun if (addr < (addr_t) &dummy32->regs.acrs) {
608*4882a593Smuzhiyun struct pt_regs *regs = task_pt_regs(child);
609*4882a593Smuzhiyun /*
610*4882a593Smuzhiyun * psw and gprs are stored on the stack
611*4882a593Smuzhiyun */
612*4882a593Smuzhiyun if (addr == (addr_t) &dummy32->regs.psw.mask) {
613*4882a593Smuzhiyun /* Fake a 31 bit psw mask. */
614*4882a593Smuzhiyun tmp = (__u32)(regs->psw.mask >> 32);
615*4882a593Smuzhiyun tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
616*4882a593Smuzhiyun tmp |= PSW32_USER_BITS;
617*4882a593Smuzhiyun } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
618*4882a593Smuzhiyun /* Fake a 31 bit psw address. */
619*4882a593Smuzhiyun tmp = (__u32) regs->psw.addr |
620*4882a593Smuzhiyun (__u32)(regs->psw.mask & PSW_MASK_BA);
621*4882a593Smuzhiyun } else {
622*4882a593Smuzhiyun /* gpr 0-15 */
623*4882a593Smuzhiyun tmp = *(__u32 *)((addr_t) ®s->psw + addr*2 + 4);
624*4882a593Smuzhiyun }
625*4882a593Smuzhiyun } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
626*4882a593Smuzhiyun /*
627*4882a593Smuzhiyun * access registers are stored in the thread structure
628*4882a593Smuzhiyun */
629*4882a593Smuzhiyun offset = addr - (addr_t) &dummy32->regs.acrs;
630*4882a593Smuzhiyun tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
631*4882a593Smuzhiyun
632*4882a593Smuzhiyun } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
633*4882a593Smuzhiyun /*
634*4882a593Smuzhiyun * orig_gpr2 is stored on the kernel stack
635*4882a593Smuzhiyun */
636*4882a593Smuzhiyun tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
637*4882a593Smuzhiyun
638*4882a593Smuzhiyun } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
639*4882a593Smuzhiyun /*
640*4882a593Smuzhiyun * prevent reads of padding hole between
641*4882a593Smuzhiyun * orig_gpr2 and fp_regs on s390.
642*4882a593Smuzhiyun */
643*4882a593Smuzhiyun tmp = 0;
644*4882a593Smuzhiyun
645*4882a593Smuzhiyun } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
646*4882a593Smuzhiyun /*
647*4882a593Smuzhiyun * floating point control reg. is in the thread structure
648*4882a593Smuzhiyun */
649*4882a593Smuzhiyun tmp = child->thread.fpu.fpc;
650*4882a593Smuzhiyun
651*4882a593Smuzhiyun } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
652*4882a593Smuzhiyun /*
653*4882a593Smuzhiyun * floating point regs. are either in child->thread.fpu
654*4882a593Smuzhiyun * or the child->thread.fpu.vxrs array
655*4882a593Smuzhiyun */
656*4882a593Smuzhiyun offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
657*4882a593Smuzhiyun if (MACHINE_HAS_VX)
658*4882a593Smuzhiyun tmp = *(__u32 *)
659*4882a593Smuzhiyun ((addr_t) child->thread.fpu.vxrs + 2*offset);
660*4882a593Smuzhiyun else
661*4882a593Smuzhiyun tmp = *(__u32 *)
662*4882a593Smuzhiyun ((addr_t) child->thread.fpu.fprs + offset);
663*4882a593Smuzhiyun
664*4882a593Smuzhiyun } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
665*4882a593Smuzhiyun /*
666*4882a593Smuzhiyun * Handle access to the per_info structure.
667*4882a593Smuzhiyun */
668*4882a593Smuzhiyun addr -= (addr_t) &dummy32->regs.per_info;
669*4882a593Smuzhiyun tmp = __peek_user_per_compat(child, addr);
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun } else
672*4882a593Smuzhiyun tmp = 0;
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun return tmp;
675*4882a593Smuzhiyun }
676*4882a593Smuzhiyun
/*
 * Validate a 31-bit PTRACE_PEEKUSR request and copy the word to user
 * space.
 *
 * The offset must be word aligned and must lie inside the 31 bit
 * compat user area.  Checking against sizeof(struct user) (the 64 bit
 * layout, which is larger) would let a compat tracer read past the end
 * of the compat area, so check against struct compat_user exactly like
 * poke_user_compat() does.
 */
static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}
688*4882a593Smuzhiyun
689*4882a593Smuzhiyun /*
690*4882a593Smuzhiyun * Same as poke_user_per but for a 31 bit program.
691*4882a593Smuzhiyun */
__poke_user_per_compat(struct task_struct * child,addr_t addr,__u32 data)692*4882a593Smuzhiyun static inline void __poke_user_per_compat(struct task_struct *child,
693*4882a593Smuzhiyun addr_t addr, __u32 data)
694*4882a593Smuzhiyun {
695*4882a593Smuzhiyun struct compat_per_struct_kernel *dummy32 = NULL;
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun if (addr == (addr_t) &dummy32->cr9)
698*4882a593Smuzhiyun /* PER event mask of the user specified per set. */
699*4882a593Smuzhiyun child->thread.per_user.control =
700*4882a593Smuzhiyun data & (PER_EVENT_MASK | PER_CONTROL_MASK);
701*4882a593Smuzhiyun else if (addr == (addr_t) &dummy32->starting_addr)
702*4882a593Smuzhiyun /* Starting address of the user specified per set. */
703*4882a593Smuzhiyun child->thread.per_user.start = data;
704*4882a593Smuzhiyun else if (addr == (addr_t) &dummy32->ending_addr)
705*4882a593Smuzhiyun /* Ending address of the user specified per set. */
706*4882a593Smuzhiyun child->thread.per_user.end = data;
707*4882a593Smuzhiyun }
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun /*
710*4882a593Smuzhiyun * Same as poke_user but for a 31 bit program.
711*4882a593Smuzhiyun */
__poke_user_compat(struct task_struct * child,addr_t addr,addr_t data)712*4882a593Smuzhiyun static int __poke_user_compat(struct task_struct *child,
713*4882a593Smuzhiyun addr_t addr, addr_t data)
714*4882a593Smuzhiyun {
715*4882a593Smuzhiyun struct compat_user *dummy32 = NULL;
716*4882a593Smuzhiyun __u32 tmp = (__u32) data;
717*4882a593Smuzhiyun addr_t offset;
718*4882a593Smuzhiyun
719*4882a593Smuzhiyun if (addr < (addr_t) &dummy32->regs.acrs) {
720*4882a593Smuzhiyun struct pt_regs *regs = task_pt_regs(child);
721*4882a593Smuzhiyun /*
722*4882a593Smuzhiyun * psw, gprs, acrs and orig_gpr2 are stored on the stack
723*4882a593Smuzhiyun */
724*4882a593Smuzhiyun if (addr == (addr_t) &dummy32->regs.psw.mask) {
725*4882a593Smuzhiyun __u32 mask = PSW32_MASK_USER;
726*4882a593Smuzhiyun
727*4882a593Smuzhiyun mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
728*4882a593Smuzhiyun /* Build a 64 bit psw mask from 31 bit mask. */
729*4882a593Smuzhiyun if ((tmp ^ PSW32_USER_BITS) & ~mask)
730*4882a593Smuzhiyun /* Invalid psw mask. */
731*4882a593Smuzhiyun return -EINVAL;
732*4882a593Smuzhiyun if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
733*4882a593Smuzhiyun /* Invalid address-space-control bits */
734*4882a593Smuzhiyun return -EINVAL;
735*4882a593Smuzhiyun regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
736*4882a593Smuzhiyun (regs->psw.mask & PSW_MASK_BA) |
737*4882a593Smuzhiyun (__u64)(tmp & mask) << 32;
738*4882a593Smuzhiyun } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
739*4882a593Smuzhiyun /* Build a 64 bit psw address from 31 bit address. */
740*4882a593Smuzhiyun regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
741*4882a593Smuzhiyun /* Transfer 31 bit amode bit to psw mask. */
742*4882a593Smuzhiyun regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
743*4882a593Smuzhiyun (__u64)(tmp & PSW32_ADDR_AMODE);
744*4882a593Smuzhiyun } else {
745*4882a593Smuzhiyun
746*4882a593Smuzhiyun if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
747*4882a593Smuzhiyun addr == offsetof(struct compat_user, regs.gprs[2]))
748*4882a593Smuzhiyun fixup_int_code(child, data);
749*4882a593Smuzhiyun /* gpr 0-15 */
750*4882a593Smuzhiyun *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp;
751*4882a593Smuzhiyun }
752*4882a593Smuzhiyun } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
753*4882a593Smuzhiyun /*
754*4882a593Smuzhiyun * access registers are stored in the thread structure
755*4882a593Smuzhiyun */
756*4882a593Smuzhiyun offset = addr - (addr_t) &dummy32->regs.acrs;
757*4882a593Smuzhiyun *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
758*4882a593Smuzhiyun
759*4882a593Smuzhiyun } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
760*4882a593Smuzhiyun /*
761*4882a593Smuzhiyun * orig_gpr2 is stored on the kernel stack
762*4882a593Smuzhiyun */
763*4882a593Smuzhiyun *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
764*4882a593Smuzhiyun
765*4882a593Smuzhiyun } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
766*4882a593Smuzhiyun /*
767*4882a593Smuzhiyun * prevent writess of padding hole between
768*4882a593Smuzhiyun * orig_gpr2 and fp_regs on s390.
769*4882a593Smuzhiyun */
770*4882a593Smuzhiyun return 0;
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
773*4882a593Smuzhiyun /*
774*4882a593Smuzhiyun * floating point control reg. is in the thread structure
775*4882a593Smuzhiyun */
776*4882a593Smuzhiyun if (test_fp_ctl(tmp))
777*4882a593Smuzhiyun return -EINVAL;
778*4882a593Smuzhiyun child->thread.fpu.fpc = data;
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
781*4882a593Smuzhiyun /*
782*4882a593Smuzhiyun * floating point regs. are either in child->thread.fpu
783*4882a593Smuzhiyun * or the child->thread.fpu.vxrs array
784*4882a593Smuzhiyun */
785*4882a593Smuzhiyun offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
786*4882a593Smuzhiyun if (MACHINE_HAS_VX)
787*4882a593Smuzhiyun *(__u32 *)((addr_t)
788*4882a593Smuzhiyun child->thread.fpu.vxrs + 2*offset) = tmp;
789*4882a593Smuzhiyun else
790*4882a593Smuzhiyun *(__u32 *)((addr_t)
791*4882a593Smuzhiyun child->thread.fpu.fprs + offset) = tmp;
792*4882a593Smuzhiyun
793*4882a593Smuzhiyun } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
794*4882a593Smuzhiyun /*
795*4882a593Smuzhiyun * Handle access to the per_info structure.
796*4882a593Smuzhiyun */
797*4882a593Smuzhiyun addr -= (addr_t) &dummy32->regs.per_info;
798*4882a593Smuzhiyun __poke_user_per_compat(child, addr, data);
799*4882a593Smuzhiyun }
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun return 0;
802*4882a593Smuzhiyun }
803*4882a593Smuzhiyun
/*
 * Validate a 31-bit PTRACE_POKEUSR request and perform the write.
 * Only word aligned offsets inside the compat user area are allowed.
 */
static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task())
		return -EIO;
	if ((addr & 3) || addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}
813*4882a593Smuzhiyun
/*
 * Handle the 31 bit (compat) ptrace requests with an s390 specific
 * meaning; everything else is forwarded to compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		/* Bulk transfer of a region of the USER area, word by word. */
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		/*
		 * Propagate a put_user() failure (-EFAULT) instead of
		 * silently returning success when nothing was copied.
		 */
		return put_user(child->thread.last_break,
				(unsigned int __user *) data);
	}
	return compat_ptrace_request(child, request, addr, data);
}
863*4882a593Smuzhiyun #endif
864*4882a593Smuzhiyun
/*
 * Syscall-entry tracing hook, called from entry.S before the system
 * call is dispatched.  Returns the syscall number to execute (taken
 * from gprs[2]) or a negative value if the syscall must be skipped.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long mask = -1UL;
	long ret = -1;

	/* Compat tasks pass only 32-bit significant syscall arguments. */
	if (is_compat_task())
		mask = 0xffffffff;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		/*
		 * Tracing decided this syscall should not happen. Skip
		 * the system call and the system call restart handling.
		 */
		goto skip;
	}

#ifdef CONFIG_SECCOMP
	/* Do the secure computing check after ptrace. */
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		struct seccomp_data sd;

		if (is_compat_task()) {
			/* 31-bit mode: mask off the high psw address bit. */
			sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
			sd.arch = AUDIT_ARCH_S390;
		} else {
			sd.instruction_pointer = regs->psw.addr;
			sd.arch = AUDIT_ARCH_S390X;
		}

		/* The syscall number is in the low 16 bits of int_code. */
		sd.nr = regs->int_code & 0xffff;
		sd.args[0] = regs->orig_gpr2 & mask;
		sd.args[1] = regs->gprs[3] & mask;
		sd.args[2] = regs->gprs[4] & mask;
		sd.args[3] = regs->gprs[5] & mask;
		sd.args[4] = regs->gprs[6] & mask;
		sd.args[5] = regs->gprs[7] & mask;

		/* -1 means the filter denied the syscall (or the task died). */
		if (__secure_computing(&sd) == -1)
			goto skip;
	}
#endif /* CONFIG_SECCOMP */

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->int_code & 0xffff);


	audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
			    regs->gprs[3] &mask, regs->gprs[4] &mask,
			    regs->gprs[5] &mask);

	/* A tracer may have rewritten gprs[2] to an out-of-range number. */
	if ((signed long)regs->gprs[2] >= NR_syscalls) {
		regs->gprs[2] = -ENOSYS;
		ret = -ENOSYS;
	}
	return regs->gprs[2];
skip:
	/* Also suppress system call restart handling for this syscall. */
	clear_pt_regs_flag(regs, PIF_SYSCALL);
	return ret;
}
929*4882a593Smuzhiyun
/*
 * Syscall-exit tracing hook, called from entry.S after the system call
 * returned.  Reports the result (in gprs[2]) to audit, the syscall
 * tracepoint and ptrace, in that order.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
940*4882a593Smuzhiyun
941*4882a593Smuzhiyun /*
942*4882a593Smuzhiyun * user_regset definitions.
943*4882a593Smuzhiyun */
944*4882a593Smuzhiyun
s390_regs_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)945*4882a593Smuzhiyun static int s390_regs_get(struct task_struct *target,
946*4882a593Smuzhiyun const struct user_regset *regset,
947*4882a593Smuzhiyun struct membuf to)
948*4882a593Smuzhiyun {
949*4882a593Smuzhiyun unsigned pos;
950*4882a593Smuzhiyun if (target == current)
951*4882a593Smuzhiyun save_access_regs(target->thread.acrs);
952*4882a593Smuzhiyun
953*4882a593Smuzhiyun for (pos = 0; pos < sizeof(s390_regs); pos += sizeof(long))
954*4882a593Smuzhiyun membuf_store(&to, __peek_user(target, pos));
955*4882a593Smuzhiyun return 0;
956*4882a593Smuzhiyun }
957*4882a593Smuzhiyun
/*
 * Set the general purpose register set by writing the fake user area
 * word by word; the source is either a kernel or a user buffer.
 */
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *src = kbuf;

		for (; count > 0 && !rc; count -= sizeof(*src)) {
			rc = __poke_user(target, pos, *src++);
			pos += sizeof(*src);
		}
	} else {
		const unsigned long __user *src = ubuf;

		while (count > 0 && !rc) {
			unsigned long val;

			rc = __get_user(val, src++);
			if (!rc)
				rc = __poke_user(target, pos, val);
			count -= sizeof(*src);
			pos += sizeof(*src);
		}
	}

	/* Reload the possibly rewritten access registers. */
	if (!rc && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
993*4882a593Smuzhiyun
s390_fpregs_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)994*4882a593Smuzhiyun static int s390_fpregs_get(struct task_struct *target,
995*4882a593Smuzhiyun const struct user_regset *regset,
996*4882a593Smuzhiyun struct membuf to)
997*4882a593Smuzhiyun {
998*4882a593Smuzhiyun _s390_fp_regs fp_regs;
999*4882a593Smuzhiyun
1000*4882a593Smuzhiyun if (target == current)
1001*4882a593Smuzhiyun save_fpu_regs();
1002*4882a593Smuzhiyun
1003*4882a593Smuzhiyun fp_regs.fpc = target->thread.fpu.fpc;
1004*4882a593Smuzhiyun fpregs_store(&fp_regs, &target->thread.fpu);
1005*4882a593Smuzhiyun
1006*4882a593Smuzhiyun return membuf_write(&to, &fp_regs, sizeof(fp_regs));
1007*4882a593Smuzhiyun }
1008*4882a593Smuzhiyun
/*
 * Set the classic floating point register set (fpc + 16 fprs).
 *
 * When the vector extension is active the fprs live in the high halves
 * of vxrs 0-15, so the data is staged in a local array and converted
 * in/out of the vector register save area.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	/* Make sure thread.fpu holds the current register contents. */
	if (target == current)
		save_fpu_regs();

	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		/* ufpc[1] is the pad word and must stay zero. */
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	/* Copy in the fprs; pos/count were advanced past the fpc above. */
	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	/* Write the staged fprs back to where the hardware state lives. */
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}
1050*4882a593Smuzhiyun
s390_last_break_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1051*4882a593Smuzhiyun static int s390_last_break_get(struct task_struct *target,
1052*4882a593Smuzhiyun const struct user_regset *regset,
1053*4882a593Smuzhiyun struct membuf to)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun return membuf_store(&to, target->thread.last_break);
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun
/*
 * The last-break address is read-only; writes are accepted but have no
 * effect so that blanket PTRACE_SETREGSET calls do not fail.
 */
static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}
1065*4882a593Smuzhiyun
s390_tdb_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1066*4882a593Smuzhiyun static int s390_tdb_get(struct task_struct *target,
1067*4882a593Smuzhiyun const struct user_regset *regset,
1068*4882a593Smuzhiyun struct membuf to)
1069*4882a593Smuzhiyun {
1070*4882a593Smuzhiyun struct pt_regs *regs = task_pt_regs(target);
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun if (!(regs->int_code & 0x200))
1073*4882a593Smuzhiyun return -ENODATA;
1074*4882a593Smuzhiyun return membuf_write(&to, target->thread.trap_tdb, 256);
1075*4882a593Smuzhiyun }
1076*4882a593Smuzhiyun
/*
 * The transaction diagnostic block is read-only; writes are accepted
 * but have no effect.
 */
static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}
1084*4882a593Smuzhiyun
s390_vxrs_low_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1085*4882a593Smuzhiyun static int s390_vxrs_low_get(struct task_struct *target,
1086*4882a593Smuzhiyun const struct user_regset *regset,
1087*4882a593Smuzhiyun struct membuf to)
1088*4882a593Smuzhiyun {
1089*4882a593Smuzhiyun __u64 vxrs[__NUM_VXRS_LOW];
1090*4882a593Smuzhiyun int i;
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun if (!MACHINE_HAS_VX)
1093*4882a593Smuzhiyun return -ENODEV;
1094*4882a593Smuzhiyun if (target == current)
1095*4882a593Smuzhiyun save_fpu_regs();
1096*4882a593Smuzhiyun for (i = 0; i < __NUM_VXRS_LOW; i++)
1097*4882a593Smuzhiyun vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1098*4882a593Smuzhiyun return membuf_write(&to, vxrs, sizeof(vxrs));
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun
/*
 * Set the low halves of vector registers 0-15.
 */
static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 low[__NUM_VXRS_LOW];
	int rc, i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	/*
	 * Seed the staging array with the current low halves so a
	 * partial write leaves the untouched registers intact.
	 */
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		low[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, low, 0, -1);
	if (rc)
		return rc;

	for (i = 0; i < __NUM_VXRS_LOW; i++)
		*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = low[i];
	return 0;
}
1124*4882a593Smuzhiyun
s390_vxrs_high_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1125*4882a593Smuzhiyun static int s390_vxrs_high_get(struct task_struct *target,
1126*4882a593Smuzhiyun const struct user_regset *regset,
1127*4882a593Smuzhiyun struct membuf to)
1128*4882a593Smuzhiyun {
1129*4882a593Smuzhiyun if (!MACHINE_HAS_VX)
1130*4882a593Smuzhiyun return -ENODEV;
1131*4882a593Smuzhiyun if (target == current)
1132*4882a593Smuzhiyun save_fpu_regs();
1133*4882a593Smuzhiyun return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
1134*4882a593Smuzhiyun __NUM_VXRS_HIGH * sizeof(__vector128));
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun
/*
 * Set vector registers 16-31 directly in the register save area.
 */
static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	__vector128 *high;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	high = target->thread.fpu.vxrs + __NUM_VXRS_LOW;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf, high, 0, -1);
}
1153*4882a593Smuzhiyun
s390_system_call_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1154*4882a593Smuzhiyun static int s390_system_call_get(struct task_struct *target,
1155*4882a593Smuzhiyun const struct user_regset *regset,
1156*4882a593Smuzhiyun struct membuf to)
1157*4882a593Smuzhiyun {
1158*4882a593Smuzhiyun return membuf_store(&to, target->thread.system_call);
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun
/*
 * Overwrite the saved system call number in place.
 */
static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.system_call, 0,
				  sizeof(unsigned int));
}
1170*4882a593Smuzhiyun
s390_gs_cb_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1171*4882a593Smuzhiyun static int s390_gs_cb_get(struct task_struct *target,
1172*4882a593Smuzhiyun const struct user_regset *regset,
1173*4882a593Smuzhiyun struct membuf to)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun struct gs_cb *data = target->thread.gs_cb;
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun if (!MACHINE_HAS_GS)
1178*4882a593Smuzhiyun return -ENODEV;
1179*4882a593Smuzhiyun if (!data)
1180*4882a593Smuzhiyun return -ENODATA;
1181*4882a593Smuzhiyun if (target == current)
1182*4882a593Smuzhiyun save_gs_cb(data);
1183*4882a593Smuzhiyun return membuf_write(&to, data, sizeof(struct gs_cb));
1184*4882a593Smuzhiyun }
1185*4882a593Smuzhiyun
/*
 * Install a new guarded storage control block for @target.
 *
 * A control block is allocated on first use.  The copy-in goes through
 * a local gs_cb so a user space fault cannot leave the task with a
 * half-written block; the final install runs with preemption disabled
 * because restore_gs_cb() acts on the current cpu.
 */
static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!target->thread.gs_cb) {
		/* First use: allocate the per-task control block. */
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	/* Start from defaults, the live cpu state, or the saved block. */
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;	/* NOTE(review): default gsd value — confirm against the GS architecture */
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		/* Enable guarded storage and load the new block now. */
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}
1224*4882a593Smuzhiyun
s390_gs_bc_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1225*4882a593Smuzhiyun static int s390_gs_bc_get(struct task_struct *target,
1226*4882a593Smuzhiyun const struct user_regset *regset,
1227*4882a593Smuzhiyun struct membuf to)
1228*4882a593Smuzhiyun {
1229*4882a593Smuzhiyun struct gs_cb *data = target->thread.gs_bc_cb;
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun if (!MACHINE_HAS_GS)
1232*4882a593Smuzhiyun return -ENODEV;
1233*4882a593Smuzhiyun if (!data)
1234*4882a593Smuzhiyun return -ENODATA;
1235*4882a593Smuzhiyun return membuf_write(&to, data, sizeof(struct gs_cb));
1236*4882a593Smuzhiyun }
1237*4882a593Smuzhiyun
/*
 * Set the guarded storage broadcast control block, allocating it on
 * first use.
 */
static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *bc = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!bc) {
		bc = kzalloc(sizeof(*bc), GFP_KERNEL);
		if (!bc)
			return -ENOMEM;
		target->thread.gs_bc_cb = bc;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  bc, 0, sizeof(struct gs_cb));
}
1256*4882a593Smuzhiyun
is_ri_cb_valid(struct runtime_instr_cb * cb)1257*4882a593Smuzhiyun static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
1258*4882a593Smuzhiyun {
1259*4882a593Smuzhiyun return (cb->rca & 0x1f) == 0 &&
1260*4882a593Smuzhiyun (cb->roa & 0xfff) == 0 &&
1261*4882a593Smuzhiyun (cb->rla & 0xfff) == 0xfff &&
1262*4882a593Smuzhiyun cb->s == 1 &&
1263*4882a593Smuzhiyun cb->k == 1 &&
1264*4882a593Smuzhiyun cb->h == 0 &&
1265*4882a593Smuzhiyun cb->reserved1 == 0 &&
1266*4882a593Smuzhiyun cb->ps == 1 &&
1267*4882a593Smuzhiyun cb->qs == 0 &&
1268*4882a593Smuzhiyun cb->pc == 1 &&
1269*4882a593Smuzhiyun cb->qc == 0 &&
1270*4882a593Smuzhiyun cb->reserved2 == 0 &&
1271*4882a593Smuzhiyun cb->reserved3 == 0 &&
1272*4882a593Smuzhiyun cb->reserved4 == 0 &&
1273*4882a593Smuzhiyun cb->reserved5 == 0 &&
1274*4882a593Smuzhiyun cb->reserved6 == 0 &&
1275*4882a593Smuzhiyun cb->reserved7 == 0 &&
1276*4882a593Smuzhiyun cb->reserved8 == 0 &&
1277*4882a593Smuzhiyun cb->rla >= cb->roa &&
1278*4882a593Smuzhiyun cb->rca >= cb->roa &&
1279*4882a593Smuzhiyun cb->rca <= cb->rla+1 &&
1280*4882a593Smuzhiyun cb->m < 3;
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun
s390_runtime_instr_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1283*4882a593Smuzhiyun static int s390_runtime_instr_get(struct task_struct *target,
1284*4882a593Smuzhiyun const struct user_regset *regset,
1285*4882a593Smuzhiyun struct membuf to)
1286*4882a593Smuzhiyun {
1287*4882a593Smuzhiyun struct runtime_instr_cb *data = target->thread.ri_cb;
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun if (!test_facility(64))
1290*4882a593Smuzhiyun return -ENODEV;
1291*4882a593Smuzhiyun if (!data)
1292*4882a593Smuzhiyun return -ENODATA;
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun return membuf_write(&to, data, sizeof(struct runtime_instr_cb));
1295*4882a593Smuzhiyun }
1296*4882a593Smuzhiyun
/*
 * Install a new runtime-instrumentation control block for @target.
 *
 * The block is staged in a local copy, merged with the user-supplied
 * bytes and validated before anything is published to the task, so a
 * faulting or invalid write leaves the previous state untouched.
 */
static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	/* Runtime instrumentation requires facility bit 64. */
	if (!test_facility(64))
		return -ENODEV;

	/* Pre-allocate a control block if the task has none yet. */
	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	/*
	 * Seed the staging copy with the task's current block so a
	 * partial regset write only changes the requested bytes. For
	 * the current task the CPU copy may be newer than the thread
	 * copy, so fetch it from hardware.
	 */
	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);	/* kfree(NULL) is a no-op */
		return -EFAULT;
	}

	/* Reject control blocks that would be unsafe to load. */
	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}
	/*
	 * Override access key in any case, since user space should
	 * not be able to set it, nor should it care about it.
	 */
	ri_cb.key = PAGE_DEFAULT_KEY >> 4;
	/*
	 * Publish with preemption disabled so the task cannot be
	 * scheduled between updating the thread copy and loading it
	 * into the CPU.
	 */
	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}
1347*4882a593Smuzhiyun
/*
 * Regsets exported for 64-bit tasks. These also define the ELF core
 * dump note sections (NT_*) written for such tasks.
 */
static const struct user_regset s390_regsets[] = {
	{
		/* PSW, general purpose and access registers. */
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		/* Floating point control and registers. */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		/* Last system call number. */
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		/* Address of the last breaking event (PER). */
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		/* Transaction diagnostic block (transactional execution). */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		/* Lower halves of the vector registers 0-15. */
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		/* Vector registers 16-31. */
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		/* Guarded storage control block. */
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		/* Guarded storage broadcast control block. */
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		/* Runtime instrumentation control block. */
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};
1430*4882a593Smuzhiyun
/* Regset view for native 64-bit tasks. */
static const struct user_regset_view user_s390_view = {
	.name = "s390x",
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
s390_compat_regs_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1439*4882a593Smuzhiyun static int s390_compat_regs_get(struct task_struct *target,
1440*4882a593Smuzhiyun const struct user_regset *regset,
1441*4882a593Smuzhiyun struct membuf to)
1442*4882a593Smuzhiyun {
1443*4882a593Smuzhiyun unsigned n;
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun if (target == current)
1446*4882a593Smuzhiyun save_access_regs(target->thread.acrs);
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t))
1449*4882a593Smuzhiyun membuf_store(&to, __peek_user_compat(target, n));
1450*4882a593Smuzhiyun return 0;
1451*4882a593Smuzhiyun }
1452*4882a593Smuzhiyun
/*
 * Write the 31-bit (compat) user register area word by word via
 * __poke_user_compat(). For the current task the access registers
 * are reloaded afterwards so the update takes effect immediately.
 */
static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;

		for (; count > 0 && !rc; count -= sizeof(*k), pos += sizeof(*k))
			rc = __poke_user_compat(target, pos, *k++);
	} else {
		const compat_ulong_t __user *u = ubuf;

		for (; count > 0 && !rc; count -= sizeof(*u), pos += sizeof(*u)) {
			compat_ulong_t word;

			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
1488*4882a593Smuzhiyun
s390_compat_regs_high_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1489*4882a593Smuzhiyun static int s390_compat_regs_high_get(struct task_struct *target,
1490*4882a593Smuzhiyun const struct user_regset *regset,
1491*4882a593Smuzhiyun struct membuf to)
1492*4882a593Smuzhiyun {
1493*4882a593Smuzhiyun compat_ulong_t *gprs_high;
1494*4882a593Smuzhiyun int i;
1495*4882a593Smuzhiyun
1496*4882a593Smuzhiyun gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs;
1497*4882a593Smuzhiyun for (i = 0; i < NUM_GPRS; i++, gprs_high += 2)
1498*4882a593Smuzhiyun membuf_store(&to, *gprs_high);
1499*4882a593Smuzhiyun return 0;
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun
/*
 * Write the upper 32-bit halves of the 64-bit general purpose
 * registers of a 31-bit task.
 *
 * Bug fix: the loops previously did "*gprs_high += 2;", which added 2
 * to the register value just stored instead of advancing the pointer.
 * That corrupted the upper half of GPR0 and wrote every incoming word
 * to the same register. The pointer itself must be advanced by two
 * 32-bit words (one 64-bit register), exactly as the matching
 * s390_compat_regs_high_get() does.
 */
static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	/* Start at the upper half of the GPR selected by @pos. */
	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;		/* advance one 64-bit GPR */
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;		/* advance one 64-bit GPR */
			count -= sizeof(*u);
		}
	}

	return rc;
}
1534*4882a593Smuzhiyun
s390_compat_last_break_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1535*4882a593Smuzhiyun static int s390_compat_last_break_get(struct task_struct *target,
1536*4882a593Smuzhiyun const struct user_regset *regset,
1537*4882a593Smuzhiyun struct membuf to)
1538*4882a593Smuzhiyun {
1539*4882a593Smuzhiyun compat_ulong_t last_break = target->thread.last_break;
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun return membuf_store(&to, (unsigned long)last_break);
1542*4882a593Smuzhiyun }
1543*4882a593Smuzhiyun
/*
 * The last-break address is read-only; writes are silently accepted
 * so generic regset code (e.g. core dump restore paths) does not fail.
 */
static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
1551*4882a593Smuzhiyun
/*
 * Regsets exported for 31-bit (compat) tasks. Mostly mirrors
 * s390_regsets with compat-sized entries, plus NT_S390_HIGH_GPRS for
 * the upper register halves that the 31-bit view cannot express.
 */
static const struct user_regset s390_compat_regsets[] = {
	{
		/* PSW, general purpose and access registers (compat layout). */
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		/* Floating point control and registers. */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		/* Last system call number. */
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		/* Address of the last breaking event (PER). */
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		/* Transaction diagnostic block (transactional execution). */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		/* Lower halves of the vector registers 0-15. */
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		/* Vector registers 16-31. */
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		/* Upper 32-bit halves of the 64-bit GPRs. */
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		/* Guarded storage control block. */
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		/* Guarded storage broadcast control block. */
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		/* Runtime instrumentation control block. */
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};
1642*4882a593Smuzhiyun
/* Regset view for 31-bit (compat) tasks. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
1649*4882a593Smuzhiyun #endif
1650*4882a593Smuzhiyun
/*
 * Return the regset view matching the task's ABI: the compat view for
 * 31-bit tasks (TIF_31BIT), the native 64-bit view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
1659*4882a593Smuzhiyun
/* Symbolic names of the general purpose registers, indexed by number. */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
1664*4882a593Smuzhiyun
regs_get_register(struct pt_regs * regs,unsigned int offset)1665*4882a593Smuzhiyun unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1666*4882a593Smuzhiyun {
1667*4882a593Smuzhiyun if (offset >= NUM_GPRS)
1668*4882a593Smuzhiyun return 0;
1669*4882a593Smuzhiyun return regs->gprs[offset];
1670*4882a593Smuzhiyun }
1671*4882a593Smuzhiyun
regs_query_register_offset(const char * name)1672*4882a593Smuzhiyun int regs_query_register_offset(const char *name)
1673*4882a593Smuzhiyun {
1674*4882a593Smuzhiyun unsigned long offset;
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun if (!name || *name != 'r')
1677*4882a593Smuzhiyun return -EINVAL;
1678*4882a593Smuzhiyun if (kstrtoul(name + 1, 10, &offset))
1679*4882a593Smuzhiyun return -EINVAL;
1680*4882a593Smuzhiyun if (offset >= NUM_GPRS)
1681*4882a593Smuzhiyun return -EINVAL;
1682*4882a593Smuzhiyun return offset;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun
regs_query_register_name(unsigned int offset)1685*4882a593Smuzhiyun const char *regs_query_register_name(unsigned int offset)
1686*4882a593Smuzhiyun {
1687*4882a593Smuzhiyun if (offset >= NUM_GPRS)
1688*4882a593Smuzhiyun return NULL;
1689*4882a593Smuzhiyun return gpr_names[offset];
1690*4882a593Smuzhiyun }
1691*4882a593Smuzhiyun
regs_within_kernel_stack(struct pt_regs * regs,unsigned long addr)1692*4882a593Smuzhiyun static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1693*4882a593Smuzhiyun {
1694*4882a593Smuzhiyun unsigned long ksp = kernel_stack_pointer(regs);
1695*4882a593Smuzhiyun
1696*4882a593Smuzhiyun return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1697*4882a593Smuzhiyun }
1698*4882a593Smuzhiyun
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * Returns the @n th entry of the kernel stack specified by @regs, or 0
 * if the computed address does not lie within that kernel stack.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr = kernel_stack_pointer(regs) + n * sizeof(long);

	if (regs_within_kernel_stack(regs, addr))
		return *(unsigned long *)addr;
	return 0;
}
1717