// SPDX-License-Identifier: GPL-2.0
/*
 *  Standard user space access functions based on mvcp/mvcs and doing
 *  interesting things in the secondary space mode.
 *
 *    Copyright IBM Corp. 2006,2014
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>

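/*
 * Two implementations are provided for each user space primitive: a fast
 * variant based on the MVCOS instruction (move with optional specifications,
 * facility 27) and a fallback that uses MVCP/MVCS or plain MVC/XC/SRST in
 * the secondary address space. Without CONFIG_HAVE_MARCH_Z10_FEATURES the
 * choice is made at boot time via a static key; with it the MVCOS path is
 * assumed to be available.
 */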
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);

static int __init uaccess_init(void)
{
	if (test_facility(27))
		static_branch_enable(&have_mvcos);
	return 0;
}
early_initcall(uaccess_init);

static inline int copy_with_mvcos(void)
{
	if (static_branch_likely(&have_mvcos))
		return 1;
	return 0;
}
#else
static inline int copy_with_mvcos(void)
{
	return 1;
}
#endif

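/*
 * Set the address space "limit" of the current task. Loads the user or
 * kernel ASCE into control register 1 (primary space) and, for the *_SACF
 * segments, also sets up control register 7 (secondary space). Pending
 * ASCE changes are recorded in the CIF_ASCE_PRIMARY/SECONDARY CPU flags.
 */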
void set_fs(mm_segment_t fs)
{
	current->thread.mm_segment = fs;
	if (fs == USER_DS) {
		__ctl_load(S390_lowcore.user_asce, 1, 1);
		clear_cpu_flag(CIF_ASCE_PRIMARY);
	} else {
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
		set_cpu_flag(CIF_ASCE_PRIMARY);
	}
	if (fs & 1) {
		if (fs == USER_DS_SACF)
			__ctl_load(S390_lowcore.user_asce, 7, 7);
		else
			__ctl_load(S390_lowcore.kernel_asce, 7, 7);
		set_cpu_flag(CIF_ASCE_SECONDARY);
	}
}
EXPORT_SYMBOL(set_fs);

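/*
 * Prepare the SACF-based user space access paths: make sure the primary
 * ASCE points to the kernel and the secondary ASCE to the user address
 * space (for USER_DS), and mark the segment as a SACF one by setting the
 * low bit of mm_segment. Returns the previous segment, which must be
 * restored with disable_sacf_uaccess() when the access is done.
 */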
mm_segment_t enable_sacf_uaccess(void)
{
	mm_segment_t old_fs;
	unsigned long asce, cr;
	unsigned long flags;

	old_fs = current->thread.mm_segment;
	if (old_fs & 1)
		return old_fs;
	/* protect against a concurrent page table upgrade */
	local_irq_save(flags);
	current->thread.mm_segment |= 1;
	asce = S390_lowcore.kernel_asce;
	if (likely(old_fs == USER_DS)) {
		__ctl_store(cr, 1, 1);
		if (cr != S390_lowcore.kernel_asce) {
			__ctl_load(S390_lowcore.kernel_asce, 1, 1);
			set_cpu_flag(CIF_ASCE_PRIMARY);
		}
		asce = S390_lowcore.user_asce;
	}
	__ctl_store(cr, 7, 7);
	if (cr != asce) {
		__ctl_load(asce, 7, 7);
		set_cpu_flag(CIF_ASCE_SECONDARY);
	}
	local_irq_restore(flags);
	return old_fs;
}
EXPORT_SYMBOL(enable_sacf_uaccess);

void disable_sacf_uaccess(mm_segment_t old_fs)
{
	current->thread.mm_segment = old_fs;
	if (old_fs == USER_DS && test_facility(27)) {
		__ctl_load(S390_lowcore.user_asce, 1, 1);
		clear_cpu_flag(CIF_ASCE_PRIMARY);
	}
}
EXPORT_SYMBOL(disable_sacf_uaccess);

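/*
 * Copy from user space using MVCOS. As used here, general register 0 holds
 * the operand access controls: 0x01 fetches the source operand from the
 * user address space. The first attempt covers the whole range; if it
 * faults, the copy is retried up to the next 4K page boundary so as many
 * bytes as possible are transferred. Returns the number of bytes not copied.
 */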
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
						 unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x01UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

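/*
 * Fallback copy from user space using MVCP, which moves data from the
 * secondary (user) into the primary (kernel) address space, at most 256
 * bytes per execution. "sacf 0" switches to primary space mode for the
 * copy, "sacf 768" returns to home space mode. On a fault the copy is
 * retried up to the next page boundary. Returns the number of bytes not
 * copied.
 */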
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
						unsigned long size)
{
	unsigned long tmp1, tmp2;
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();
	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcp  0(%0,%2),0(%1),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcp  0(%0,%2),0(%1),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcp  0(%4,%2),0(%1),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

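/*
 * Architecture backend for copy_from_user()/_copy_from_user(). A typical
 * caller (illustrative only, not part of this file) would look like:
 *
 *	char buf[64];
 *
 *	if (copy_from_user(buf, user_ptr, sizeof(buf)))
 *		return -EFAULT;
 *
 * A non-zero return value is the number of bytes that could not be copied.
 */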
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_from_user_mvcos(to, from, n);
	return copy_from_user_mvcp(to, from, n);
}
EXPORT_SYMBOL(raw_copy_from_user);

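/*
 * Copy to user space using MVCOS. Register 0 is set to 0x010000 so that the
 * destination operand is stored into the user address space; otherwise the
 * structure mirrors copy_from_user_mvcos(). Returns the number of bytes not
 * copied.
 */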
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

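/*
 * Fallback copy to user space using MVCS, which moves data from the primary
 * (kernel) into the secondary (user) address space in chunks of at most
 * 256 bytes. Returns the number of bytes not copied.
 */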
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
					      unsigned long size)
{
	unsigned long tmp1, tmp2;
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();
	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcs  0(%0,%1),0(%2),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcs  0(%0,%1),0(%2),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcs  0(%4,%1),0(%2),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

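/*
 * Architecture backend for copy_to_user(); returns the number of bytes
 * that could not be copied.
 */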
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_to_user_mvcos(to, from, n);
	return copy_to_user_mvcs(to, from, n);
}
EXPORT_SYMBOL(raw_copy_to_user);

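/*
 * Copy between two user space buffers using MVCOS with register 0 set to
 * 0x010001, i.e. both operands are accessed in the user address space.
 * Unlike the other MVCOS helpers there is no retry with a reduced length
 * yet (see the FIXME below), so a fault simply terminates the copy.
 */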
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010001UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	/* FIXME: copy with reduced length. */
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"   jz	  2f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j	  0b\n"
		"2: slgr  %0,%0\n"
		"3: \n"
		EX_TABLE(0b,3b)
		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

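/*
 * Fallback user-to-user copy: switch to secondary space mode ("sacf 256")
 * so that MVC operates directly on user memory, copy in 256 byte blocks
 * and use EX on the single byte MVC to handle the remainder.
 */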
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
					     unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long tmp1;

	old_fs = enable_sacf_uaccess();
	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo	  5f\n"
		"   bras  %3,3f\n"
		"0: aghi  %0,257\n"
		"1: mvc	  0(1,%1),0(%2)\n"
		"   la	  %1,1(%1)\n"
		"   la	  %2,1(%2)\n"
		"   aghi  %0,-1\n"
		"   jnz	  1b\n"
		"   j	  5f\n"
		"2: mvc	  0(256,%1),0(%2)\n"
		"   la	  %1,256(%1)\n"
		"   la	  %2,256(%2)\n"
		"3: aghi  %0,-256\n"
		"   jnm	  2b\n"
		"4: ex	  %0,1b-0b(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

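/*
 * Architecture backend for copy_in_user(): copy directly from one user
 * space buffer to another. Returns the number of bytes not copied.
 */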
unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_in_user_mvcos(to, from, n);
	return copy_in_user_mvc(to, from, n);
}
EXPORT_SYMBOL(raw_copy_in_user);

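/*
 * Clear user space memory with MVCOS by copying from empty_zero_page into
 * the destination; the same page boundary retry logic as in the copy
 * routines limits the damage on a fault. Returns the number of bytes not
 * cleared.
 */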
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
		"   jz	  4f\n"
		"1: algr  %0,%2\n"
		"   slgr  %1,%2\n"
		"   j	  0b\n"
		"2: la	  %3,4095(%1)\n"/* %3 = to + 4095 */
		"   nr	  %3,%2\n"	/* %3 = (to + 4095) & -4096 */
		"   slgr  %3,%1\n"
		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
		"   jnh	  5f\n"
		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
		"   slgr  %0,%3\n"
		"   j	  5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
	return size;
}

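/*
 * Fallback: clear user space memory with XC (exclusive or of an area with
 * itself) in secondary space mode, 256 bytes at a time, using EX for the
 * partial blocks. Returns the number of bytes not cleared.
 */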
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long tmp1, tmp2;

	old_fs = enable_sacf_uaccess();
	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo    5f\n"
		"   bras  %3,3f\n"
		"   xc    0(1,%1),0(%1)\n"
		"0: aghi  %0,257\n"
		"   la    %2,255(%1)\n" /* %2 = ptr + 255 */
		"   srl   %2,12\n"
		"   sll   %2,12\n"	/* %2 = (ptr + 255) & -4096 */
		"   slgr  %2,%1\n"
		"   clgr  %0,%2\n"	/* clear crosses next page boundary? */
		"   jnh   5f\n"
		"   aghi  %2,-1\n"
		"1: ex    %2,0(%3)\n"
		"   aghi  %2,1\n"
		"   slgr  %0,%2\n"
		"   j     5f\n"
		"2: xc    0(256,%1),0(%1)\n"
		"   la    %1,256(%1)\n"
		"3: aghi  %0,-256\n"
		"   jnm   2b\n"
		"4: ex    %0,0(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

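/*
 * Backend for clear_user()/__clear_user(); returns the number of bytes
 * that could not be cleared.
 */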
unsigned long __clear_user(void __user *to, unsigned long size)
{
	if (copy_with_mvcos())
		return clear_user_mvcos(to, size);
	return clear_user_xc(to, size);
}
EXPORT_SYMBOL(__clear_user);

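/*
 * Find the length of a user space string with SRST. Register 0 holds the
 * byte to search for (0, the terminator); the search runs in secondary
 * space mode directly on user memory. The result includes the terminating
 * byte, and a fault leaves the result at 0.
 */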
static inline unsigned long strnlen_user_srst(const char __user *src,
					      unsigned long size)
{
	register unsigned long reg0 asm("0") = 0;
	unsigned long tmp1, tmp2;

	asm volatile(
		"   la    %2,0(%1)\n"
		"   la    %3,0(%0,%1)\n"
		"   slgr  %0,%0\n"
		"   sacf  256\n"
		"0: srst  %3,%2\n"
		"   jo    0b\n"
		"   la    %0,1(%3)\n"	/* strnlen_user result includes the \0 */
		"   slgr  %0,%1\n"
		"1: sacf  768\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

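/*
 * strnlen_user() backend: returns the length of the string including the
 * terminating NUL byte, a value larger than @size if no terminator was
 * found within @size bytes, and 0 if an exception occurred.
 */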
unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long len;

	if (unlikely(!size))
		return 0;
	old_fs = enable_sacf_uaccess();
	len = strnlen_user_srst(src, size);
	disable_sacf_uaccess(old_fs);
	return len;
}
EXPORT_SYMBOL(__strnlen_user);

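/*
 * strncpy_from_user() backend: copy the string in L1 cache line sized
 * chunks with copy_from_user() and scan each chunk for the terminator.
 * Returns the number of bytes copied up to (but not counting) the NUL,
 * @size if no terminator was found, or -EFAULT on an unhandled fault.
 */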
long __strncpy_from_user(char *dst, const char __user *src, long size)
{
	size_t done, len, offset, len_str;

	if (unlikely(size <= 0))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & (L1_CACHE_BYTES - 1);
		len = min(size - done, L1_CACHE_BYTES - offset);
		if (copy_from_user(dst, src, len))
			return -EFAULT;
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < size));
	return done;
}
EXPORT_SYMBOL(__strncpy_from_user);