/*
 *  arch/xtensa/lib/usercopy.S
 *
 *  Copy to/from user space (derived from arch/xtensa/lib/hal/memcopy.S)
 *
 *  DO NOT COMBINE this function with <arch/xtensa/lib/hal/memcopy.S>.
 *  It needs to remain separate and distinct.  The hal files are part
 *  of the Xtensa link-time HAL, and those files may differ per
 *  processor configuration.  Patching the kernel for another
 *  processor configuration includes replacing the hal files, and we
 *  could lose the special functionality for accessing user-space
 *  memory during such a patch.  We sacrifice a little code space here
 *  in favor of simpler code maintenance.
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file "COPYING" in the main directory of
 *  this archive for more details.
 *
 *  Copyright (C) 2002 Tensilica Inc.
 */


/*
 * size_t __xtensa_copy_user (void *dst, const void *src, size_t len);
 *
 * The returned value is the number of bytes not copied, so a return
 * value of zero means success.
 *
 * The general case algorithm is as follows:
 *   If the destination and source are both aligned,
 *     do 16B chunks with a loop, and then finish up with
 *     8B, 4B, 2B, and 1B copies conditional on the length.
 *   If destination is aligned and source unaligned,
 *     do the same, but use SRC to align the source data.
 *   If destination is unaligned, align it by conditionally
 *     copying 1B and 2B and then retest.
 *   This code tries to use fall-through branches for the common
 *     case of aligned destinations (except for the branches to
 *     the alignment label).
 *
 * Register use:
 *	a0/ return address
 *	a1/ stack pointer
 *	a2/ return value
 *	a3/ src
 *	a4/ length
 *	a5/ dst
 *	a6/ tmp
 *	a7/ tmp
 *	a8/ tmp
 *	a9/ tmp
 *	a10/ tmp
 *	a11/ original length
 */
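
/*
 * For orientation only: a rough C sketch of the strategy described
 * above.  This sketch is not part of the build, the function name is
 * hypothetical, and it omits both the EX()/.fixup fault handling that
 * makes the real routine a user copy and the SRC-based realignment
 * used when src is unaligned.
 *
 *	#include <stddef.h>
 *
 *	size_t xtensa_copy_user_sketch(unsigned char *dst,
 *				       const unsigned char *src,
 *				       size_t len)
 *	{
 *		// Align dst by copying leading bytes (the asm does
 *		// this with one 1B and one 2B conditional copy).
 *		while (((unsigned long)dst & 3) && len) {
 *			*dst++ = *src++;
 *			len--;
 *		}
 *		// Main loop: 16 bytes per iteration, four word copies.
 *		while (len >= 16) {
 *			__builtin_memcpy(dst, src, 16);
 *			dst += 16;
 *			src += 16;
 *			len -= 16;
 *		}
 *		// Tail: the asm uses conditional 8B/4B/2B/1B copies.
 *		while (len--)
 *			*dst++ = *src++;
 *		return 0;	// a fault returns bytes not copied instead
 *	}
 */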

#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>

	.text
ENTRY(__xtensa_copy_user)

	abi_entry_default
	# a2/ dst, a3/ src, a4/ len
	mov	a5, a2		# copy dst so that a2 is return value
	mov	a11, a4		# preserve original len for error case
.Lcommon:
	bbsi.l	a2, 0, .Ldst1mod2 # if dst is 1 mod 2
	bbsi.l	a2, 1, .Ldst2mod4 # if dst is 2 mod 4
.Ldstaligned:	# return here from .Ldst1mod2/.Ldst2mod4 when dst is aligned
	srli	a7, a4, 4	# number of loop iterations with 16B
				# per iteration
	movi	a8, 3		  # if source is also aligned,
	bnone	a3, a8, .Laligned # then use word copy
	__ssa8	a3		# set shift amount from byte offset
	bnez	a4, .Lsrcunaligned
	movi	a2, 0		# return success for len==0
	abi_ret_default

/*
 * Destination is unaligned
 */

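/*
 * Worked example (addresses hypothetical): with dst = 0x1003 the
 * single byte copied below leaves dst = 0x1004, bit 1 is clear, and
 * we branch straight back to .Ldstaligned; with dst = 0x1001 one
 * byte leaves dst = 0x1002, so we fall through to .Ldst2mod4 and
 * copy two more bytes before rejoining the aligned code.
 */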
.Ldst1mod2:	# dst is only byte aligned
	bltui	a4, 7, .Lbytecopy	# do short copies byte by byte

	# copy 1 byte
EX(10f)	l8ui	a6, a3, 0
	addi	a3, a3,  1
EX(10f)	s8i	a6, a5,  0
	addi	a5, a5,  1
	addi	a4, a4, -1
	bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
					# return to main algorithm
.Ldst2mod4:	# dst 16-bit aligned
	# copy 2 bytes
	bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
EX(10f)	l8ui	a6, a3, 0
EX(10f)	l8ui	a7, a3, 1
	addi	a3, a3,  2
EX(10f)	s8i	a6, a5,  0
EX(10f)	s8i	a7, a5,  1
	addi	a5, a5,  2
	addi	a4, a4, -2
	j	.Ldstaligned	# dst is now aligned, return to main algorithm

/*
 * Byte by byte copy
 */
	.align	4
	.byte	0		# 1 mod 4 alignment for LOOPNEZ
				# (0 mod 4 alignment for LBEG)
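				# (rationale: LOOPNEZ is a 3-byte
				# instruction, so starting it at
				# 1 mod 4 leaves the loop body, i.e.
				# the LBEG target, aligned 0 mod 4
				# for instruction fetch)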
.Lbytecopy:
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lbytecopydone
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a4, .Lbytecopydone
	add	a7, a3, a4	# a7 = end address for source
#endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte:
EX(10f)	l8ui	a6, a3, 0
	addi	a3, a3, 1
EX(10f)	s8i	a6, a5, 0
	addi	a5, a5, 1
#if !XCHAL_HAVE_LOOPS
	blt	a3, a7, .Lnextbyte
#endif /* !XCHAL_HAVE_LOOPS */
.Lbytecopydone:
	movi	a2, 0		# return success for len bytes copied
	abi_ret_default

/*
 * Destination and source are word-aligned.
 */
	# copy 16 bytes per iteration for word-aligned dst and word-aligned src
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a7, .Loop1done
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a7, .Loop1done
	slli	a8, a7, 4
	add	a8, a8, a3	# a8 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
EX(10f)	l32i	a6, a3,  0
EX(10f)	l32i	a7, a3,  4
EX(10f)	s32i	a6, a5,  0
EX(10f)	l32i	a6, a3,  8
EX(10f)	s32i	a7, a5,  4
EX(10f)	l32i	a7, a3, 12
EX(10f)	s32i	a6, a5,  8
	addi	a3, a3, 16
EX(10f)	s32i	a7, a5, 12
	addi	a5, a5, 16
#if !XCHAL_HAVE_LOOPS
	blt	a3, a8, .Loop1
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1done:
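	# Bits 3..0 of the remaining length a4 now select the tail
	# copies: bit 3 -> 8 bytes, bit 2 -> 4 bytes, bit 1 -> 2 bytes,
	# bit 0 -> 1 byte.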
	bbci.l	a4, 3, .L2
	# copy 8 bytes
EX(10f)	l32i	a6, a3,  0
EX(10f)	l32i	a7, a3,  4
	addi	a3, a3,  8
EX(10f)	s32i	a6, a5,  0
EX(10f)	s32i	a7, a5,  4
	addi	a5, a5,  8
.L2:
	bbci.l	a4, 2, .L3
	# copy 4 bytes
EX(10f)	l32i	a6, a3,  0
	addi	a3, a3,  4
EX(10f)	s32i	a6, a5,  0
	addi	a5, a5,  4
.L3:
	bbci.l	a4, 1, .L4
	# copy 2 bytes
EX(10f)	l16ui	a6, a3,  0
	addi	a3, a3,  2
EX(10f)	s16i	a6, a5,  0
	addi	a5, a5,  2
.L4:
	bbci.l	a4, 0, .L5
	# copy 1 byte
EX(10f)	l8ui	a6, a3,  0
EX(10f)	s8i	a6, a5,  0
.L5:
	movi	a2, 0		# return success for len bytes copied
	abi_ret_default

/*
 * Destination is aligned, Source is unaligned
 */

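/*
 * How the realignment works (a sketch; the little-endian case is
 * shown, and the offset is an example): __ssa8 above set
 * SAR = 8 * (src & 3), and each __src_b below funnel-shifts two
 * adjacent source words into one aligned word of output.  With src
 * offset 1, SAR = 8 and
 *	__src_b(r, w0, w1)  =>  r = (w0 >> 8) | (w1 << 24)
 * i.e. bytes 1..3 of w0 followed by byte 0 of w1.  The initial l32i
 * at .Lsrcunaligned primes the first word, and a3 is rounded down so
 * every l32i in the loop is word-aligned.
 */
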
	.align	4
	.byte	0		# 1 mod 4 alignment for LOOPNEZ
				# (0 mod 4 alignment for LBEG)
.Lsrcunaligned:
	# copy 16 bytes per iteration for word-aligned dst and unaligned src
	and	a10, a3, a8	# save unalignment offset for below
	sub	a3, a3, a10	# align a3 (to avoid sim warnings only; not needed for hardware)
EX(10f)	l32i	a6, a3, 0	# load first word
#if XCHAL_HAVE_LOOPS
	loopnez	a7, .Loop2done
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a7, .Loop2done
	slli	a12, a7, 4
	add	a12, a12, a3	# a12 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2:
EX(10f)	l32i	a7, a3,  4
EX(10f)	l32i	a8, a3,  8
	__src_b	a6, a6, a7
EX(10f)	s32i	a6, a5,  0
EX(10f)	l32i	a9, a3, 12
	__src_b	a7, a7, a8
EX(10f)	s32i	a7, a5,  4
EX(10f)	l32i	a6, a3, 16
	__src_b	a8, a8, a9
EX(10f)	s32i	a8, a5,  8
	addi	a3, a3, 16
	__src_b	a9, a9, a6
EX(10f)	s32i	a9, a5, 12
	addi	a5, a5, 16
#if !XCHAL_HAVE_LOOPS
	blt	a3, a12, .Loop2
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2done:
	bbci.l	a4, 3, .L12
	# copy 8 bytes
EX(10f)	l32i	a7, a3,  4
EX(10f)	l32i	a8, a3,  8
	__src_b	a6, a6, a7
EX(10f)	s32i	a6, a5,  0
	addi	a3, a3,  8
	__src_b	a7, a7, a8
EX(10f)	s32i	a7, a5,  4
	addi	a5, a5,  8
	mov	a6, a8
.L12:
	bbci.l	a4, 2, .L13
	# copy 4 bytes
EX(10f)	l32i	a7, a3,  4
	addi	a3, a3,  4
	__src_b	a6, a6, a7
EX(10f)	s32i	a6, a5,  0
	addi	a5, a5,  4
	mov	a6, a7
.L13:
	add	a3, a3, a10	# readjust a3 with correct misalignment
	bbci.l	a4, 1, .L14
	# copy 2 bytes
EX(10f)	l8ui	a6, a3,  0
EX(10f)	l8ui	a7, a3,  1
	addi	a3, a3,  2
EX(10f)	s8i	a6, a5,  0
EX(10f)	s8i	a7, a5,  1
	addi	a5, a5,  2
.L14:
	bbci.l	a4, 0, .L15
	# copy 1 byte
EX(10f)	l8ui	a6, a3,  0
EX(10f)	s8i	a6, a5,  0
.L15:
	movi	a2, 0		# return success for len bytes copied
	abi_ret_default

ENDPROC(__xtensa_copy_user)

	.section .fixup, "ax"
	.align	4

/* a2 = original dst; a5 = current dst; a11 = original len
 * bytes_copied = a5 - a2
 * retval = bytes_not_copied = original len - bytes_copied
 * retval = a11 - (a5 - a2)
 */
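
/*
 * Each EX(10f) above records the address of its user access in the
 * kernel exception table; a fault in any of those loads or stores
 * jumps to label 10 below.  Worked example: if a fault hits after
 * 5 of 12 requested bytes have reached dst, then a5 - a2 = 5 and
 * the routine returns a11 - 5 = 7 bytes not copied.
 */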

10:
	sub	a2, a5, a2	/* a2 <-- bytes copied */
	sub	a2, a11, a2	/* a2 <-- bytes not copied */
	abi_ret_default