/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/stxcpy.S
 * Contributed by Richard Henderson (rth@tamu.edu)
 *
 * Copy a null-terminated string from SRC to DST.
 *
 * This is an internal routine used by strcpy, stpcpy, and strcat.
 * As such, it uses special linkage conventions to make implementation
 * of these public functions more efficient.
 *
 * On input:
 *	t9 = return address
 *	a0 = DST
 *	a1 = SRC
 *
 * On output:
 *	t12 = bitmask (with one bit set) indicating the last byte written
 *	a0  = unaligned address of the last *word* written
 *
 * Furthermore, v0, a3-a5, and t11 are untouched; t12 is clobbered
 * (it returns the last-byte bitmask described above).
 */
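
/* For reference, the public entry points are thin wrappers around this
   routine.  A caller such as strcpy (cf. arch/alpha/lib/strcpy.S) is
   roughly the following -- a sketch of the linkage convention, not a
   verbatim copy of that file:

	mov	a0, v0		# strcpy returns DST
	mov	ra, t9		# __stxcpy returns through t9, not ra
	br	__stxcpy	# tail-call the common copy routine
*/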

#include <asm/regdef.h>

	.set noat
	.set noreorder

	.text

/* Either gdb (as of 4.16) or gas (as of 2.7) has a problem with an entry
   point placed somewhere in the middle of its procedure descriptor.
   Work around this by giving the aligned copy its own procedure
   descriptor.  */

	.ent stxcpy_aligned
	.align 3
stxcpy_aligned:
	.frame sp, 0, t9
	.prologue 0

	/* On entry to this basic block:
	   t0 == the first destination word for masking back in
	   t1 == the first source word.  */

	/* Create the 1st output word and detect 0's in the 1st input word.  */
	lda	t2, -1		# e1    : build a mask against false zero
	mskqh	t2, a1, t2	# e0    :   detection in the src word
	mskqh	t1, a1, t3	# e0    :
	ornot	t1, t2, t2	# .. e1 :
	mskql	t0, a1, t0	# e0    : assemble the first output word
	cmpbge	zero, t2, t8	# .. e1 : bits set iff null found
	or	t0, t3, t1	# e0    :
	bne	t8, $a_eos	# .. e1 :

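	/* A note on the cmpbge idiom used throughout: cmpbge zero, x, y
	   sets bit i of y iff byte i of x is zero (0 >= byte holds only
	   for a zero byte), so a nonzero result flags a null terminator
	   and the set bit's index is the terminator's byte position.
	   The lda/mskqh/ornot sequence above forces the bytes before the
	   string's start to 0xff, so garbage there cannot fake a null.  */
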
	/* On entry to this basic block:
	   t0 == the first destination word for masking back in
	   t1 == a source word not containing a null.  */

$a_loop:
	stq_u	t1, 0(a0)	# e0    :
	addq	a0, 8, a0	# .. e1 :
	ldq_u	t1, 0(a1)	# e0    :
	addq	a1, 8, a1	# .. e1 :
	cmpbge	zero, t1, t8	# e0 (stall)
	beq	t8, $a_loop	# .. e1 (zdb)

	/* Take care of the final (partial) word store.
	   On entry to this basic block we have:
	   t1 == the source word containing the null
	   t8 == the cmpbge mask that found it.  */
$a_eos:
	negq	t8, t6		# e0    : find low bit set
	and	t8, t6, t12	# e1 (stall)

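	/* negq/and computes t8 & -t8, the two's complement trick that
	   isolates the lowest set bit; t12 now has exactly one bit set,
	   marking the byte position of the null.  */
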
	/* For the sake of the cache, don't read a destination word
	   if we're not going to need it.  */
	and	t12, 0x80, t6	# e0    :
	bne	t6, 1f		# .. e1 (zdb)

	/* We're doing a partial word store and so need to combine
	   our source and original destination words.  */
	ldq_u	t0, 0(a0)	# e0    :
	subq	t12, 1, t6	# .. e1 :
	zapnot	t1, t6, t1	# e0    : clear src bytes >= null
	or	t12, t6, t8	# .. e1 :
	zap	t0, t8, t0	# e0    : clear dst bytes <= null
	or	t0, t1, t1	# e1    :
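
	/* Example: a null in byte 2 gives t12 = 0x04, t6 = 0x03, t8 = 0x07;
	   zapnot keeps src bytes 0-1, zap keeps dst bytes 3-7, and the or
	   splices them (byte 2 ends up zero, as required).  */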

1:	stq_u	t1, 0(a0)	# e0    :
	ret	(t9)		# .. e1 :

	.end stxcpy_aligned

	.align 3
	.ent __stxcpy
	.globl __stxcpy
__stxcpy:
	.frame sp, 0, t9
	.prologue 0

	/* Are source and destination co-aligned?  */
	xor	a0, a1, t0	# e0    :
	unop			#       :
	and	t0, 7, t0	# e0    :
	bne	t0, $unaligned	# .. e1 :
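	/* ((a0 ^ a1) & 7) == 0 iff DST and SRC share the same byte offset
	   within a quadword, so after a partial first word the copy can
	   proceed one aligned quadword at a time with no shifting.  */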

	/* We are co-aligned; take care of a partial first word.  */
	ldq_u	t1, 0(a1)	# e0    : load first src word
	and	a0, 7, t0	# .. e1 : take care not to load a word ...
	addq	a1, 8, a1		# e0    :
	beq	t0, stxcpy_aligned	# .. e1 : ... if we won't need it
	ldq_u	t0, 0(a0)	# e0    :
	br	stxcpy_aligned	# .. e1 :


/* The source and destination are not co-aligned.  Align the destination
   and cope.  We have to be very careful about not reading too much and
   causing a SEGV.  */

	.align 3
$u_head:
	/* We know just enough now to be able to assemble the first
	   full source word.  We can still find a zero at the end of it
	   that prevents us from outputting the whole thing.

	   On entry to this basic block:
	   t0 == the first dest word, for masking back in if needed, else 0
	   t1 == the low bits of the first source word
	   t6 == bytemask that is -1 in dest word bytes */

	ldq_u	t2, 8(a1)	# e0    :
	addq	a1, 8, a1	# .. e1 :

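	/* extql shifts the previous source word right by a1's byte offset
	   and extqh shifts the newly loaded word left by the complement;
	   or-ing the two splices an unaligned source pair into a single
	   destination-aligned quadword.  */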
	extql	t1, a1, t1	# e0    :
	extqh	t2, a1, t4	# e0    :
	mskql	t0, a0, t0	# e0    :
	or	t1, t4, t1	# .. e1 :
	mskqh	t1, a0, t1	# e0    :
	or	t0, t1, t1	# e1    :

	or	t1, t6, t6	# e0    :
	cmpbge	zero, t6, t8	# .. e1 :
	lda	t6, -1		# e0    : for masking just below
	bne	t8, $u_final	# .. e1 :

	mskql	t6, a1, t6		# e0    : mask out the bits we have
	or	t6, t2, t2		# e1    :   already extracted before
	cmpbge	zero, t2, t8		# e0    :   testing eos
	bne	t8, $u_late_head_exit	# .. e1 (zdb)

	/* Finally, we've got all the stupid leading edge cases taken care
	   of and we can set up to enter the main loop.  */

	stq_u	t1, 0(a0)	# e0    : store first output word
	addq	a0, 8, a0	# .. e1 :
	extql	t2, a1, t0	# e0    : position ho-bits of lo word
	ldq_u	t2, 8(a1)	# .. e1 : read next high-order source word
	addq	a1, 8, a1	# e0    :
	cmpbge	zero, t2, t8	# .. e1 :
	nop			# e0    :
	bne	t8, $u_eos	# .. e1 :

	/* Unaligned copy main loop.  In order to avoid reading too much,
	   the loop is structured to detect zeros in aligned source words.
	   This has, unfortunately, effectively pulled half of a loop
	   iteration out into the head and half into the tail, but it does
	   prevent nastiness from accumulating in the very thing we want
	   to run as fast as possible.

	   On entry to this basic block:
	   t0 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word

	   We further know that t2 does not contain a null terminator.  */

	.align 3
$u_loop:
	extqh	t2, a1, t1	# e0    : extract high bits for current word
	addq	a1, 8, a1	# .. e1 :
	extql	t2, a1, t3	# e0    : extract low bits for next time
	addq	a0, 8, a0	# .. e1 :
	or	t0, t1, t1	# e0    : current dst word now complete
	ldq_u	t2, 0(a1)	# .. e1 : load high word for next time
	stq_u	t1, -8(a0)	# e0    : save the current word
	mov	t3, t0		# .. e1 :
	cmpbge	zero, t2, t8	# e0    : test new word for eos
	beq	t8, $u_loop	# .. e1 :

	/* We've found a zero somewhere in the source word we just read.
	   If it resides in the lower half, we have one (probably partial)
	   word to write out, and if it resides in the upper half, we
	   have one full and one partial word left to write out.

	   On entry to this basic block:
	   t0 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word.  */
$u_eos:
	extqh	t2, a1, t1	# e0    :
	or	t0, t1, t1	# e1    : first (partial) source word complete

	cmpbge	zero, t1, t8	# e0    : is the null in this first word?
	bne	t8, $u_final	# .. e1 (zdb)

$u_late_head_exit:
	stq_u	t1, 0(a0)	# e0    : the null was in the high-order bits
	addq	a0, 8, a0	# .. e1 :
	extql	t2, a1, t1	# e0    :
	cmpbge	zero, t1, t8	# .. e1 :

	/* Take care of a final (probably partial) result word.
	   On entry to this basic block:
	   t1 == assembled source word
	   t8 == cmpbge mask that found the null.  */
$u_final:
	negq	t8, t6		# e0    : isolate low bit set
	and	t6, t8, t12	# e1    :

	and	t12, 0x80, t6	# e0    : avoid dest word load if we can
	bne	t6, 1f		# .. e1 (zdb)

	ldq_u	t0, 0(a0)	# e0    :
	subq	t12, 1, t6	# .. e1 :
	or	t6, t12, t8	# e0    :
	zapnot	t1, t6, t1	# .. e1 : kill source bytes >= null
	zap	t0, t8, t0	# e0    : kill dest bytes <= null
	or	t0, t1, t1	# e1    :

1:	stq_u	t1, 0(a0)	# e0    :
	ret	(t9)		# .. e1 :

	/* Unaligned copy entry point.  */
	.align 3
$unaligned:

	ldq_u	t1, 0(a1)	# e0    : load first source word

	and	a0, 7, t4	# .. e1 : find dest misalignment
	and	a1, 7, t5	# e0    : find src misalignment

	/* Conditionally load the first destination word and a bytemask
	   with 0xff indicating that the destination byte is sacrosanct.  */

	mov	zero, t0	# .. e1 :
	mov	zero, t6	# e0    :
	beq	t4, 1f		# .. e1 :
	ldq_u	t0, 0(a0)	# e0    :
	lda	t6, -1		# .. e1 :
	mskql	t6, a0, t6	# e0    :
1:
	subq	a1, t4, a1	# .. e1 : sub dest misalignment from src addr

	/* If source misalignment is larger than dest misalignment, we need
	   extra startup checks to avoid SEGV.  */

	cmplt	t4, t5, t12	# e0    :
	beq	t12, $u_head	# .. e1 (zdb)

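	/* The danger: if the null already lies in this first (partial)
	   source word, $u_head's load of the next aligned word at 8(a1)
	   could cross into an unmapped page.  So check the valid bytes
	   of the first source word for a zero before entering $u_head.  */
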
	lda	t2, -1		# e1    : mask out leading garbage in source
	mskqh	t2, t5, t2	# e0    :
	nop			# e0    :
	ornot	t1, t2, t3	# .. e1 :
	cmpbge	zero, t3, t8	# e0    : is there a zero?
	beq	t8, $u_head	# .. e1 (zdb)

	/* At this point we've found a zero in the first partial word of
	   the source.  We need to isolate the valid source data and mask
	   it into the original destination data.  (Incidentally, we know
	   that we'll need at least one byte of that original dest word.) */

	ldq_u	t0, 0(a0)	# e0    :

	negq	t8, t6		# .. e1 : build bitmask of bytes <= zero
	and	t6, t8, t12	# e0    :
	and	a1, 7, t5	# .. e1 :
	subq	t12, 1, t6	# e0    :
	or	t6, t12, t8	# e1    :
	srl	t12, t5, t12	# e0    : adjust final null return value

	zapnot	t2, t8, t2	# .. e1 : prepare source word; mirror changes
	and	t1, t2, t1	# e1    : to source validity mask
	extql	t2, a1, t2	# .. e0 :
	extql	t1, a1, t1	# e0    :

	andnot	t0, t2, t0	# .. e1 : zero place for source to reside
	or	t0, t1, t1	# e1    : and put it there
	stq_u	t1, 0(a0)	# .. e0 :
	ret	(t9)		# e1    :

	.end __stxcpy