xref: /OK3568_Linux_fs/u-boot/arch/arc/lib/strcpy-700.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2014 Synopsys, Inc. All rights reserved.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * If dst and src are both 4-byte aligned, copy 8 bytes at a time.
 * If the src is 4-byte but not 8-byte aligned, we first copy one 4-byte
 * word to get it 8-byte aligned.  That way we can do a little
 * read-ahead without dereferencing a cache line that we should not
 * touch.
 * Note that short and long instructions have been scheduled to avoid
 * branch stalls.
 * The bne_s to r3z could be made unaligned & long to avoid a stall
 * there, but it is not likely to be taken often, and it would also be
 * likely to cost an unaligned mispredict at the next call.
 */
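/*
 * For reference, a rough C sketch (illustration only; the helper name is
 * made up and it is not part of this file) of the word-at-a-time NUL test
 * used below, with r8 = 0x01010101 and r12 = ror(r8, 1) = 0x80808080:
 *
 *	#include <stdint.h>
 *	static int word_has_nul(uint32_t x)
 *	{
 *		// Nonzero iff some byte of x is 0x00: only a zero byte both
 *		// borrows from the subtraction and has its top bit clear in x.
 *		return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
 *	}
 */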

.global strcpy
.align 4
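/*
 * Entry: r0 = dst, r1 = src.  strcpy() must return dst, so r0 is left
 * untouched and r10 is used as the working dst pointer.  If either
 * pointer is not 4-byte aligned, fall back to the byte-at-a-time loop.
 */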
strcpy:
	or	%r2, %r0, %r1
	bmsk_s	%r2, %r2, 1
	brne.d	%r2, 0, charloop
	mov_s	%r10, %r0
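/*
 * Both pointers are 4-byte aligned: load the first src word and set up
 * the NUL-test constants (r8 = 0x01010101; the ror in the delay slot
 * gives r12 = 0x80808080).  If src is already 8-byte aligned (bit 2
 * clear), enter the unrolled loop at loop_start.
 */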
	ld_s	%r3, [%r1, 0]
	mov	%r8, 0x01010101
	bbit0.d	%r1, 2, loop_start
	ror	%r12, %r8
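/*
 * src is 4-byte but not 8-byte aligned: test the first word for a NUL
 * byte.  If it has one, finish byte-wise at r3z; otherwise keep it in
 * r4 and fall into the loop, which brings the read-ahead back onto an
 * 8-byte boundary.
 */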
	sub	%r2, %r3, %r8
	bic_s	%r2, %r2, %r3
	tst_s	%r2, %r12
	bne	r3z
	mov_s	%r4, %r3
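/*
 * Main loop: two words (8 bytes) per iteration.  Each word is checked
 * for an embedded NUL before it is stored; the load of the following
 * word runs ahead of the store of the previous one.
 */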
	.balign 4
loop:
	ld.a	%r3, [%r1, 4]
	st.ab	%r4, [%r10, 4]
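/*
 * loop_start is also the entry point for an 8-byte-aligned src: r3
 * holds the current word, r4 receives the read-ahead word.  A word
 * found to contain a NUL is moved into r3 and finished at r3z.
 */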
loop_start:
	ld.a	%r4, [%r1, 4]
	sub	%r2, %r3, %r8
	bic_s	%r2, %r2, %r3
	tst_s	%r2, %r12
	bne_s	r3z
	st.ab	%r3, [%r10, 4]
	sub	%r2, %r4, %r8
	bic	%r2, %r2, %r4
	tst	%r2, %r12
	beq	loop
	mov_s	%r3, %r4
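/*
 * r3z: r3 holds the word that contains the terminating NUL.  Store it
 * one byte at a time (low byte first on little-endian, high byte first
 * on big-endian) until the NUL has been written; r1 is reused as a
 * scratch byte here.
 */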
#ifdef __LITTLE_ENDIAN__
r3z:	bmsk.f	%r1, %r3, 7
	lsr_s	%r3, %r3, 8
#else /* __BIG_ENDIAN__ */
r3z:	lsr.f	%r1, %r3, 24
	asl_s	%r3, %r3, 8
#endif /* __LITTLE_ENDIAN__ */
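/*
 * The stb.ab sits in the bne.d delay slot, so it also runs on the
 * final, not-taken pass and writes the terminating NUL before the
 * return.
 */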
	bne.d	r3z
	stb.ab	%r1, [%r10, 1]
	j_s	[%blink]

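/*
 * Byte-at-a-time fallback for unaligned dst or src.  The store sits in
 * the brne.d delay slot, so the terminating NUL is copied as well.
 */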
	.balign	4
charloop:
	ldb.ab	%r3, [%r1, 1]
	brne.d	%r3, 0, charloop
	stb.ab	%r3, [%r10, 1]
	j	[%blink]