xref: /OK3568_Linux_fs/kernel/arch/alpha/lib/ev6-clear_user.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/ev6-clear_user.S
 * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Zero user space, handling exceptions as we go.
 *
 * We have to make sure that $0 is always up-to-date and contains the
 * right "bytes left to zero" value (and that it is updated only _after_
 * a successful copy).  There is also some rather minor exception setup
 * stuff.
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 * Try not to change the actual algorithm if possible for consistency.
 * Determining actual stalls (other than slotting) doesn't appear to be easy to do.
 * From perusing the source code context where this routine is called, it is
 * a fair assumption that significant fractions of entire pages are zeroed, so
 * it's going to be worth the effort to hand-unroll a big loop, and use wh64.
 * ASSUMPTION:
 *	The believed purpose of only updating $0 after a store is that a signal
 *	may come along during the execution of this chunk of code, and we don't
 *	want to leave a hole (and we also want to avoid repeating lots of work)
 */

#include <asm/export.h>
/*
 * EX(insn) - allow the wrapped instruction to fault on a user address.
 * It emits a __ex_table entry pairing the instruction's address (label
 * 99) with a fixup encoded in the "lda" pseudo-entry: on a fault the
 * exception handler resumes at $exception.  Because $0 is only
 * decremented after each successful store, $0 still holds the correct
 * "bytes left" count at that point and becomes the return value.
 */
#define EX(x,y...)			\
	99: x,##y;			\
	.section __ex_table,"a";	\
	.long 99b - .;			\
	lda $31, $exception-99b($31); 	\
	.previous

41*4882a593Smuzhiyun	.set noat
42*4882a593Smuzhiyun	.set noreorder
43*4882a593Smuzhiyun	.align 4
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun	.globl __clear_user
46*4882a593Smuzhiyun	.ent __clear_user
47*4882a593Smuzhiyun	.frame	$30, 0, $26
48*4882a593Smuzhiyun	.prologue 0
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun				# Pipeline info : Slotting & Comments
51*4882a593Smuzhiyun__clear_user:
52*4882a593Smuzhiyun	and	$17, $17, $0
53*4882a593Smuzhiyun	and	$16, 7, $4	# .. E  .. ..	: find dest head misalignment
54*4882a593Smuzhiyun	beq	$0, $zerolength # U  .. .. ..	:  U L U L
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun	addq	$0, $4, $1	# .. .. .. E	: bias counter
57*4882a593Smuzhiyun	and	$1, 7, $2	# .. .. E  ..	: number of misaligned bytes in tail
58*4882a593Smuzhiyun# Note - we never actually use $2, so this is a moot computation
59*4882a593Smuzhiyun# and we can rewrite this later...
60*4882a593Smuzhiyun	srl	$1, 3, $1	# .. E  .. ..	: number of quadwords to clear
61*4882a593Smuzhiyun	beq	$4, $headalign	# U  .. .. ..	: U L U L
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun/*
64*4882a593Smuzhiyun * Head is not aligned.  Write (8 - $4) bytes to head of destination
65*4882a593Smuzhiyun * This means $16 is known to be misaligned
66*4882a593Smuzhiyun */
67*4882a593Smuzhiyun	EX( ldq_u $5, 0($16) )	# .. .. .. L	: load dst word to mask back in
68*4882a593Smuzhiyun	beq	$1, $onebyte	# .. .. U  ..	: sub-word store?
69*4882a593Smuzhiyun	mskql	$5, $16, $5	# .. U  .. ..	: take care of misaligned head
70*4882a593Smuzhiyun	addq	$16, 8, $16	# E  .. .. .. 	: L U U L
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun	EX( stq_u $5, -8($16) )	# .. .. .. L	:
73*4882a593Smuzhiyun	subq	$1, 1, $1	# .. .. E  ..	:
74*4882a593Smuzhiyun	addq	$0, $4, $0	# .. E  .. ..	: bytes left -= 8 - misalignment
75*4882a593Smuzhiyun	subq	$0, 8, $0	# E  .. .. ..	: U L U L
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun	.align	4
78*4882a593Smuzhiyun/*
79*4882a593Smuzhiyun * (The .align directive ought to be a moot point)
80*4882a593Smuzhiyun * values upon initial entry to the loop
81*4882a593Smuzhiyun * $1 is number of quadwords to clear (zero is a valid value)
82*4882a593Smuzhiyun * $2 is number of trailing bytes (0..7) ($2 never used...)
83*4882a593Smuzhiyun * $16 is known to be aligned 0mod8
84*4882a593Smuzhiyun */
85*4882a593Smuzhiyun$headalign:
86*4882a593Smuzhiyun	subq	$1, 16, $4	# .. .. .. E	: If < 16, we can not use the huge loop
87*4882a593Smuzhiyun	and	$16, 0x3f, $2	# .. .. E  ..	: Forward work for huge loop
88*4882a593Smuzhiyun	subq	$2, 0x40, $3	# .. E  .. ..	: bias counter (huge loop)
89*4882a593Smuzhiyun	blt	$4, $trailquad	# U  .. .. ..	: U L U L
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun/*
92*4882a593Smuzhiyun * We know that we're going to do at least 16 quads, which means we are
93*4882a593Smuzhiyun * going to be able to use the large block clear loop at least once.
94*4882a593Smuzhiyun * Figure out how many quads we need to clear before we are 0mod64 aligned
95*4882a593Smuzhiyun * so we can use the wh64 instruction.
96*4882a593Smuzhiyun */
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun	nop			# .. .. .. E
99*4882a593Smuzhiyun	nop			# .. .. E  ..
100*4882a593Smuzhiyun	nop			# .. E  .. ..
101*4882a593Smuzhiyun	beq	$3, $bigalign	# U  .. .. ..	: U L U L : Aligned 0mod64
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun$alignmod64:
104*4882a593Smuzhiyun	EX( stq_u $31, 0($16) )	# .. .. .. L
105*4882a593Smuzhiyun	addq	$3, 8, $3	# .. .. E  ..
106*4882a593Smuzhiyun	subq	$0, 8, $0	# .. E  .. ..
107*4882a593Smuzhiyun	nop			# E  .. .. ..	: U L U L
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun	nop			# .. .. .. E
110*4882a593Smuzhiyun	subq	$1, 1, $1	# .. .. E  ..
111*4882a593Smuzhiyun	addq	$16, 8, $16	# .. E  .. ..
112*4882a593Smuzhiyun	blt	$3, $alignmod64	# U  .. .. ..	: U L U L
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun$bigalign:
115*4882a593Smuzhiyun/*
116*4882a593Smuzhiyun * $0 is the number of bytes left
117*4882a593Smuzhiyun * $1 is the number of quads left
118*4882a593Smuzhiyun * $16 is aligned 0mod64
119*4882a593Smuzhiyun * we know that we'll be taking a minimum of one trip through
120*4882a593Smuzhiyun * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
121*4882a593Smuzhiyun * We are _not_ going to update $0 after every single store.  That
122*4882a593Smuzhiyun * would be silly, because there will be cross-cluster dependencies
123*4882a593Smuzhiyun * no matter how the code is scheduled.  By doing it in slightly
124*4882a593Smuzhiyun * staggered fashion, we can still do this loop in 5 fetches
125*4882a593Smuzhiyun * The worse case will be doing two extra quads in some future execution,
126*4882a593Smuzhiyun * in the event of an interrupted clear.
127*4882a593Smuzhiyun * Assumes the wh64 needs to be for 2 trips through the loop in the future
128*4882a593Smuzhiyun * The wh64 is issued on for the starting destination address for trip +2
129*4882a593Smuzhiyun * through the loop, and if there are less than two trips left, the target
130*4882a593Smuzhiyun * address will be for the current trip.
131*4882a593Smuzhiyun */
132*4882a593Smuzhiyun	nop			# E :
133*4882a593Smuzhiyun	nop			# E :
134*4882a593Smuzhiyun	nop			# E :
135*4882a593Smuzhiyun	bis	$16,$16,$3	# E : U L U L : Initial wh64 address is dest
136*4882a593Smuzhiyun	/* This might actually help for the current trip... */
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun$do_wh64:
139*4882a593Smuzhiyun	wh64	($3)		# .. .. .. L1	: memory subsystem hint
140*4882a593Smuzhiyun	subq	$1, 16, $4	# .. .. E  ..	: Forward calculation - repeat the loop?
141*4882a593Smuzhiyun	EX( stq_u $31, 0($16) )	# .. L  .. ..
142*4882a593Smuzhiyun	subq	$0, 8, $0	# E  .. .. ..	: U L U L
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun	addq	$16, 128, $3	# E : Target address of wh64
145*4882a593Smuzhiyun	EX( stq_u $31, 8($16) )	# L :
146*4882a593Smuzhiyun	EX( stq_u $31, 16($16) )	# L :
147*4882a593Smuzhiyun	subq	$0, 16, $0	# E : U L L U
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun	nop			# E :
150*4882a593Smuzhiyun	EX( stq_u $31, 24($16) )	# L :
151*4882a593Smuzhiyun	EX( stq_u $31, 32($16) )	# L :
152*4882a593Smuzhiyun	subq	$0, 168, $5	# E : U L L U : two trips through the loop left?
153*4882a593Smuzhiyun	/* 168 = 192 - 24, since we've already completed some stores */
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun	subq	$0, 16, $0	# E :
156*4882a593Smuzhiyun	EX( stq_u $31, 40($16) )	# L :
157*4882a593Smuzhiyun	EX( stq_u $31, 48($16) )	# L :
158*4882a593Smuzhiyun	cmovlt	$5, $16, $3	# E : U L L U : Latency 2, extra mapping cycle
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun	subq	$1, 8, $1	# E :
161*4882a593Smuzhiyun	subq	$0, 16, $0	# E :
162*4882a593Smuzhiyun	EX( stq_u $31, 56($16) )	# L :
163*4882a593Smuzhiyun	nop			# E : U L U L
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun	nop			# E :
166*4882a593Smuzhiyun	subq	$0, 8, $0	# E :
167*4882a593Smuzhiyun	addq	$16, 64, $16	# E :
168*4882a593Smuzhiyun	bge	$4, $do_wh64	# U : U L U L
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun$trailquad:
171*4882a593Smuzhiyun	# zero to 16 quadwords left to store, plus any trailing bytes
172*4882a593Smuzhiyun	# $1 is the number of quadwords left to go.
173*4882a593Smuzhiyun	#
174*4882a593Smuzhiyun	nop			# .. .. .. E
175*4882a593Smuzhiyun	nop			# .. .. E  ..
176*4882a593Smuzhiyun	nop			# .. E  .. ..
177*4882a593Smuzhiyun	beq	$1, $trailbytes	# U  .. .. ..	: U L U L : Only 0..7 bytes to go
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun$onequad:
180*4882a593Smuzhiyun	EX( stq_u $31, 0($16) )	# .. .. .. L
181*4882a593Smuzhiyun	subq	$1, 1, $1	# .. .. E  ..
182*4882a593Smuzhiyun	subq	$0, 8, $0	# .. E  .. ..
183*4882a593Smuzhiyun	nop			# E  .. .. ..	: U L U L
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun	nop			# .. .. .. E
186*4882a593Smuzhiyun	nop			# .. .. E  ..
187*4882a593Smuzhiyun	addq	$16, 8, $16	# .. E  .. ..
188*4882a593Smuzhiyun	bgt	$1, $onequad	# U  .. .. ..	: U L U L
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun	# We have an unknown number of bytes left to go.
191*4882a593Smuzhiyun$trailbytes:
192*4882a593Smuzhiyun	nop			# .. .. .. E
193*4882a593Smuzhiyun	nop			# .. .. E  ..
194*4882a593Smuzhiyun	nop			# .. E  .. ..
195*4882a593Smuzhiyun	beq	$0, $zerolength	# U  .. .. ..	: U L U L
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun	# $0 contains the number of bytes left to copy (0..31)
198*4882a593Smuzhiyun	# so we will use $0 as the loop counter
199*4882a593Smuzhiyun	# We know for a fact that $0 > 0 zero due to previous context
200*4882a593Smuzhiyun$onebyte:
201*4882a593Smuzhiyun	EX( stb $31, 0($16) )	# .. .. .. L
202*4882a593Smuzhiyun	subq	$0, 1, $0	# .. .. E  ..	:
203*4882a593Smuzhiyun	addq	$16, 1, $16	# .. E  .. ..	:
204*4882a593Smuzhiyun	bgt	$0, $onebyte	# U  .. .. ..	: U L U L
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun$zerolength:
207*4882a593Smuzhiyun$exception:			# Destination for exception recovery(?)
208*4882a593Smuzhiyun	nop			# .. .. .. E	:
209*4882a593Smuzhiyun	nop			# .. .. E  ..	:
210*4882a593Smuzhiyun	nop			# .. E  .. ..	:
211*4882a593Smuzhiyun	ret	$31, ($26), 1	# L0 .. .. ..	: L U L U
212*4882a593Smuzhiyun	.end __clear_user
213*4882a593Smuzhiyun	EXPORT_SYMBOL(__clear_user)
214