/* SPDX-License-Identifier: GPL-2.0 */
/*
 * McKinley-optimized version of copy_page().
 *
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger <davidm@hpl.hp.com>
 *
 * Inputs:
 *	in0:	address of target page
 *	in1:	address of source page
 * Output:
 *	no return value
 *
 * General idea:
 *	- use regular loads and stores to prefetch data to avoid consuming M-slot just for
 *	  lfetches => good for in-cache performance
 *	- avoid L2 bank-conflicts by not storing into the same 16-byte bank within a single
 *	  cycle
 *
 * Principle of operation:
 *	First, note that L1 has a line-size of 64 bytes and L2 a line-size of 128 bytes.
 *	To avoid secondary misses in L2, we prefetch both source and destination with a line-size
 *	of 128 bytes.  When both of these lines are in the L2 and the first half of the
 *	source line is in L1, we start copying the remaining words.  The second half of the
 *	source line is prefetched in an earlier iteration, so that by the time we start
 *	accessing it, it's also present in the L1.
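 *
 *	(For instance, with 16KB pages that is PAGE_SIZE/128 = 128 L2 lines of sixteen
 *	8-byte words each; with 64KB pages it is 512 lines, which is why ar.lc for the
 *	main loop below has to be loaded from a register rather than an immediate.)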
 *
 *	We use a software-pipelined loop to control the overall operation.  The pipeline
 *	has 2*PREFETCH_DIST+K stages.  The first PREFETCH_DIST stages are used for prefetching
 *	source cache-lines.  The second PREFETCH_DIST stages are used for prefetching destination
 *	cache-lines, and the last K stages are used to copy the cache-line words not copied by
 *	the prefetches.  The four relevant points in the pipeline are called A, B, C, D:
 *	p[A] is TRUE if a source-line should be prefetched, p[B] is TRUE if a destination-line
 *	should be prefetched, p[C] is TRUE if the second half of an L2 line should be brought
 *	into L1D, and p[D] is TRUE if a cacheline needs to be copied.
 *
 *	This all sounds very complicated, but thanks to the modulo-scheduled loop support,
 *	the resulting code is very regular and quite easy to follow (once you get the idea).
 *
 *	As a secondary optimization, the first 2*PREFETCH_DIST iterations are implemented
 *	as the separate .prefetch_loop.  Logically, this loop performs exactly like the
 *	main-loop (.line_copy), but has all known-to-be-predicated-off instructions removed,
 *	so that each loop iteration is faster (again, good for cached case).
 *
 *	When reading the code, it helps to keep the following picture in mind:
 *
 *	       word 0 word 1
 *	      +------+------+---
 *	      | v[x] |  t1  | ^
 *	      | t2   |  t3  | |
 *	      | t4   |  t5  | |
 *	      | t6   |  t7  | | 128 bytes
 *	      | n[y] |  t9  | | (L2 cache line)
 *	      | t10  |  t11 | |
 *	      | t12  |  t13 | |
 *	      | t14  |  t15 | v
 *	      +------+------+---
 *
 *	Here, v[x] is copied by the (memory) prefetch.  n[y] is loaded at p[C]
 *	to fetch the second half of the L2 cache line into L1, and the tX words are copied in
 *	an order that avoids bank conflicts.
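 *
 *	As a rough, informal sketch (an editor's addition, not from the original author),
 *	one pass through the main loop can be read as the following C-like pseudocode,
 *	where the stage predicates correspond to A, B, C, D defined below:
 *
 *	    if (p[A]) { v[A] = *src_pre_mem;  src_pre_mem += 128; }  // start fetching the next src line
 *	    if (p[B]) { *dst_pre_mem = v[B];  dst_pre_mem += 128; }  // start fetching the next dst line
 *	    if (p[C]) { n[0] = *src_pre_l2;   src_pre_l2  += 128; }  // pull 2nd half of a src line into L1
 *	    if (p[D]) { *dst_pre_l2 = n[D-C]; dst_pre_l2  += 128;    // finish a line: store n[y] ...
 *	                copy_the_fourteen_t_words(); }               // ... and copy t1..t15 (no t8)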
 */
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/export.h>

#define PREFETCH_DIST	8		// McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)

#define src0		r2
#define src1		r3
#define dst0		r9
#define dst1		r10
#define src_pre_mem	r11
#define dst_pre_mem	r14
#define src_pre_l2	r15
#define dst_pre_l2	r16
#define t1		r17
#define t2		r18
#define t3		r19
#define t4		r20
#define t5		t1	// alias!
#define t6		t2	// alias!
#define t7		t3	// alias!
#define t9		t5	// alias!
#define t10		t4	// alias!
#define t11		t7	// alias!
#define t12		t6	// alias!
#define t14		t10	// alias!
#define t13		r21
#define t15		r22

#define saved_lc	r23
#define saved_pr	r24

#define	A	0
#define B	(PREFETCH_DIST)
#define C	(B + PREFETCH_DIST)
#define D	(C + 3)
#define N	(D + 1)
#define Nrot	((N + 7) & ~7)
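// For reference (derived values, not in the original source): with PREFETCH_DIST = 8
// this gives A = 0, B = 8, C = 16, D = 19, N = 20 pipeline stages and Nrot = 24
// rotating registers.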

GLOBAL_ENTRY(copy_page)
	.prologue
	alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot

	.rotr v[2*PREFETCH_DIST], n[D-C+1]
	.rotp p[N]

	.save ar.lc, saved_lc
	mov saved_lc = ar.lc
	.save pr, saved_pr
	mov saved_pr = pr
	.body

	mov src_pre_mem = in1
	mov pr.rot = 0x10000			// p16 = 1, all other rotating predicates = 0
	mov ar.ec = 1				// special unrolled loop

	mov dst_pre_mem = in0
	mov ar.lc = 2*PREFETCH_DIST - 1

	add src_pre_l2 = 8*8, in1
	add dst_pre_l2 = 8*8, in0
	add src0 = 8, in1			// first t1 src
	add src1 = 3*8, in1			// first t3 src
	add dst0 = 8, in0			// first t1 dst
	add dst1 = 3*8, in0			// first t3 dst
	// ar.lc for .line_copy: 128-byte lines per page, minus the 2*PREFETCH_DIST
	// iterations already executed by .prefetch_loop, minus 1 (ar.lc counts down to 0)
	mov t1 = (PAGE_SIZE/128) - (2*PREFETCH_DIST) - 1
	nop.m 0
	nop.i 0
	;;
	// same as .line_copy loop, but with all predicated-off instructions removed:
.prefetch_loop:
(p[A])	ld8 v[A] = [src_pre_mem], 128		// M0
(p[B])	st8 [dst_pre_mem] = v[B], 128		// M2
	br.ctop.sptk .prefetch_loop
	;;
	cmp.eq p16, p0 = r0, r0			// reset p16 to 1 (br.ctop cleared it to zero)
	mov ar.lc = t1				// with 64KB pages, t1 is too big to fit in 8 bits!
	mov ar.ec = N				// # of stages in pipeline
	;;
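	// Modulo-scheduled main loop.  Each instruction group below issues two loads and
	// two stores (ports M0-M3, as annotated), and one pass through the eight groups
	// moves one L2 line's worth of data (16 double-words) while stages A-C keep
	// prefetching lines ahead.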
.line_copy:
(p[D])	ld8 t2 = [src0], 3*8			// M0
(p[D])	ld8 t4 = [src1], 3*8			// M1
(p[B])	st8 [dst_pre_mem] = v[B], 128		// M2 prefetch dst from memory
(p[D])	st8 [dst_pre_l2] = n[D-C], 128		// M3 prefetch dst from L2
	;;
(p[A])	ld8 v[A] = [src_pre_mem], 128		// M0 prefetch src from memory
(p[C])	ld8 n[0] = [src_pre_l2], 128		// M1 prefetch src from L2
(p[D])	st8 [dst0] =  t1, 8			// M2
(p[D])	st8 [dst1] =  t3, 8			// M3
	;;
(p[D])	ld8  t5 = [src0], 8
(p[D])	ld8  t7 = [src1], 3*8
(p[D])	st8 [dst0] =  t2, 3*8
(p[D])	st8 [dst1] =  t4, 3*8
	;;
(p[D])	ld8  t6 = [src0], 3*8
(p[D])	ld8 t10 = [src1], 8
(p[D])	st8 [dst0] =  t5, 8
(p[D])	st8 [dst1] =  t7, 3*8
	;;
(p[D])	ld8  t9 = [src0], 3*8
(p[D])	ld8 t11 = [src1], 3*8
(p[D])	st8 [dst0] =  t6, 3*8
(p[D])	st8 [dst1] = t10, 8
	;;
(p[D])	ld8 t12 = [src0], 8
(p[D])	ld8 t14 = [src1], 8
(p[D])	st8 [dst0] =  t9, 3*8
(p[D])	st8 [dst1] = t11, 3*8
	;;
(p[D])	ld8 t13 = [src0], 4*8
(p[D])	ld8 t15 = [src1], 4*8
(p[D])	st8 [dst0] = t12, 8
(p[D])	st8 [dst1] = t14, 8
	;;
(p[D-1])ld8  t1 = [src0], 8
(p[D-1])ld8  t3 = [src1], 8
(p[D])	st8 [dst0] = t13, 4*8
(p[D])	st8 [dst1] = t15, 4*8
	br.ctop.sptk .line_copy
	;;
	mov ar.lc = saved_lc
	mov pr = saved_pr, -1
	br.ret.sptk.many rp
END(copy_page)
EXPORT_SYMBOL(copy_page)