/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 * Optimized version of the standard memcpy() function
 *
 * Inputs:
 *	in0:	destination address
 *	in1:	source address
 *	in2:	number of bytes to copy
 * Output:
 *	no return value
 *
 * Copyright (C) 2000-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/asmmacro.h>
#include <asm/export.h>

GLOBAL_ENTRY(memcpy)

#	define MEM_LAT	21		/* latency to memory */

#	define dst	r2
#	define src	r3
#	define retval	r8
#	define saved_pfs r9
#	define saved_lc	r10
#	define saved_pr	r11
#	define cnt	r16
#	define src2	r17
#	define t0	r18
#	define t1	r19
#	define t2	r20
#	define t3	r21
#	define t4	r22
#	define src_end	r23

#	define N	(MEM_LAT + 4)
#	define Nrot	((N + 7) & ~7)

	/*
	 * First, check if everything (src, dst, len) is a multiple of eight.  If
	 * so, we handle everything with no taken branches (other than the loop
	 * itself) and a small icache footprint.  Otherwise, we jump off to
	 * the more general copy routine handling arbitrary
	 * sizes/alignment etc.
	 */
	.prologue
	.save ar.pfs, saved_pfs
	alloc saved_pfs=ar.pfs,3,Nrot,0,Nrot
	.save ar.lc, saved_lc
	mov saved_lc=ar.lc
	or t0=in0,in1
	;;

	or t0=t0,in2
	.save pr, saved_pr
	mov saved_pr=pr

	.body

	cmp.eq p6,p0=in2,r0	// zero length?
	mov retval=in0		// return dst
(p6)	br.ret.spnt.many rp	// zero length, return immediately
	;;

	mov dst=in0		// copy because of rotation
	shr.u cnt=in2,3		// number of 8-byte words to copy
	mov pr.rot=1<<16
	;;

	adds cnt=-1,cnt		// br.ctop is repeat/until
	cmp.gtu p7,p0=16,in2	// copying less than 16 bytes?
	mov ar.ec=N
	;;

	and t0=0x7,t0
	mov ar.lc=cnt
	;;
	cmp.ne p6,p0=t0,r0

	mov src=in1		// copy because of rotation
(p7)	br.cond.spnt.few .memcpy_short
(p6)	br.cond.spnt.few .memcpy_long
	;;
	nop.m	0
	;;
	nop.m	0
	nop.i	0
	;;
	nop.m	0
	;;
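	/*
	 * For reference, the modulo-scheduled loop below amounts to this C
	 * fragment (a minimal sketch that ignores the software pipelining;
	 * `d', `s' and `n' are illustrative names, not registers used in
	 * this file):
	 *
	 *	unsigned long *d = (unsigned long *) in0;
	 *	const unsigned long *s = (const unsigned long *) in1;
	 *	for (unsigned long n = in2 >> 3; n != 0; n--)
	 *		*d++ = *s++;
	 *
	 * Each br.ctop rotates val[] and p[]: a word loaded into val[0]
	 * under p[0] reaches val[N-1] after N-1 rotations and is only then
	 * stored under p[N-1], so enough loads are in flight to cover the
	 * MEM_LAT-cycle memory latency.
	 */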
	.rotr val[N]
	.rotp p[N]
	.align 32
1: { .mib
(p[0])	ld8 val[0]=[src],8
	nop.i 0
	brp.loop.imp 1b, 2f
}
2: { .mfb
(p[N-1])st8 [dst]=val[N-1],8
	nop.f 0
	br.ctop.dptk.few 1b
}
	;;
	mov ar.lc=saved_lc
	mov pr=saved_pr,-1
	mov ar.pfs=saved_pfs
	br.ret.sptk.many rp

	/*
	 * Small (<16 bytes) unaligned copying is done via a simple byte-at-a-time
	 * copy loop.  This performs relatively poorly on Itanium, but it doesn't
	 * get used very often (gcc inlines small copies) and due to atomicity
	 * issues, we want to avoid read-modify-write of entire words.
	 */
	.align 32
.memcpy_short:
	adds cnt=-1,in2		// br.ctop is repeat/until
	mov ar.ec=MEM_LAT
	brp.loop.imp 1f, 2f
	;;
	mov ar.lc=cnt
	;;
	nop.m	0
	;;
	nop.m	0
	nop.i	0
	;;
	nop.m	0
	;;
	nop.m	0
	;;
	/*
	 * It is faster to put a stop bit in the loop here because it makes
	 * the pipeline shorter (and latency is what matters on short copies).
	 */
	.align 32
1: { .mib
(p[0])	ld1 val[0]=[src],1
	nop.i 0
	brp.loop.imp 1b, 2f
} ;;
2: { .mfb
(p[MEM_LAT-1])st1 [dst]=val[MEM_LAT-1],1
	nop.f 0
	br.ctop.dptk.few 1b
} ;;
	mov ar.lc=saved_lc
	mov pr=saved_pr,-1
	mov ar.pfs=saved_pfs
	br.ret.sptk.many rp

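	/*
	 * For reference, the short path above implements (modulo the
	 * software pipelining) nothing more than this C loop; the names are
	 * illustrative, not taken from this file:
	 *
	 *	static void copy_bytes(char *d, const char *s, unsigned long n)
	 *	{
	 *		while (n--)
	 *			*d++ = *s++;
	 *	}
	 *
	 * Byte loads/stores never touch adjacent bytes, which is the
	 * atomicity property the comment above refers to.
	 */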
	/*
	 * Large (>= 16 bytes) copying is done in a fancy way.  Latency isn't
	 * an overriding concern here, but throughput is.  We first do
	 * sub-word copying until the destination is aligned, then we check
	 * if the source is also aligned.  If so, we do a simple load/store-loop
	 * until there are fewer than 8 bytes left over and then we do the tail,
	 * by storing the last few bytes using sub-word copying.  If the source
	 * is not aligned, we branch off to the non-congruent loop.
	 *
	 *	    stage:	op:
	 *		 0	ld
	 *		 :
	 *	 MEM_LAT+3	shrp
	 *	 MEM_LAT+4	st
	 *
	 * On Itanium, the pipeline itself runs without stalls.  However, br.ctop
	 * seems to introduce an unavoidable bubble in the pipeline so the overall
	 * latency is 2 cycles/iteration.  This gives us a _copy_ throughput
	 * of 4 bytes/cycle.  Still not bad.
	 */
#	undef N
#	undef Nrot
#	define N	(MEM_LAT + 5)		/* number of stages */
#	define Nrot	((N+1 + 2 + 7) & ~7)	/* number of rotating regs */

#define LOG_LOOP_SIZE	6

.memcpy_long:
	alloc t3=ar.pfs,3,Nrot,0,Nrot	// resize register frame
	and t0=-8,src		// t0 = src & ~7
	and t2=7,src		// t2 = src & 7
	;;
	ld8 t0=[t0]		// t0 = 1st source word
	adds src2=7,src		// src2 = (src + 7)
	sub t4=r0,dst		// t4 = -dst
	;;
	and src2=-8,src2	// src2 = (src + 7) & ~7
	shl t2=t2,3		// t2 = 8*(src & 7)
	shl t4=t4,3		// t4 = 8*(-dst); bits 3-5 hold -dst & 7
	;;
	ld8 t1=[src2]		// t1 = 1st source word if src is 8-byte aligned, 2nd otherwise
	sub t3=64,t2		// t3 = 64-8*(src & 7)
	shr.u t0=t0,t2
	;;
	add src_end=src,in2
	shl t1=t1,t3
	mov pr=t4,0x38		// (p5,p4,p3) = -dst & 7 = # of bytes needed to align dst
	;;
	or t0=t0,t1
	mov cnt=r0
	adds src_end=-1,src_end
	;;
(p3)	st1 [dst]=t0,1
(p3)	shr.u t0=t0,8
(p3)	adds cnt=1,cnt
	;;
(p4)	st2 [dst]=t0,2
(p4)	shr.u t0=t0,16
(p4)	adds cnt=2,cnt
	;;
(p5)	st4 [dst]=t0,4
(p5)	adds cnt=4,cnt
	and src_end=-8,src_end	// src_end = last word of source buffer
	;;

	// At this point, dst is aligned to 8 bytes and there are at least 16-7=9 bytes left to copy:

1:{	add src=cnt,src			// make src point to remainder of source buffer
	sub cnt=in2,cnt			// cnt = number of bytes left to copy
	mov t4=ip
  }	;;
	and src2=-8,src			// align source pointer
	adds t4=.memcpy_loops-1b,t4
	mov ar.ec=N

	and t0=7,src			// t0 = src & 7
	shr.u t2=cnt,3			// t2 = number of 8-byte words left to copy
	shl cnt=cnt,3			// move bits 0-2 to 3-5
	;;
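	/*
	 * In the non-congruent loops below, each destination word is a
	 * funnel shift of two consecutive source words, which is what shrp
	 * computes.  A C sketch, for sh = 8*(src & 7) with sh != 0 (the
	 * sh == 0 instance degenerates to a plain word copy; the names are
	 * illustrative only):
	 *
	 *	static inline unsigned long merge(unsigned long lo,  // earlier source word
	 *					  unsigned long hi,  // next source word
	 *					  unsigned int sh)   // 8*(src & 7), nonzero
	 *	{
	 *		return (lo >> sh) | (hi << (64 - sh));
	 *	}
	 *
	 * shrp takes only an immediate shift count, so one COPY() instance
	 * is emitted per value of (src & 7), each 2^LOG_LOOP_SIZE = 64 bytes
	 * long, and we branch via b6 into the right one at
	 * .memcpy_loops + 64*(src & 7).
	 */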
	.rotr val[N+1], w[2]
	.rotp p[N]

	cmp.ne p6,p0=t0,r0		// is src aligned, too?
	shl t0=t0,LOG_LOOP_SIZE		// t0 = 64*(src & 7) = offset of COPY() instance
	adds t2=-1,t2			// br.ctop is repeat/until
	;;
	add t4=t0,t4
	mov pr=cnt,0x38			// set (p5,p4,p3) to # of last-word bytes to copy
	mov ar.lc=t2
	;;
	nop.m	0
	;;
	nop.m	0
	nop.i	0
	;;
	nop.m	0
	;;
(p6)	ld8 val[1]=[src2],8		// prime the pump...
	mov b6=t4
	br.sptk.few b6
	;;

.memcpy_tail:
	// At this point, (p5,p4,p3) are set to the number of bytes left to copy (which is
	// less than 8) and t0 contains the last few bytes of the src buffer:
(p5)	st4 [dst]=t0,4
(p5)	shr.u t0=t0,32
	mov ar.lc=saved_lc
	;;
(p4)	st2 [dst]=t0,2
(p4)	shr.u t0=t0,16
	mov ar.pfs=saved_pfs
	;;
(p3)	st1 [dst]=t0
	mov pr=saved_pr,-1
	br.ret.sptk.many rp

///////////////////////////////////////////////////////
	.align 64

#define COPY(shift,index)								\
 1: { .mib										\
	(p[0])		ld8 val[0]=[src2],8;						\
	(p[MEM_LAT+3])	shrp w[0]=val[MEM_LAT+3],val[MEM_LAT+4-index],shift;		\
			brp.loop.imp 1b, 2f						\
    };											\
 2: { .mfb										\
	(p[MEM_LAT+4])	st8 [dst]=w[1],8;						\
			nop.f 0;							\
			br.ctop.dptk.few 1b;						\
    };											\
	;;										\
	ld8 val[N-1]=[src_end];	/* load last word (may be same as val[N]) */		\
	;;										\
	shrp t0=val[N-1],val[N-index],shift;						\
	br .memcpy_tail
.memcpy_loops:
	COPY(0, 1) /* no point special casing this---it doesn't go any faster without shrp */
	COPY(8, 0)
	COPY(16, 0)
	COPY(24, 0)
	COPY(32, 0)
	COPY(40, 0)
	COPY(48, 0)
	COPY(56, 0)

END(memcpy)
EXPORT_SYMBOL(memcpy)