/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 * Optimized version of the copy_user() routine.
 * It is used to copy data across the kernel/user boundary.
 *
 * The source and destination are always on opposite sides of
 * the boundary. When reading from user space we must catch
 * faults on loads. When writing to user space we must catch
 * errors on stores. Note that because of the nature of the copy
 * we don't need to worry about overlapping regions.
 *
 *
 * Inputs:
 *	in0	address of destination buffer
 *	in1	address of source buffer
 *	in2	number of bytes to copy
 *
 * Outputs:
 *	ret0	0 in case of success. The number of bytes NOT copied in
 *		case of error.
 *
 * Copyright (C) 2000-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * Fixme:
 *	- handle the case where we have more than 16 bytes and the
 *	  alignments are different.
 *	- more benchmarking
 *	- fix extraneous stop bit introduced by the EX() macro.
 */
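/*
 * Caller-level contract (illustrative C sketch based on the register
 * map below, dst=in0, src=in1, len=in2; the exact kernel prototype
 * lives in the uaccess headers):
 *
 *	unsigned long __copy_user(void *to, const void *from,
 *				  unsigned long count);
 *
 *	unsigned long left = __copy_user(to, from, count);
 *	if (left)
 *		;	/* fault: 'left' trailing bytes were NOT copied */
 */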
#include <asm/asmmacro.h>
#include <asm/export.h>

//
// Tuneable parameters
//
#define COPY_BREAK	16	// we do byte copy below (must be >=16)
#define PIPE_DEPTH	21	// pipe depth

#define EPI		p[PIPE_DEPTH-1]

//
// arguments
//
#define dst		in0
#define src		in1
#define len		in2

//
// local registers
//
#define t1		r2	// rshift in bytes
#define t2		r3	// lshift in bytes
#define rshift		r14	// right shift in bits
#define lshift		r15	// left shift in bits
#define word1		r16
#define word2		r17
#define cnt		r18
#define len2		r19
#define saved_lc	r20
#define saved_pr	r21
#define tmp		r22
#define val		r23
#define src1		r24
#define dst1		r25
#define src2		r26
#define dst2		r27
#define len1		r28
#define enddst		r29
#define endsrc		r30
#define saved_pfs	r31

GLOBAL_ENTRY(__copy_user)
	.prologue
	.save ar.pfs, saved_pfs
	alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7)

	.rotr val1[PIPE_DEPTH],val2[PIPE_DEPTH]
	.rotp p[PIPE_DEPTH]

	adds len2=-1,len	// br.ctop is repeat/until
	mov ret0=r0

	;;			// RAW of cfm when len=0
	cmp.eq p8,p0=r0,len	// check for zero length
	.save ar.lc, saved_lc
	mov saved_lc=ar.lc	// preserve ar.lc (slow)
(p8)	br.ret.spnt.many rp	// empty memcpy()
	;;
	add enddst=dst,len	// first byte after end of destination
	add endsrc=src,len	// first byte after end of source
	.save pr, saved_pr
	mov saved_pr=pr		// preserve predicates

	.body

	mov dst1=dst		// copy because of rotation
	mov ar.ec=PIPE_DEPTH
	mov pr.rot=1<<16	// p16=true all others are false

	mov src1=src		// copy because of rotation
	mov ar.lc=len2		// initialize lc for small count
	cmp.lt p10,p7=COPY_BREAK,len	// if len > COPY_BREAK then long copy

	xor tmp=src,dst		// same alignment test prepare
(p10)	br.cond.dptk .long_copy_user
	;;			// RAW pr.rot/p16 ?
	//
	// Now we do the byte by byte loop with software pipeline
	//
	// p7 is necessarily false by now
1:
	EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1)
	EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
	br.ctop.dptk.few 1b
	;;
	mov ar.lc=saved_lc
	mov pr=saved_pr,0xffffffffffff0000
	mov ar.pfs=saved_pfs		// restore ar.ec
	br.ret.sptk.many rp		// end of short memcpy
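	//
	// Scalar equivalent of the pipelined byte loop at label 1
	// above (illustrative C sketch only; the real loop keeps up to
	// PIPE_DEPTH iterations in flight, loads running ahead of
	// stores):
	//
	//	while (len--)
	//		*dst1++ = *src1++;
	//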
	//
	// Not 8-byte aligned
	//
.diff_align_copy_user:
	// At this point we know we have more than 16 bytes to copy
	// and also that src and dest do _not_ have the same alignment.
	and src2=0x7,src1				// src offset
	and dst2=0x7,dst1				// dst offset
	;;
	// The basic idea is that we copy byte-by-byte at the head so
	// that we can reach 8-byte alignment for both src1 and dst1.
	// Then copy the body using software pipelined 8-byte copy,
	// shifting the two back-to-back words right and left, then copy
	// the tail by copying byte-by-byte.
	//
	// Fault handling. If the byte-by-byte at the head fails on the
	// load, then restart and finish the pipeline by copying zeros
	// to the dst1. Then copy zeros for the rest of dst1.
	// If 8-byte software pipeline fails on the load, do the same as
	// failure_in3 does. If the byte-by-byte at the tail fails, it is
	// handled simply by failure_in_pipe1.
	//
	// In the p14 case the source has more bytes in its first word
	// (by the shifted part), whereas in the p15 case we need to
	// copy some bytes from the 2nd word of the source into the tail
	// of the 1st word of the destination.
	//

	//
	// Optimization. If dst1 is 8-byte aligned (quite common), we don't need
	// to copy the head to dst1, to start 8-byte copy software pipeline.
	// We know src1 is not 8-byte aligned in this case.
	//
	cmp.eq p14,p15=r0,dst2
(p15)	br.cond.spnt 1f
	;;
	sub t1=8,src2
	mov t2=src2
	;;
	shl rshift=t2,3
	sub len1=len,t1					// set len1
	;;
	sub lshift=64,rshift
	;;
	br.cond.spnt .word_copy_user
	;;
1:
	cmp.leu	p14,p15=src2,dst2
	sub t1=dst2,src2
	;;
	.pred.rel "mutex", p14, p15
(p14)	sub word1=8,src2				// (8 - src offset)
(p15)	sub t1=r0,t1					// absolute value
(p15)	sub word1=8,dst2				// (8 - dst offset)
	;;
	// For the case p14, we don't need to copy the shifted part to
	// the 1st word of destination.
	sub t2=8,t1
(p14)	sub word1=word1,t1
	;;
	sub len1=len,word1				// resulting len
(p15)	shl rshift=t1,3					// in bits
(p14)	shl rshift=t2,3
	;;
(p14)	sub len1=len1,t1
	adds cnt=-1,word1
	;;
	sub lshift=64,rshift
	mov ar.ec=PIPE_DEPTH
	mov pr.rot=1<<16	// p16=true all others are false
	mov ar.lc=cnt
	;;
2:
	EX(.failure_in_pipe2,(p16) ld1 val1[0]=[src1],1)
	EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
	br.ctop.dptk.few 2b
	;;
	clrrrb
	;;
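	//
	// Illustrative C sketch of the shift setup above ('off' stands
	// for whichever byte offset was selected: src2, t1 or t2, with
	// 0 < off < 8; the name is for illustration only):
	//
	//	rshift = off * 8;	/* shl rshift=off,3 */
	//	lshift = 64 - rshift;
	//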
.word_copy_user:
	cmp.gtu p9,p0=16,len1
(p9)	br.cond.spnt 4f			// if (16 > len1) skip 8-byte copy
	;;
	shr.u cnt=len1,3		// number of 64-bit words
	;;
	adds cnt=-1,cnt
	;;
	.pred.rel "mutex", p14, p15
(p14)	sub src1=src1,t2
(p15)	sub src1=src1,t1
	//
	// Now both src1 and dst1 point to an 8-byte aligned address. And
	// we have more than 8 bytes to copy.
	//
	mov ar.lc=cnt
	mov ar.ec=PIPE_DEPTH
	mov pr.rot=1<<16	// p16=true all others are false
	;;
3:
	//
	// The pipeline consists of 3 stages:
	// 1 (p16):	Load a word from src1
	// 2 (EPI_1):	Shift right pair, saving to tmp
	// 3 (EPI):	Store tmp to dst1
	//
	// To make it simple, use at least 2 (p16) loops to set up val1[n]
	// because we need 2 back-to-back val1[] to get tmp.
	// Note that this implies the shrp stage (EPI_1) must be p18 or
	// greater.
	//

#define EPI_1		p[PIPE_DEPTH-2]
#define SWITCH(pred, shift)	cmp.eq pred,p0=shift,rshift
#define CASE(pred, shift)	\
	(pred)	br.cond.spnt .copy_user_bit##shift
#define BODY(rshift)						\
.copy_user_bit##rshift:						\
1:								\
	EX(.failure_out,(EPI) st8 [dst1]=tmp,8);		\
(EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift;	\
	EX(3f,(p16) ld8 val1[1]=[src1],8);			\
(p16)	mov val1[0]=r0;						\
	br.ctop.dptk 1b;					\
	;;							\
	br.cond.sptk.many .diff_align_do_tail;			\
2:								\
(EPI)	st8 [dst1]=tmp,8;					\
(EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift;	\
3:								\
(p16)	mov val1[1]=r0;						\
(p16)	mov val1[0]=r0;						\
	br.ctop.dptk 2b;					\
	;;							\
	br.cond.sptk.many .failure_in2

	//
	// Since the 'shrp' instruction (which extracts from the
	// 128-bit pair of back-to-back words) only takes an immediate
	// shift count, we need to provide 7 cases below.
	//
	SWITCH(p6, 8)
	SWITCH(p7, 16)
	SWITCH(p8, 24)
	SWITCH(p9, 32)
	SWITCH(p10, 40)
	SWITCH(p11, 48)
	SWITCH(p12, 56)
	;;
	CASE(p6, 8)
	CASE(p7, 16)
	CASE(p8, 24)
	CASE(p9, 32)
	CASE(p10, 40)
	CASE(p11, 48)
	CASE(p12, 56)
	;;
	BODY(8)
	BODY(16)
	BODY(24)
	BODY(32)
	BODY(40)
	BODY(48)
	BODY(56)
	;;
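	//
	// What one BODY(rshift) step computes, as an illustrative C
	// expression over two consecutive source words w0 (older, lower
	// address) and w1 (newer); the names are for illustration only:
	//
	//	tmp = (w0 >> rshift) | (w1 << (64 - rshift));
	//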
.diff_align_do_tail:
	.pred.rel "mutex", p14, p15
(p14)	sub src1=src1,t1
(p14)	adds dst1=-8,dst1
(p15)	sub dst1=dst1,t1
	;;
4:
	// Tail correction.
	//
	// The problem with this pipelined loop is that the last word is not
	// loaded and thus part of the last word written is not correct.
	// To fix that, we simply copy the tail byte by byte.

	sub len1=endsrc,src1,1
	clrrrb
	;;
	mov ar.ec=PIPE_DEPTH
	mov pr.rot=1<<16	// p16=true all others are false
	mov ar.lc=len1
	;;
5:
	EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1)
	EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
	br.ctop.dptk.few 5b
	;;
	mov ar.lc=saved_lc
	mov pr=saved_pr,0xffffffffffff0000
	mov ar.pfs=saved_pfs
	br.ret.sptk.many rp
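	//
	// Loop-count convention used throughout (illustrative sketch):
	// br.ctop/br.cloop are repeat/until, so ar.lc is always loaded
	// with one less than the number of passes:
	//
	//	ar_lc = n - 1;	/* e.g. "sub len1=endsrc,src1,1" above */
	//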
	//
	// Beginning of long memcpy (i.e. > 16 bytes)
	//
.long_copy_user:
	tbit.nz p6,p7=src1,0	// odd alignment
	and tmp=7,tmp
	;;
	cmp.eq p10,p8=r0,tmp
	mov len1=len		// copy because of rotation
(p8)	br.cond.dpnt .diff_align_copy_user
	;;
	// At this point we know we have more than 16 bytes to copy
	// and also that both src and dest have the same alignment
	// which may not be the one we want. So for now we must move
	// forward slowly until we reach 16-byte alignment: no need to
	// worry about reaching the end of buffer.
	//
	EX(.failure_in1,(p6) ld1 val1[0]=[src1],1)	// 1-byte aligned
(p6)	adds len1=-1,len1;;
	tbit.nz p7,p0=src1,1
	;;
	EX(.failure_in1,(p7) ld2 val1[1]=[src1],2)	// 2-byte aligned
(p7)	adds len1=-2,len1;;
	tbit.nz p8,p0=src1,2
	;;
	//
	// Stop bit not required after ld4 because if we fail on ld4
	// we have never executed the ld1, therefore st1 is not executed.
	//
	EX(.failure_in1,(p8) ld4 val2[0]=[src1],4)	// 4-byte aligned
	;;
	EX(.failure_out,(p6) st1 [dst1]=val1[0],1)
	tbit.nz p9,p0=src1,3
	;;
	//
	// Stop bit not required after ld8 because if we fail on ld8
	// we have never executed the ld2, therefore st2 is not executed.
	//
	EX(.failure_in1,(p9) ld8 val2[1]=[src1],8)	// 8-byte aligned
	EX(.failure_out,(p7) st2 [dst1]=val1[1],2)
(p8)	adds len1=-4,len1
	;;
	EX(.failure_out, (p8) st4 [dst1]=val2[0],4)
(p9)	adds len1=-8,len1;;
	shr.u cnt=len1,4	// number of 128-bit (2x64bit) words
	;;
	EX(.failure_out, (p9) st8 [dst1]=val2[1],8)
	tbit.nz p6,p0=len1,3
	cmp.eq p7,p0=r0,cnt
	adds tmp=-1,cnt		// br.ctop is repeat/until
(p7)	br.cond.dpnt .dotail	// we have less than 16 bytes left
	;;
	adds src2=8,src1
	adds dst2=8,dst1
	mov ar.lc=tmp
	;;
	//
	// 16bytes/iteration
	//
2:
	EX(.failure_in3,(p16) ld8 val1[0]=[src1],16)
(p16)	ld8 val2[0]=[src2],16

	EX(.failure_out, (EPI)	st8 [dst1]=val1[PIPE_DEPTH-1],16)
(EPI)	st8 [dst2]=val2[PIPE_DEPTH-1],16
	br.ctop.dptk 2b
	;;			// RAW on src1 when fall through from loop
	//
	// Tail correction based on len only
	//
	// No matter where we come from (loop or test) the src1 pointer
	// is 16 byte aligned AND we have less than 16 bytes to copy.
	//
.dotail:
	EX(.failure_in1,(p6) ld8 val1[0]=[src1],8)	// at least 8 bytes
	tbit.nz p7,p0=len1,2
	;;
	EX(.failure_in1,(p7) ld4 val1[1]=[src1],4)	// at least 4 bytes
	tbit.nz p8,p0=len1,1
	;;
	EX(.failure_in1,(p8) ld2 val2[0]=[src1],2)	// at least 2 bytes
	tbit.nz p9,p0=len1,0
	;;
	EX(.failure_out, (p6) st8 [dst1]=val1[0],8)
	;;
	EX(.failure_in1,(p9) ld1 val2[1]=[src1])	// only 1 byte left
	mov ar.lc=saved_lc
	;;
	EX(.failure_out,(p7) st4 [dst1]=val1[1],4)
	mov pr=saved_pr,0xffffffffffff0000
	;;
	EX(.failure_out, (p8) st2 [dst1]=val2[0],2)
	mov ar.pfs=saved_pfs
	;;
	EX(.failure_out, (p9) st1 [dst1]=val2[1])
	br.ret.sptk.many rp
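	//
	// The .dotail sequence above peels the residual length bit by
	// bit, in decreasing power-of-two order (illustrative C sketch):
	//
	//	if (len1 & 8) { *(u64 *)dst = *(u64 *)src; src += 8; dst += 8; }
	//	if (len1 & 4) { *(u32 *)dst = *(u32 *)src; src += 4; dst += 4; }
	//	if (len1 & 2) { *(u16 *)dst = *(u16 *)src; src += 2; dst += 2; }
	//	if (len1 & 1) { *(u8  *)dst = *(u8  *)src; }
	//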

	//
	// Here we handle the case where the byte by byte copy fails
	// on the load.
	// Several factors make the zeroing of the rest of the buffer kind of
	// tricky:
	// - the pipeline: loads/stores are not in sync (pipeline)
	//
	//   In the same loop iteration, the dst1 pointer does not directly
	//   reflect where the faulty load was.
	//
	// - pipeline effect
	//   When you get a fault on load, you may have valid data from
	//   previous loads not yet stored, still in transit. Such data
	//   must be stored normally before moving on to zeroing the rest.
	//
	// - single/multi dispersal independence.
	//
	// solution:
	// - we don't disrupt the pipeline, i.e. data in transit in
	//   the software pipeline will eventually be moved to memory.
	//   We simply replace the load with a simple mov and keep the
	//   pipeline going. We can't really do this inline because
	//   p16 is always reset to 1 when lc > 0.
	//
.failure_in_pipe1:
	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
1:
(p16)	mov val1[0]=r0
(EPI)	st1 [dst1]=val1[PIPE_DEPTH-1],1
	br.ctop.dptk 1b
	;;
	mov pr=saved_pr,0xffffffffffff0000
	mov ar.lc=saved_lc
	mov ar.pfs=saved_pfs
	br.ret.sptk.many rp

	//
	// This is the case where the byte by byte copy fails on the load
	// when we copy the head. We need to finish the pipeline and copy
	// zeros for the rest of the destination. Since this happens
	// at the top we still need to fill the body and tail.
.failure_in_pipe2:
	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
2:
(p16)	mov val1[0]=r0
(EPI)	st1 [dst1]=val1[PIPE_DEPTH-1],1
	br.ctop.dptk 2b
	;;
	sub len=enddst,dst1,1	// precompute len
	br.cond.dptk.many .failure_in1bis
	;;
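	//
	// C-level view of the recovery loops above (illustrative sketch
	// only): the faulting load is replaced by a zero while in-flight
	// data keeps draining to the destination, and the return value
	// counts the source bytes never read:
	//
	//	val1[0] = 0;		/* instead of a ld1 from src1 */
	//	ret0 = endsrc - src1;	/* bytes NOT copied */
	//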
	//
	// Here we handle the head & tail part when we check for alignment.
	// The following code handles only the load failures. The
	// main difficulty comes from the fact that loads/stores are
	// scheduled. So when you fail on a load, the stores corresponding
	// to previous successful loads must be executed.
	//
	// However some simplifications are possible given the way
	// things work.
	//
	// 1) HEAD
	// Theory of operation:
	//
	//	Page A   | Page B
	//	---------|-----
	//	       1|8 x
	//	     1 2|8 x
	//	       4|8 x
	//	     1 4|8 x
	//	     2 4|8 x
	//	   1 2 4|8 x
	//	        |1
	//	        |2 x
	//	        |4 x
	//
	// page_size >= 4k (2^12). (x means 4, 2, 1)
	// Here we suppose Page A exists and Page B does not.
	//
	// As we move towards eight byte alignment we may encounter faults.
	// The numbers on each page show the size of the load (current alignment).
	//
	// Key point:
	//	- if you fail on 1, 2, 4 then you have never executed any smaller
	//	  size loads, e.g. failing ld4 means no ld1 nor ld2 executed
	//	  before.
	//
	// This allows us to simplify the cleanup code, because basically you
	// only have to worry about "pending" stores in the case of a failing
	// ld8(). Given the way the code is written today, this means only
	// worry about st2, st4. There we can use the information encapsulated
	// into the predicates.
	//
	// Other key point:
	//	- if you fail on the ld8 in the head, it means you went straight
	//	  to it, i.e. 8-byte alignment within a nonexistent page.
	// Again this comes from the fact that if you crossed just for the ld8 then
	// you are 8-byte aligned but also 16-byte aligned, therefore you would
	// either go for the 16-byte copy loop OR the ld8 in the tail part.
	// The combination ld1, ld2, ld4, ld8 where you fail on ld8 is impossible
	// because it would mean you had 15 bytes to copy in which case you
	// would have defaulted to the byte by byte copy.
	//
	//
	// 2) TAIL
	// Here we know we have less than 16 bytes AND we are either 8 or 16 byte
	// aligned.
	//
	// Key point:
	// This means that we either:
	//		- are right on a page boundary
	//	OR
	//		- are at more than 16 bytes from a page boundary with
	//		  at most 15 bytes to copy: no chance of crossing.
	//
	// This allows us to assume that if we fail on a load we haven't possibly
	// executed any of the previous (tail) ones, so we don't need to do
	// any stores. For instance, if we fail on ld2, this means we had
	// 2 or 3 bytes left to copy and we did not execute the ld8 nor ld4.
	//
	// This means that we are in a situation similar to a fault in the
	// head part. That's nice!
	//
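	//
	// The head key point, as an illustrative C predicate: a
	// naturally aligned N-byte load (N = 1, 2, 4, 8) never straddles
	// a page, so with PAGE_SIZE a multiple of N:
	//
	//	(addr / PAGE_SIZE) == ((addr + N - 1) / PAGE_SIZE)
	//
	// holds whenever (addr & (N - 1)) == 0; hence the first fault
	// pinpoints exactly which of the earlier loads executed.
	//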
.failure_in1:
	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
	sub len=endsrc,src1,1
	//
	// we know that ret0 can never be zero at this point
	// because we failed while trying to do a load, i.e. there is still
	// some work to do.
	// The failure_in1bis and length problem is taken care of at the
	// calling side.
	//
	;;
.failure_in1bis:		// from (.failure_in3)
	mov ar.lc=len		// Continue with a stupid byte store.
	;;
5:
	st1 [dst1]=r0,1
	br.cloop.dptk 5b
	;;
	mov pr=saved_pr,0xffffffffffff0000
	mov ar.lc=saved_lc
	mov ar.pfs=saved_pfs
	br.ret.sptk.many rp

	//
	// Here we simply restart the loop but instead
	// of doing loads we fill the pipeline with zeroes
	// We can't simply store r0 because we may have valid
	// data in transit in the pipeline.
	// ar.lc and ar.ec are setup correctly at this point
	//
	// we MUST use src1/endsrc here and not dst1/enddst because
	// of the pipeline effect.
	//
.failure_in3:
	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
	;;
2:
(p16)	mov val1[0]=r0
(p16)	mov val2[0]=r0
(EPI)	st8 [dst1]=val1[PIPE_DEPTH-1],16
(EPI)	st8 [dst2]=val2[PIPE_DEPTH-1],16
	br.ctop.dptk 2b
	;;
	cmp.ne p6,p0=dst1,enddst	// Do we need to finish the tail ?
	sub len=enddst,dst1,1		// precompute len
(p6)	br.cond.dptk .failure_in1bis
	;;
	mov pr=saved_pr,0xffffffffffff0000
	mov ar.lc=saved_lc
	mov ar.pfs=saved_pfs
	br.ret.sptk.many rp

.failure_in2:
	sub ret0=endsrc,src1
	cmp.ne p6,p0=dst1,enddst	// Do we need to finish the tail ?
	sub len=enddst,dst1,1		// precompute len
(p6)	br.cond.dptk .failure_in1bis
	;;
	mov pr=saved_pr,0xffffffffffff0000
	mov ar.lc=saved_lc
	mov ar.pfs=saved_pfs
	br.ret.sptk.many rp

	//
	// handling of failures on stores: that's the easy part
	//
.failure_out:
	sub ret0=enddst,dst1
	mov pr=saved_pr,0xffffffffffff0000
	mov ar.lc=saved_lc

	mov ar.pfs=saved_pfs
	br.ret.sptk.many rp
END(__copy_user)
EXPORT_SYMBOL(__copy_user)