/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * User memory copy functions for kernel
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * The right way to do this involves valignb.
 * The easy way is to speed up only the case where src and dest
 * have similar alignment.
 */

/*
 * Copy to user and copy from user are the same, except that for
 * packets containing both a load and a store, there is no way to
 * tell which kind of exception was taken.  Therefore, the function
 * is duplicated, and faulting addresses are handled differently in
 * each copy.
 */

/*
 * copy from user: loads can fault
 */
#define src_sav r13
#define dst_sav r12
#define src_dst_sav r13:12
#define d_dbuf r15:14
#define w_dbuf r15

#define dst r0
#define src r1
#define bytes r2
#define loopcount r5

#define FUNCNAME raw_copy_from_user
#include "copy_user_template.S"

	/* LOAD FAULTS from COPY_FROM_USER */

	/*
	 * Alignment loop.  r2 already holds the number of bytes
	 * not copied.  Return it.
	 */
	.falign
1009:
2009:
4009:
	{
		r0 = r2
		jumpr r31
	}
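
/*
 * For context, a minimal C sketch (illustration only, not part of this
 * file) of the return-value contract these fixup paths implement:
 * raw_copy_from_user() returns the number of bytes that could not be
 * copied, so 0 means complete success.  The caller below,
 * copy_int_from_user(), is hypothetical.
 *
 *	static int copy_int_from_user(int *dst, const int __user *src)
 *	{
 *		if (raw_copy_from_user(dst, src, sizeof(*dst)) != 0)
 *			return -EFAULT;
 *		return 0;
 *	}
 */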
	/*
	 * Normal copy loops.  Do the epilog: store the buffered data,
	 * then use src - src_sav to compute the distance copied.  The
	 * remaining count is r2 - (src - src_sav); by the identity
	 * X - (A - B) == X + B - A this is computed as
	 * "r2 += sub(src_sav,src)".
	 */
	.falign
8089:					/* fixup for the doubleword loop */
	{
		memd(dst) = d_dbuf
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
4089:					/* fixup for the word loop */
	{
		memw(dst) = w_dbuf
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
2089:					/* fixup for the halfword loop */
	{
		memh(dst) = w_dbuf
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
1089:					/* fixup for the byte loop */
	{
		memb(dst) = w_dbuf
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}

	/* COPY FROM USER: only loads can fail */

	/* Pair each faulting load address with its fixup handler. */
	.section __ex_table,"a"
	.long 1000b,1009b
	.long 2000b,2009b
	.long 4000b,4009b
	.long 8080b,8089b
	.long 4080b,4089b
	.long 2080b,2089b
	.long 1080b,1089b
	.previous
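
/*
 * A minimal sketch (assumed layout, for illustration only) of what each
 * ".long fault,fixup" pair above encodes: the classic two-word
 * exception table entry holding the address of an instruction that may
 * fault and the address of its fixup code.  The lookup routine
 * find_fixup() below is hypothetical, not the kernel's implementation.
 *
 *	struct exception_table_entry {
 *		unsigned long insn;		address that may fault
 *		unsigned long fixup;		address of fixup code
 *	};
 *
 *	static unsigned long find_fixup(const struct exception_table_entry *tbl,
 *					unsigned int n, unsigned long fault_pc)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < n; i++)
 *			if (tbl[i].insn == fault_pc)
 *				return tbl[i].fixup;
 *		return 0;
 *	}
 */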