/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * User memory copying routines for the Hexagon Kernel
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/* The right way to do this involves valignb.
 * The easy way is to only speed up the case where src and dst have
 * similar alignment.
 */

/*
 * Copy to/from user are the same, except that for packets with a load and
 * a store, I don't know how to tell which kind of exception we got.
 * Therefore, we duplicate the function, and handle faulting addresses
 * differently for each function.
 */

/*
 * copy to user: stores can fault
 */
#define src_sav r13
#define dst_sav r12
#define src_dst_sav r13:12
#define d_dbuf r15:14
#define w_dbuf r15

#define dst r0
#define src r1
#define bytes r2
#define loopcount r5

#define FUNCNAME raw_copy_to_user
#include "copy_user_template.S"

	/* STORE FAULTS from COPY_TO_USER */
	.falign
1109:
2109:
4109:
	/* Alignment loop.  r2 has been updated.  Return it. */
	{
		r0 = r2
		jumpr r31
	}
	/* Normal copy loops. Use dst-dst_sav to compute distance */
	/* dst holds best write, no need to unwind any loops */
	/* X - (A - B) == X + B - A */
	.falign
8189:
8199:
4189:
4199:
2189:
2199:
1189:
1199:
	{
		r2 += sub(dst_sav,dst)
	}
	{
		r0 = r2
		jumpr r31
	}

	/* COPY TO USER: only stores can fail */
	.section __ex_table,"a"
	.long 1100b,1109b
	.long 2100b,2109b
	.long 4100b,4109b
	.long 8180b,8189b
	.long 8190b,8199b
	.long 4180b,4189b
	.long 4190b,4199b
	.long 2180b,2189b
	.long 2190b,2199b
	.long 1180b,1189b
	.long 1190b,1199b
	.previous
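
/*
 * Worked example of the fixup identity noted above, X - (A - B) == X + B - A,
 * using illustrative numbers; it assumes r2 still holds the byte count that
 * was current at the point dst_sav was captured, which is what the fixup
 * relies on:
 *
 *   X = r2      = bytes remaining when dst_sav was saved, e.g. 32
 *   B = dst_sav = destination pointer at that same point
 *   A = dst     = furthest successful write when the store faulted,
 *                 e.g. dst_sav + 8
 *
 *   bytes not copied = X - (A - B) = X + B - A
 *                    = 32 + dst_sav - (dst_sav + 8) = 24
 *
 * which is what "r2 += sub(dst_sav,dst)" computes before r2 is returned in
 * r0 as the not-copied count.
 */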