/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/kernel/crunch-bits.S
 * Cirrus MaverickCrunch context switching and handling
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
 * Copyright (c) 2003-2004, MontaVista Software, Inc.
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <mach/ep93xx-regs.h>

/*
 * Byte offsets into the per-task crunch save area.
 * We can't use hex constants here due to a bug in gas.
 */
#define CRUNCH_MVDX0		0
#define CRUNCH_MVDX1		8
#define CRUNCH_MVDX2		16
#define CRUNCH_MVDX3		24
#define CRUNCH_MVDX4		32
#define CRUNCH_MVDX5		40
#define CRUNCH_MVDX6		48
#define CRUNCH_MVDX7		56
#define CRUNCH_MVDX8		64
#define CRUNCH_MVDX9		72
#define CRUNCH_MVDX10		80
#define CRUNCH_MVDX11		88
#define CRUNCH_MVDX12		96
#define CRUNCH_MVDX13		104
#define CRUNCH_MVDX14		112
#define CRUNCH_MVDX15		120
#define CRUNCH_MVAX0L		128
#define CRUNCH_MVAX0M		132
#define CRUNCH_MVAX0H		136
#define CRUNCH_MVAX1L		140
#define CRUNCH_MVAX1M		144
#define CRUNCH_MVAX1H		148
#define CRUNCH_MVAX2L		152
#define CRUNCH_MVAX2M		156
#define CRUNCH_MVAX2H		160
#define CRUNCH_MVAX3L		164
#define CRUNCH_MVAX3M		168
#define CRUNCH_MVAX3H		172
#define CRUNCH_DSPSC		176

#define CRUNCH_SIZE		184

	.text

/*
 * Lazy switching of crunch coprocessor context
 *
 * r10 = struct thread_info pointer
 * r9  = ret_from_exception
 * lr  = undefined instr exit
 *
 * called from prefetch exception handler with interrupts enabled
 */
ENTRY(crunch_task_enable)
	inc_preempt_count r10, r3

	ldr	r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr

	ldr	r1, [r8, #0x80]
	tst	r1, #0x00800000			@ access to crunch enabled?
	bne	2f				@ if so no business here
	mov	r3, #0xaa			@ unlock syscon swlock
	str	r3, [r8, #0xc0]
	orr	r1, r1, #0x00800000		@ enable access to crunch
	str	r1, [r8, #0x80]

	ldr	r3, =crunch_owner
	add	r0, r10, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r2, [sp, #60]			@ current task pc value
	ldr	r1, [r3]			@ get current crunch owner
	str	r0, [r3]			@ this task now owns crunch
	sub	r2, r2, #4			@ adjust pc back
	str	r2, [sp, #60]

	ldr	r2, [r8, #0x80]
	mov	r2, r2				@ flush out enable (@@@)

	teq	r1, #0				@ test for last ownership
	mov	lr, r9				@ normal exit from exception
	beq	crunch_load			@ no owner, skip save

/*
 * crunch_save: dump the live crunch register file to the save area
 * pointed to by r1.  Falls through into crunch_load when r0 (area to
 * load from) is non-zero; also called directly via bl from
 * crunch_task_disable and crunch_task_copy below.
 */
crunch_save:
	cfstr64	mvdx0, [r1, #CRUNCH_MVDX0]	@ save 64b registers
	cfstr64	mvdx1, [r1, #CRUNCH_MVDX1]
	cfstr64	mvdx2, [r1, #CRUNCH_MVDX2]
	cfstr64	mvdx3, [r1, #CRUNCH_MVDX3]
	cfstr64	mvdx4, [r1, #CRUNCH_MVDX4]
	cfstr64	mvdx5, [r1, #CRUNCH_MVDX5]
	cfstr64	mvdx6, [r1, #CRUNCH_MVDX6]
	cfstr64	mvdx7, [r1, #CRUNCH_MVDX7]
	cfstr64	mvdx8, [r1, #CRUNCH_MVDX8]
	cfstr64	mvdx9, [r1, #CRUNCH_MVDX9]
	cfstr64	mvdx10, [r1, #CRUNCH_MVDX10]
	cfstr64	mvdx11, [r1, #CRUNCH_MVDX11]
	cfstr64	mvdx12, [r1, #CRUNCH_MVDX12]
	cfstr64	mvdx13, [r1, #CRUNCH_MVDX13]
	cfstr64	mvdx14, [r1, #CRUNCH_MVDX14]
	cfstr64	mvdx15, [r1, #CRUNCH_MVDX15]

#ifdef __ARMEB__
#error fix me for ARMEB
#endif

	cfmv32al mvfx0, mvax0			@ save 72b accumulators
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0L]
	cfmv32am mvfx0, mvax0
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0M]
	cfmv32ah mvfx0, mvax0
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0H]
	cfmv32al mvfx0, mvax1
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1L]
	cfmv32am mvfx0, mvax1
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1M]
	cfmv32ah mvfx0, mvax1
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1H]
	cfmv32al mvfx0, mvax2
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2L]
	cfmv32am mvfx0, mvax2
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2M]
	cfmv32ah mvfx0, mvax2
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2H]
	cfmv32al mvfx0, mvax3
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3L]
	cfmv32am mvfx0, mvax3
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3M]
	cfmv32ah mvfx0, mvax3
	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3H]

	cfmv32sc mvdx0, dspsc			@ save status word
	cfstr64	mvdx0, [r1, #CRUNCH_DSPSC]

	teq	r0, #0				@ anything to load?
	cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0]	@ mvdx0 was clobbered
	beq	1f

/*
 * crunch_load: restore the crunch register file from the save area
 * pointed to by r0.  Entered by fall-through from crunch_save, by
 * branch from crunch_task_enable, or via bl from crunch_task_restore.
 */
crunch_load:
	cfldr64	mvdx0, [r0, #CRUNCH_DSPSC]	@ load status word
	cfmvsc32 dspsc, mvdx0

	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0L]	@ load 72b accumulators
	cfmval32 mvax0, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0M]
	cfmvam32 mvax0, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0H]
	cfmvah32 mvax0, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1L]
	cfmval32 mvax1, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1M]
	cfmvam32 mvax1, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1H]
	cfmvah32 mvax1, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2L]
	cfmval32 mvax2, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2M]
	cfmvam32 mvax2, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2H]
	cfmvah32 mvax2, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3L]
	cfmval32 mvax3, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3M]
	cfmvam32 mvax3, mvfx0
	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3H]
	cfmvah32 mvax3, mvfx0

	cfldr64	mvdx0, [r0, #CRUNCH_MVDX0]	@ load 64b registers
	cfldr64	mvdx1, [r0, #CRUNCH_MVDX1]
	cfldr64	mvdx2, [r0, #CRUNCH_MVDX2]
	cfldr64	mvdx3, [r0, #CRUNCH_MVDX3]
	cfldr64	mvdx4, [r0, #CRUNCH_MVDX4]
	cfldr64	mvdx5, [r0, #CRUNCH_MVDX5]
	cfldr64	mvdx6, [r0, #CRUNCH_MVDX6]
	cfldr64	mvdx7, [r0, #CRUNCH_MVDX7]
	cfldr64	mvdx8, [r0, #CRUNCH_MVDX8]
	cfldr64	mvdx9, [r0, #CRUNCH_MVDX9]
	cfldr64	mvdx10, [r0, #CRUNCH_MVDX10]
	cfldr64	mvdx11, [r0, #CRUNCH_MVDX11]
	cfldr64	mvdx12, [r0, #CRUNCH_MVDX12]
	cfldr64	mvdx13, [r0, #CRUNCH_MVDX13]
	cfldr64	mvdx14, [r0, #CRUNCH_MVDX14]
	cfldr64	mvdx15, [r0, #CRUNCH_MVDX15]

1:
#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info r10
#endif
2:	dec_preempt_count r10, r3
	ret	lr

/*
 * Back up crunch regs to save area and disable access to them
 * (mainly for gdb or sleep mode usage)
 *
 * r0 = struct thread_info pointer of target task or NULL for any
 */
ENTRY(crunch_task_disable)
	stmfd	sp!, {r4, r5, lr}

	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r1, [r3]			@ get current crunch owner
	teq	r1, #0				@ any current owner?
	beq	1f				@ no: quit
	teq	r0, #0				@ any owner?
	teqne	r1, r2				@ or specified one?
	bne	1f				@ no: quit

	ldr	r5, [r4, #0x80]			@ enable access to crunch
	mov	r2, #0xaa			@ unlock syscon swlock
	str	r2, [r4, #0xc0]
	orr	r5, r5, #0x00800000
	str	r5, [r4, #0x80]

	mov	r0, #0				@ nothing to load
	str	r0, [r3]			@ no more current owner
	ldr	r2, [r4, #0x80]			@ flush out enable (@@@)
	mov	r2, r2
	bl	crunch_save

	mov	r2, #0xaa			@ disable access to crunch
	str	r2, [r4, #0xc0]
	bic	r5, r5, #0x00800000
	str	r5, [r4, #0x80]
	ldr	r5, [r4, #0x80]			@ flush out enable (@@@)
	mov	r5, r5

1:	msr	cpsr_c, ip			@ restore interrupt mode
	ldmfd	sp!, {r4, r5, pc}

/*
 * Copy crunch state to given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to store crunch state
 *
 * this is called mainly in the creation of signal stack frames
 */
ENTRY(crunch_task_copy)
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r3, [r3]			@ get current crunch owner
	teq	r2, r3				@ does this task own it...
	beq	1f

	@ current crunch values are in the task save area
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r1
	mov	r1, r2
	mov	r2, #CRUNCH_SIZE
	b	memcpy

1:	@ this task owns crunch regs -- grab a copy from there
	mov	r0, #0				@ nothing to load
	mov	r3, lr				@ preserve return address
	bl	crunch_save
	msr	cpsr_c, ip			@ restore interrupt mode
	ret	r3

/*
 * Restore crunch state from given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to get crunch state from
 *
 * this is used to restore crunch state when unwinding a signal stack frame
 */
ENTRY(crunch_task_restore)
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r3, [r3]			@ get current crunch owner
	teq	r2, r3				@ does this task own it...
	beq	1f

	@ this task doesn't own crunch regs -- use its save area
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r2
	mov	r2, #CRUNCH_SIZE
	b	memcpy

1:	@ this task owns crunch regs -- load them directly
	mov	r0, r1
	mov	r1, #0				@ nothing to save
	mov	r3, lr				@ preserve return address
	bl	crunch_load
	msr	cpsr_c, ip			@ restore interrupt mode
	ret	r3