/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Process/processor support for the Hexagon architecture
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#ifndef __ASSEMBLY__

#include <asm/mem-layout.h>
#include <asm/registers.h>
#include <asm/hexagon_vm.h>

/* task_struct, defined elsewhere, is the "process descriptor" */
struct task_struct;

extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

/*
 * thread_struct holds the per-thread context-switch data -- specifically,
 * the state needed to perform switch_to.
 */
struct thread_struct {
	void *switch_sp;
};

/*
 * Initializes thread_struct.
 * The only thing we have in there is switch_sp,
 * which doesn't really need to be initialized.
 */
#define INIT_THREAD { \
}

#define cpu_relax() __vmyield()

/*
 * Decides where the kernel will search for a free chunk of vm space during
 * mmaps.
 * See also arch_get_unmapped_area.
 * Doesn't apply if MAP_FIXED is set in the mmap flags, though...
 *
 * Apparently the convention is that ld.so will ask for "unmapped" private
 * memory to be allocated SOMEWHERE, but it also asks for memory explicitly
 * via MAP_FIXED at the lower addresses starting at VA=0x0.
 *
 * If the two requests collide, you get authentic segfaulting action, so
 * you have to kick the "unmapped" base requests higher up.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE/3))


#define task_pt_regs(task) \
	((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)

#define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
#define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))

/* Free all resources held by a thread; defined in process.c */
extern void release_thread(struct task_struct *dead_task);

/* Get wait channel for task P. */
extern unsigned long get_wchan(struct task_struct *p);

/* The following stuff is pretty HEXAGON specific. */

/*
 * This is really just here for __switch_to.
 * Offsets are pulled via asm-offsets.c.
 */

/*
 * No real reason why VM and native switch stacks should be different.
 * Ultimately this should merge.  Note that the Rev C. ABI called out only
 * R24-27 as callee-saved GPRs needing explicit attention (R29-31 being
 * dealt with automagically by allocframe), but the current ABI has
 * more, R16-R27.  By saving more, the worst case is that we waste some
 * cycles when building with the older compilers.
 */

struct hexagon_switch_stack {
	union {
		struct {
			unsigned long r16;
			unsigned long r17;
		};
		unsigned long long	r1716;
	};
	union {
		struct {
			unsigned long r18;
			unsigned long r19;
		};
		unsigned long long	r1918;
	};
	union {
		struct {
			unsigned long r20;
			unsigned long r21;
		};
		unsigned long long	r2120;
	};
	union {
		struct {
			unsigned long r22;
			unsigned long r23;
		};
		unsigned long long	r2322;
	};
	union {
		struct {
			unsigned long r24;
			unsigned long r25;
		};
		unsigned long long	r2524;
	};
	union {
		struct {
			unsigned long r26;
			unsigned long r27;
		};
		unsigned long long	r2726;
	};

	unsigned long		fp;
	unsigned long		lr;
};

#endif /* !__ASSEMBLY__ */

#endif
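
As an illustrative aside (not part of the header itself), the sketch below shows why each pair of callee-saved registers in struct hexagon_switch_stack is overlaid with a single 64-bit member: the switch code can spill or reload a register pair with one double-word access, while C code can still name the individual registers, and asm-offsets.c can export the slot offsets to assembly. The field names mirror the header, but the standalone struct pair_slot and the test driver are hypothetical, and the value checks assume a little-endian layout as on Hexagon.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for one register-pair slot; the real struct repeats this
 * pattern for r16..r27 (32-bit general registers on Hexagon). */
struct pair_slot {
	union {
		struct {
			uint32_t r16;
			uint32_t r17;
		};
		uint64_t r1716;
	};
};

int main(void)
{
	struct pair_slot s;

	/* One 64-bit store covers both register slots at once. */
	s.r1716 = 0x1111111122222222ULL;

	/* The 32-bit views alias the same storage; on a little-endian
	 * layout (as on Hexagon) r16 is the low word. */
	assert(s.r16 == 0x22222222u);
	assert(s.r17 == 0x11111111u);

	/* asm-offsets.c exports offsets such as
	 * offsetof(struct hexagon_switch_stack, r2726) so that the
	 * __switch_to assembly can address the slots symbolically. */
	assert(offsetof(struct pair_slot, r1716) == 0);

	return 0;
}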