/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H

/*
 * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#define ENTRY(name)				\
	.align 32;				\
	.proc name;				\
name:

#define ENTRY_MIN_ALIGN(name)			\
	.align 16;				\
	.proc name;				\
name:

#define GLOBAL_ENTRY(name)			\
	.global name;				\
	ENTRY(name)

#define END(name)				\
	.endp name

/*
 * Helper macros to make unwind directives more readable:
 */

/* prologue_gr: */
#define ASM_UNW_PRLG_RP			0x8
#define ASM_UNW_PRLG_PFS		0x4
#define ASM_UNW_PRLG_PSP		0x2
#define ASM_UNW_PRLG_PR			0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))

/*
 * Helper macros for accessing user memory.
 *
 * When adding any new .section/.previous entries here, make sure to
 * also add them to the DISCARD section in arch/ia64/kernel/gate.lds.S or
 * unpleasant things will happen.
 */

	.section "__ex_table", "a"		// declare section & section attributes
	.previous

# define EX(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.;	\
  [99:]	x
# define EXCLR(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.+4;	\
  [99:]	x
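
/*
 * Usage sketch (illustration only; the routine, label, and register choices
 * below are hypothetical and not part of this header): EX() emits an
 * __ex_table entry pairing the tagged instruction with a fixup label, so a
 * fault on the user access resumes at that label instead of oopsing:
 *
 *	GLOBAL_ENTRY(load_user_word)
 *		EX(.Lfixup, ld8 r8=[r32])	// r32 assumed to hold a user pointer
 *		br.ret.sptk.many rp
 *	.Lfixup:
 *		mov r8=0			// value handed back on fault
 *		br.ret.sptk.many rp
 *	END(load_user_word)
 *
 * EXCLR() differs only in the extra flag (the "+4") it encodes in the fixup
 * offset for the fault handler to interpret.
 */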

/*
 * Tag MCA recoverable instruction ranges.
 */

	.section "__mca_table", "a"		// declare section & section attributes
	.previous

# define MCA_RECOVER_RANGE(y)			\
	.xdata4 "__mca_table", y-., 99f-.;	\
  [99:]

/*
 * Mark instructions that need a load of a virtual address patched to be
 * a load of a physical address.  We use this either in critical performance
 * path (ivt.S - TLB miss processing) or in places where it might not be
 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
 */
	.section ".data..patch.vtop", "a"	// declare section & section attributes
	.previous

#define	LOAD_PHYSICAL(pr, reg, obj)		\
[1:](pr)	movl reg = obj;			\
	.xdata4 ".data..patch.vtop", 1b-.

/*
 * For now, we always put in the McKinley E9 workaround.  On CPUs that don't
 * need it, we'll patch out the work-around bundles with NOPs, so their impact
 * is minimal.
 */
#define DO_MCKINLEY_E9_WORKAROUND

#ifdef DO_MCKINLEY_E9_WORKAROUND
	.section ".data..patch.mckinley_e9", "a"
	.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN					\
	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
1:{ .mib;						\
	nop.m 0;					\
	mov r16=ar.pfs;					\
	br.call.sptk.many b7=2f;;			\
};							\
2:{ .mib;						\
	nop.m 0;					\
	mov ar.pfs=r16;					\
	br.ret.sptk.many b6;;				\
}
#else
# define FSYS_RETURN	br.ret.sptk.many b6
#endif

/*
 * If the physical stack register size is different from DEF_NUM_STACK_REG,
 * dynamically patch the kernel for the correct size.
 */
	.section ".data..patch.phys_stack_reg", "a"
	.previous
#define	LOAD_PHYS_STACK_REG_SIZE(reg)			\
[1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
	.xdata4 ".data..patch.phys_stack_reg", 1b-.

/*
 * Up until early 2004, use of .align within a function caused bad unwind info.
 * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into
 * nothing otherwise.
 */
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n)	.align n
#else
# define TEXT_ALIGN(n)
#endif

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data		.serialize.data
# define dv_serialize_instruction	.serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif

#endif /* _ASM_IA64_ASMMACRO_H */
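
/*
 * Usage sketch (illustration only; the stub name and return value below are
 * hypothetical): code on the fast syscall path returns with FSYS_RETURN
 * rather than a plain br.ret, so the Itanium 2 Errata 9 work-around bundles
 * can be patched out at boot on CPUs that do not need them:
 *
 *	GLOBAL_ENTRY(fsys_example)
 *		mov r8=0		// hypothetical return value
 *		FSYS_RETURN
 *	END(fsys_example)
 */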