/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PROCESSOR_H
#define _ASM_POWERPC_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 */

#include <asm/reg.h>

#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2

#ifdef __BIG_ENDIAN__
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#else
#define TS_FPROFFSET 1
#define TS_VSRLOWOFFSET 0
#endif

#else
#define TS_FPRWIDTH 1
#define TS_FPROFFSET 0
#endif

#ifdef CONFIG_PPC64
/* Default SMT priority is set to 3. Bits 11-13 of the PPR hold the priority. */
#define PPR_PRIORITY 3
#ifdef __ASSEMBLY__
#define DEFAULT_PPR (PPR_PRIORITY << 50)
#else
#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */
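/*
 * Illustrative note (not part of the original header): the PPR uses IBM
 * (MSB = bit 0) numbering, so a priority field at bits 11-13 of the
 * 64-bit register starts at shift 63 - 13 = 50. With PPR_PRIORITY = 3
 * this makes DEFAULT_PPR = 3ULL << 50 = 0x000c000000000000.
 */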

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
#include <asm/hw_breakpoint.h>

/* We do _not_ want to define new machine types at all, those must die
 * in favor of using the device-tree
 * -- BenH.
 */

/* PREP sub-platform types. Unused */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
#define _PREP_Bull 0x03 /* bull prep */

/* CHRP sub-platform types. These are arbitrary */
#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
#define _CHRP_briq 0x07 /* TotalImpact's briQ */

#if defined(__KERNEL__) && defined(CONFIG_PPC32)

extern int _chrp_type;

#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */

/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
#define HMT_low() asm volatile("or 1,1,1 # low priority")
#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
#define HMT_high() asm volatile("or 3,3,3 # high priority")
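/*
 * Illustrative usage (not part of the original header): code that
 * busy-waits typically drops the SMT priority first and restores it
 * when done, e.g.
 *
 *	HMT_low();
 *	while (!READ_ONCE(flag))
 *		barrier();
 *	HMT_medium();
 *
 * ("flag" is a made-up variable; the spin_begin()/spin_end() helpers
 * further down wrap this same pattern.)
 */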

#ifdef __KERNEL__

#ifdef CONFIG_PPC64
#include <asm/task_size_64.h>
#else
#include <asm/task_size_32.h>
#endif

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]

/* FP and VSX 0-31 register set */
struct thread_fp_state {
	u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
	u64 fpscr; /* Floating point status */
};
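/*
 * Example (illustrative only): tsk->thread.TS_FPR(5) expands to
 * tsk->thread.fp_state.fpr[5][TS_FPROFFSET]. With CONFIG_VSX each
 * fpr[i] holds two doublewords of a VSR: TS_FPROFFSET selects the
 * doubleword that carries the classic FP register, TS_VSRLOWOFFSET the
 * other half, and the two offsets swap between big and little endian as
 * defined above.
 */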

/* Complete AltiVec register set including VSCR */
struct thread_vr_state {
	vector128 vr[32] __attribute__((aligned(16)));
	vector128 vscr __attribute__((aligned(16)));
};

struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * The following help to manage the use of Debug Control Registers
	 * on the BookE platforms.
	 */
	uint32_t dbcr0;
	uint32_t dbcr1;
#ifdef CONFIG_BOOKE
	uint32_t dbcr2;
#endif
	/*
	 * The stored value of the DBSR register will be the value at the
	 * last debug interrupt. This register can only be read by the
	 * user (it is never written by them) and describes the reason for
	 * the last debug trap.
	 */
	uint32_t dbsr;
	/*
	 * The following will contain addresses used by debug applications
	 * to help trace and trap on particular address locations.
	 * The bits in the Debug Control Registers above help define which
	 * of the following registers will contain valid data and/or addresses.
	 */
	unsigned long iac1;
	unsigned long iac2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	unsigned long iac3;
	unsigned long iac4;
#endif
	unsigned long dac1;
	unsigned long dac2;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	unsigned long dvc1;
	unsigned long dvc2;
#endif
#endif
};

struct thread_struct {
	unsigned long ksp; /* Kernel stack pointer */

#ifdef CONFIG_PPC64
	unsigned long ksp_vsid;
#endif
	struct pt_regs *regs; /* Pointer to saved register state */
#ifdef CONFIG_BOOKE
	/* BookE base exception scratch space; align on cacheline */
	unsigned long normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
	void *pgdir; /* root of page-table tree */
	unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
#ifdef CONFIG_PPC_RTAS
	unsigned long rtas_sp; /* stack pointer for when in RTAS */
#endif
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	unsigned long kuap; /* opened segments for user access */
#endif
#ifdef CONFIG_VMAP_STACK
	unsigned long srr0;
	unsigned long srr1;
	unsigned long dar;
	unsigned long dsisr;
#ifdef CONFIG_PPC_BOOK3S_32
	unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
	unsigned long lr, ctr;
#endif
#endif
	/* Debug Registers */
	struct debug_reg debug;
	struct thread_fp_state fp_state;
	struct thread_fp_state *fp_save_area;
	int fpexc_mode; /* floating-point exception mode */
	unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bps[HBP_NUM_MAX];
	/*
	 * Helps identify source of single-step exception and subsequent
	 * hw-breakpoint enablement
	 */
	struct perf_event *last_hit_ubp[HBP_NUM_MAX];
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */
	unsigned long trap_nr; /* last trap # on this thread */
	u8 load_slb; /* Ages out SLB preload cache entries */
	u8 load_fp;
#ifdef CONFIG_ALTIVEC
	u8 load_vec;
	struct thread_vr_state vr_state;
	struct thread_vr_state *vr_save_area;
	unsigned long vrsave;
	int used_vr; /* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* VSR status */
	int used_vsr; /* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	unsigned long evr[32]; /* upper 32-bits of SPE regs */
	u64 acc; /* Accumulator */
	unsigned long spefscr; /* SPE & eFP status */
	unsigned long spefscr_last; /* SPEFSCR value on last prctl
				       call or trap return */
	int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u8 load_tm;
	u64 tm_tfhar; /* Transaction fail handler addr */
	u64 tm_texasr; /* Transaction exception & summary */
	u64 tm_tfiar; /* Transaction fail instr address reg */
	struct pt_regs ckpt_regs; /* Checkpointed registers */

	unsigned long tm_tar;
	unsigned long tm_ppr;
	unsigned long tm_dscr;
	unsigned long tm_amr;

	/*
	 * Checkpointed FP and VSX 0-31 register set.
	 *
	 * When a transaction is active/signalled/scheduled etc., *regs is the
	 * most recent set of (speculated) GPRs, with ckpt_regs being the older
	 * checkpointed regs to which we roll back if the transaction aborts.
	 *
	 * These are analogous to how ckpt_regs and pt_regs work.
	 */
	struct thread_fp_state ckfp_state; /* Checkpointed FP state */
	struct thread_vr_state ckvr_state; /* Checkpointed VR state */
	unsigned long ckvrsave; /* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_MEM_KEYS
	unsigned long amr;
	unsigned long iamr;
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	void *kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *kvm_vcpu;
#endif
#ifdef CONFIG_PPC64
	unsigned long dscr;
	unsigned long fscr;
	/*
	 * dscr_inherit indicates that the process has explicitly changed
	 * the DSCR value for itself, so the kernel no longer uses the
	 * default CPU DSCR value from the PACA during context switch.
	 * Once set, this behaviour is also inherited by all children of
	 * the process from that point onwards.
	 */
	int dscr_inherit;
	unsigned long tidr;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long tar;
	unsigned long ebbrr;
	unsigned long ebbhr;
	unsigned long bescr;
	unsigned long siar;
	unsigned long sdar;
	unsigned long sier;
	unsigned long mmcr2;
	unsigned mmcr0;

	unsigned used_ebb;
	unsigned long mmcr3;
	unsigned long sier2;
	unsigned long sier3;

#endif
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
#define INIT_SP_LIMIT ((unsigned long)&init_stack)

#ifdef CONFIG_SPE
#define SPEFSCR_INIT \
	.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
	.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif

#ifdef CONFIG_PPC32
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.ksp_limit = INIT_SP_LIMIT, \
	.pgdir = swapper_pg_dir, \
	.fpexc_mode = MSR_FE0 | MSR_FE1, \
	SPEFSCR_INIT \
}
#else
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.fpexc_mode = 0, \
}
#endif

#define task_pt_regs(tsk) ((tsk)->thread.regs)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk) ((tsk)->thread.regs ? (tsk)->thread.regs->nip : 0)
#define KSTK_ESP(tsk) ((tsk)->thread.regs ? (tsk)->thread.regs->gpr[1] : 0)
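/*
 * Note (illustrative): thread.regs is NULL for kernel threads, which
 * have no user register frame, so KSTK_EIP()/KSTK_ESP() report 0 for
 * them and the user NIP/stack pointer otherwise.
 */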

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))

extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
extern void store_vr_state(struct thread_vr_state *vr);

static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}

#ifdef CONFIG_PPC64
#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)

#define spin_begin() HMT_low()

#define spin_cpu_relax() barrier()

#define spin_end() HMT_medium()

#define spin_until_cond(cond) \
do { \
	if (unlikely(!(cond))) { \
		spin_begin(); \
		do { \
			spin_cpu_relax(); \
		} while (!(cond)); \
		spin_end(); \
	} \
} while (0)
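/*
 * Illustrative usage (not part of the original header): wait for another
 * CPU to publish a flag while running at low SMT priority, e.g.
 *
 *	spin_until_cond(READ_ONCE(seen));
 *
 * ("seen" is a made-up variable.) The priority is only lowered if the
 * condition is not already true, and is restored before returning.
 */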

#else
#define cpu_relax() barrier()
#endif

/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}

#define spin_lock_prefetch(x) prefetchw(x)
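/*
 * Illustrative usage (not part of the original header): touch the next
 * element of a list before it is needed so the dcbt/dcbtst hint has time
 * to pull the cache line in, e.g.
 *
 *	prefetch(node->next);
 *	process(node);		// "process" and "node" are made up
 *
 * Both helpers tolerate a NULL pointer and simply do nothing.
 */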

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#ifdef CONFIG_PPC64
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	if (is_32)
		return sp & 0x0ffffffffUL;
	return sp;
}
#else
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	return sp;
}
#endif
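/*
 * Worked example (illustrative): for a 32-bit task on a 64-bit kernel,
 * get_clean_sp(0x123456780UL, 1) masks the stack pointer down to
 * 0x23456780, while 64-bit tasks (and all PPC32 kernels) get the value
 * back unchanged.
 */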

/* asm stubs */
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
#ifdef CONFIG_PPC_970_NAP
extern void power4_idle_nap(void);
#endif

extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap; /* set if nap mode can be used in idle loop */

extern void power7_idle_type(unsigned long type);
extern void arch300_idle_type(unsigned long stop_psscr_val,
			      unsigned long stop_psscr_mask);

extern int fix_alignment(struct pt_regs *);

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN 0
#endif

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */