xref: /OK3568_Linux_fs/kernel/arch/s390/include/asm/processor.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  S390 version
4*4882a593Smuzhiyun  *    Copyright IBM Corp. 1999
5*4882a593Smuzhiyun  *    Author(s): Hartmut Penner (hp@de.ibm.com),
6*4882a593Smuzhiyun  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  *  Derived from "include/asm-i386/processor.h"
9*4882a593Smuzhiyun  *    Copyright (C) 1994, Linus Torvalds
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #ifndef __ASM_S390_PROCESSOR_H
13*4882a593Smuzhiyun #define __ASM_S390_PROCESSOR_H
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <linux/bits.h>
16*4882a593Smuzhiyun 
/* CPU interruption flags: per-cpu bit numbers kept in lowcore cpu_flags. */
#define CIF_ASCE_PRIMARY	0	/* primary asce needs fixup / uaccess */
#define CIF_ASCE_SECONDARY	1	/* secondary asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
#define CIF_FPU			3	/* restore FPU registers */
#define CIF_IGNORE_IRQ		4	/* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT	5	/* in enabled wait state */
#define CIF_MCCK_GUEST		6	/* machine check happening in guest */
#define CIF_DEDICATED_CPU	7	/* this CPU is dedicated */

/* Bit masks corresponding to the CIF_* bit numbers above. */
#define _CIF_ASCE_PRIMARY	BIT(CIF_ASCE_PRIMARY)
#define _CIF_ASCE_SECONDARY	BIT(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY		BIT(CIF_NOHZ_DELAY)
#define _CIF_FPU		BIT(CIF_FPU)
#define _CIF_IGNORE_IRQ		BIT(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT	BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST		BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU	BIT(CIF_DEDICATED_CPU)
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #ifndef __ASSEMBLY__
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #include <linux/cpumask.h>
38*4882a593Smuzhiyun #include <linux/linkage.h>
39*4882a593Smuzhiyun #include <linux/irqflags.h>
40*4882a593Smuzhiyun #include <asm/cpu.h>
41*4882a593Smuzhiyun #include <asm/page.h>
42*4882a593Smuzhiyun #include <asm/ptrace.h>
43*4882a593Smuzhiyun #include <asm/setup.h>
44*4882a593Smuzhiyun #include <asm/runtime_instr.h>
45*4882a593Smuzhiyun #include <asm/fpu/types.h>
46*4882a593Smuzhiyun #include <asm/fpu/internal.h>
47*4882a593Smuzhiyun 
set_cpu_flag(int flag)48*4882a593Smuzhiyun static inline void set_cpu_flag(int flag)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun 	S390_lowcore.cpu_flags |= (1UL << flag);
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun 
clear_cpu_flag(int flag)53*4882a593Smuzhiyun static inline void clear_cpu_flag(int flag)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun 	S390_lowcore.cpu_flags &= ~(1UL << flag);
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun 
test_cpu_flag(int flag)58*4882a593Smuzhiyun static inline int test_cpu_flag(int flag)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	return !!(S390_lowcore.cpu_flags & (1UL << flag));
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /*
64*4882a593Smuzhiyun  * Test CIF flag of another CPU. The caller needs to ensure that
65*4882a593Smuzhiyun  * CPU hotplug can not happen, e.g. by disabling preemption.
66*4882a593Smuzhiyun  */
test_cpu_flag_of(int flag,int cpu)67*4882a593Smuzhiyun static inline int test_cpu_flag_of(int flag, int cpu)
68*4882a593Smuzhiyun {
69*4882a593Smuzhiyun 	struct lowcore *lc = lowcore_ptr[cpu];
70*4882a593Smuzhiyun 	return !!(lc->cpu_flags & (1UL << flag));
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
/* Tell the nohz code that this CPU still needs its timer tick. */
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
74*4882a593Smuzhiyun 
/* Store the CPU identification of this processor into *ptr (STIDP). */
static inline void get_cpu_id(struct cpuid *ptr)
{
	asm volatile("stidp %0" : "=Q" (*ptr));
}
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun void s390_adjust_jiffies(void);
81*4882a593Smuzhiyun void s390_update_cpu_mhz(void);
82*4882a593Smuzhiyun void cpu_detect_mhz_feature(void);
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun extern const struct seq_operations cpuinfo_op;
85*4882a593Smuzhiyun extern void execve_tail(void);
86*4882a593Smuzhiyun extern void __bpon(void);
87*4882a593Smuzhiyun 
/*
 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
 */

#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_31BIT) ? \
					_REGION3_SIZE : TASK_SIZE_MAX)
/* mmap base: half of the 31-bit resp. region-2 address space. */
#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
					(_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1))
#define TASK_SIZE		TASK_SIZE_OF(current)
/* -PAGE_SIZE: highest page-aligned value below the end of the address space. */
#define TASK_SIZE_MAX		(-PAGE_SIZE)

#define STACK_TOP		(test_thread_flag(TIF_31BIT) ? \
					_REGION3_SIZE : _REGION2_SIZE)
#define STACK_TOP_MAX		_REGION2_SIZE

#define HAVE_ARCH_PICK_MMAP_LAYOUT

/* Per-thread address space limit, see thread_struct::mm_segment below. */
typedef unsigned int mm_segment_t;
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /*
108*4882a593Smuzhiyun  * Thread structure
109*4882a593Smuzhiyun  */
110*4882a593Smuzhiyun struct thread_struct {
111*4882a593Smuzhiyun 	unsigned int  acrs[NUM_ACRS];
112*4882a593Smuzhiyun         unsigned long ksp;              /* kernel stack pointer             */
113*4882a593Smuzhiyun 	unsigned long user_timer;	/* task cputime in user space */
114*4882a593Smuzhiyun 	unsigned long guest_timer;	/* task cputime in kvm guest */
115*4882a593Smuzhiyun 	unsigned long system_timer;	/* task cputime in kernel space */
116*4882a593Smuzhiyun 	unsigned long hardirq_timer;	/* task cputime in hardirq context */
117*4882a593Smuzhiyun 	unsigned long softirq_timer;	/* task cputime in softirq context */
118*4882a593Smuzhiyun 	unsigned long sys_call_table;	/* system call table address */
119*4882a593Smuzhiyun 	mm_segment_t mm_segment;
120*4882a593Smuzhiyun 	unsigned long gmap_addr;	/* address of last gmap fault. */
121*4882a593Smuzhiyun 	unsigned int gmap_write_flag;	/* gmap fault write indication */
122*4882a593Smuzhiyun 	unsigned int gmap_int_code;	/* int code of last gmap fault */
123*4882a593Smuzhiyun 	unsigned int gmap_pfault;	/* signal of a pending guest pfault */
124*4882a593Smuzhiyun 	/* Per-thread information related to debugging */
125*4882a593Smuzhiyun 	struct per_regs per_user;	/* User specified PER registers */
126*4882a593Smuzhiyun 	struct per_event per_event;	/* Cause of the last PER trap */
127*4882a593Smuzhiyun 	unsigned long per_flags;	/* Flags to control debug behavior */
128*4882a593Smuzhiyun 	unsigned int system_call;	/* system call number in signal */
129*4882a593Smuzhiyun 	unsigned long last_break;	/* last breaking-event-address. */
130*4882a593Smuzhiyun         /* pfault_wait is used to block the process on a pfault event */
131*4882a593Smuzhiyun 	unsigned long pfault_wait;
132*4882a593Smuzhiyun 	struct list_head list;
133*4882a593Smuzhiyun 	/* cpu runtime instrumentation */
134*4882a593Smuzhiyun 	struct runtime_instr_cb *ri_cb;
135*4882a593Smuzhiyun 	struct gs_cb *gs_cb;		/* Current guarded storage cb */
136*4882a593Smuzhiyun 	struct gs_cb *gs_bc_cb;		/* Broadcast guarded storage cb */
137*4882a593Smuzhiyun 	unsigned char trap_tdb[256];	/* Transaction abort diagnose block */
138*4882a593Smuzhiyun 	/*
139*4882a593Smuzhiyun 	 * Warning: 'fpu' is dynamically-sized. It *MUST* be at
140*4882a593Smuzhiyun 	 * the end.
141*4882a593Smuzhiyun 	 */
142*4882a593Smuzhiyun 	struct fpu fpu;			/* FP and VX register save area */
143*4882a593Smuzhiyun };
144*4882a593Smuzhiyun 
/* Flag to disable transactions. */
#define PER_FLAG_NO_TE			1UL
/* Flag to enable random transaction aborts. */
#define PER_FLAG_TE_ABORT_RAND		2UL
/* Flag to specify random transaction abort mode:
 * - abort each transaction at a random instruction before TEND if set.
 * - abort random transactions at a random instruction if cleared.
 */
#define PER_FLAG_TE_ABORT_RAND_TEND	4UL

typedef struct thread_struct thread_struct;

#define ARCH_MIN_TASKALIGN	8

/*
 * Initial thread_struct for the init task: kernel stack pointer at the
 * top of init_stack, fpu register pointer at the static fprs array,
 * and last_break set to a non-zero initial value.
 */
#define INIT_THREAD {							\
	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
	.last_break = 1,						\
}
164*4882a593Smuzhiyun 
/*
 * Do necessary setup to start up a new thread.
 * Sets a 64-bit user mode PSW (EA+BA), the entry address and the
 * user stack pointer (gpr 15), then runs architecture exec tail work.
 */
#define start_thread(regs, new_psw, new_stackp) do {			\
	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;	\
	regs->psw.addr	= new_psw;					\
	regs->gprs[15]	= new_stackp;					\
	execve_tail();							\
} while (0)

/* Like start_thread(), but in 31-bit addressing mode (no PSW_MASK_EA). */
#define start_thread31(regs, new_psw, new_stackp) do {			\
	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_BA;			\
	regs->psw.addr	= new_psw;					\
	regs->gprs[15]	= new_stackp;					\
	execve_tail();							\
} while (0)
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun /* Forward declaration, a strange C thing */
183*4882a593Smuzhiyun struct task_struct;
184*4882a593Smuzhiyun struct mm_struct;
185*4882a593Smuzhiyun struct seq_file;
186*4882a593Smuzhiyun struct pt_regs;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun void show_registers(struct pt_regs *regs);
189*4882a593Smuzhiyun void show_cacheinfo(struct seq_file *m);
190*4882a593Smuzhiyun 
/* Free all resources held by a thread. Intentionally empty: there is
 * nothing to release per-thread here on s390. */
static inline void release_thread(struct task_struct *tsk) { }
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun /* Free guarded storage control block */
195*4882a593Smuzhiyun void guarded_storage_release(struct task_struct *tsk);
196*4882a593Smuzhiyun 
unsigned long get_wchan(struct task_struct *p);
/* User-space pt_regs live at the very top of the task's kernel stack. */
#define task_pt_regs(tsk) ((struct pt_regs *) \
        (task_stack_page(tsk) + THREAD_SIZE) - 1)
/* Saved user instruction address and stack pointer of a task. */
#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->gprs[15])

/* Has task runtime instrumentation enabled ? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
205*4882a593Smuzhiyun 
/* Return the current stack pointer (general register 15). */
static __always_inline unsigned long current_stack_pointer(void)
{
	unsigned long sp;

	/* "la %0,0(15)" computes the address 0 + %r15, i.e. reads gpr 15. */
	asm volatile("la %0,0(15)" : "=a" (sp));
	return sp;
}
213*4882a593Smuzhiyun 
/* Return the CPU address of the current processor (STAP instruction). */
static __always_inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=Q" (cpu_address));
	return cpu_address;
}
221*4882a593Smuzhiyun 
/* Busy-wait hint: just a compiler barrier on s390. */
#define cpu_relax() barrier()

/* Attribute selectors for the __ecag() helper below. */
#define ECAG_CACHE_ATTRIBUTE	0
#define ECAG_CPU_ATTRIBUTE	1
226*4882a593Smuzhiyun 
/*
 * Extract CPU attribute: execute the ECAG instruction (hand-encoded as a
 * raw RSY .insn since older assemblers may not know the mnemonic) with
 * the attribute selection index 'asi' and parameter 'parm' forming the
 * second-operand address.
 */
static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
{
	unsigned long val;

	asm volatile(".insn	rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (asi << 8 | parm));
	return val;
}
235*4882a593Smuzhiyun 
/* Set the PSW storage access key (SPKA instruction). */
static inline void psw_set_key(unsigned int key)
{
	asm volatile("spka 0(%0)" : : "d" (key));
}
240*4882a593Smuzhiyun 
/*
 * Set PSW to specified value.
 * LPSWE loads both PSW mask and address; control continues at psw.addr.
 */
static inline void __load_psw(psw_t psw)
{
	asm volatile("lpswe %0" : : "Q" (psw) : "cc");
}
248*4882a593Smuzhiyun 
/*
 * Set PSW mask to specified value, while leaving the
 * PSW addr pointing to the next instruction.
 *
 * The asm builds the complete PSW on the fly: larl takes the address of
 * the local label behind the lpswe, stg stores it into psw.addr, and
 * lpswe then loads the new PSW so execution resumes right after it with
 * the new mask in effect.
 */
static __always_inline void __load_psw_mask(unsigned long mask)
{
	unsigned long addr;
	psw_t psw;

	psw.mask = mask;

	asm volatile(
		"	larl	%0,1f\n"
		"	stg	%0,%1\n"
		"	lpswe	%2\n"
		"1:"
		: "=&d" (addr), "=Q" (psw.addr) : "Q" (psw) : "memory", "cc");
}
267*4882a593Smuzhiyun 
/*
 * Extract current PSW mask
 * EPSW stores the two 32-bit halves of the PSW mask into two registers;
 * recombine them into one 64-bit value.
 */
static inline unsigned long __extract_psw(void)
{
	unsigned int reg1, reg2;

	asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
	return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}
278*4882a593Smuzhiyun 
local_mcck_enable(void)279*4882a593Smuzhiyun static inline void local_mcck_enable(void)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun 	__load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun 
local_mcck_disable(void)284*4882a593Smuzhiyun static inline void local_mcck_disable(void)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun 	__load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK);
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun /*
290*4882a593Smuzhiyun  * Rewind PSW instruction address by specified number of bytes.
291*4882a593Smuzhiyun  */
__rewind_psw(psw_t psw,unsigned long ilc)292*4882a593Smuzhiyun static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun 	unsigned long mask;
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	mask = (psw.mask & PSW_MASK_EA) ? -1UL :
297*4882a593Smuzhiyun 	       (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
298*4882a593Smuzhiyun 					  (1UL << 24) - 1;
299*4882a593Smuzhiyun 	return (psw.addr - ilc) & mask;
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun /*
303*4882a593Smuzhiyun  * Function to stop a processor until the next interrupt occurs
304*4882a593Smuzhiyun  */
305*4882a593Smuzhiyun void enabled_wait(void);
306*4882a593Smuzhiyun 
/*
 * Function to drop a processor into disabled wait state
 */
static __always_inline void __noreturn disabled_wait(void)
{
	psw_t psw;

	/* Wait-state PSW with 64-bit addressing; interruptions stay disabled. */
	psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
	psw.addr = _THIS_IP_;	/* record the current instruction address */
	__load_psw(psw);
	while (1);		/* never reached; satisfies __noreturn */
}
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun /*
321*4882a593Smuzhiyun  * Basic Machine Check/Program Check Handler.
322*4882a593Smuzhiyun  */
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun extern void s390_base_pgm_handler(void);
325*4882a593Smuzhiyun extern void s390_base_ext_handler(void);
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun extern void (*s390_base_pgm_handler_fn)(void);
328*4882a593Smuzhiyun extern void (*s390_base_ext_handler_fn)(void);
329*4882a593Smuzhiyun 
#define ARCH_LOW_ADDRESS_LIMIT	0x7fffffffUL

extern int memcpy_real(void *, void *, size_t);
extern void memcpy_absolute(void *, void *, size_t);

/*
 * Assign 'val' to 'dest' through memcpy_absolute() — presumably so the
 * store targets the absolute address, bypassing lowcore prefixing
 * (TODO: confirm against memcpy_absolute's implementation). The
 * BUILD_BUG_ON rejects a size mismatch between 'dest' and 'val'.
 */
#define mem_assign_absolute(dest, val) do {			\
	__typeof__(dest) __tmp = (val);				\
								\
	BUILD_BUG_ON(sizeof(__tmp) != sizeof(val));		\
	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));	\
} while (0)

extern int s390_isolate_bp(void);
extern int s390_isolate_bp_guest(void);
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun #endif /* __ASSEMBLY__ */
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun #endif /* __ASM_S390_PROCESSOR_H */
348