/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_RUNLATCH_H
#define _ASM_POWERPC_RUNLATCH_H

#ifdef CONFIG_PPC64

/* Slow-path helpers that actually toggle the CTRL SPR run latch. */
extern void __ppc64_runlatch_on(void);
extern void __ppc64_runlatch_off(void);

/*
 * We manually hard enable-disable, this is called
 * in the idle loop and we don't want to mess up
 * with soft-disable/enable & interrupt replay.
 *
 * Each macro first checks CPU_FTR_CTRL (CPU has a run latch at all)
 * and the per-thread _TLF_RUNLATCH flag so the SPR write is skipped
 * when the latch is already in the requested state.  MSR is sampled
 * *before* hard-disabling so we only re-enable interrupts afterwards
 * if they were enabled on entry.
 */
#define ppc64_runlatch_off()					\
	do {							\
		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
		    test_thread_local_flags(_TLF_RUNLATCH)) {	\
			unsigned long msr = mfmsr();		\
			__hard_irq_disable();			\
			__ppc64_runlatch_off();			\
			if (msr & MSR_EE)			\
				__hard_irq_enable();		\
		}						\
	} while (0)

#define ppc64_runlatch_on()					\
	do {							\
		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
		    !test_thread_local_flags(_TLF_RUNLATCH)) {	\
			unsigned long msr = mfmsr();		\
			__hard_irq_disable();			\
			__ppc64_runlatch_on();			\
			if (msr & MSR_EE)			\
				__hard_irq_enable();		\
		}						\
	} while (0)
#else
/* Run latch management is a 64-bit-only concept; no-ops elsewhere. */
#define ppc64_runlatch_on()
#define ppc64_runlatch_off()
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_RUNLATCH_H */