/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_MMIOWB_H
#define __ASM_GENERIC_MMIOWB_H

/*
 * Generic implementation of mmiowb() tracking for spinlocks.
 *
 * If your architecture doesn't ensure that writes to an I/O peripheral
 * within two spinlocked sections on two different CPUs are seen by the
 * peripheral in the order corresponding to the lock handover, then you
 * need to follow these FIVE easy steps:
 *
 *	1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
 *	   in asm/mmiowb.h, then #include this file
 *	2. Ensure your I/O write accessors call mmiowb_set_pending()
 *	3. Select ARCH_HAS_MMIOWB
 *	4. Untangle the resulting mess of header files
 *	5. Complain to your architects
 */
#ifdef CONFIG_MMIOWB

#include <linux/compiler.h>
#include <asm-generic/mmiowb_types.h>

#ifndef arch_mmiowb_state
#include <asm/percpu.h>
#include <asm/smp.h>

DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
#define __mmiowb_state()	raw_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state()	arch_mmiowb_state()
#endif	/* arch_mmiowb_state */

/*
 * Called by the arch's I/O write accessors: record that an MMIO write
 * happened, but only while at least one spinlock is held.
 */
static inline void mmiowb_set_pending(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	if (likely(ms->nesting_count))
		ms->mmiowb_pending = ms->nesting_count;
}

/* Called on lock acquisition: track the spinlock nesting depth. */
static inline void mmiowb_spin_lock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();
	ms->nesting_count++;
}

/*
 * Called before lock release: if an MMIO write happened under this lock,
 * order it before the handover with mmiowb().
 */
static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();
	}

	ms->nesting_count--;
}
#else
#define mmiowb_set_pending()		do { } while (0)
#define mmiowb_spin_lock()		do { } while (0)
#define mmiowb_spin_unlock()		do { } while (0)
#endif	/* CONFIG_MMIOWB */
#endif	/* __ASM_GENERIC_MMIOWB_H */
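
/*
 * Illustrative sketch only (never compiled as part of this header): what
 * steps 1 and 3 above might look like for a hypothetical architecture
 * "foo". The file paths and the barrier choice are assumptions for the
 * sake of the example, not requirements of this interface; the barrier
 * borrows RISC-V's "fence o,w", which orders prior device output before
 * later memory writes such as the lock-releasing store.
 *
 * arch/foo/include/asm/mmiowb.h:
 */
#if 0	/* example only, never compiled */
#ifndef _ASM_FOO_MMIOWB_H
#define _ASM_FOO_MMIOWB_H

/* Step 1: order prior MMIO writes before the lock-releasing store. */
#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory")

#include <asm-generic/mmiowb.h>

#endif	/* _ASM_FOO_MMIOWB_H */
#endif	/* example only */
/*
 * Step 3 is then a one-line Kconfig change, e.g. in arch/foo/Kconfig:
 *
 *	config FOO
 *		select ARCH_HAS_MMIOWB
 */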
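
/*
 * Step 2, sketched for the same hypothetical "foo" port: every MMIO
 * write accessor must record that a write happened, so that the next
 * spin_unlock() knows a barrier is needed. Defining the accessor by
 * hand, as below, is one way to do it; a port built on asm-generic/io.h
 * can instead hook the "after write" callback, e.g.
 * "#define __io_aw() mmiowb_set_pending()". Treat the names here as
 * assumptions for illustration.
 */
#if 0	/* example only, never compiled */
static inline void foo_writel(u32 value, volatile void __iomem *addr)
{
	__raw_writel(value, addr);	/* the MMIO store itself */
	mmiowb_set_pending();		/* remember it for mmiowb_spin_unlock() */
}
#endif	/* example only */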
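
/*
 * For reference, a minimal sketch of what a driver's critical section
 * does with these hooks once the port is wired up. The lock, register
 * and value are hypothetical, and writel() only reaches
 * mmiowb_set_pending() after step 2 above is in place.
 */
#if 0	/* example only, never compiled */
#include <linux/spinlock.h>
#include <linux/io.h>

static void foo_poke_device(spinlock_t *lock, void __iomem *reg)
{
	spin_lock(lock);	/* mmiowb_spin_lock(): nesting_count++ */
	writel(0x1, reg);	/* accessor calls mmiowb_set_pending() */
	spin_unlock(lock);	/* mmiowb_spin_unlock() sees the pending
				 * write and issues mmiowb() before the
				 * lock is handed to the next CPU */
}
#endif	/* example only */
/*
 * An unlock with no intervening MMIO write skips the barrier entirely,
 * which is the point of the tracking.
 */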