/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copied from the kernel sources to tools/:
 *
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _TOOLS_LINUX_ASM_IA64_BARRIER_H
#define _TOOLS_LINUX_ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */

/*
 * "mf" is the ia64 memory fence instruction.  Define ia64_mf() here
 * (as in arch/ia64/include/uapi/asm/gcc_intrin.h) so this tools copy
 * is self-contained and mb() below expands to something that builds.
 */
#define ia64_mf()	asm volatile ("mf" ::: "memory")

#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()

#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	barrier();				\
	___p1;					\
})

#endif /* _TOOLS_LINUX_ASM_IA64_BARRIER_H */
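
/*
 * Illustrative sketch (not part of the original header): how the
 * smp_store_release()/smp_load_acquire() helpers above are typically
 * paired.  The struct and function names below are hypothetical
 * examples for documentation only, not kernel APIs.
 *
 *	struct msg {
 *		int payload;
 *		int ready;		// 0 = empty, 1 = payload valid
 *	};
 *
 *	// Producer: write the payload, then release-store the flag so
 *	// the payload store cannot be reordered after the flag update.
 *	static void publish(struct msg *m, int value)
 *	{
 *		m->payload = value;
 *		smp_store_release(&m->ready, 1);
 *	}
 *
 *	// Consumer: acquire-load the flag; if it reads 1, the matching
 *	// payload store is guaranteed to be visible as well.
 *	static int consume(struct msg *m, int *out)
 *	{
 *		if (!smp_load_acquire(&m->ready))
 *			return 0;
 *		*out = m->payload;
 *		return 1;
 *	}
 */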