/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Generic cache management functions. Everything is arch-specific,
 * but this header exists to make sure the defines/functions can be
 * used in a generic way.
 *
 * 2000-11-13 Arjan van de Ven <arjan@fenrus.demon.nl>
 *
 */

#ifndef _LINUX_PREFETCH_H
#define _LINUX_PREFETCH_H

#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cache.h>

struct page;
/*
	prefetch(x) attempts to pre-emptively get the memory pointed to
	by address "x" into the CPU L1 cache.
	prefetch(x) should not cause any kind of exception, prefetch(0) is
	specifically ok.

	prefetch() should be defined by the architecture, if not, the
	#define below provides a no-op define.

	There are 3 prefetch() macros:

	prefetch(x)		- prefetches the cacheline at "x" for read
	prefetchw(x)		- prefetches the cacheline at "x" for write
	spin_lock_prefetch(x)	- prefetches the spinlock *x for taking

	there is also PREFETCH_STRIDE which is the architecture-preferred
	"lookahead" size for prefetching streamed operations.

*/

#ifndef ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch(x)
#endif

#ifndef ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch(x,1)
#endif

#ifndef ARCH_HAS_SPINLOCK_PREFETCH
#define spin_lock_prefetch(x) prefetchw(x)
#endif

#ifndef PREFETCH_STRIDE
#define PREFETCH_STRIDE (4*L1_CACHE_BYTES)
#endif

/*
 * Prefetch a memory range, one PREFETCH_STRIDE at a time. Compiles to a
 * no-op when the architecture provides no prefetch implementation.
 */
static inline void prefetch_range(void *addr, size_t len)
{
#ifdef ARCH_HAS_PREFETCH
	char *cp;
	char *end = addr + len;

	for (cp = addr; cp < end; cp += PREFETCH_STRIDE)
		prefetch(cp);
#endif
}

/*
 * Prefetch the struct page itself, but only when a later virtual-address
 * lookup will actually read it (WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL);
 * otherwise this is a no-op.
 */
static inline void prefetch_page_address(struct page *page)
{
#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
	prefetch(page);
#endif
}

#endif /* _LINUX_PREFETCH_H */
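
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * a hypothetical helper that walks a buffer and issues a software prefetch
 * one PREFETCH_STRIDE ahead of the byte being processed, so the next
 * cacheline is (ideally) resident by the time the loop reaches it.
 * sum_bytes() is an invented example, not a kernel API.
 *
 *	static unsigned long sum_bytes(const unsigned char *buf, size_t len)
 *	{
 *		unsigned long total = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			if (i + PREFETCH_STRIDE < len)
 *				prefetch(&buf[i + PREFETCH_STRIDE]);
 *			total += buf[i];
 *		}
 *		return total;
 *	}
 *
 * For a one-shot bulk access, prefetch_range(buf, len) can be used instead
 * to warm the whole region in PREFETCH_STRIDE steps before processing it.
 */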