/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations.  We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible but to fail rather than retry forever, for all
	 * request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	if (flags & KM_NOLOCKDEP)
		lflags |= __GFP_NOLOCKDEP;

	return lflags;
}

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);

static inline void
kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

/*
 * Zone interfaces
 */

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

#endif /* __XFS_SUPPORT_KMEM_H__ */
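
/*
 * Illustrative usage (a sketch, not part of the XFS code): how a caller
 * might combine the wrappers above.  Only kmem_zalloc(), kmem_free() and
 * the KM_* flags come from this header; struct xfs_example_buf and the
 * xfs_example_alloc()/xfs_example_free() helpers are hypothetical names
 * made up for the example.
 *
 *	struct xfs_example_buf {
 *		size_t		len;
 *		void		*data;
 *	};
 *
 *	static struct xfs_example_buf *
 *	xfs_example_alloc(size_t len)
 *	{
 *		struct xfs_example_buf	*ebp;
 *
 *		// KM_NOFS: avoid recursing into FS reclaim during a transaction
 *		// KM_MAYFAIL: may return NULL instead of retrying forever,
 *		// so the caller must check for failure
 *		ebp = kmem_zalloc(sizeof(*ebp), KM_NOFS | KM_MAYFAIL);
 *		if (!ebp)
 *			return NULL;
 *		ebp->len = len;
 *		return ebp;
 *	}
 *
 *	static void
 *	xfs_example_free(struct xfs_example_buf *ebp)
 *	{
 *		kmem_free(ebp);
 *	}
 */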