/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

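/*
 * Per-CPU state used by the lockless fast paths. The allocation and free
 * fast paths in mm/slub.c pair @freelist and @tid in a single
 * this_cpu_cmpxchg_double(), so a task that was preempted or migrated
 * notices that the cpu slab changed underneath it and retries.
 */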
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

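/*
 * Accessors for the per-cpu partial list. They let common code touch
 * c->partial without #ifdefs: with CONFIG_SLUB_CPU_PARTIAL disabled the
 * stubs below read as NULL / expand to nothing. Note that
 * slub_set_percpu_partial(c, p) installs p->next as the new list head,
 * i.e. it detaches @p from the per-cpu partial list.
 */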
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

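/*
 * A minimal sketch of how the packed word is typically produced and
 * consumed. The real helpers (oo_make()/oo_order()/oo_objects()) live in
 * mm/slub.c, not in this header; OO_SHIFT here is an assumption based on
 * that implementation:
 *
 *	#define OO_SHIFT	16
 *
 *	x       = (order << OO_SHIFT) | objects;	// oo_make()
 *	order   = x >> OO_SHIFT;			// oo_order()
 *	objects = x & ((1 << OO_SHIFT) - 1);		// oo_objects()
 *
 * Keeping both values in one word is what allows them to be read or
 * updated atomically, as noted above.
 */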
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)	\
({					\
	slub_cpu_partial(s) = (n);	\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */

#ifdef CONFIG_SLUB_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

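/*
 * Map an arbitrary address within a slab page back to the start of the
 * object containing it: round @x down to an object boundary relative to
 * the page start, clamp to the last object in the page, and skip over the
 * left red zone when redzone debugging is active. Callers such as the
 * KASAN report code use this to identify which object a bad access hit.
 */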
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/*
 * Determine the object index from a given position within a slab page.
 * The division by the object size is performed with reciprocal_divide(),
 * which replaces a runtime division with a multiply and shift using the
 * precomputed cache->reciprocal_size; kasan_reset_tag() strips any pointer
 * tag bits from @obj before the offset is computed.
 */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

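/*
 * As above, but taking the slab page directly and special-casing KFENCE
 * objects: a KFENCE allocation does not live inside a regular slab page,
 * so it is reported as index 0 rather than being run through the divide.
 */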
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, page_address(page), obj);
}

static inline int objs_per_slab_page(const struct kmem_cache *cache,
				     const struct page *page)
{
	return page->objects;
}
#endif /* _LINUX_SLUB_DEF_H */