/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->private: points to zspage
 *	page->freelist(index): links together all component pages of a zspage
 *		For the huge page, this is always 0, so we use this field
 *		to store handle.
 *	page->units: first object offset in a subpage of zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_owner_priv_1: identifies the huge component page
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/migrate.h>
#include <linux/wait.h>
#include <linux/pagemap.h>
#include <linux/fs.h>

#define ZSPAGE_MAGIC	0x58

/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines the upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> starts from 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_POSSIBLE_PHYSMEM_BITS
#ifdef MAX_PHYSMEM_BITS
#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
#else
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif

#define _PFN_BITS		(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * Memory allocated for a handle keeps the object position by encoding
 * <page, obj_idx>, and the encoded value has room in its least
 * significant bit (see obj_to_location).
 * We use that bit to synchronize object access between the user
 * and migration.
 */
#define HANDLE_PIN_BIT	0

/*
 * The head of an allocated object carries OBJ_ALLOCATED_TAG so we can
 * tell whether the object is allocated or not.
 * It's okay to keep this status bit in the least significant bit
 * because the header stores a handle, which is a 4-byte-aligned
 * address, so we have room for at least two bits.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
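
/*
 * Illustrative arithmetic (not used by the code): assuming a 64-bit
 * system with PAGE_SHIFT == 12 and MAX_POSSIBLE_PHYSMEM_BITS == 48,
 * the handle bits split as follows:
 *
 *	_PFN_BITS      = 48 - 12         = 36
 *	OBJ_INDEX_BITS = 64 - 36 - 1     = 27
 *	OBJ_INDEX_MASK = (1UL << 27) - 1 = 0x7ffffff
 *
 * i.e. an encoded object value is <PFN:36 | obj_idx:27 | tag:1>.
 */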

#define FULLNESS_BITS	2
#define CLASS_BITS	8
#define ISOLATED_BITS	3
#define MAGIC_VAL_BITS	8

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
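
/*
 * A worked example under the same assumptions as above (PAGE_SHIFT ==
 * 12, OBJ_INDEX_BITS == 27):
 *
 *	ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS
 *		= (4 << 12) >> 27 = 0
 *
 * so ZS_MIN_ALLOC_SIZE = MAX(32, 0) = 32 bytes, and the largest
 * allocation is one full page (4096 bytes).
 */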

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - A large number of size classes is potentially wasteful as free pages
 *    are spread across these classes
 *  - A small number of size classes causes large internal fragmentation
 *  - It is probably better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
				      ZS_SIZE_CLASS_DELTA) + 1)
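
/*
 * Sanity check of the "255 size classes" claim above, assuming 4K
 * pages and CLASS_BITS == 8:
 *
 *	ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16
 *	ZS_SIZE_CLASSES     = DIV_ROUND_UP(4096 - 32, 16) + 1
 *	                    = 254 + 1 = 255
 */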

enum fullness_group {
	ZS_EMPTY,
	ZS_ALMOST_EMPTY,
	ZS_ALMOST_FULL,
	ZS_FULL,
	NR_ZS_FULLNESS,
};

enum zs_stat_type {
	CLASS_EMPTY,
	CLASS_ALMOST_EMPTY,
	CLASS_ALMOST_FULL,
	CLASS_FULL,
	OBJ_ALLOCATED,
	OBJ_USED,
	NR_ZS_STAT_TYPE,
};

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

#ifdef CONFIG_COMPACTION
static struct vfsmount *zsmalloc_mnt;
#endif

/*
 * We assign a zspage to ZS_ALMOST_EMPTY fullness group when:
 *	n <= 3 * N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > 3 * N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
static size_t huge_class_size;

struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[NR_ZS_FULLNESS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;
	struct zs_size_stat stats;
};

/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetPageHugeObject(struct page *page)
{
	SetPageOwnerPriv1(page);
}

static void ClearPageHugeObject(struct page *page)
{
	ClearPageOwnerPriv1(page);
}

static int PageHugeObject(struct page *page)
{
	return PageOwnerPriv1(page);
}

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

struct zs_pool {
	const char *name;

	struct size_class *size_class[ZS_SIZE_CLASSES];
	struct kmem_cache *handle_cachep;
	struct kmem_cache *zspage_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker shrinker;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
#ifdef CONFIG_COMPACTION
	struct inode *inode;
	struct work_struct free_work;
	/* A wait queue for when migration races with async_free_zspage() */
	struct wait_queue_head migration_wait;
	atomic_long_t isolated_pages;
	bool destroying;
#endif
};

struct zspage {
	struct {
		unsigned int fullness:FULLNESS_BITS;
		unsigned int class:CLASS_BITS + 1;
		unsigned int isolated:ISOLATED_BITS;
		unsigned int magic:MAGIC_VAL_BITS;
	};
	unsigned int inuse;
	unsigned int freeobj;
	struct page *first_page;
	struct list_head list; /* fullness list */
#ifdef CONFIG_COMPACTION
	rwlock_t lock;
#endif
};

struct mapping_area {
	char *vm_buf; /* copy buffer for objects that span pages */
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

#ifdef CONFIG_COMPACTION
static int zs_register_migration(struct zs_pool *pool);
static void zs_unregister_migration(struct zs_pool *pool);
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
static int zsmalloc_mount(void) { return 0; }
static void zsmalloc_unmount(void) {}
static int zs_register_migration(struct zs_pool *pool) { return 0; }
static void zs_unregister_migration(struct zs_pool *pool) {}
static void migrate_lock_init(struct zspage *zspage) {}
static void migrate_read_lock(struct zspage *zspage) {}
static void migrate_read_unlock(struct zspage *zspage) {}
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif

static int create_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	if (!pool->handle_cachep)
		return 1;

	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
					0, 0, NULL);
	if (!pool->zspage_cachep) {
		kmem_cache_destroy(pool->handle_cachep);
		pool->handle_cachep = NULL;
		return 1;
	}

	return 0;
}

static void destroy_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
	kmem_cache_destroy(pool->zspage_cachep);
}

static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
}

static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	return kmem_cache_alloc(pool->zspage_cachep,
			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
}

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
{
	kmem_cache_free(pool->zspage_cachep, zspage);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	/*
	 * lsb of @obj represents handle lock while other bits
	 * represent object value the handle is pointing to, so
	 * updating shouldn't do store tearing.
	 */
	WRITE_ONCE(*(unsigned long *)handle, obj);
}

/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(const char *name, gfp_t gfp,
			     const struct zpool_ops *zpool_ops,
			     struct zpool *zpool)
{
	/*
	 * Ignore global gfp flags: zs_malloc() may be invoked from
	 * different contexts and its caller must provide a valid
	 * gfp mask.
	 */
	return zs_create_pool(name);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size, gfp);
	return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW:
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}
static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =			  "zsmalloc",
	.owner =		  THIS_MODULE,
	.create =		  zs_zpool_create,
	.destroy =		  zs_zpool_destroy,
	.malloc_support_movable = true,
	.malloc =		  zs_zpool_malloc,
	.free =			  zs_zpool_free,
	.map =			  zs_zpool_map,
	.unmap =		  zs_zpool_unmap,
	.total_size =		  zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
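
/*
 * A minimal sketch of how a zpool client (e.g. zswap) would reach this
 * driver through the generic zpool API. This is illustrative only and
 * assumes the zpool_*() wrappers of this kernel generation; it is not
 * part of zsmalloc itself:
 *
 *	struct zpool *zp = zpool_create_pool("zsmalloc", "demo",
 *					     GFP_KERNEL, NULL);
 *	unsigned long handle;
 *
 *	if (zp && !zpool_malloc(zp, 128, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memset(dst, 0, 128);
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_destroy_pool(zp);
 */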

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static bool is_zspage_isolated(struct zspage *zspage)
{
	return zspage->isolated;
}

static __maybe_unused int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
	return zspage->inuse;
}


static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse += val;
}

static inline struct page *get_first_page(struct zspage *zspage)
{
	struct page *first_page = zspage->first_page;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
	return first_page;
}

static inline int get_first_obj_offset(struct page *page)
{
	return page->units;
}

static inline void set_first_obj_offset(struct page *page, int offset)
{
	page->units = offset;
}

static inline unsigned int get_freeobj(struct zspage *zspage)
{
	return zspage->freeobj;
}

static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
{
	zspage->freeobj = obj;
}

static void get_zspage_mapping(struct zspage *zspage,
				unsigned int *class_idx,
				enum fullness_group *fullness)
{
	BUG_ON(zspage->magic != ZSPAGE_MAGIC);

	*fullness = zspage->fullness;
	*class_idx = zspage->class;
}

static void set_zspage_mapping(struct zspage *zspage,
				unsigned int class_idx,
				enum fullness_group fullness)
{
	zspage->class = class_idx;
	zspage->fullness = fullness;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
}
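
/*
 * Example, assuming ZS_MIN_ALLOC_SIZE == 32 and ZS_SIZE_CLASS_DELTA ==
 * 16 (4K pages): a request for 100 bytes yields
 *
 *	idx = DIV_ROUND_UP(100 - 32, 16) = DIV_ROUND_UP(68, 16) = 5
 *
 * i.e. the class whose chunk size is 32 + 5 * 16 = 112 bytes, the
 * smallest class able to hold the request.
 */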

/* type can be of enum type zs_stat_type or fullness_group */
static inline void zs_stat_inc(struct size_class *class,
				int type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

/* type can be of enum type zs_stat_type or fullness_group */
static inline void zs_stat_dec(struct size_class *class,
				int type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

/* type can be of enum type zs_stat_type or fullness_group */
static inline unsigned long zs_stat_get(struct size_class *class,
				int type)
{
	return class->stats.objs[type];
}

#ifdef CONFIG_ZSMALLOC_STAT

static void __init zs_stat_init(void)
{
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static unsigned long zs_can_compact(struct size_class *class);

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used, freeable;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
	unsigned long total_freeable = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage", "freeable");

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		freeable = zs_can_compact(class);
		spin_unlock(&class->lock);

		objs_per_zspage = class->objs_per_zspage;
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
				" %10lu %10lu %16d %8lu\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage, freeable);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
		total_freeable += freeable;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages, "", total_freeable);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(zs_stats_size);

static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
		return;
	}

	pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);

	debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
			    &zs_stats_size_fops);
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void)
{
}

static void __exit zs_stat_exit(void)
{
}

static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif


/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given zspage.
 */
static enum fullness_group get_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int inuse, objs_per_zspage;
	enum fullness_group fg;

	inuse = get_zspage_inuse(zspage);
	objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == objs_per_zspage)
		fg = ZS_FULL;
	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
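
/*
 * Numeric example: for a class with objs_per_zspage == 10 and
 * fullness_threshold_frac == 4, the boundary is 3 * 10 / 4 == 7, so a
 * zspage with 1..7 live objects is ZS_ALMOST_EMPTY, one with 8 or 9 is
 * ZS_ALMOST_FULL, 0 is ZS_EMPTY and 10 is ZS_FULL.
 */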

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	struct zspage *head;

	zs_stat_inc(class, fullness, 1);
	head = list_first_entry_or_null(&class->fullness_list[fullness],
					struct zspage, list);
	/*
	 * We want to see more ZS_FULL pages and less almost empty/full.
	 * Put pages with higher ->inuse first.
	 */
	if (head) {
		if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
			list_add(&zspage->list, &head->list);
			return;
		}
	}
	list_add(&zspage->list, &class->fullness_list[fullness]);
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
	VM_BUG_ON(is_zspage_isolated(zspage));

	list_del_init(&zspage->list);
	zs_stat_dec(class, fullness, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int class_idx;
	enum fullness_group currfg, newfg;

	get_zspage_mapping(zspage, &class_idx, &currfg);
	newfg = get_fullness_group(class, zspage);
	if (newfg == currfg)
		goto out;

	if (!is_zspage_isolated(zspage)) {
		remove_zspage(class, zspage, currfg);
		insert_zspage(class, zspage, newfg);
	}

	set_zspage_mapping(zspage, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

static struct zspage *get_zspage(struct page *page)
{
	struct zspage *zspage = (struct zspage *)page->private;

	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
	return zspage;
}

static struct page *get_next_page(struct page *page)
{
	if (unlikely(PageHugeObject(page)))
		return NULL;

	return page->freelist;
}

/**
 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
 * @obj: the encoded object value
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned int *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

/**
 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
{
	unsigned long obj;

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= obj_idx & OBJ_INDEX_MASK;
	obj <<= OBJ_TAG_BITS;

	return obj;
}
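
/*
 * Round-trip example under the bit layout worked out earlier
 * (OBJ_INDEX_BITS == 27, OBJ_TAG_BITS == 1): for PFN 0x1000 and
 * obj_idx 3,
 *
 *	obj = ((0x1000 << 27) | 3) << 1
 *
 * and obj_to_location() simply reverses the shifts and mask to
 * recover (0x1000, 3).
 */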

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static unsigned long obj_to_head(struct page *page, void *obj)
{
	if (unlikely(PageHugeObject(page))) {
		VM_BUG_ON_PAGE(!is_first_page(page), page);
		return page->index;
	} else
		return *(unsigned long *)obj;
}

static inline int testpin_tag(unsigned long handle)
{
	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static inline int trypin_tag(unsigned long handle)
{
	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void pin_tag(unsigned long handle) __acquires(bitlock)
{
	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void unpin_tag(unsigned long handle) __releases(bitlock)
{
	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void reset_page(struct page *page)
{
	__ClearPageMovable(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_mapcount_reset(page);
	ClearPageHugeObject(page);
	page->freelist = NULL;
}

static int trylock_zspage(struct zspage *zspage)
{
	struct page *cursor, *fail;

	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
					get_next_page(cursor)) {
		if (!trylock_page(cursor)) {
			fail = cursor;
			goto unlock;
		}
	}

	return 1;
unlock:
	for (cursor = get_first_page(zspage); cursor != fail; cursor =
					get_next_page(cursor))
		unlock_page(cursor);

	return 0;
}

static void __free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	struct page *page, *next;
	enum fullness_group fg;
	unsigned int class_idx;

	get_zspage_mapping(zspage, &class_idx, &fg);

	assert_spin_locked(&class->lock);

	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(fg != ZS_EMPTY);

	next = page = get_first_page(zspage);
	do {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		next = get_next_page(page);
		reset_page(page);
		unlock_page(page);
		dec_zone_page_state(page, NR_ZSPAGES);
		put_page(page);
		page = next;
	} while (page != NULL);

	cache_free_zspage(pool, zspage);

	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
	atomic_long_sub(class->pages_per_zspage,
					&pool->pages_allocated);
}

static void free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(list_empty(&zspage->list));

	if (!trylock_zspage(zspage)) {
		kick_deferred_free(pool);
		return;
	}

	remove_zspage(class, zspage, ZS_EMPTY);
	__free_zspage(pool, class, zspage);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct size_class *class, struct zspage *zspage)
{
	unsigned int freeobj = 1;
	unsigned long off = 0;
	struct page *page = get_first_page(zspage);

	while (page) {
		struct page *next_page;
		struct link_free *link;
		void *vaddr;

		set_first_obj_offset(page, off);

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = freeobj++ << OBJ_TAG_BITS;
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		if (next_page) {
			link->next = freeobj++ << OBJ_TAG_BITS;
		} else {
			/*
			 * Store an invalid index (-1, shifted past the tag
			 * bits) in the last link, so the end of the freelist
			 * can be told apart from an allocated object.
			 */
			link->next = -1UL << OBJ_TAG_BITS;
		}
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage, 0);
}
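
/*
 * Freelist layout example: for class->size == 1024 on a single 4K
 * page, the loop above stamps links at offsets 0, 1024 and 2048
 * holding the encoded indices 1, 2 and 3, the link at offset 3072
 * gets the end marker (-1UL << OBJ_TAG_BITS), and
 * set_freeobj(zspage, 0) makes object 0 the head of the list.
 */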

static void create_page_chain(struct size_class *class, struct zspage *zspage,
				struct page *pages[])
{
	int i;
	struct page *page;
	struct page *prev_page = NULL;
	int nr_pages = class->pages_per_zspage;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. all pages are linked together using page->freelist
	 * 2. each sub-page points to zspage using page->private
	 *
	 * we set PG_private to identify the first page (i.e. no other sub-page
	 * has this flag set).
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
		set_page_private(page, (unsigned long)zspage);
		page->freelist = NULL;
		if (i == 0) {
			zspage->first_page = page;
			SetPagePrivate(page);
			if (unlikely(class->objs_per_zspage == 1 &&
					class->pages_per_zspage == 1))
				SetPageHugeObject(page);
		} else {
			prev_page->freelist = page;
		}
		prev_page = page;
	}
}

/*
 * Allocate a zspage for the given size class
 */
static struct zspage *alloc_zspage(struct zs_pool *pool,
					struct size_class *class,
					gfp_t gfp)
{
	int i;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
	struct zspage *zspage = cache_alloc_zspage(pool, gfp);

	if (!zspage)
		return NULL;

	memset(zspage, 0, sizeof(struct zspage));
	zspage->magic = ZSPAGE_MAGIC;
	migrate_lock_init(zspage);

	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(gfp);
		if (!page) {
			while (--i >= 0) {
				dec_zone_page_state(pages[i], NR_ZSPAGES);
				__free_page(pages[i]);
			}
			cache_free_zspage(pool, zspage);
			return NULL;
		}

		inc_zone_page_state(page, NR_ZSPAGES);
		pages[i] = page;
	}

	create_page_chain(class, zspage, pages);
	init_zspage(class, zspage);

	return zspage;
}
1096*4882a593Smuzhiyun
find_get_zspage(struct size_class * class)1097*4882a593Smuzhiyun static struct zspage *find_get_zspage(struct size_class *class)
1098*4882a593Smuzhiyun {
1099*4882a593Smuzhiyun int i;
1100*4882a593Smuzhiyun struct zspage *zspage;
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
1103*4882a593Smuzhiyun zspage = list_first_entry_or_null(&class->fullness_list[i],
1104*4882a593Smuzhiyun struct zspage, list);
1105*4882a593Smuzhiyun if (zspage)
1106*4882a593Smuzhiyun break;
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun return zspage;
1110*4882a593Smuzhiyun }
1111*4882a593Smuzhiyun
__zs_cpu_up(struct mapping_area * area)1112*4882a593Smuzhiyun static inline int __zs_cpu_up(struct mapping_area *area)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun /*
1115*4882a593Smuzhiyun * Make sure we don't leak memory if a cpu UP notification
1116*4882a593Smuzhiyun * and zs_init() race and both call zs_cpu_up() on the same cpu
1117*4882a593Smuzhiyun */
1118*4882a593Smuzhiyun if (area->vm_buf)
1119*4882a593Smuzhiyun return 0;
1120*4882a593Smuzhiyun area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
1121*4882a593Smuzhiyun if (!area->vm_buf)
1122*4882a593Smuzhiyun return -ENOMEM;
1123*4882a593Smuzhiyun return 0;
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun
__zs_cpu_down(struct mapping_area * area)1126*4882a593Smuzhiyun static inline void __zs_cpu_down(struct mapping_area *area)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun kfree(area->vm_buf);
1129*4882a593Smuzhiyun area->vm_buf = NULL;
1130*4882a593Smuzhiyun }
1131*4882a593Smuzhiyun
__zs_map_object(struct mapping_area * area,struct page * pages[2],int off,int size)1132*4882a593Smuzhiyun static void *__zs_map_object(struct mapping_area *area,
1133*4882a593Smuzhiyun struct page *pages[2], int off, int size)
1134*4882a593Smuzhiyun {
1135*4882a593Smuzhiyun int sizes[2];
1136*4882a593Smuzhiyun void *addr;
1137*4882a593Smuzhiyun char *buf = area->vm_buf;
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun /* disable page faults to match kmap_atomic() return conditions */
1140*4882a593Smuzhiyun pagefault_disable();
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun /* no read fastpath */
1143*4882a593Smuzhiyun if (area->vm_mm == ZS_MM_WO)
1144*4882a593Smuzhiyun goto out;
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun sizes[0] = PAGE_SIZE - off;
1147*4882a593Smuzhiyun sizes[1] = size - sizes[0];
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun /* copy object to per-cpu buffer */
1150*4882a593Smuzhiyun addr = kmap_atomic(pages[0]);
1151*4882a593Smuzhiyun memcpy(buf, addr + off, sizes[0]);
1152*4882a593Smuzhiyun kunmap_atomic(addr);
1153*4882a593Smuzhiyun addr = kmap_atomic(pages[1]);
1154*4882a593Smuzhiyun memcpy(buf + sizes[0], addr, sizes[1]);
1155*4882a593Smuzhiyun kunmap_atomic(addr);
1156*4882a593Smuzhiyun out:
1157*4882a593Smuzhiyun return area->vm_buf;
1158*4882a593Smuzhiyun }
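
/*
 * Worked example of the split copy above (illustrative values only):
 * with PAGE_SIZE == 4096, an object of class->size == 1024 starting at
 * off == 3584 spans two pages.  sizes[0] = 4096 - 3584 = 512 bytes are
 * copied from the tail of pages[0] and sizes[1] = 1024 - 512 = 512
 * bytes from the head of pages[1], so the caller sees one contiguous
 * object in area->vm_buf.
 */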

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	buf = buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

static int zs_cpu_prepare(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	return __zs_cpu_up(area);
}

static int zs_cpu_dead(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	__zs_cpu_down(area);
	return 0;
}

static bool can_merge(struct size_class *prev, int pages_per_zspage,
					int objs_per_zspage)
{
	if (prev->pages_per_zspage == pages_per_zspage &&
		prev->objs_per_zspage == objs_per_zspage)
		return true;

	return false;
}

static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == class->objs_per_zspage;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);

	/* migration cannot move any subpage in this zspage */
	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (likely(!PageHugeObject(page)))
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);

	migrate_read_unlock(zspage);
	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

/**
 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
 *                        zsmalloc &size_class.
 * @pool: zsmalloc pool to use
 *
 * The function returns the size of the first huge class - any object of
 * equal or bigger size will be stored in a zspage consisting of a single
 * physical page.
 *
 * Context: Any context.
 *
 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
 */
size_t zs_huge_class_size(struct zs_pool *pool)
{
	return huge_class_size;
}
EXPORT_SYMBOL_GPL(zs_huge_class_size);
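
/*
 * Sketch of a typical use by a caller such as zram (illustrative only,
 * not a verbatim copy of any driver's code): once a buffer compresses
 * to at least this size it will occupy a full page in the pool anyway,
 * so the caller may as well store the uncompressed PAGE_SIZE data:
 *
 *	size_t huge_class_size = zs_huge_class_size(pool);
 *	...
 *	if (comp_len >= huge_class_size)
 *		comp_len = PAGE_SIZE;
 */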

static unsigned long obj_malloc(struct size_class *class,
				struct zspage *zspage, unsigned long handle)
{
	int i, nr_page, offset;
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = get_freeobj(zspage);

	offset = obj * class->size;
	nr_page = offset >> PAGE_SHIFT;
	m_offset = offset & ~PAGE_MASK;
	m_page = get_first_page(zspage);

	for (i = 0; i < nr_page; i++)
		m_page = get_next_page(m_page);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
	if (likely(!PageHugeObject(m_page)))
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle in page->index */
		zspage->first_page->index = handle;

	kunmap_atomic(vaddr);
	mod_zspage_inuse(zspage, 1);
	zs_stat_inc(class, OBJ_USED, 1);

	obj = location_to_obj(m_page, obj);

	return obj;
}
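
/*
 * Worked example of the location math above (illustrative values only):
 * for class->size == 256 and free object index obj == 20, offset is
 * 20 * 256 == 5120, so with PAGE_SIZE == 4096 the object lives in the
 * second page of the chain (nr_page == 1) at m_offset == 1024.
 */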

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @gfp: gfp flags when allocating object
 *
 * On success, a handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long handle, obj;
	struct size_class *class;
	enum fullness_group newfg;
	struct zspage *zspage;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = cache_alloc_handle(pool, gfp);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	zspage = find_get_zspage(class);
	if (likely(zspage)) {
		obj = obj_malloc(class, zspage, handle);
		/* Now move the zspage to another fullness group, if required */
		fix_fullness_group(class, zspage);
		record_obj(handle, obj);
		spin_unlock(&class->lock);

		return handle;
	}

	spin_unlock(&class->lock);

	zspage = alloc_zspage(pool, class, gfp);
	if (!zspage) {
		cache_free_handle(pool, handle);
		return 0;
	}

	spin_lock(&class->lock);
	obj = obj_malloc(class, zspage, handle);
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class->index, newfg);
	record_obj(handle, obj);
	atomic_long_add(class->pages_per_zspage,
				&pool->pages_allocated);
	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);

	/* The zspage is fully set up, so mark its pages movable */
	SetZsPageMovable(pool, zspage);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);

static void obj_free(struct size_class *class, unsigned long obj)
{
	struct link_free *link;
	struct zspage *zspage;
	struct page *f_page;
	unsigned long f_offset;
	unsigned int f_objidx;
	void *vaddr;

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	f_offset = (class->size * f_objidx) & ~PAGE_MASK;
	zspage = get_zspage(f_page);

	vaddr = kmap_atomic(f_page);

	/* Insert this object into the containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
	kunmap_atomic(vaddr);
	set_freeobj(zspage, f_objidx);
	mod_zspage_inuse(zspage, -1);
	zs_stat_dec(class, OBJ_USED, 1);
}

void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *f_page;
	unsigned long obj;
	unsigned int f_objidx;
	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;
	bool isolated;

	if (unlikely(!handle))
		return;

	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	zspage = get_zspage(f_page);

	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(class, obj);
	fullness = fix_fullness_group(class, zspage);
	if (fullness != ZS_EMPTY) {
		migrate_read_unlock(zspage);
		goto out;
	}

	isolated = is_zspage_isolated(zspage);
	migrate_read_unlock(zspage);
	/* If zspage is isolated, zs_page_putback will free the zspage */
	if (likely(!isolated))
		free_zspage(pool, class, zspage);
out:
	spin_unlock(&class->lock);
	unpin_tag(handle);
	cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
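
/*
 * Minimal lifecycle sketch for the allocation API (illustrative only;
 * error handling is trimmed, and the pool name and "src" buffer are
 * made up):
 *
 *	struct zs_pool *pool = zs_create_pool("my_pool");
 *	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 *
 * The pointer returned by zs_map_object() is only valid until
 * zs_unmap_object() and must not be dereferenced afterwards.
 */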

static void zs_object_copy(struct size_class *class, unsigned long dst,
				unsigned long src)
{
	struct page *s_page, *d_page;
	unsigned int s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = (class->size * s_objidx) & ~PAGE_MASK;
	d_off = (class->size * d_objidx) & ~PAGE_MASK;

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		if (s_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

		if (d_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}

/*
 * Find an allocated object in the zspage, scanning forward from the
 * object at index *obj_idx, and return its pinned handle (or 0 if
 * none is found).
 */
static unsigned long find_alloced_obj(struct size_class *class,
					struct page *page, int *obj_idx)
{
	unsigned long head;
	int offset = 0;
	int index = *obj_idx;
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

	offset = get_first_obj_offset(page);
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		head = obj_to_head(page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
		index++;
	}

	kunmap_atomic(addr);

	*obj_idx = index;

	return handle;
}

struct zs_compact_control {
	/* Source page for migration, which may be a subpage of the zspage */
	struct page *s_page;
	/*
	 * Destination page for migration, which should be the first page
	 * of the zspage.
	 */
	struct page *d_page;
	/*
	 * Starting object index within @s_page from which to scan for a
	 * live object in the subpage.
	 */
	int obj_idx;
};

static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	int obj_idx = cc->obj_idx;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(class, s_page, &obj_idx);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			obj_idx = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(class, get_zspage(d_page))) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(class, get_zspage(d_page), handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		/*
		 * record_obj updates the handle's value to free_obj and
		 * would clear its lock bit (i.e. HANDLE_PIN_BIT), which
		 * breaks the synchronization based on pin_tag (e.g. in
		 * zs_free), so keep the lock bit set.
		 */
		free_obj |= BIT(HANDLE_PIN_BIT);
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(class, used_obj);
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->obj_idx = obj_idx;

	return ret;
}

static struct zspage *isolate_zspage(struct size_class *class, bool source)
{
	int i;
	struct zspage *zspage;
	enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};

	if (!source) {
		fg[0] = ZS_ALMOST_FULL;
		fg[1] = ZS_ALMOST_EMPTY;
	}

	for (i = 0; i < 2; i++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
							struct zspage, list);
		if (zspage) {
			VM_BUG_ON(is_zspage_isolated(zspage));
			remove_zspage(class, zspage, fg[i]);
			return zspage;
		}
	}

	return zspage;
}

/*
 * putback_zspage - add @zspage into the right fullness list of @class
 * @class: destination class
 * @zspage: target page
 *
 * Return @zspage's fullness_group
 */
static enum fullness_group putback_zspage(struct size_class *class,
				struct zspage *zspage)
{
	enum fullness_group fullness;

	VM_BUG_ON(is_zspage_isolated(zspage));

	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);
	set_zspage_mapping(zspage, class->index, fullness);

	return fullness;
}

#ifdef CONFIG_COMPACTION
/*
 * To prevent the zspage from being destroyed during migration, the
 * zspage freeing path must hold the page lock of every page in the
 * zspage.
 */
static void lock_zspage(struct zspage *zspage)
{
	struct page *curr_page, *page;

	/*
	 * Pages we haven't locked yet can be migrated off the list while we're
	 * trying to lock them, so we need to be careful and only attempt to
	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
	 * may no longer belong to the zspage. This means that we may wait for
	 * the wrong page to unlock, so we must take a reference to the page
	 * prior to waiting for it to unlock outside migrate_read_lock().
	 */
	while (1) {
		migrate_read_lock(zspage);
		page = get_first_page(zspage);
		if (trylock_page(page))
			break;
		get_page(page);
		migrate_read_unlock(zspage);
		wait_on_page_locked(page);
		put_page(page);
	}

	curr_page = page;
	while ((page = get_next_page(curr_page))) {
		if (trylock_page(page)) {
			curr_page = page;
		} else {
			get_page(page);
			migrate_read_unlock(zspage);
			wait_on_page_locked(page);
			put_page(page);
			migrate_read_lock(zspage);
		}
	}
	migrate_read_unlock(zspage);
}

static int zs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, ZSMALLOC_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type zsmalloc_fs = {
	.name		= "zsmalloc",
	.init_fs_context = zs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int zsmalloc_mount(void)
{
	int ret = 0;

	zsmalloc_mnt = kern_mount(&zsmalloc_fs);
	if (IS_ERR(zsmalloc_mnt))
		ret = PTR_ERR(zsmalloc_mnt);

	return ret;
}

static void zsmalloc_unmount(void)
{
	kern_unmount(zsmalloc_mnt);
}

static void migrate_lock_init(struct zspage *zspage)
{
	rwlock_init(&zspage->lock);
}

static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
{
	read_lock(&zspage->lock);
}

static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
{
	read_unlock(&zspage->lock);
}

static void migrate_write_lock(struct zspage *zspage)
{
	write_lock(&zspage->lock);
}

static void migrate_write_unlock(struct zspage *zspage)
{
	write_unlock(&zspage->lock);
}

/* Number of isolated subpages for *page migration* in this zspage */
static void inc_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated++;
}

static void dec_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated--;
}

static void putback_zspage_deferred(struct zs_pool *pool,
					struct size_class *class,
					struct zspage *zspage)
{
	enum fullness_group fg;

	fg = putback_zspage(class, zspage);
	if (fg == ZS_EMPTY)
		schedule_work(&pool->free_work);
}

static inline void zs_pool_dec_isolated(struct zs_pool *pool)
{
	VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
	atomic_long_dec(&pool->isolated_pages);
	/*
	 * Checking pool->destroying must happen after atomic_long_dec()
	 * for pool->isolated_pages above. Paired with the smp_mb() in
	 * zs_unregister_migration().
	 */
	smp_mb__after_atomic();
	if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
		wake_up_all(&pool->migration_wait);
}

static void replace_sub_page(struct size_class *class, struct zspage *zspage,
				struct page *newpage, struct page *oldpage)
{
	struct page *page;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
	int idx = 0;

	page = get_first_page(zspage);
	do {
		if (page == oldpage)
			pages[idx] = newpage;
		else
			pages[idx] = page;
		idx++;
	} while ((page = get_next_page(page)) != NULL);

	create_page_chain(class, zspage, pages);
	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
	if (unlikely(PageHugeObject(oldpage)))
		newpage->index = oldpage->index;
	__SetPageMovable(newpage, page_mapping(oldpage));
}
static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct address_space *mapping;

	/*
	 * Page is locked so zspage couldn't be destroyed. For detail, look at
	 * lock_zspage in free_zspage.
	 */
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	zspage = get_zspage(page);

	/*
	 * Without the class lock, fullness could be stale while class_idx is
	 * okay because class_idx is constant unless the page is freed, so we
	 * should get fullness again under the class lock.
	 */
	get_zspage_mapping(zspage, &class_idx, &fullness);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	if (get_zspage_inuse(zspage) == 0) {
		spin_unlock(&class->lock);
		return false;
	}

	/* zspage is isolated for object migration */
	if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		spin_unlock(&class->lock);
		return false;
	}

	/*
	 * If this is the first isolation of the zspage, remove it from its
	 * size_class to prevent further object allocation from the zspage.
	 */
	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		get_zspage_mapping(zspage, &class_idx, &fullness);
		atomic_long_inc(&pool->isolated_pages);
		remove_zspage(class, zspage, fullness);
	}

	inc_zspage_isolation(zspage);
	spin_unlock(&class->lock);

	return true;
}

static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct page *dummy;
	void *s_addr, *d_addr, *addr;
	int offset, pos;
	unsigned long handle, head;
	unsigned long old_obj, new_obj;
	unsigned int obj_idx;
	int ret = -EAGAIN;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the zs lock, which does not work with
	 * MIGRATE_SYNC_NO_COPY workflow.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);

	/* Concurrent compactor cannot migrate any subpage in zspage */
	migrate_write_lock(zspage);
	get_zspage_mapping(zspage, &class_idx, &fullness);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];
	offset = get_first_obj_offset(page);

	spin_lock(&class->lock);
	if (!get_zspage_inuse(zspage)) {
		/*
		 * Set "offset" to the end of the page so that every loop
		 * below skips unnecessary object scanning.
		 */
		offset = PAGE_SIZE;
	}

	pos = offset;
	s_addr = kmap_atomic(page);
	while (pos < PAGE_SIZE) {
		head = obj_to_head(page, s_addr + pos);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!trypin_tag(handle))
				goto unpin_objects;
		}
		pos += class->size;
	}

	/*
	 * At this point no user can access any object in the zspage,
	 * so it is safe to move the page contents.
	 */
	d_addr = kmap_atomic(newpage);
	memcpy(d_addr, s_addr, PAGE_SIZE);
	kunmap_atomic(d_addr);

	for (addr = s_addr + offset; addr < s_addr + pos;
						addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();

			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
			new_obj = (unsigned long)location_to_obj(newpage,
								obj_idx);
			new_obj |= BIT(HANDLE_PIN_BIT);
			record_obj(handle, new_obj);
		}
	}

	replace_sub_page(class, zspage, newpage, page);
	get_page(newpage);

	dec_zspage_isolation(zspage);

	/*
	 * Page migration is done, so let's put the isolated zspage back on
	 * the list if @page is the final isolated subpage in the zspage.
	 */
	if (!is_zspage_isolated(zspage)) {
		/*
		 * We cannot race with zs_destroy_pool() here because we wait
		 * for isolation to hit zero before we start destroying.
		 * Also, we ensure that everyone can see pool->destroying before
		 * we start waiting.
		 */
		putback_zspage_deferred(pool, class, zspage);
		zs_pool_dec_isolated(pool);
	}

	if (page_zone(newpage) != page_zone(page)) {
		dec_zone_page_state(page, NR_ZSPAGES);
		inc_zone_page_state(newpage, NR_ZSPAGES);
	}

	reset_page(page);
	put_page(page);
	page = newpage;

	ret = MIGRATEPAGE_SUCCESS;
unpin_objects:
	for (addr = s_addr + offset; addr < s_addr + pos;
						addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();
			unpin_tag(handle);
		}
	}
	kunmap_atomic(s_addr);
	spin_unlock(&class->lock);
	migrate_write_unlock(zspage);

	return ret;
}

static void zs_page_putback(struct page *page)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fg;
	struct address_space *mapping;
	struct zspage *zspage;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	dec_zspage_isolation(zspage);
	if (!is_zspage_isolated(zspage)) {
		/*
		 * Due to page_lock, we cannot free zspage immediately
		 * so let's defer.
		 */
		putback_zspage_deferred(pool, class, zspage);
		zs_pool_dec_isolated(pool);
	}
	spin_unlock(&class->lock);
}

static const struct address_space_operations zsmalloc_aops = {
	.isolate_page = zs_page_isolate,
	.migratepage = zs_page_migrate,
	.putback_page = zs_page_putback,
};

static int zs_register_migration(struct zs_pool *pool)
{
	pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &zsmalloc_aops;
	return 0;
}

static bool pool_isolated_are_drained(struct zs_pool *pool)
{
	return atomic_long_read(&pool->isolated_pages) == 0;
}

/* Wait until all isolated pages have been put back or migrated */
static void wait_for_isolated_drain(struct zs_pool *pool)
{
	/*
	 * We're in the process of destroying the pool, so there are no
	 * active allocations. zs_page_isolate() fails for completely free
	 * zspages, so we need only wait for the zs_pool's isolated
	 * count to hit zero.
	 */
	wait_event(pool->migration_wait,
		   pool_isolated_are_drained(pool));
}

static void zs_unregister_migration(struct zs_pool *pool)
{
	pool->destroying = true;
	/*
	 * We need a memory barrier here to ensure global visibility of
	 * pool->destroying. Thus pool->isolated pages will either be 0 in which
	 * case we don't care, or it will be > 0 and pool->destroying will
	 * ensure that we wake up once isolation hits 0.
	 */
	smp_mb();
	wait_for_isolated_drain(pool); /* This can block */
	flush_work(&pool->free_work);
	iput(pool->inode);
}

/*
 * Caller should hold the page lock of all pages in the zspage.
 * In here, we cannot use zspage meta data.
 */
static void async_free_zspage(struct work_struct *work)
{
	int i;
	struct size_class *class;
	unsigned int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage, *tmp;
	LIST_HEAD(free_pages);
	struct zs_pool *pool = container_of(work, struct zs_pool,
					free_work);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
		spin_unlock(&class->lock);
	}

	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);

		get_zspage_mapping(zspage, &class_idx, &fullness);
		VM_BUG_ON(fullness != ZS_EMPTY);
		class = pool->size_class[class_idx];
		spin_lock(&class->lock);
		__free_zspage(pool, pool->size_class[class_idx], zspage);
		spin_unlock(&class->lock);
	}
}

static void kick_deferred_free(struct zs_pool *pool)
{
	schedule_work(&pool->free_work);
}

static void init_deferred_free(struct zs_pool *pool)
{
	INIT_WORK(&pool->free_work, async_free_zspage);
}

static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}
#endif

/*
 * Based on the number of unused allocated objects, calculate
 * and return the number of pages that we can free.
 */
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
	unsigned long obj_used = zs_stat_get(class, OBJ_USED);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= class->objs_per_zspage;

	return obj_wasted * class->pages_per_zspage;
}
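
/*
 * Worked example (illustrative values only): for a class with
 * objs_per_zspage == 4 and pages_per_zspage == 3, if 12 objects are
 * allocated but only 5 are in use, obj_wasted is (12 - 5) / 4 == 1
 * zspage, so compaction can free at most 1 * 3 == 3 pages.
 */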

static unsigned long __zs_compact(struct zs_pool *pool,
					struct size_class *class)
{
	struct zs_compact_control cc;
	struct zspage *src_zspage;
	struct zspage *dst_zspage = NULL;
	unsigned long pages_freed = 0;

	spin_lock(&class->lock);
	while ((src_zspage = isolate_zspage(class, true))) {

		if (!zs_can_compact(class))
			break;

		cc.obj_idx = 0;
		cc.s_page = get_first_page(src_zspage);

		while ((dst_zspage = isolate_zspage(class, false))) {
			cc.d_page = get_first_page(dst_zspage);
			/*
			 * If there is no more space in dst_page, resched
			 * and see if anyone had allocated another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(class, dst_zspage);
		}

		/* Stop if we couldn't find a slot */
		if (dst_zspage == NULL)
			break;

		putback_zspage(class, dst_zspage);
		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
			free_zspage(pool, class, src_zspage);
			pages_freed += class->pages_per_zspage;
		}
		spin_unlock(&class->lock);
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_zspage)
		putback_zspage(class, src_zspage);

	spin_unlock(&class->lock);

	return pages_freed;
}
2299*4882a593Smuzhiyun
zs_compact(struct zs_pool * pool)2300*4882a593Smuzhiyun unsigned long zs_compact(struct zs_pool *pool)
2301*4882a593Smuzhiyun {
2302*4882a593Smuzhiyun int i;
2303*4882a593Smuzhiyun struct size_class *class;
2304*4882a593Smuzhiyun unsigned long pages_freed = 0;
2305*4882a593Smuzhiyun
2306*4882a593Smuzhiyun for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2307*4882a593Smuzhiyun class = pool->size_class[i];
2308*4882a593Smuzhiyun if (!class)
2309*4882a593Smuzhiyun continue;
2310*4882a593Smuzhiyun if (class->index != i)
2311*4882a593Smuzhiyun continue;
2312*4882a593Smuzhiyun pages_freed += __zs_compact(pool, class);
2313*4882a593Smuzhiyun }
2314*4882a593Smuzhiyun atomic_long_add(pages_freed, &pool->stats.pages_compacted);
2315*4882a593Smuzhiyun
2316*4882a593Smuzhiyun return pages_freed;
2317*4882a593Smuzhiyun }
2318*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(zs_compact);
2319*4882a593Smuzhiyun
zs_pool_stats(struct zs_pool * pool,struct zs_pool_stats * stats)2320*4882a593Smuzhiyun void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
2321*4882a593Smuzhiyun {
2322*4882a593Smuzhiyun memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
2323*4882a593Smuzhiyun }
2324*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(zs_pool_stats);

static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction. Returning SHRINK_STOP when
	 * nothing was freed tells the shrinker core there is
	 * no point in scanning this pool further right now.
	 */
	pages_freed = zs_compact(pool);

	return pages_freed ? pages_freed : SHRINK_STOP;
}

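/*
 * Report to the shrinker core an estimate of how many pages a
 * compaction pass could free, by summing zs_can_compact() over all
 * non-merged size classes. The walk takes no class locks, so the
 * result is only a best-effort hint.
 */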
static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
	unregister_shrinker(&pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker.scan_objects = zs_shrinker_scan;
	pool->shrinker.count_objects = zs_shrinker_count;
	pool->shrinker.batch = 0;
	pool->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&pool->shrinker);
}

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything else when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

#ifdef CONFIG_COMPACTION
	init_waitqueue_head(&pool->migration_wait);
#endif

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate in reverse, because the size of the size_class that we
	 * want to merge into must be greater than or equal to the current
	 * size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness = 0;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
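		/*
		 * Worked example (hypothetical values, assuming 4 KiB
		 * pages): for size = 1024, get_pages_per_zspage() can
		 * settle on a single page, giving objs_per_zspage =
		 * 1 * 4096 / 1024 = 4 objects with no wasted space.
		 */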

		/*
		 * We iterate from biggest down to smallest classes,
		 * so huge_class_size holds the size of the first huge
		 * class. Any object bigger than or equal to that will
		 * end up in the huge class.
		 */
		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
				!huge_class_size) {
			huge_class_size = size;
			/*
			 * The object uses ZS_HANDLE_SIZE bytes to store the
			 * handle. We need to subtract it, because zs_malloc()
			 * unconditionally adds handle size before it performs
			 * size class search - so an object may be smaller than
			 * the huge class size, yet it can still end up in the
			 * huge class, because it grows by ZS_HANDLE_SIZE extra
			 * bytes right before class lookup.
			 */
			huge_class_size -= (ZS_HANDLE_SIZE - 1);
		}
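		/*
		 * Worked example (hypothetical numbers): if the first
		 * class with pages_per_zspage != 1 and objs_per_zspage
		 * != 1 has size 3264, and ZS_HANDLE_SIZE is 8 (a 64-bit
		 * build), huge_class_size becomes 3264 - 7 = 3257, so a
		 * request for 3257 bytes (3265 once the handle is added)
		 * is already served from the huge class.
		 */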

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have the same
		 * characteristics. So, we make size_class point to the
		 * previous size_class if possible.
		 */
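		/*
		 * Merge example (hypothetical numbers): if two adjacent
		 * sizes, say 3072 and 3056, both yield the same
		 * pages_per_zspage and objs_per_zspage, can_merge()
		 * lets the smaller size reuse the larger size's
		 * size_class instead of allocating its own.
		 */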
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
							fullness++)
			INIT_LIST_HEAD(&class->fullness_list[fullness]);

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	if (zs_register_migration(pool))
		goto err;

	/*
	 * Not critical since the shrinker is only used to trigger internal
	 * defragmentation of the pool, which is a fairly optional thing. If
	 * registration fails we can still use the pool normally and the user
	 * can trigger compaction manually. Thus, ignore the return code.
	 */
	zs_register_shrinker(pool);

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
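
/*
 * Minimal lifecycle sketch (illustrative only, not built as part of
 * this file): create a pool, allocate and map one object, then tear
 * everything down. The names my_pool and my_handle are hypothetical.
 *
 *	struct zs_pool *my_pool = zs_create_pool("example");
 *	unsigned long my_handle;
 *	void *vaddr;
 *
 *	if (!my_pool)
 *		return -ENOMEM;
 *	my_handle = zs_malloc(my_pool, 128, GFP_KERNEL);
 *	if (my_handle) {
 *		vaddr = zs_map_object(my_pool, my_handle, ZS_MM_WO);
 *		memset(vaddr, 0, 128);
 *		zs_unmap_object(my_pool, my_handle);
 *		zs_free(my_pool, my_handle);
 *	}
 *	zs_destroy_pool(my_pool);
 */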

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_unregister_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
			if (!list_empty(&class->fullness_list[fg])) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static int __init zs_init(void)
{
	int ret;

	ret = zsmalloc_mount();
	if (ret)
		goto out;

	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
				zs_cpu_prepare, zs_cpu_dead);
	if (ret)
		goto hp_setup_fail;

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	zs_stat_init();

	return 0;

hp_setup_fail:
	zsmalloc_unmount();
out:
	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zsmalloc_unmount();
	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");