// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
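
/*
 * Worked example (illustrative, not build-checked): on a system with
 * PAGE_SIZE == 4096 (PAGE_SHIFT == 12) and NCHUNKS_ORDER == 6:
 *
 *   CHUNK_SHIFT  == 6, so CHUNK_SIZE == 64 bytes
 *   TOTAL_CHUNKS == 4096 >> 6 == 64
 *   ZHDR_SIZE_ALIGNED == sizeof(struct z3fold_header) rounded up to 64,
 *	reserving ZHDR_CHUNKS chunks for the header
 *   NCHUNKS == TOTAL_CHUNKS - ZHDR_CHUNKS, i.e. 63 when the header fits
 *	in one chunk (62 with CONFIG_DEBUG_SPINLOCK=y, which grows the lock)
 */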

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)
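
/*
 * Handle layout sketch (illustrative): a non-headless handle is the address
 * of one of the four slot[] words above. Because the slots structure is
 * allocated SLOTS_ALIGN (64-byte) aligned, masking the low 6 bits off a
 * handle recovers the containing z3fold_buddy_slots. The slot word itself
 * stores the z3fold_header address with the buddy index in the low
 * BUDDY_SHIFT bits and, for a LAST buddy, the chunk count shifted left by
 * BUDDY_SHIFT. The low HANDLE_FLAG_MASK bits of the pool back link carry
 * handle flags such as HANDLES_NOFREE (defined below).
 */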

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	the number of handles pointing into this page that
 *			live in another page's slots
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
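
/*
 * Worked example (illustrative, assuming CHUNK_SIZE == 64, i.e.
 * PAGE_SIZE == 4096 and NCHUNKS_ORDER == 6):
 *
 *   size_to_chunks(1)   == 1
 *   size_to_chunks(64)  == 1
 *   size_to_chunks(65)  == 2	(rounds up to the next whole chunk)
 *   size_to_chunks(200) == 4
 */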

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_zalloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
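
/*
 * Worked example (illustrative addresses): the slots cache is created with
 * SLOTS_ALIGN (0x40) alignment, so for a handle that points at, say,
 * slot[2] of a slots structure at 0xffff888012345680, the handle value is
 * 0xffff888012345690, and handle_to_slots() masks off the low 6 bits to
 * get 0xffff888012345680 back.
 */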

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}


static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
							bool lock)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			if (lock)
				locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (lock);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

/* Returns the z3fold header of the page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, false);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, true);
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->foreign_handles = 0;
	zhdr->mapped_count = 0;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}
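
/*
 * Worked example (illustrative): for a page header at 0xffff888012340000
 * with first_num == 1, encoding bud == LAST with last_chunks == 5 gives
 * idx == (3 + 1) & 3 == 0, so the slot word becomes
 * 0xffff888012340000 + 0 | (5 << 2) == 0xffff888012340014, and the handle
 * returned is the address of slot[0]. Masking the slot word with PAGE_MASK
 * recovers the header; the bits below PAGE_SIZE carry the buddy index and,
 * for LAST, the chunk count.
 */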

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
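
/*
 * Continuing the worked example above: for the LAST slot word
 * 0xffff888012340014, (addr & ~PAGE_MASK) == 0x14 and 0x14 >> BUDDY_SHIFT
 * == 5, the stored chunk count. The buddy index bits below BUDDY_SHIFT are
 * discarded by the shift, which is why this only yields a meaningful value
 * for LAST buddies.
 */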

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}
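
/*
 * Worked example (illustrative): with first_num == 1 and bud == LAST, the
 * slot word's low bits hold idx == (LAST + 1) & 3 == 0. Decoding computes
 * (addr - 1) & 3 == 3 == LAST, so the original buddy is recovered even
 * when the subtraction borrows from higher bits; BUDDY_MASK keeps only
 * the two bits that matter.
 */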

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
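
/*
 * Worked example (illustrative, TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1):
 * a page with first_chunks == 0, a middle buddy of 20 chunks starting at
 * start_middle == 10, and last_chunks == 0 has nfree_before == 10 - 1 == 9
 * and nfree_after == 64 - (10 + 20) == 34, so num_free_chunks() returns
 * 34, the larger contiguous region.
 */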

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}
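
/*
 * Decision sketch for get_free_buddy(), checked top to bottom (HEADLESS
 * here means "no fit"):
 *
 *   middle used, first free and big enough  -> FIRST
 *   middle used, last free                  -> LAST
 *   middle used, neither fits               -> HEADLESS
 *   middle free, first free                 -> FIRST
 *   middle free, first used, last free      -> LAST
 *   middle free, first and last used        -> MIDDLE
 */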

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr) {
		if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			add_to_unbuddied(pool, new_zhdr);
			z3fold_page_unlock(new_zhdr);
		}
	}
	return NULL;

}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
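
/*
 * Page layout sketch (illustrative, chunk-granular):
 *
 *   | header | FIRST ..... | gap | MIDDLE ..... | gap | ..... LAST |
 *   0        ZHDR_CHUNKS         start_middle               TOTAL_CHUNKS
 *
 * Compaction slides the middle buddy against its occupied neighbor (or
 * promotes it to FIRST when it is alone in the page), but only when at
 * least BIG_CHUNK_GAP chunks stand to be reclaimed, since memmove() is
 * not free.
 */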

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots)
		zhdr->slots = alloc_slots(pool,
					can_sleep ? GFP_NOIO : GFP_ATOMIC);
	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun /**
1045*4882a593Smuzhiyun  * z3fold_destroy_pool() - destroys an existing z3fold pool
1046*4882a593Smuzhiyun  * @pool:	the z3fold pool to be destroyed
1047*4882a593Smuzhiyun  *
1048*4882a593Smuzhiyun  * The pool should be emptied before this function is called.
1049*4882a593Smuzhiyun  */
z3fold_destroy_pool(struct z3fold_pool * pool)1050*4882a593Smuzhiyun static void z3fold_destroy_pool(struct z3fold_pool *pool)
1051*4882a593Smuzhiyun {
1052*4882a593Smuzhiyun 	kmem_cache_destroy(pool->c_handle);
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	/*
1055*4882a593Smuzhiyun 	 * We need to destroy pool->compact_wq before pool->release_wq,
1056*4882a593Smuzhiyun 	 * as any pending work on pool->compact_wq will call
1057*4882a593Smuzhiyun 	 * queue_work(pool->release_wq, &pool->work).
1058*4882a593Smuzhiyun 	 *
1059*4882a593Smuzhiyun 	 * There are still outstanding pages until both workqueues are drained,
1060*4882a593Smuzhiyun 	 * so we cannot unregister migration until then.
1061*4882a593Smuzhiyun 	 */
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	destroy_workqueue(pool->compact_wq);
1064*4882a593Smuzhiyun 	destroy_workqueue(pool->release_wq);
1065*4882a593Smuzhiyun 	z3fold_unregister_migration(pool);
1066*4882a593Smuzhiyun 	free_percpu(pool->unbuddied);
1067*4882a593Smuzhiyun 	kfree(pool);
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun /**
1071*4882a593Smuzhiyun  * z3fold_alloc() - allocates a region of a given size
1072*4882a593Smuzhiyun  * @pool:	z3fold pool from which to allocate
1073*4882a593Smuzhiyun  * @size:	size in bytes of the desired allocation
1074*4882a593Smuzhiyun  * @gfp:	gfp flags used if the pool needs to grow
1075*4882a593Smuzhiyun  * @handle:	handle of the new allocation
1076*4882a593Smuzhiyun  *
1077*4882a593Smuzhiyun  * This function will attempt to find a free region in the pool large enough to
1078*4882a593Smuzhiyun  * satisfy the allocation request.  A search of the unbuddied lists is
1079*4882a593Smuzhiyun  * performed first. If no suitable free region is found, then a new page is
1080*4882a593Smuzhiyun  * allocated and added to the pool to satisfy the request.
1081*4882a593Smuzhiyun  *
1082*4882a593Smuzhiyun  * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
1083*4882a593Smuzhiyun  * as z3fold pool pages.
1084*4882a593Smuzhiyun  *
1085*4882a593Smuzhiyun  * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
1086*4882a593Smuzhiyun  * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
1087*4882a593Smuzhiyun  * a new page.
1088*4882a593Smuzhiyun  */
z3fold_alloc(struct z3fold_pool * pool,size_t size,gfp_t gfp,unsigned long * handle)1089*4882a593Smuzhiyun static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
1090*4882a593Smuzhiyun 			unsigned long *handle)
1091*4882a593Smuzhiyun {
1092*4882a593Smuzhiyun 	int chunks = size_to_chunks(size);
1093*4882a593Smuzhiyun 	struct z3fold_header *zhdr = NULL;
1094*4882a593Smuzhiyun 	struct page *page = NULL;
1095*4882a593Smuzhiyun 	enum buddy bud;
1096*4882a593Smuzhiyun 	bool can_sleep = gfpflags_allow_blocking(gfp);
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	if (!size)
1099*4882a593Smuzhiyun 		return -EINVAL;
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	if (size > PAGE_SIZE)
1102*4882a593Smuzhiyun 		return -ENOSPC;
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
1105*4882a593Smuzhiyun 		bud = HEADLESS;
1106*4882a593Smuzhiyun 	else {
1107*4882a593Smuzhiyun retry:
1108*4882a593Smuzhiyun 		zhdr = __z3fold_alloc(pool, size, can_sleep);
1109*4882a593Smuzhiyun 		if (zhdr) {
1110*4882a593Smuzhiyun 			bud = get_free_buddy(zhdr, chunks);
1111*4882a593Smuzhiyun 			if (bud == HEADLESS) {
1112*4882a593Smuzhiyun 				if (kref_put(&zhdr->refcount,
1113*4882a593Smuzhiyun 					     release_z3fold_page_locked))
1114*4882a593Smuzhiyun 					atomic64_dec(&pool->pages_nr);
1115*4882a593Smuzhiyun 				else
1116*4882a593Smuzhiyun 					z3fold_page_unlock(zhdr);
1117*4882a593Smuzhiyun 				pr_err("No free chunks in unbuddied\n");
1118*4882a593Smuzhiyun 				WARN_ON(1);
1119*4882a593Smuzhiyun 				goto retry;
1120*4882a593Smuzhiyun 			}
1121*4882a593Smuzhiyun 			page = virt_to_page(zhdr);
1122*4882a593Smuzhiyun 			goto found;
1123*4882a593Smuzhiyun 		}
1124*4882a593Smuzhiyun 		bud = FIRST;
1125*4882a593Smuzhiyun 	}
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	page = NULL;
1128*4882a593Smuzhiyun 	if (can_sleep) {
1129*4882a593Smuzhiyun 		spin_lock(&pool->stale_lock);
1130*4882a593Smuzhiyun 		zhdr = list_first_entry_or_null(&pool->stale,
1131*4882a593Smuzhiyun 						struct z3fold_header, buddy);
1132*4882a593Smuzhiyun 		/*
1133*4882a593Smuzhiyun 		 * Before allocating a page, let's see if we can take one from
1134*4882a593Smuzhiyun 		 * the stale pages list. cancel_work_sync() can sleep, so we
1135*4882a593Smuzhiyun 		 * limit this case to contexts where we are allowed to sleep.
1136*4882a593Smuzhiyun 		 */
1137*4882a593Smuzhiyun 		if (zhdr) {
1138*4882a593Smuzhiyun 			list_del(&zhdr->buddy);
1139*4882a593Smuzhiyun 			spin_unlock(&pool->stale_lock);
1140*4882a593Smuzhiyun 			cancel_work_sync(&zhdr->work);
1141*4882a593Smuzhiyun 			page = virt_to_page(zhdr);
1142*4882a593Smuzhiyun 		} else {
1143*4882a593Smuzhiyun 			spin_unlock(&pool->stale_lock);
1144*4882a593Smuzhiyun 		}
1145*4882a593Smuzhiyun 	}
1146*4882a593Smuzhiyun 	if (!page)
1147*4882a593Smuzhiyun 		page = alloc_page(gfp);
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	if (!page)
1150*4882a593Smuzhiyun 		return -ENOMEM;
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1153*4882a593Smuzhiyun 	if (!zhdr) {
1154*4882a593Smuzhiyun 		__free_page(page);
1155*4882a593Smuzhiyun 		return -ENOMEM;
1156*4882a593Smuzhiyun 	}
1157*4882a593Smuzhiyun 	atomic64_inc(&pool->pages_nr);
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	if (bud == HEADLESS) {
1160*4882a593Smuzhiyun 		set_bit(PAGE_HEADLESS, &page->private);
1161*4882a593Smuzhiyun 		goto headless;
1162*4882a593Smuzhiyun 	}
1163*4882a593Smuzhiyun 	if (can_sleep) {
1164*4882a593Smuzhiyun 		lock_page(page);
1165*4882a593Smuzhiyun 		__SetPageMovable(page, pool->inode->i_mapping);
1166*4882a593Smuzhiyun 		unlock_page(page);
1167*4882a593Smuzhiyun 	} else {
1168*4882a593Smuzhiyun 		if (trylock_page(page)) {
1169*4882a593Smuzhiyun 			__SetPageMovable(page, pool->inode->i_mapping);
1170*4882a593Smuzhiyun 			unlock_page(page);
1171*4882a593Smuzhiyun 		}
1172*4882a593Smuzhiyun 	}
1173*4882a593Smuzhiyun 	z3fold_page_lock(zhdr);
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun found:
1176*4882a593Smuzhiyun 	if (bud == FIRST)
1177*4882a593Smuzhiyun 		zhdr->first_chunks = chunks;
1178*4882a593Smuzhiyun 	else if (bud == LAST)
1179*4882a593Smuzhiyun 		zhdr->last_chunks = chunks;
1180*4882a593Smuzhiyun 	else {
1181*4882a593Smuzhiyun 		zhdr->middle_chunks = chunks;
1182*4882a593Smuzhiyun 		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
1183*4882a593Smuzhiyun 	}
1184*4882a593Smuzhiyun 	add_to_unbuddied(pool, zhdr);
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun headless:
1187*4882a593Smuzhiyun 	spin_lock(&pool->lock);
1188*4882a593Smuzhiyun 	/* Add/move z3fold page to beginning of LRU */
1189*4882a593Smuzhiyun 	if (!list_empty(&page->lru))
1190*4882a593Smuzhiyun 		list_del(&page->lru);
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	list_add(&page->lru, &pool->lru);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	*handle = encode_handle(zhdr, bud);
1195*4882a593Smuzhiyun 	spin_unlock(&pool->lock);
1196*4882a593Smuzhiyun 	if (bud != HEADLESS)
1197*4882a593Smuzhiyun 		z3fold_page_unlock(zhdr);
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	return 0;
1200*4882a593Smuzhiyun }
1201*4882a593Smuzhiyun 
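/*
 * Editor's illustration (not part of the original source): a minimal,
 * hypothetical caller of the allocator above, sketched on the assumption
 * that it lives in this file and can reach the static functions. It only
 * demonstrates the handle-based API and the error returns documented in
 * the kernel-doc; z3fold_map()/z3fold_unmap() and z3fold_free() are
 * defined further below.
 */
static int __maybe_unused z3fold_alloc_example(struct z3fold_pool *pool)
{
	unsigned long handle;
	void *addr;
	int err;

	/* GFP_KERNEL allows blocking, so stale pages may be reused internally */
	err = z3fold_alloc(pool, 1024, GFP_KERNEL, &handle);
	if (err)
		return err;	/* -EINVAL, -ENOSPC or -ENOMEM */

	addr = z3fold_map(pool, handle);	/* pins the buddy and returns its address */
	if (addr) {
		memset(addr, 0, 1024);
		z3fold_unmap(pool, handle);
	}

	z3fold_free(pool, handle);
	return 0;
}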
1202*4882a593Smuzhiyun /**
1203*4882a593Smuzhiyun  * z3fold_free() - frees the allocation associated with the given handle
1204*4882a593Smuzhiyun  * @pool:	pool in which the allocation resided
1205*4882a593Smuzhiyun  * @handle:	handle associated with the allocation returned by z3fold_alloc()
1206*4882a593Smuzhiyun  *
1207*4882a593Smuzhiyun  * In the case that the z3fold page in which the allocation resides is under
1208*4882a593Smuzhiyun  * reclaim, as indicated by the PAGE_CLAIMED bit being set, this function
1209*4882a593Smuzhiyun  * only sets the corresponding chunk counter to 0.  The page is actually
1210*4882a593Smuzhiyun  * freed once all buddies are evicted (see z3fold_reclaim_page() below).
1211*4882a593Smuzhiyun  */
1212*4882a593Smuzhiyun static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
1213*4882a593Smuzhiyun {
1214*4882a593Smuzhiyun 	struct z3fold_header *zhdr;
1215*4882a593Smuzhiyun 	struct page *page;
1216*4882a593Smuzhiyun 	enum buddy bud;
1217*4882a593Smuzhiyun 	bool page_claimed;
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	zhdr = get_z3fold_header(handle);
1220*4882a593Smuzhiyun 	page = virt_to_page(zhdr);
1221*4882a593Smuzhiyun 	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	if (test_bit(PAGE_HEADLESS, &page->private)) {
1224*4882a593Smuzhiyun 		/* if a headless page is under reclaim, just leave.
1225*4882a593Smuzhiyun 		 * NB: we use test_and_set_bit for a reason: if the bit
1226*4882a593Smuzhiyun 		 * has not been set before, we release this page
1227*4882a593Smuzhiyun 		 * immediately so we don't care about its value any more.
1228*4882a593Smuzhiyun 		 */
1229*4882a593Smuzhiyun 		if (!page_claimed) {
1230*4882a593Smuzhiyun 			spin_lock(&pool->lock);
1231*4882a593Smuzhiyun 			list_del(&page->lru);
1232*4882a593Smuzhiyun 			spin_unlock(&pool->lock);
1233*4882a593Smuzhiyun 			put_z3fold_header(zhdr);
1234*4882a593Smuzhiyun 			free_z3fold_page(page, true);
1235*4882a593Smuzhiyun 			atomic64_dec(&pool->pages_nr);
1236*4882a593Smuzhiyun 		}
1237*4882a593Smuzhiyun 		return;
1238*4882a593Smuzhiyun 	}
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	/* Non-headless case */
1241*4882a593Smuzhiyun 	bud = handle_to_buddy(handle);
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	switch (bud) {
1244*4882a593Smuzhiyun 	case FIRST:
1245*4882a593Smuzhiyun 		zhdr->first_chunks = 0;
1246*4882a593Smuzhiyun 		break;
1247*4882a593Smuzhiyun 	case MIDDLE:
1248*4882a593Smuzhiyun 		zhdr->middle_chunks = 0;
1249*4882a593Smuzhiyun 		break;
1250*4882a593Smuzhiyun 	case LAST:
1251*4882a593Smuzhiyun 		zhdr->last_chunks = 0;
1252*4882a593Smuzhiyun 		break;
1253*4882a593Smuzhiyun 	default:
1254*4882a593Smuzhiyun 		pr_err("%s: unknown bud %d\n", __func__, bud);
1255*4882a593Smuzhiyun 		WARN_ON(1);
1256*4882a593Smuzhiyun 		put_z3fold_header(zhdr);
1257*4882a593Smuzhiyun 		return;
1258*4882a593Smuzhiyun 	}
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	if (!page_claimed)
1261*4882a593Smuzhiyun 		free_handle(handle, zhdr);
1262*4882a593Smuzhiyun 	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
1263*4882a593Smuzhiyun 		atomic64_dec(&pool->pages_nr);
1264*4882a593Smuzhiyun 		return;
1265*4882a593Smuzhiyun 	}
1266*4882a593Smuzhiyun 	if (page_claimed) {
1267*4882a593Smuzhiyun 		/* the page was already claimed by someone else, not by us */
1268*4882a593Smuzhiyun 		z3fold_page_unlock(zhdr);
1269*4882a593Smuzhiyun 		return;
1270*4882a593Smuzhiyun 	}
1271*4882a593Smuzhiyun 	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1272*4882a593Smuzhiyun 		put_z3fold_header(zhdr);
1273*4882a593Smuzhiyun 		clear_bit(PAGE_CLAIMED, &page->private);
1274*4882a593Smuzhiyun 		return;
1275*4882a593Smuzhiyun 	}
1276*4882a593Smuzhiyun 	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1277*4882a593Smuzhiyun 		spin_lock(&pool->lock);
1278*4882a593Smuzhiyun 		list_del_init(&zhdr->buddy);
1279*4882a593Smuzhiyun 		spin_unlock(&pool->lock);
1280*4882a593Smuzhiyun 		zhdr->cpu = -1;
1281*4882a593Smuzhiyun 		kref_get(&zhdr->refcount);
1282*4882a593Smuzhiyun 		clear_bit(PAGE_CLAIMED, &page->private);
1283*4882a593Smuzhiyun 		do_compact_page(zhdr, true);
1284*4882a593Smuzhiyun 		return;
1285*4882a593Smuzhiyun 	}
1286*4882a593Smuzhiyun 	kref_get(&zhdr->refcount);
1287*4882a593Smuzhiyun 	clear_bit(PAGE_CLAIMED, &page->private);
1288*4882a593Smuzhiyun 	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1289*4882a593Smuzhiyun 	put_z3fold_header(zhdr);
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun /**
1293*4882a593Smuzhiyun  * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1294*4882a593Smuzhiyun  * @pool:	pool from which a page will attempt to be evicted
1295*4882a593Smuzhiyun  * @retries:	number of pages on the LRU list for which eviction will
1296*4882a593Smuzhiyun  *		be attempted before failing
1297*4882a593Smuzhiyun  *
1298*4882a593Smuzhiyun  * z3fold reclaim is different from normal system reclaim in that it is done
1299*4882a593Smuzhiyun  * from the bottom, up. This is because only the bottom layer, z3fold, has
1300*4882a593Smuzhiyun  * information on how the allocations are organized within each z3fold page.
1301*4882a593Smuzhiyun  * This has the potential to create interesting locking situations between
1302*4882a593Smuzhiyun  * z3fold and the user, however.
1303*4882a593Smuzhiyun  *
1304*4882a593Smuzhiyun  * To avoid these, this is how z3fold_reclaim_page() should be called:
1305*4882a593Smuzhiyun  *
1306*4882a593Smuzhiyun  * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
1307*4882a593Smuzhiyun  * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1308*4882a593Smuzhiyun  * call the user-defined eviction handler with the pool and handle as
1309*4882a593Smuzhiyun  * arguments.
1310*4882a593Smuzhiyun  *
1311*4882a593Smuzhiyun  * If the handle cannot be evicted, the eviction handler should return
1312*4882a593Smuzhiyun  * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
1313*4882a593Smuzhiyun  * appropriate list and try the next z3fold page on the LRU up to
1314*4882a593Smuzhiyun  * a user defined number of retries.
1315*4882a593Smuzhiyun  *
1316*4882a593Smuzhiyun  * If the handle is successfully evicted, the eviction handler should
1317*4882a593Smuzhiyun  * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
1318*4882a593Smuzhiyun  * contains logic to delay freeing the page if the page is under reclaim,
1319*4882a593Smuzhiyun  * as indicated by the PAGE_CLAIMED bit being set on the underlying page.
1320*4882a593Smuzhiyun  *
1321*4882a593Smuzhiyun  * If all buddies in the z3fold page are successfully evicted, then the
1322*4882a593Smuzhiyun  * z3fold page can be freed.
1323*4882a593Smuzhiyun  *
1324*4882a593Smuzhiyun  * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
1325*4882a593Smuzhiyun  * no pages to evict or an eviction handler is not registered, -EAGAIN if
1326*4882a593Smuzhiyun  * the retry limit was hit.
1327*4882a593Smuzhiyun  */
1328*4882a593Smuzhiyun static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
1329*4882a593Smuzhiyun {
1330*4882a593Smuzhiyun 	int i, ret = -1;
1331*4882a593Smuzhiyun 	struct z3fold_header *zhdr = NULL;
1332*4882a593Smuzhiyun 	struct page *page = NULL;
1333*4882a593Smuzhiyun 	struct list_head *pos;
1334*4882a593Smuzhiyun 	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
1335*4882a593Smuzhiyun 	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	rwlock_init(&slots.lock);
1338*4882a593Smuzhiyun 	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	spin_lock(&pool->lock);
1341*4882a593Smuzhiyun 	if (!pool->ops || !pool->ops->evict || retries == 0) {
1342*4882a593Smuzhiyun 		spin_unlock(&pool->lock);
1343*4882a593Smuzhiyun 		return -EINVAL;
1344*4882a593Smuzhiyun 	}
1345*4882a593Smuzhiyun 	for (i = 0; i < retries; i++) {
1346*4882a593Smuzhiyun 		if (list_empty(&pool->lru)) {
1347*4882a593Smuzhiyun 			spin_unlock(&pool->lock);
1348*4882a593Smuzhiyun 			return -EINVAL;
1349*4882a593Smuzhiyun 		}
1350*4882a593Smuzhiyun 		list_for_each_prev(pos, &pool->lru) {
1351*4882a593Smuzhiyun 			page = list_entry(pos, struct page, lru);
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 			zhdr = page_address(page);
1354*4882a593Smuzhiyun 			if (test_bit(PAGE_HEADLESS, &page->private)) {
1355*4882a593Smuzhiyun 				/*
1356*4882a593Smuzhiyun 				 * For non-headless pages, we wait to do this
1357*4882a593Smuzhiyun 				 * until we have the page lock to avoid racing
1358*4882a593Smuzhiyun 				 * with __z3fold_alloc(). Headless pages don't
1359*4882a593Smuzhiyun 				 * have a lock (and __z3fold_alloc() will never
1360*4882a593Smuzhiyun 				 * see them), but we still need to test and set
1361*4882a593Smuzhiyun 				 * PAGE_CLAIMED to avoid racing with
1362*4882a593Smuzhiyun 				 * z3fold_free(), so just do it now before
1363*4882a593Smuzhiyun 				 * leaving the loop.
1364*4882a593Smuzhiyun 				 */
1365*4882a593Smuzhiyun 				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1366*4882a593Smuzhiyun 					continue;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 				break;
1369*4882a593Smuzhiyun 			}
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
1372*4882a593Smuzhiyun 				zhdr = NULL;
1373*4882a593Smuzhiyun 				break;
1374*4882a593Smuzhiyun 			}
1375*4882a593Smuzhiyun 			if (!z3fold_page_trylock(zhdr)) {
1376*4882a593Smuzhiyun 				if (kref_put(&zhdr->refcount,
1377*4882a593Smuzhiyun 						release_z3fold_page))
1378*4882a593Smuzhiyun 					atomic64_dec(&pool->pages_nr);
1379*4882a593Smuzhiyun 				zhdr = NULL;
1380*4882a593Smuzhiyun 				continue; /* can't evict at this point */
1381*4882a593Smuzhiyun 			}
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 			/* test_and_set_bit is of course atomic, but we still
1384*4882a593Smuzhiyun 			 * need to do it under page lock, otherwise checking
1385*4882a593Smuzhiyun 			 * that bit in __z3fold_alloc wouldn't make sense
1386*4882a593Smuzhiyun 			 */
1387*4882a593Smuzhiyun 			if (zhdr->foreign_handles ||
1388*4882a593Smuzhiyun 			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
1389*4882a593Smuzhiyun 				if (kref_put(&zhdr->refcount,
1390*4882a593Smuzhiyun 						release_z3fold_page_locked))
1391*4882a593Smuzhiyun 					atomic64_dec(&pool->pages_nr);
1392*4882a593Smuzhiyun 				else
1393*4882a593Smuzhiyun 					z3fold_page_unlock(zhdr);
1394*4882a593Smuzhiyun 				zhdr = NULL;
1395*4882a593Smuzhiyun 				continue; /* can't evict such page */
1396*4882a593Smuzhiyun 			}
1397*4882a593Smuzhiyun 			list_del_init(&zhdr->buddy);
1398*4882a593Smuzhiyun 			zhdr->cpu = -1;
1399*4882a593Smuzhiyun 			break;
1400*4882a593Smuzhiyun 		}
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 		if (!zhdr)
1403*4882a593Smuzhiyun 			break;
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 		list_del_init(&page->lru);
1406*4882a593Smuzhiyun 		spin_unlock(&pool->lock);
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 		if (!test_bit(PAGE_HEADLESS, &page->private)) {
1409*4882a593Smuzhiyun 			/*
1410*4882a593Smuzhiyun 			 * We need to encode the handles before unlocking, and
1411*4882a593Smuzhiyun 			 * use our local slots structure because z3fold_free
1412*4882a593Smuzhiyun 			 * can zero out zhdr->slots and we can't do much
1413*4882a593Smuzhiyun 			 * about that
1414*4882a593Smuzhiyun 			 */
1415*4882a593Smuzhiyun 			first_handle = 0;
1416*4882a593Smuzhiyun 			last_handle = 0;
1417*4882a593Smuzhiyun 			middle_handle = 0;
1418*4882a593Smuzhiyun 			memset(slots.slot, 0, sizeof(slots.slot));
1419*4882a593Smuzhiyun 			if (zhdr->first_chunks)
1420*4882a593Smuzhiyun 				first_handle = __encode_handle(zhdr, &slots,
1421*4882a593Smuzhiyun 								FIRST);
1422*4882a593Smuzhiyun 			if (zhdr->middle_chunks)
1423*4882a593Smuzhiyun 				middle_handle = __encode_handle(zhdr, &slots,
1424*4882a593Smuzhiyun 								MIDDLE);
1425*4882a593Smuzhiyun 			if (zhdr->last_chunks)
1426*4882a593Smuzhiyun 				last_handle = __encode_handle(zhdr, &slots,
1427*4882a593Smuzhiyun 								LAST);
1428*4882a593Smuzhiyun 			/*
1429*4882a593Smuzhiyun 			 * it's safe to unlock here because we hold a
1430*4882a593Smuzhiyun 			 * reference to this page
1431*4882a593Smuzhiyun 			 */
1432*4882a593Smuzhiyun 			z3fold_page_unlock(zhdr);
1433*4882a593Smuzhiyun 		} else {
1434*4882a593Smuzhiyun 			first_handle = encode_handle(zhdr, HEADLESS);
1435*4882a593Smuzhiyun 			last_handle = middle_handle = 0;
1436*4882a593Smuzhiyun 		}
1437*4882a593Smuzhiyun 		/* Issue the eviction callback(s) */
1438*4882a593Smuzhiyun 		if (middle_handle) {
1439*4882a593Smuzhiyun 			ret = pool->ops->evict(pool, middle_handle);
1440*4882a593Smuzhiyun 			if (ret)
1441*4882a593Smuzhiyun 				goto next;
1442*4882a593Smuzhiyun 		}
1443*4882a593Smuzhiyun 		if (first_handle) {
1444*4882a593Smuzhiyun 			ret = pool->ops->evict(pool, first_handle);
1445*4882a593Smuzhiyun 			if (ret)
1446*4882a593Smuzhiyun 				goto next;
1447*4882a593Smuzhiyun 		}
1448*4882a593Smuzhiyun 		if (last_handle) {
1449*4882a593Smuzhiyun 			ret = pool->ops->evict(pool, last_handle);
1450*4882a593Smuzhiyun 			if (ret)
1451*4882a593Smuzhiyun 				goto next;
1452*4882a593Smuzhiyun 		}
1453*4882a593Smuzhiyun next:
1454*4882a593Smuzhiyun 		if (test_bit(PAGE_HEADLESS, &page->private)) {
1455*4882a593Smuzhiyun 			if (ret == 0) {
1456*4882a593Smuzhiyun 				free_z3fold_page(page, true);
1457*4882a593Smuzhiyun 				atomic64_dec(&pool->pages_nr);
1458*4882a593Smuzhiyun 				return 0;
1459*4882a593Smuzhiyun 			}
1460*4882a593Smuzhiyun 			spin_lock(&pool->lock);
1461*4882a593Smuzhiyun 			list_add(&page->lru, &pool->lru);
1462*4882a593Smuzhiyun 			spin_unlock(&pool->lock);
1463*4882a593Smuzhiyun 			clear_bit(PAGE_CLAIMED, &page->private);
1464*4882a593Smuzhiyun 		} else {
1465*4882a593Smuzhiyun 			struct z3fold_buddy_slots *slots = zhdr->slots;
1466*4882a593Smuzhiyun 			z3fold_page_lock(zhdr);
1467*4882a593Smuzhiyun 			if (kref_put(&zhdr->refcount,
1468*4882a593Smuzhiyun 					release_z3fold_page_locked)) {
1469*4882a593Smuzhiyun 				kmem_cache_free(pool->c_handle, slots);
1470*4882a593Smuzhiyun 				atomic64_dec(&pool->pages_nr);
1471*4882a593Smuzhiyun 				return 0;
1472*4882a593Smuzhiyun 			}
1473*4882a593Smuzhiyun 			/*
1474*4882a593Smuzhiyun 			 * if we are here, the page is still not completely
1475*4882a593Smuzhiyun 			 * free. Take the global pool lock so that we can add
1476*4882a593Smuzhiyun 			 * it back to the LRU list.
1477*4882a593Smuzhiyun 			 */
1478*4882a593Smuzhiyun 			spin_lock(&pool->lock);
1479*4882a593Smuzhiyun 			list_add(&page->lru, &pool->lru);
1480*4882a593Smuzhiyun 			spin_unlock(&pool->lock);
1481*4882a593Smuzhiyun 			z3fold_page_unlock(zhdr);
1482*4882a593Smuzhiyun 			clear_bit(PAGE_CLAIMED, &page->private);
1483*4882a593Smuzhiyun 		}
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 		/* We started off locked, so we need to take the pool lock again */
1486*4882a593Smuzhiyun 		spin_lock(&pool->lock);
1487*4882a593Smuzhiyun 	}
1488*4882a593Smuzhiyun 	spin_unlock(&pool->lock);
1489*4882a593Smuzhiyun 	return -EAGAIN;
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun 
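/*
 * Editor's illustration (hypothetical, not in the original source): the
 * shape of the user-defined eviction handler described in the kernel-doc
 * above. A real user such as zswap would write the object back to its
 * backing store between map and free; that step is elided here.
 */
static int __maybe_unused example_evict(struct z3fold_pool *pool,
					unsigned long handle)
{
	void *obj = z3fold_map(pool, handle);

	if (!obj)
		return -EINVAL;	/* non-zero: z3fold_reclaim_page() retries */

	/* ... write the object back to its backing store here ... */

	z3fold_unmap(pool, handle);
	/* on success, the handler must free the handle itself */
	z3fold_free(pool, handle);
	return 0;
}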
1492*4882a593Smuzhiyun /**
1493*4882a593Smuzhiyun  * z3fold_map() - maps the allocation associated with the given handle
1494*4882a593Smuzhiyun  * @pool:	pool in which the allocation resides
1495*4882a593Smuzhiyun  * @handle:	handle associated with the allocation to be mapped
1496*4882a593Smuzhiyun  *
1497*4882a593Smuzhiyun  * Extracts the buddy number from handle and constructs the pointer to the
1498*4882a593Smuzhiyun  * correct starting chunk within the page.
1499*4882a593Smuzhiyun  *
1500*4882a593Smuzhiyun  * Returns: a pointer to the mapped allocation
1501*4882a593Smuzhiyun  */
1502*4882a593Smuzhiyun static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun 	struct z3fold_header *zhdr;
1505*4882a593Smuzhiyun 	struct page *page;
1506*4882a593Smuzhiyun 	void *addr;
1507*4882a593Smuzhiyun 	enum buddy buddy;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	zhdr = get_z3fold_header(handle);
1510*4882a593Smuzhiyun 	addr = zhdr;
1511*4882a593Smuzhiyun 	page = virt_to_page(zhdr);
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	if (test_bit(PAGE_HEADLESS, &page->private))
1514*4882a593Smuzhiyun 		goto out;
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	buddy = handle_to_buddy(handle);
1517*4882a593Smuzhiyun 	switch (buddy) {
1518*4882a593Smuzhiyun 	case FIRST:
1519*4882a593Smuzhiyun 		addr += ZHDR_SIZE_ALIGNED;
1520*4882a593Smuzhiyun 		break;
1521*4882a593Smuzhiyun 	case MIDDLE:
1522*4882a593Smuzhiyun 		addr += zhdr->start_middle << CHUNK_SHIFT;
1523*4882a593Smuzhiyun 		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1524*4882a593Smuzhiyun 		break;
1525*4882a593Smuzhiyun 	case LAST:
1526*4882a593Smuzhiyun 		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1527*4882a593Smuzhiyun 		break;
1528*4882a593Smuzhiyun 	default:
1529*4882a593Smuzhiyun 		pr_err("unknown buddy id %d\n", buddy);
1530*4882a593Smuzhiyun 		WARN_ON(1);
1531*4882a593Smuzhiyun 		addr = NULL;
1532*4882a593Smuzhiyun 		break;
1533*4882a593Smuzhiyun 	}
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	if (addr)
1536*4882a593Smuzhiyun 		zhdr->mapped_count++;
1537*4882a593Smuzhiyun out:
1538*4882a593Smuzhiyun 	put_z3fold_header(zhdr);
1539*4882a593Smuzhiyun 	return addr;
1540*4882a593Smuzhiyun }
1541*4882a593Smuzhiyun 
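/*
 * Editor's note (illustrative arithmetic, assuming PAGE_SIZE == 4096 with
 * the NCHUNKS_ORDER of 6 defined at the top of this file): CHUNK_SHIFT is
 * then 6 and CHUNK_SIZE is 64 bytes. A FIRST buddy maps right after the
 * aligned header, a MIDDLE buddy at start_middle << 6, and a LAST buddy
 * of, say, 5 chunks at 4096 - (5 << 6) = 3776, i.e. flush with the end of
 * the page, exactly as computed in the switch above.
 */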
1542*4882a593Smuzhiyun /**
1543*4882a593Smuzhiyun  * z3fold_unmap() - unmaps the allocation associated with the given handle
1544*4882a593Smuzhiyun  * @pool:	pool in which the allocation resides
1545*4882a593Smuzhiyun  * @handle:	handle associated with the allocation to be unmapped
1546*4882a593Smuzhiyun  */
1547*4882a593Smuzhiyun static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1548*4882a593Smuzhiyun {
1549*4882a593Smuzhiyun 	struct z3fold_header *zhdr;
1550*4882a593Smuzhiyun 	struct page *page;
1551*4882a593Smuzhiyun 	enum buddy buddy;
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	zhdr = get_z3fold_header(handle);
1554*4882a593Smuzhiyun 	page = virt_to_page(zhdr);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	if (test_bit(PAGE_HEADLESS, &page->private))
1557*4882a593Smuzhiyun 		return;
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	buddy = handle_to_buddy(handle);
1560*4882a593Smuzhiyun 	if (buddy == MIDDLE)
1561*4882a593Smuzhiyun 		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1562*4882a593Smuzhiyun 	zhdr->mapped_count--;
1563*4882a593Smuzhiyun 	put_z3fold_header(zhdr);
1564*4882a593Smuzhiyun }
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun /**
1567*4882a593Smuzhiyun  * z3fold_get_pool_size() - gets the z3fold pool size in pages
1568*4882a593Smuzhiyun  * @pool:	pool whose size is being queried
1569*4882a593Smuzhiyun  *
1570*4882a593Smuzhiyun  * Returns: size in pages of the given pool.
1571*4882a593Smuzhiyun  */
1572*4882a593Smuzhiyun static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun 	return atomic64_read(&pool->pages_nr);
1575*4882a593Smuzhiyun }
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1578*4882a593Smuzhiyun {
1579*4882a593Smuzhiyun 	struct z3fold_header *zhdr;
1580*4882a593Smuzhiyun 	struct z3fold_pool *pool;
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(!PageMovable(page), page);
1583*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(PageIsolated(page), page);
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	if (test_bit(PAGE_HEADLESS, &page->private))
1586*4882a593Smuzhiyun 		return false;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	zhdr = page_address(page);
1589*4882a593Smuzhiyun 	z3fold_page_lock(zhdr);
1590*4882a593Smuzhiyun 	if (test_bit(NEEDS_COMPACTING, &page->private) ||
1591*4882a593Smuzhiyun 	    test_bit(PAGE_STALE, &page->private))
1592*4882a593Smuzhiyun 		goto out;
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1595*4882a593Smuzhiyun 		goto out;
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1598*4882a593Smuzhiyun 		goto out;
1599*4882a593Smuzhiyun 	pool = zhdr_to_pool(zhdr);
1600*4882a593Smuzhiyun 	spin_lock(&pool->lock);
1601*4882a593Smuzhiyun 	if (!list_empty(&zhdr->buddy))
1602*4882a593Smuzhiyun 		list_del_init(&zhdr->buddy);
1603*4882a593Smuzhiyun 	if (!list_empty(&page->lru))
1604*4882a593Smuzhiyun 		list_del_init(&page->lru);
1605*4882a593Smuzhiyun 	spin_unlock(&pool->lock);
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	kref_get(&zhdr->refcount);
1608*4882a593Smuzhiyun 	z3fold_page_unlock(zhdr);
1609*4882a593Smuzhiyun 	return true;
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun out:
1612*4882a593Smuzhiyun 	z3fold_page_unlock(zhdr);
1613*4882a593Smuzhiyun 	return false;
1614*4882a593Smuzhiyun }
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
1617*4882a593Smuzhiyun 			       struct page *page, enum migrate_mode mode)
1618*4882a593Smuzhiyun {
1619*4882a593Smuzhiyun 	struct z3fold_header *zhdr, *new_zhdr;
1620*4882a593Smuzhiyun 	struct z3fold_pool *pool;
1621*4882a593Smuzhiyun 	struct address_space *new_mapping;
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(!PageMovable(page), page);
1624*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
1625*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
1626*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	zhdr = page_address(page);
1629*4882a593Smuzhiyun 	pool = zhdr_to_pool(zhdr);
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	if (!z3fold_page_trylock(zhdr))
1632*4882a593Smuzhiyun 		return -EAGAIN;
1633*4882a593Smuzhiyun 	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1634*4882a593Smuzhiyun 		z3fold_page_unlock(zhdr);
1635*4882a593Smuzhiyun 		clear_bit(PAGE_CLAIMED, &page->private);
1636*4882a593Smuzhiyun 		return -EBUSY;
1637*4882a593Smuzhiyun 	}
1638*4882a593Smuzhiyun 	if (work_pending(&zhdr->work)) {
1639*4882a593Smuzhiyun 		z3fold_page_unlock(zhdr);
1640*4882a593Smuzhiyun 		return -EAGAIN;
1641*4882a593Smuzhiyun 	}
1642*4882a593Smuzhiyun 	new_zhdr = page_address(newpage);
1643*4882a593Smuzhiyun 	memcpy(new_zhdr, zhdr, PAGE_SIZE);
1644*4882a593Smuzhiyun 	newpage->private = page->private;
1645*4882a593Smuzhiyun 	page->private = 0;
1646*4882a593Smuzhiyun 	z3fold_page_unlock(zhdr);
1647*4882a593Smuzhiyun 	spin_lock_init(&new_zhdr->page_lock);
1648*4882a593Smuzhiyun 	INIT_WORK(&new_zhdr->work, compact_page_work);
1649*4882a593Smuzhiyun 	/*
1650*4882a593Smuzhiyun 	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1651*4882a593Smuzhiyun 	 * so we only have to reinitialize it.
1652*4882a593Smuzhiyun 	 */
1653*4882a593Smuzhiyun 	INIT_LIST_HEAD(&new_zhdr->buddy);
1654*4882a593Smuzhiyun 	new_mapping = page_mapping(page);
1655*4882a593Smuzhiyun 	__ClearPageMovable(page);
1656*4882a593Smuzhiyun 	ClearPagePrivate(page);
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	get_page(newpage);
1659*4882a593Smuzhiyun 	z3fold_page_lock(new_zhdr);
1660*4882a593Smuzhiyun 	if (new_zhdr->first_chunks)
1661*4882a593Smuzhiyun 		encode_handle(new_zhdr, FIRST);
1662*4882a593Smuzhiyun 	if (new_zhdr->last_chunks)
1663*4882a593Smuzhiyun 		encode_handle(new_zhdr, LAST);
1664*4882a593Smuzhiyun 	if (new_zhdr->middle_chunks)
1665*4882a593Smuzhiyun 		encode_handle(new_zhdr, MIDDLE);
1666*4882a593Smuzhiyun 	set_bit(NEEDS_COMPACTING, &newpage->private);
1667*4882a593Smuzhiyun 	new_zhdr->cpu = smp_processor_id();
1668*4882a593Smuzhiyun 	spin_lock(&pool->lock);
1669*4882a593Smuzhiyun 	list_add(&newpage->lru, &pool->lru);
1670*4882a593Smuzhiyun 	spin_unlock(&pool->lock);
1671*4882a593Smuzhiyun 	__SetPageMovable(newpage, new_mapping);
1672*4882a593Smuzhiyun 	z3fold_page_unlock(new_zhdr);
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	page_mapcount_reset(page);
1677*4882a593Smuzhiyun 	clear_bit(PAGE_CLAIMED, &page->private);
1678*4882a593Smuzhiyun 	put_page(page);
1679*4882a593Smuzhiyun 	return 0;
1680*4882a593Smuzhiyun }
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun static void z3fold_page_putback(struct page *page)
1683*4882a593Smuzhiyun {
1684*4882a593Smuzhiyun 	struct z3fold_header *zhdr;
1685*4882a593Smuzhiyun 	struct z3fold_pool *pool;
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	zhdr = page_address(page);
1688*4882a593Smuzhiyun 	pool = zhdr_to_pool(zhdr);
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 	z3fold_page_lock(zhdr);
1691*4882a593Smuzhiyun 	if (!list_empty(&zhdr->buddy))
1692*4882a593Smuzhiyun 		list_del_init(&zhdr->buddy);
1693*4882a593Smuzhiyun 	INIT_LIST_HEAD(&page->lru);
1694*4882a593Smuzhiyun 	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1695*4882a593Smuzhiyun 		atomic64_dec(&pool->pages_nr);
1696*4882a593Smuzhiyun 		return;
1697*4882a593Smuzhiyun 	}
1698*4882a593Smuzhiyun 	spin_lock(&pool->lock);
1699*4882a593Smuzhiyun 	list_add(&page->lru, &pool->lru);
1700*4882a593Smuzhiyun 	spin_unlock(&pool->lock);
1701*4882a593Smuzhiyun 	clear_bit(PAGE_CLAIMED, &page->private);
1702*4882a593Smuzhiyun 	z3fold_page_unlock(zhdr);
1703*4882a593Smuzhiyun }
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun static const struct address_space_operations z3fold_aops = {
1706*4882a593Smuzhiyun 	.isolate_page = z3fold_page_isolate,
1707*4882a593Smuzhiyun 	.migratepage = z3fold_page_migrate,
1708*4882a593Smuzhiyun 	.putback_page = z3fold_page_putback,
1709*4882a593Smuzhiyun };
1710*4882a593Smuzhiyun 
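/*
 * Editor's note: these three callbacks are invoked by the page migration
 * core, which finds them through the mapping installed with
 * __SetPageMovable() in z3fold_alloc(). isolate_page() takes the page off
 * the pool's lists, migratepage() copies its contents into a new page,
 * and putback_page() restores a page whose migration was aborted.
 */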
1711*4882a593Smuzhiyun /*****************
1712*4882a593Smuzhiyun  * zpool
1713*4882a593Smuzhiyun  ****************/
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
1716*4882a593Smuzhiyun {
1717*4882a593Smuzhiyun 	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
1718*4882a593Smuzhiyun 		return pool->zpool_ops->evict(pool->zpool, handle);
1719*4882a593Smuzhiyun 	else
1720*4882a593Smuzhiyun 		return -ENOENT;
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun static const struct z3fold_ops z3fold_zpool_ops = {
1724*4882a593Smuzhiyun 	.evict =	z3fold_zpool_evict
1725*4882a593Smuzhiyun };
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun static void *z3fold_zpool_create(const char *name, gfp_t gfp,
1728*4882a593Smuzhiyun 			       const struct zpool_ops *zpool_ops,
1729*4882a593Smuzhiyun 			       struct zpool *zpool)
1730*4882a593Smuzhiyun {
1731*4882a593Smuzhiyun 	struct z3fold_pool *pool;
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	pool = z3fold_create_pool(name, gfp,
1734*4882a593Smuzhiyun 				zpool_ops ? &z3fold_zpool_ops : NULL);
1735*4882a593Smuzhiyun 	if (pool) {
1736*4882a593Smuzhiyun 		pool->zpool = zpool;
1737*4882a593Smuzhiyun 		pool->zpool_ops = zpool_ops;
1738*4882a593Smuzhiyun 	}
1739*4882a593Smuzhiyun 	return pool;
1740*4882a593Smuzhiyun }
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun static void z3fold_zpool_destroy(void *pool)
1743*4882a593Smuzhiyun {
1744*4882a593Smuzhiyun 	z3fold_destroy_pool(pool);
1745*4882a593Smuzhiyun }
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1748*4882a593Smuzhiyun 			unsigned long *handle)
1749*4882a593Smuzhiyun {
1750*4882a593Smuzhiyun 	return z3fold_alloc(pool, size, gfp, handle);
1751*4882a593Smuzhiyun }
1752*4882a593Smuzhiyun static void z3fold_zpool_free(void *pool, unsigned long handle)
1753*4882a593Smuzhiyun {
1754*4882a593Smuzhiyun 	z3fold_free(pool, handle);
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1758*4882a593Smuzhiyun 			unsigned int *reclaimed)
1759*4882a593Smuzhiyun {
1760*4882a593Smuzhiyun 	unsigned int total = 0;
1761*4882a593Smuzhiyun 	int ret = -EINVAL;
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	while (total < pages) {
1764*4882a593Smuzhiyun 		ret = z3fold_reclaim_page(pool, 8);
1765*4882a593Smuzhiyun 		if (ret < 0)
1766*4882a593Smuzhiyun 			break;
1767*4882a593Smuzhiyun 		total++;
1768*4882a593Smuzhiyun 	}
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	if (reclaimed)
1771*4882a593Smuzhiyun 		*reclaimed = total;
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 	return ret;
1774*4882a593Smuzhiyun }
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun static void *z3fold_zpool_map(void *pool, unsigned long handle,
1777*4882a593Smuzhiyun 			enum zpool_mapmode mm)
1778*4882a593Smuzhiyun {
1779*4882a593Smuzhiyun 	return z3fold_map(pool, handle);
1780*4882a593Smuzhiyun }
1781*4882a593Smuzhiyun static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1782*4882a593Smuzhiyun {
1783*4882a593Smuzhiyun 	z3fold_unmap(pool, handle);
1784*4882a593Smuzhiyun }
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun static u64 z3fold_zpool_total_size(void *pool)
1787*4882a593Smuzhiyun {
1788*4882a593Smuzhiyun 	return z3fold_get_pool_size(pool) * PAGE_SIZE;
1789*4882a593Smuzhiyun }
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun static struct zpool_driver z3fold_zpool_driver = {
1792*4882a593Smuzhiyun 	.type =		"z3fold",
1793*4882a593Smuzhiyun 	.owner =	THIS_MODULE,
1794*4882a593Smuzhiyun 	.create =	z3fold_zpool_create,
1795*4882a593Smuzhiyun 	.destroy =	z3fold_zpool_destroy,
1796*4882a593Smuzhiyun 	.malloc =	z3fold_zpool_malloc,
1797*4882a593Smuzhiyun 	.free =		z3fold_zpool_free,
1798*4882a593Smuzhiyun 	.shrink =	z3fold_zpool_shrink,
1799*4882a593Smuzhiyun 	.map =		z3fold_zpool_map,
1800*4882a593Smuzhiyun 	.unmap =	z3fold_zpool_unmap,
1801*4882a593Smuzhiyun 	.total_size =	z3fold_zpool_total_size,
1802*4882a593Smuzhiyun };
1803*4882a593Smuzhiyun 
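/*
 * Editor's illustration (hypothetical, not in the original source): how a
 * zpool client would reach this driver through the generic zpool API,
 * selecting it by the "z3fold" type string registered above. The zpool_*
 * calls are the standard <linux/zpool.h> interface; the pool name and the
 * sizes are made up for the example.
 */
static int __maybe_unused z3fold_via_zpool_example(const struct zpool_ops *ops)
{
	unsigned long handle;
	struct zpool *zp;

	zp = zpool_create_pool("z3fold", "example", GFP_KERNEL, ops);
	if (!zp)
		return -ENOMEM;

	if (!zpool_malloc(zp, 1024, GFP_KERNEL, &handle)) {
		void *obj = zpool_map_handle(zp, handle, ZPOOL_MM_RW);

		memset(obj, 0, 1024);
		zpool_unmap_handle(zp, handle);
		zpool_free(zp, handle);
	}

	/* try to reclaim one page through z3fold_zpool_shrink() above */
	zpool_shrink(zp, 1, NULL);

	zpool_destroy_pool(zp);
	return 0;
}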
1804*4882a593Smuzhiyun MODULE_ALIAS("zpool-z3fold");
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun static int __init init_z3fold(void)
1807*4882a593Smuzhiyun {
1808*4882a593Smuzhiyun 	int ret;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	/* Make sure the z3fold header is not larger than the page size */
1811*4882a593Smuzhiyun 	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
1812*4882a593Smuzhiyun 	ret = z3fold_mount();
1813*4882a593Smuzhiyun 	if (ret)
1814*4882a593Smuzhiyun 		return ret;
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	zpool_register_driver(&z3fold_zpool_driver);
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	return 0;
1819*4882a593Smuzhiyun }
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun static void __exit exit_z3fold(void)
1822*4882a593Smuzhiyun {
1823*4882a593Smuzhiyun 	z3fold_unmount();
1824*4882a593Smuzhiyun 	zpool_unregister_driver(&z3fold_zpool_driver);
1825*4882a593Smuzhiyun }
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun module_init(init_z3fold);
1828*4882a593Smuzhiyun module_exit(exit_z3fold);
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1831*4882a593Smuzhiyun MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1832*4882a593Smuzhiyun MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");