// SPDX-License-Identifier: GPL-2.0-only
/*
 * zbud.c
 *
 * Copyright (C) 2013, Seth Jennings, IBM
 *
 * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
 *
 * zbud is a special purpose allocator for storing compressed pages. Contrary
 * to what its name may suggest, zbud is not a buddy allocator, but rather an
 * allocator that "buddies" two compressed pages together in a single memory
 * page.
 *
 * While this design limits storage density, it has simple and deterministic
 * reclaim properties that make it preferable to a higher density approach when
 * reclaim will be used.
 *
 * zbud works by storing compressed pages, or "zpages", together in pairs in a
 * single memory page called a "zbud page". The first buddy is "left
 * justified" at the beginning of the zbud page, and the last buddy is "right
 * justified" at the end of the zbud page. The benefit is that if either
 * buddy is freed, the freed buddy space, coalesced with whatever slack space
 * existed between the buddies, results in the largest possible free region
 * within the zbud page.
 *
 * zbud also provides an attractive lower bound on density. The ratio of zpages
 * to zbud pages cannot be less than 1. This ensures that zbud can never "do
 * harm" by using more pages to store zpages than the uncompressed zpages would
 * have used on their own.
 *
 * zbud pages are divided into "chunks". The size of the chunks is fixed at
 * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
 * into chunks allows organizing unbuddied zbud pages into a manageable number
 * of unbuddied lists according to the number of free chunks available in the
 * zbud page.
 *
 * The zbud API differs from that of conventional allocators in that the
 * allocation function, zbud_alloc(), returns an opaque handle to the user,
 * not a dereferenceable pointer. The user must map the handle using
 * zbud_map() in order to get a usable pointer by which to access the
 * allocation data, and must unmap the handle with zbud_unmap() when operations
 * on the allocation data are complete.
 */
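
/*
 * A minimal usage sketch (illustrative only; error handling is trimmed and
 * the eviction ops structure "my_ops" is hypothetical):
 *
 *	unsigned long handle;
 *	struct zbud_pool *pool = zbud_create_pool(GFP_KERNEL, &my_ops);
 *
 *	if (zbud_alloc(pool, compressed_len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zbud_map(pool, handle);
 *
 *		memcpy(dst, compressed_data, compressed_len);
 *		zbud_unmap(pool, handle);
 *		... later, when the zpage is no longer needed ...
 *		zbud_free(pool, handle);
 *	}
 *	zbud_destroy_pool(pool);
 */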

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zbud.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Since one
 * chunk of each allocated page is occupied by the zbud header, NCHUNKS
 * evaluates to 63, the maximum number of free chunks in a zbud page; there
 * are likewise 63 freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
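
/*
 * Worked example, assuming 4K pages: with NCHUNKS_ORDER == 6, CHUNK_SHIFT
 * is 12 - 6 == 6, CHUNK_SIZE is 64 bytes, the zbud header occupies the
 * first chunk, and NCHUNKS is (4096 - 64) >> 6 == 63.
 */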

/**
 * struct zbud_pool - stores metadata for each zbud pool
 * @lock:	protects all pool fields and first|last_chunks fields of any
 *		zbud page in the pool
 * @unbuddied:	array of lists tracking zbud pages that only contain one buddy;
 *		the list each zbud page is added to depends on the size of
 *		its free region.
 * @buddied:	list tracking the zbud pages that contain two buddies;
 *		these zbud pages are full
 * @lru:	list tracking the zbud pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of zbud pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular zbud pool.
 */
struct zbud_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head buddied;
	struct list_head lru;
	u64 pages_nr;
	const struct zbud_ops *ops;
#ifdef CONFIG_ZPOOL
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
#endif
};

/*
 * struct zbud_header - zbud page metadata occupying the first chunk of each
 *			zbud page.
 * @buddy:	links the zbud page into the unbuddied/buddied lists in the pool
 * @lru:	links the zbud page into the lru list in the pool
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @under_reclaim:	zbud page is under reclaim; freeing is deferred to the
 *			reclaimer (see zbud_reclaim_page())
 */
struct zbud_header {
	struct list_head buddy;
	struct list_head lru;
	unsigned int first_chunks;
	unsigned int last_chunks;
	bool under_reclaim;
};

/*****************
 * zpool
 ****************/

#ifdef CONFIG_ZPOOL

static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct zbud_ops zbud_zpool_ops = {
	.evict =	zbud_zpool_evict
};

static void *zbud_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct zbud_pool *pool;

	pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void zbud_zpool_destroy(void *pool)
{
	zbud_destroy_pool(pool);
}

static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			     unsigned long *handle)
{
	return zbud_alloc(pool, size, gfp, handle);
}
static void zbud_zpool_free(void *pool, unsigned long handle)
{
	zbud_free(pool, handle);
}

static int zbud_zpool_shrink(void *pool, unsigned int pages,
			     unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = zbud_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *zbud_zpool_map(void *pool, unsigned long handle,
			    enum zpool_mapmode mm)
{
	return zbud_map(pool, handle);
}
static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	zbud_unmap(pool, handle);
}

static u64 zbud_zpool_total_size(void *pool)
{
	return zbud_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver zbud_zpool_driver = {
	.type =		"zbud",
	.owner =	THIS_MODULE,
	.create =	zbud_zpool_create,
	.destroy =	zbud_zpool_destroy,
	.malloc =	zbud_zpool_malloc,
	.free =		zbud_zpool_free,
	.shrink =	zbud_zpool_shrink,
	.map =		zbud_zpool_map,
	.unmap =	zbud_zpool_unmap,
	.total_size =	zbud_zpool_total_size,
};

MODULE_ALIAS("zpool-zbud");
#endif /* CONFIG_ZPOOL */
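
/*
 * Consumers such as zswap normally reach this allocator through the zpool
 * facade above rather than calling the zbud API directly. A sketch,
 * assuming the zpool API of this kernel generation ("my_zpool_ops" is a
 * hypothetical struct zpool_ops with an evict callback):
 *
 *	struct zpool *zp = zpool_create_pool("zbud", "my_pool", GFP_KERNEL,
 *					     &my_zpool_ops);
 */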

/*****************
 * Helpers
*****************/
/* Just to make the code easier to read */
enum buddy {
	FIRST,
	LAST
};

/* Converts an allocation size in bytes to size in zbud chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
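
/*
 * For example, assuming 64-byte chunks: a 100-byte allocation rounds up to
 * (100 + 63) >> 6 == 2 chunks, and a 64-byte allocation to exactly 1 chunk.
 */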

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the zbud header of a newly allocated zbud page */
static struct zbud_header *init_zbud_page(struct page *page)
{
	struct zbud_header *zhdr = page_address(page);
	zhdr->first_chunks = 0;
	zhdr->last_chunks = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_LIST_HEAD(&zhdr->lru);
	zhdr->under_reclaim = false;
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a zbud page
 * Pool lock should be held as this function accesses first|last_chunks
 */
static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	/*
	 * For now, the encoded handle is actually just the pointer to the data
	 * but this might not always be the case. A little information hiding.
	 * Add CHUNK_SIZE to the handle if it is the first allocation to jump
	 * over the zbud header in the first chunk.
	 */
	handle = (unsigned long)zhdr;
	if (bud == FIRST)
		/* skip over zbud header */
		handle += ZHDR_SIZE_ALIGNED;
	else /* bud == LAST */
		handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
	return handle;
}
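
/*
 * For example, assuming 4K pages and a zbud page whose header sits at
 * kernel virtual address P: the FIRST handle is P + 64 (just past the
 * header chunk), and with last_chunks == 3 the LAST handle is
 * P + 4096 - 192, so the last buddy ends flush with the end of the page.
 */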

/* Returns the zbud page where a given handle is stored */
static struct zbud_header *handle_to_zbud_header(unsigned long handle)
{
	return (struct zbud_header *)(handle & PAGE_MASK);
}

/* Returns the number of free chunks in a zbud page */
static int num_free_chunks(struct zbud_header *zhdr)
{
	/*
	 * Rather than branch for different situations, just use the fact that
	 * free buddies have a length of zero to simplify everything.
	 */
	return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
}
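
/*
 * For example, with NCHUNKS == 63: a zbud page holding only a 10-chunk
 * first buddy has 53 free chunks and therefore sits on unbuddied[53].
 */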

/*****************
 * API Functions
*****************/
/**
 * zbud_create_pool() - create a new zbud pool
 * @gfp:	gfp flags when allocating the zbud pool structure
 * @ops:	user-defined operations for the zbud pool
 *
 * Return: pointer to the new zbud pool or NULL if the metadata allocation
 * failed.
 */
struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
{
	struct zbud_pool *pool;
	int i;

	pool = kzalloc(sizeof(struct zbud_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	INIT_LIST_HEAD(&pool->lru);
	pool->pages_nr = 0;
	pool->ops = ops;
	return pool;
}

/**
 * zbud_destroy_pool() - destroys an existing zbud pool
 * @pool:	the zbud pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
void zbud_destroy_pool(struct zbud_pool *pool)
{
	kfree(pool);
}

/**
 * zbud_alloc() - allocates a region of a given size
 * @pool:	zbud pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as zbud pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the allocation is too large to ever
 * fit in a zbud page, or -ENOMEM if the pool was unable to allocate a new
 * page.
 */
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
	       unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);
	spin_lock(&pool->lock);

	/* First, try to find an unbuddied zbud page. */
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/* Couldn't find unbuddied zbud page, create new one */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Add to buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	/* Add/move zbud page to beginning of LRU */
	if (!list_empty(&zhdr->lru))
		list_del(&zhdr->lru);
	list_add(&zhdr->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}

/**
 * zbud_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by zbud_alloc()
 *
 * In the case that the zbud page in which the allocation resides is under
 * reclaim, as indicated by the under_reclaim flag being set in the zbud
 * header, this function only sets the first|last_chunks to 0. The page is
 * actually freed once both buddies are evicted (see zbud_reclaim_page()
 * below).
 */
void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/* If first buddy, handle - ZHDR_SIZE_ALIGNED will be page aligned */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	if (zhdr->under_reclaim) {
		/* zbud page is under reclaim, reclaim will free */
		spin_unlock(&pool->lock);
		return;
	}

	/* Remove from existing buddy list */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free */
		list_del(&zhdr->lru);
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}

/**
 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * zbud reclaim is different from normal system reclaim in that the reclaim is
 * done from the bottom, up. This is because only the bottom layer, zbud, has
 * information on how the allocations are organized within each zbud page. This
 * has the potential to create interesting locking situations between zbud and
 * the user, however.
 *
 * To avoid these, this is how zbud_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls zbud_reclaim_page().
 * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
 * the user-defined eviction handler with the pool and handle as arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. zbud_reclaim_page() will add the zbud page back to the
 * appropriate list and try the next zbud page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called zbud_free() on the handle. zbud_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the under_reclaim flag in the zbud header.
 *
 * If all buddies in the zbud page are successfully evicted, then the
 * zbud page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
	int i, ret, freechunks;
	struct zbud_header *zhdr;
	unsigned long first_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
			retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		zhdr = list_last_entry(&pool->lru, struct zbud_header, lru);
		list_del(&zhdr->lru);
		list_del(&zhdr->buddy);
		/* Protect zbud page against free */
		zhdr->under_reclaim = true;
		/*
		 * We need to encode the handles before unlocking, since we can
		 * race with free that will set (first|last)_chunks to 0
		 */
		first_handle = 0;
		last_handle = 0;
		if (zhdr->first_chunks)
			first_handle = encode_handle(zhdr, FIRST);
		if (zhdr->last_chunks)
			last_handle = encode_handle(zhdr, LAST);
		spin_unlock(&pool->lock);

		/* Issue the eviction callback(s) */
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		zhdr->under_reclaim = false;
		if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
			/*
			 * Both buddies are now free, free the zbud page and
			 * return success.
			 */
			free_zbud_page(zhdr);
			pool->pages_nr--;
			spin_unlock(&pool->lock);
			return 0;
		} else if (zhdr->first_chunks == 0 ||
				zhdr->last_chunks == 0) {
			/* add to unbuddied list */
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
		} else {
			/* add to buddied list */
			list_add(&zhdr->buddy, &pool->buddied);
		}

		/* add to beginning of LRU */
		list_add(&zhdr->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
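
/*
 * A minimal eviction handler sketch following the protocol documented
 * above (illustrative; the writeback step is elided and the function name
 * is hypothetical):
 *
 *	static int my_evict(struct zbud_pool *pool, unsigned long handle)
 *	{
 *		void *data = zbud_map(pool, handle);
 *
 *		... write the zpage at data back to its backing store,
 *		    returning non-zero on failure without freeing ...
 *
 *		zbud_unmap(pool, handle);
 *		zbud_free(pool, handle);
 *		return 0;
 *	}
 */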

/**
 * zbud_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * While trivial for zbud, the mapping functions for other allocators
 * implementing this allocation API could have more complex information encoded
 * in the handle and could create temporary mappings to make the data
 * accessible to the user.
 *
 * Returns: a pointer to the mapped allocation
 */
void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	return (void *)(handle);
}

/**
 * zbud_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
}

/**
 * zbud_get_pool_size() - gets the zbud pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool. The pool lock need not be
 * taken to access pages_nr.
 */
u64 zbud_get_pool_size(struct zbud_pool *pool)
{
	return pool->pages_nr;
}

static int __init init_zbud(void)
{
	/* Make sure the zbud header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
	pr_info("loaded\n");

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zbud_zpool_driver);
#endif

	return 0;
}

static void __exit exit_zbud(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zbud_zpool_driver);
#endif

	pr_info("unloaded\n");
}

module_init(init_zbud);
module_exit(exit_zbud);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");