xref: /OK3568_Linux_fs/kernel/mm/zpool.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * zpool memory storage api
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2014 Dan Streetman
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * This is a common frontend for memory storage pool implementations.
8*4882a593Smuzhiyun  * Typically, this is used to store compressed memory.
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/list.h>
14*4882a593Smuzhiyun #include <linux/types.h>
15*4882a593Smuzhiyun #include <linux/mm.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/spinlock.h>
18*4882a593Smuzhiyun #include <linux/module.h>
19*4882a593Smuzhiyun #include <linux/zpool.h>
20*4882a593Smuzhiyun 
/*
 * A zpool ties one backend pool instance to the driver that created it.
 * Instances are created by zpool_create_pool() and freed by
 * zpool_destroy_pool().
 */
struct zpool {
	struct zpool_driver *driver;	/* backend implementation (e.g. zbud, zsmalloc) */
	void *pool;			/* opaque handle returned by driver->create() */
	const struct zpool_ops *ops;	/* optional eviction callbacks from the creator */
	bool evictable;			/* set iff driver->shrink and ops->evict both exist */

	struct list_head list;		/* entry in pools_head, protected by pools_lock */
};

/* All registered drivers; additions/removals/walks hold drivers_lock. */
static LIST_HEAD(drivers_head);
static DEFINE_SPINLOCK(drivers_lock);

/* All live pools; additions/removals hold pools_lock. */
static LIST_HEAD(pools_head);
static DEFINE_SPINLOCK(pools_lock);
35*4882a593Smuzhiyun 
/**
 * zpool_register_driver() - register a zpool implementation.
 * @driver:	driver to register
 *
 * Adds @driver to the global driver list so zpool_get_driver() can find
 * it by type.  The refcount starts at zero; it counts pools using the
 * driver plus transient lookups.
 */
void zpool_register_driver(struct zpool_driver *driver)
{
	spin_lock(&drivers_lock);
	atomic_set(&driver->refcount, 0);
	list_add(&driver->list, &drivers_head);
	spin_unlock(&drivers_lock);
}
EXPORT_SYMBOL(zpool_register_driver);
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun /**
50*4882a593Smuzhiyun  * zpool_unregister_driver() - unregister a zpool implementation.
51*4882a593Smuzhiyun  * @driver:	driver to unregister.
52*4882a593Smuzhiyun  *
53*4882a593Smuzhiyun  * Module usage counting is used to prevent using a driver
54*4882a593Smuzhiyun  * while/after unloading, so if this is called from module
55*4882a593Smuzhiyun  * exit function, this should never fail; if called from
56*4882a593Smuzhiyun  * other than the module exit function, and this returns
57*4882a593Smuzhiyun  * failure, the driver is in use and must remain available.
58*4882a593Smuzhiyun  */
zpool_unregister_driver(struct zpool_driver * driver)59*4882a593Smuzhiyun int zpool_unregister_driver(struct zpool_driver *driver)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun 	int ret = 0, refcount;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	spin_lock(&drivers_lock);
64*4882a593Smuzhiyun 	refcount = atomic_read(&driver->refcount);
65*4882a593Smuzhiyun 	WARN_ON(refcount < 0);
66*4882a593Smuzhiyun 	if (refcount > 0)
67*4882a593Smuzhiyun 		ret = -EBUSY;
68*4882a593Smuzhiyun 	else
69*4882a593Smuzhiyun 		list_del(&driver->list);
70*4882a593Smuzhiyun 	spin_unlock(&drivers_lock);
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 	return ret;
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun EXPORT_SYMBOL(zpool_unregister_driver);
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun /* this assumes @type is null-terminated. */
zpool_get_driver(const char * type)77*4882a593Smuzhiyun static struct zpool_driver *zpool_get_driver(const char *type)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	struct zpool_driver *driver;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	spin_lock(&drivers_lock);
82*4882a593Smuzhiyun 	list_for_each_entry(driver, &drivers_head, list) {
83*4882a593Smuzhiyun 		if (!strcmp(driver->type, type)) {
84*4882a593Smuzhiyun 			bool got = try_module_get(driver->owner);
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 			if (got)
87*4882a593Smuzhiyun 				atomic_inc(&driver->refcount);
88*4882a593Smuzhiyun 			spin_unlock(&drivers_lock);
89*4882a593Smuzhiyun 			return got ? driver : NULL;
90*4882a593Smuzhiyun 		}
91*4882a593Smuzhiyun 	}
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	spin_unlock(&drivers_lock);
94*4882a593Smuzhiyun 	return NULL;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun 
zpool_put_driver(struct zpool_driver * driver)97*4882a593Smuzhiyun static void zpool_put_driver(struct zpool_driver *driver)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun 	atomic_dec(&driver->refcount);
100*4882a593Smuzhiyun 	module_put(driver->owner);
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun /**
104*4882a593Smuzhiyun  * zpool_has_pool() - Check if the pool driver is available
105*4882a593Smuzhiyun  * @type:	The type of the zpool to check (e.g. zbud, zsmalloc)
106*4882a593Smuzhiyun  *
107*4882a593Smuzhiyun  * This checks if the @type pool driver is available.  This will try to load
108*4882a593Smuzhiyun  * the requested module, if needed, but there is no guarantee the module will
109*4882a593Smuzhiyun  * still be loaded and available immediately after calling.  If this returns
110*4882a593Smuzhiyun  * true, the caller should assume the pool is available, but must be prepared
111*4882a593Smuzhiyun  * to handle the @zpool_create_pool() returning failure.  However if this
112*4882a593Smuzhiyun  * returns false, the caller should assume the requested pool type is not
113*4882a593Smuzhiyun  * available; either the requested pool type module does not exist, or could
114*4882a593Smuzhiyun  * not be loaded, and calling @zpool_create_pool() with the pool type will
115*4882a593Smuzhiyun  * fail.
116*4882a593Smuzhiyun  *
117*4882a593Smuzhiyun  * The @type string must be null-terminated.
118*4882a593Smuzhiyun  *
119*4882a593Smuzhiyun  * Returns: true if @type pool is available, false if not
120*4882a593Smuzhiyun  */
zpool_has_pool(char * type)121*4882a593Smuzhiyun bool zpool_has_pool(char *type)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun 	struct zpool_driver *driver = zpool_get_driver(type);
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	if (!driver) {
126*4882a593Smuzhiyun 		request_module("zpool-%s", type);
127*4882a593Smuzhiyun 		driver = zpool_get_driver(type);
128*4882a593Smuzhiyun 	}
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	if (!driver)
131*4882a593Smuzhiyun 		return false;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	zpool_put_driver(driver);
134*4882a593Smuzhiyun 	return true;
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun EXPORT_SYMBOL(zpool_has_pool);
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun /**
139*4882a593Smuzhiyun  * zpool_create_pool() - Create a new zpool
140*4882a593Smuzhiyun  * @type:	The type of the zpool to create (e.g. zbud, zsmalloc)
141*4882a593Smuzhiyun  * @name:	The name of the zpool (e.g. zram0, zswap)
142*4882a593Smuzhiyun  * @gfp:	The GFP flags to use when allocating the pool.
143*4882a593Smuzhiyun  * @ops:	The optional ops callback.
144*4882a593Smuzhiyun  *
145*4882a593Smuzhiyun  * This creates a new zpool of the specified type.  The gfp flags will be
146*4882a593Smuzhiyun  * used when allocating memory, if the implementation supports it.  If the
147*4882a593Smuzhiyun  * ops param is NULL, then the created zpool will not be evictable.
148*4882a593Smuzhiyun  *
149*4882a593Smuzhiyun  * Implementations must guarantee this to be thread-safe.
150*4882a593Smuzhiyun  *
151*4882a593Smuzhiyun  * The @type and @name strings must be null-terminated.
152*4882a593Smuzhiyun  *
153*4882a593Smuzhiyun  * Returns: New zpool on success, NULL on failure.
154*4882a593Smuzhiyun  */
zpool_create_pool(const char * type,const char * name,gfp_t gfp,const struct zpool_ops * ops)155*4882a593Smuzhiyun struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
156*4882a593Smuzhiyun 		const struct zpool_ops *ops)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	struct zpool_driver *driver;
159*4882a593Smuzhiyun 	struct zpool *zpool;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	pr_debug("creating pool type %s\n", type);
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	driver = zpool_get_driver(type);
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	if (!driver) {
166*4882a593Smuzhiyun 		request_module("zpool-%s", type);
167*4882a593Smuzhiyun 		driver = zpool_get_driver(type);
168*4882a593Smuzhiyun 	}
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun 	if (!driver) {
171*4882a593Smuzhiyun 		pr_err("no driver for type %s\n", type);
172*4882a593Smuzhiyun 		return NULL;
173*4882a593Smuzhiyun 	}
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	zpool = kmalloc(sizeof(*zpool), gfp);
176*4882a593Smuzhiyun 	if (!zpool) {
177*4882a593Smuzhiyun 		pr_err("couldn't create zpool - out of memory\n");
178*4882a593Smuzhiyun 		zpool_put_driver(driver);
179*4882a593Smuzhiyun 		return NULL;
180*4882a593Smuzhiyun 	}
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	zpool->driver = driver;
183*4882a593Smuzhiyun 	zpool->pool = driver->create(name, gfp, ops, zpool);
184*4882a593Smuzhiyun 	zpool->ops = ops;
185*4882a593Smuzhiyun 	zpool->evictable = driver->shrink && ops && ops->evict;
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	if (!zpool->pool) {
188*4882a593Smuzhiyun 		pr_err("couldn't create %s pool\n", type);
189*4882a593Smuzhiyun 		zpool_put_driver(driver);
190*4882a593Smuzhiyun 		kfree(zpool);
191*4882a593Smuzhiyun 		return NULL;
192*4882a593Smuzhiyun 	}
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	pr_debug("created pool type %s\n", type);
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	spin_lock(&pools_lock);
197*4882a593Smuzhiyun 	list_add(&zpool->list, &pools_head);
198*4882a593Smuzhiyun 	spin_unlock(&pools_lock);
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	return zpool;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun /**
204*4882a593Smuzhiyun  * zpool_destroy_pool() - Destroy a zpool
205*4882a593Smuzhiyun  * @zpool:	The zpool to destroy.
206*4882a593Smuzhiyun  *
207*4882a593Smuzhiyun  * Implementations must guarantee this to be thread-safe,
208*4882a593Smuzhiyun  * however only when destroying different pools.  The same
209*4882a593Smuzhiyun  * pool should only be destroyed once, and should not be used
210*4882a593Smuzhiyun  * after it is destroyed.
211*4882a593Smuzhiyun  *
212*4882a593Smuzhiyun  * This destroys an existing zpool.  The zpool should not be in use.
213*4882a593Smuzhiyun  */
zpool_destroy_pool(struct zpool * zpool)214*4882a593Smuzhiyun void zpool_destroy_pool(struct zpool *zpool)
215*4882a593Smuzhiyun {
216*4882a593Smuzhiyun 	pr_debug("destroying pool type %s\n", zpool->driver->type);
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	spin_lock(&pools_lock);
219*4882a593Smuzhiyun 	list_del(&zpool->list);
220*4882a593Smuzhiyun 	spin_unlock(&pools_lock);
221*4882a593Smuzhiyun 	zpool->driver->destroy(zpool->pool);
222*4882a593Smuzhiyun 	zpool_put_driver(zpool->driver);
223*4882a593Smuzhiyun 	kfree(zpool);
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun /**
227*4882a593Smuzhiyun  * zpool_get_type() - Get the type of the zpool
228*4882a593Smuzhiyun  * @zpool:	The zpool to check
229*4882a593Smuzhiyun  *
230*4882a593Smuzhiyun  * This returns the type of the pool.
231*4882a593Smuzhiyun  *
232*4882a593Smuzhiyun  * Implementations must guarantee this to be thread-safe.
233*4882a593Smuzhiyun  *
234*4882a593Smuzhiyun  * Returns: The type of zpool.
235*4882a593Smuzhiyun  */
zpool_get_type(struct zpool * zpool)236*4882a593Smuzhiyun const char *zpool_get_type(struct zpool *zpool)
237*4882a593Smuzhiyun {
238*4882a593Smuzhiyun 	return zpool->driver->type;
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun /**
242*4882a593Smuzhiyun  * zpool_malloc_support_movable() - Check if the zpool supports
243*4882a593Smuzhiyun  *	allocating movable memory
244*4882a593Smuzhiyun  * @zpool:	The zpool to check
245*4882a593Smuzhiyun  *
246*4882a593Smuzhiyun  * This returns if the zpool supports allocating movable memory.
247*4882a593Smuzhiyun  *
248*4882a593Smuzhiyun  * Implementations must guarantee this to be thread-safe.
249*4882a593Smuzhiyun  *
250*4882a593Smuzhiyun  * Returns: true if the zpool supports allocating movable memory, false if not
251*4882a593Smuzhiyun  */
zpool_malloc_support_movable(struct zpool * zpool)252*4882a593Smuzhiyun bool zpool_malloc_support_movable(struct zpool *zpool)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun 	return zpool->driver->malloc_support_movable;
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun /**
258*4882a593Smuzhiyun  * zpool_malloc() - Allocate memory
259*4882a593Smuzhiyun  * @zpool:	The zpool to allocate from.
260*4882a593Smuzhiyun  * @size:	The amount of memory to allocate.
261*4882a593Smuzhiyun  * @gfp:	The GFP flags to use when allocating memory.
262*4882a593Smuzhiyun  * @handle:	Pointer to the handle to set
263*4882a593Smuzhiyun  *
264*4882a593Smuzhiyun  * This allocates the requested amount of memory from the pool.
265*4882a593Smuzhiyun  * The gfp flags will be used when allocating memory, if the
266*4882a593Smuzhiyun  * implementation supports it.  The provided @handle will be
267*4882a593Smuzhiyun  * set to the allocated object handle.
268*4882a593Smuzhiyun  *
269*4882a593Smuzhiyun  * Implementations must guarantee this to be thread-safe.
270*4882a593Smuzhiyun  *
271*4882a593Smuzhiyun  * Returns: 0 on success, negative value on error.
272*4882a593Smuzhiyun  */
zpool_malloc(struct zpool * zpool,size_t size,gfp_t gfp,unsigned long * handle)273*4882a593Smuzhiyun int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
274*4882a593Smuzhiyun 			unsigned long *handle)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun 	return zpool->driver->malloc(zpool->pool, size, gfp, handle);
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun /**
280*4882a593Smuzhiyun  * zpool_free() - Free previously allocated memory
281*4882a593Smuzhiyun  * @zpool:	The zpool that allocated the memory.
282*4882a593Smuzhiyun  * @handle:	The handle to the memory to free.
283*4882a593Smuzhiyun  *
284*4882a593Smuzhiyun  * This frees previously allocated memory.  This does not guarantee
285*4882a593Smuzhiyun  * that the pool will actually free memory, only that the memory
286*4882a593Smuzhiyun  * in the pool will become available for use by the pool.
287*4882a593Smuzhiyun  *
288*4882a593Smuzhiyun  * Implementations must guarantee this to be thread-safe,
289*4882a593Smuzhiyun  * however only when freeing different handles.  The same
290*4882a593Smuzhiyun  * handle should only be freed once, and should not be used
291*4882a593Smuzhiyun  * after freeing.
292*4882a593Smuzhiyun  */
zpool_free(struct zpool * zpool,unsigned long handle)293*4882a593Smuzhiyun void zpool_free(struct zpool *zpool, unsigned long handle)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun 	zpool->driver->free(zpool->pool, handle);
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun /**
299*4882a593Smuzhiyun  * zpool_shrink() - Shrink the pool size
300*4882a593Smuzhiyun  * @zpool:	The zpool to shrink.
301*4882a593Smuzhiyun  * @pages:	The number of pages to shrink the pool.
302*4882a593Smuzhiyun  * @reclaimed:	The number of pages successfully evicted.
303*4882a593Smuzhiyun  *
304*4882a593Smuzhiyun  * This attempts to shrink the actual memory size of the pool
305*4882a593Smuzhiyun  * by evicting currently used handle(s).  If the pool was
306*4882a593Smuzhiyun  * created with no zpool_ops, or the evict call fails for any
307*4882a593Smuzhiyun  * of the handles, this will fail.  If non-NULL, the @reclaimed
308*4882a593Smuzhiyun  * parameter will be set to the number of pages reclaimed,
309*4882a593Smuzhiyun  * which may be more than the number of pages requested.
310*4882a593Smuzhiyun  *
311*4882a593Smuzhiyun  * Implementations must guarantee this to be thread-safe.
312*4882a593Smuzhiyun  *
313*4882a593Smuzhiyun  * Returns: 0 on success, negative value on error/failure.
314*4882a593Smuzhiyun  */
zpool_shrink(struct zpool * zpool,unsigned int pages,unsigned int * reclaimed)315*4882a593Smuzhiyun int zpool_shrink(struct zpool *zpool, unsigned int pages,
316*4882a593Smuzhiyun 			unsigned int *reclaimed)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun 	return zpool->driver->shrink ?
319*4882a593Smuzhiyun 	       zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun /**
323*4882a593Smuzhiyun  * zpool_map_handle() - Map a previously allocated handle into memory
324*4882a593Smuzhiyun  * @zpool:	The zpool that the handle was allocated from
325*4882a593Smuzhiyun  * @handle:	The handle to map
326*4882a593Smuzhiyun  * @mapmode:	How the memory should be mapped
327*4882a593Smuzhiyun  *
328*4882a593Smuzhiyun  * This maps a previously allocated handle into memory.  The @mapmode
329*4882a593Smuzhiyun  * param indicates to the implementation how the memory will be
330*4882a593Smuzhiyun  * used, i.e. read-only, write-only, read-write.  If the
331*4882a593Smuzhiyun  * implementation does not support it, the memory will be treated
332*4882a593Smuzhiyun  * as read-write.
333*4882a593Smuzhiyun  *
334*4882a593Smuzhiyun  * This may hold locks, disable interrupts, and/or preemption,
335*4882a593Smuzhiyun  * and the zpool_unmap_handle() must be called to undo those
336*4882a593Smuzhiyun  * actions.  The code that uses the mapped handle should complete
337*4882a593Smuzhiyun  * its operatons on the mapped handle memory quickly and unmap
338*4882a593Smuzhiyun  * as soon as possible.  As the implementation may use per-cpu
339*4882a593Smuzhiyun  * data, multiple handles should not be mapped concurrently on
340*4882a593Smuzhiyun  * any cpu.
341*4882a593Smuzhiyun  *
342*4882a593Smuzhiyun  * Returns: A pointer to the handle's mapped memory area.
343*4882a593Smuzhiyun  */
zpool_map_handle(struct zpool * zpool,unsigned long handle,enum zpool_mapmode mapmode)344*4882a593Smuzhiyun void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
345*4882a593Smuzhiyun 			enum zpool_mapmode mapmode)
346*4882a593Smuzhiyun {
347*4882a593Smuzhiyun 	return zpool->driver->map(zpool->pool, handle, mapmode);
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun /**
351*4882a593Smuzhiyun  * zpool_unmap_handle() - Unmap a previously mapped handle
352*4882a593Smuzhiyun  * @zpool:	The zpool that the handle was allocated from
353*4882a593Smuzhiyun  * @handle:	The handle to unmap
354*4882a593Smuzhiyun  *
355*4882a593Smuzhiyun  * This unmaps a previously mapped handle.  Any locks or other
356*4882a593Smuzhiyun  * actions that the implementation took in zpool_map_handle()
357*4882a593Smuzhiyun  * will be undone here.  The memory area returned from
358*4882a593Smuzhiyun  * zpool_map_handle() should no longer be used after this.
359*4882a593Smuzhiyun  */
zpool_unmap_handle(struct zpool * zpool,unsigned long handle)360*4882a593Smuzhiyun void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
361*4882a593Smuzhiyun {
362*4882a593Smuzhiyun 	zpool->driver->unmap(zpool->pool, handle);
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun /**
366*4882a593Smuzhiyun  * zpool_get_total_size() - The total size of the pool
367*4882a593Smuzhiyun  * @zpool:	The zpool to check
368*4882a593Smuzhiyun  *
369*4882a593Smuzhiyun  * This returns the total size in bytes of the pool.
370*4882a593Smuzhiyun  *
371*4882a593Smuzhiyun  * Returns: Total size of the zpool in bytes.
372*4882a593Smuzhiyun  */
zpool_get_total_size(struct zpool * zpool)373*4882a593Smuzhiyun u64 zpool_get_total_size(struct zpool *zpool)
374*4882a593Smuzhiyun {
375*4882a593Smuzhiyun 	return zpool->driver->total_size(zpool->pool);
376*4882a593Smuzhiyun }
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun /**
379*4882a593Smuzhiyun  * zpool_evictable() - Test if zpool is potentially evictable
380*4882a593Smuzhiyun  * @zpool:	The zpool to test
381*4882a593Smuzhiyun  *
382*4882a593Smuzhiyun  * Zpool is only potentially evictable when it's created with struct
383*4882a593Smuzhiyun  * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
384*4882a593Smuzhiyun  *
385*4882a593Smuzhiyun  * However, it doesn't necessarily mean driver will use zpool_ops.evict
386*4882a593Smuzhiyun  * in its implementation of zpool_driver.shrink. It could do internal
387*4882a593Smuzhiyun  * defragmentation instead.
388*4882a593Smuzhiyun  *
389*4882a593Smuzhiyun  * Returns: true if potentially evictable; false otherwise.
390*4882a593Smuzhiyun  */
zpool_evictable(struct zpool * zpool)391*4882a593Smuzhiyun bool zpool_evictable(struct zpool *zpool)
392*4882a593Smuzhiyun {
393*4882a593Smuzhiyun 	return zpool->evictable;
394*4882a593Smuzhiyun }
395*4882a593Smuzhiyun 
/* Module metadata for the zpool frontend. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");
399