// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how often a
 * certain event occurs.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable handling same-value filled pages (enabled by default) */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);
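
/*
 * Note (usage sketch): because the parameters above are registered with mode
 * 0644, they are typically visible and tunable at runtime under
 * /sys/module/zswap/parameters/, e.g. (example commands only):
 *
 *	cat /sys/module/zswap/parameters/compressor
 *	echo 20 > /sys/module/zswap/parameters/max_pool_percent
 */
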
/*********************************
* data structures
**********************************/

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry. Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback. The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount. Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression. For a same-value filled page, length is 0.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - the value stored by a same-value filled page
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
};

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
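
/*
 * Worked example for the two checks above (illustrative numbers only): on a
 * machine with 8 GiB of RAM and the defaults max_pool_percent=20 and
 * accept_threshold_percent=90, zswap_is_full() triggers once the pool holds
 * more than ~1.6 GiB of compressed data, and zswap_can_accept() only starts
 * admitting new pages again after the pool shrinks below ~1.44 GiB
 * (90% of that 1.6 GiB limit).
 */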

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;

	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int zswap_dstmem_prepare(unsigned int cpu)
{
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	per_cpu(zswap_dstmem, cpu) = dst;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	u8 *dst;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
		return 0;

	tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
	if (IS_ERR_OR_NULL(tfm)) {
		pr_err("could not alloc crypto comp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(tfm));
		return -ENOMEM;
	}
	*per_cpu_ptr(pool->tfm, cpu) = tfm;
	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(pool->tfm, cpu);
	if (!IS_ERR_OR_NULL(tfm))
		crypto_free_comp(tfm);
	*per_cpu_ptr(pool->tfm, cpu) = NULL;
	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);

	if (zpool_shrink(pool->zpool, 1, NULL))
		zswap_reject_reclaim_fail++;
	zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	INIT_WORK(&pool->shrink_work, shrink_worker);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}
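
/*
 * Illustrative refcount lifecycle (a sketch, not an exhaustive list of
 * callers): the pool at the head of zswap_pools carries the initial reference
 * taken by kref_init() in zswap_pool_create(); a store takes a temporary
 * reference via zswap_pool_current_get() and a successfully stored entry keeps
 * it until zswap_free_entry() drops it with zswap_pool_put(). When the last
 * reference goes away, __zswap_pool_empty() unlinks the pool and
 * __zswap_pool_release() destroys it after an RCU grace period.
 */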

/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	if (zswap_init_failed) {
		pr_err("can't set param, initialization failed\n");
		return -ENODEV;
	}

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_comp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	if (zswap_init_failed) {
		pr_err("can't enable, initialization failed\n");
		return -ENODEV;
	}
	if (!zswap_has_pool && zswap_init_started) {
		pr_err("can't enable, no pool configured\n");
		return -ENODEV;
	}

	return param_set_bool(val, kp);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache). If the page
 * is found, it is returned in retpage. Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		zpool_unmap_handle(pool, handle);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zhdr + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case), entry is valid and on the tree
	 * (2) refcount is 0, entry is freed and not on the tree
	 *     because invalidate happened during writeback
	 *     search the tree and free the entry if it is found
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently. It is safe and okay to not free the entry.
	 * If we do free the entry in the following put, it is also okay
	 * to return !0.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	zpool_unmap_handle(pool, handle);
	return ret;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;
	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos] != page[0])
			return 0;
	}
	*value = page[0];
	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}
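
/*
 * Example (illustrative): a page that is entirely zero, or one filled with a
 * repeating word-sized pattern such as 0xffffffffffffffff, passes the check
 * in zswap_is_page_same_filled(). Such a page is stored as just entry->value
 * with entry->length == 0, and zswap_fill_page() reconstructs it on load
 * without touching the compressor or the zpool.
 */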

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int hlen, dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
	gfp_t gfp;

	/* THP isn't supported */
	if (PageTransHuge(page)) {
		ret = -EINVAL;
		goto reject;
	}

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		struct zswap_pool *pool;

		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
		pool = zswap_pool_last_get();
		if (pool)
			queue_work(shrink_wq, &pool->shrink_work);
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_pool_reached_full) {
		if (!zswap_can_accept()) {
			ret = -ENOMEM;
			goto reject;
		} else
			zswap_pool_reached_full = false;
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_atomic(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_atomic(src);
			entry->offset = offset;
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_atomic(src);
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(entry->pool->zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	memcpy(buf, &zhdr, hlen);
	memcpy(buf + hlen, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

insert_entry:
	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * returns 0 if the page was successfully decompressed
 * returns -1 if the entry was not found or on error
 */
zswap_frontswap_load(unsigned type,pgoff_t offset,struct page * page)1146*4882a593Smuzhiyun static int zswap_frontswap_load(unsigned type, pgoff_t offset,
1147*4882a593Smuzhiyun struct page *page)
1148*4882a593Smuzhiyun {
1149*4882a593Smuzhiyun struct zswap_tree *tree = zswap_trees[type];
1150*4882a593Smuzhiyun struct zswap_entry *entry;
1151*4882a593Smuzhiyun struct crypto_comp *tfm;
1152*4882a593Smuzhiyun u8 *src, *dst;
1153*4882a593Smuzhiyun unsigned int dlen;
1154*4882a593Smuzhiyun int ret;
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun /* find */
1157*4882a593Smuzhiyun spin_lock(&tree->lock);
1158*4882a593Smuzhiyun entry = zswap_entry_find_get(&tree->rbroot, offset);
1159*4882a593Smuzhiyun if (!entry) {
1160*4882a593Smuzhiyun /* entry was written back */
1161*4882a593Smuzhiyun spin_unlock(&tree->lock);
1162*4882a593Smuzhiyun return -1;
1163*4882a593Smuzhiyun }
1164*4882a593Smuzhiyun spin_unlock(&tree->lock);
1165*4882a593Smuzhiyun
	if (!entry->length) {
		dst = kmap_atomic(page);
		zswap_fill_page(dst, entry->value);
		kunmap_atomic(dst);
		goto freeentry;
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
	if (zpool_evictable(entry->pool->zpool))
		src += sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
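	/* decompressing data that zswap itself compressed should never fail */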
	BUG_ON(ret);

freeentry:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

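/*
 * Allocates and initializes the per-swap-type tree that maps swap offsets
 * to zswap entries.  Called via frontswap when a swap area is enabled.
 */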
static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

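/* callbacks that hook zswap into the frontswap layer as its backend */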
static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", 0444,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
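
/*
 * Usage sketch, assuming debugfs is mounted at its usual location:
 *
 *   # cat /sys/kernel/debug/zswap/pool_total_size
 *   # grep . /sys/kernel/debug/zswap/*
 *
 * All counters above are exported read-only (mode 0444).
 */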

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	shrink_wq = create_workqueue("zswap-shrink");
	if (!shrink_wq)
		goto fallback_fail;

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

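/* error unwind: tear down what was set up, in reverse order */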
fallback_fail:
	if (pool)
		zswap_pool_destroy(pool);
hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_failed = true;
	zswap_enabled = false;
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);
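
/*
 * Runtime control sketch.  The parameter names below assume the standard
 * zswap module parameters defined earlier in this file:
 *
 *   zswap.enabled=1 zswap.compressor=lzo zswap.zpool=zbud     (kernel cmdline)
 *   # echo 1 > /sys/module/zswap/parameters/enabled            (at runtime)
 *   # echo 20 > /sys/module/zswap/parameters/max_pool_percent
 */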

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");