/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo-rle";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;
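
/*
 * Clarifying note (not in the original source): huge_class_size is taken
 * from zsmalloc via zs_huge_class_size() at first disksize setup. It sits
 * only somewhat below PAGE_SIZE, so a page whose compressed form is that
 * large gains almost nothing from compression and is kept verbatim,
 * flagged ZRAM_HUGE.
 */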

static const struct block_device_operations zram_devops;
static const struct block_device_operations zram_wb_devops;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio);


static int zram_slot_trylock(struct zram *zram, u32 index)
{
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
					u32 index, size_t size)
{
	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
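
/*
 * Layout note (added for clarity): each slot's "flags" word packs the
 * compressed object size into the low ZRAM_FLAG_SHIFT bits and keeps the
 * ZRAM_* page flags in the bits above, which is why zram_set_obj_size()
 * shifts the flag bits out before ORing in the new size.
 */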

static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return zram_get_obj_size(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
}

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}
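
/*
 * Worked example (illustrative, assuming ZRAM_LOGICAL_BLOCK_SIZE == 4096,
 * i.e. ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a 4096-byte request starting
 * at sector 8 passes; the same request at sector 4 fails the alignment
 * check, and any request whose end exceeds disksize >> SECTOR_SHIFT
 * fails the bounds check.
 */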

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
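
/*
 * Note (added for clarity): update_used_max() above is a lock-free
 * maximum update. atomic_long_cmpxchg() installs the new page count only
 * if max_used_pages still holds the value just read; otherwise the loop
 * retries against the refreshed value.
 */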

static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{
	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
	memset_l(ptr, value, len / sizeof(unsigned long));
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}
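
/*
 * Example (illustrative): a page of all zeroes, or one filled with a
 * repeating unsigned long pattern, passes page_same_filled(); the single
 * word value is then stored in the slot's "element" field instead of a
 * compressed buffer, and the slot is flagged ZRAM_SAME.
 */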

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	int index;

	if (!sysfs_streq(buf, "all"))
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	for (index = 0; index < nr_pages; index++) {
		/*
		 * Do not mark a ZRAM_UNDER_WB slot as ZRAM_IDLE, to close
		 * the race. See the comment in writeback_store.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
				!zram_test_flag(zram, index, ZRAM_UNDER_WB))
			zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
	}

	up_read(&zram->init_lock);

	return len;
}
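
/*
 * Usage (per the zram sysfs ABI): "echo all > /sys/block/zramX/idle"
 * marks every allocated slot idle; a slot touched afterwards drops the
 * flag again via zram_accessed().
 */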

#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	bool val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}

static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram->backing_dev)
		return;

	bdev = zram->bdev;
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close flushes all of the IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;
	zram->disk->fops = &zram_devops;
	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}

static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct file *file;
	struct zram *zram = dev_to_zram(dev);
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	file = zram->backing_dev;
	if (!file) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}

static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	size_t sz;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz, old_block_size = 0;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, PATH_MAX);
	/* ignore trailing newline */
	sz = strlen(file_name);
	if (sz > 0 && file_name[sz - 1] == '\n')
		file_name[sz - 1] = 0x00;

	backing_dev = filp_open_block(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Only block devices are supported at the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = blkdev_get_by_dev(inode->i_rdev,
			FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (IS_ERR(bdev)) {
		err = PTR_ERR(bdev);
		bdev = NULL;
		goto out;
	}

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err)
		goto out;

	reset_bdev(zram);

	zram->old_block_size = old_block_size;
	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	/*
	 * With the writeback feature, zram does asynchronous IO, so it is
	 * no longer a synchronous device and the synchronous io flag must
	 * be dropped. Otherwise, an upper layer (e.g., swap) could wait for
	 * IO completion rather than (submit and return), which would make
	 * the system sluggish.
	 * Furthermore, when the IO function returns (e.g., swap_readpage),
	 * the upper layer expects the IO to be done, so it could free the
	 * page while the IO is in fact still in flight, finally causing a
	 * use-after-free when the IO really completes.
	 */
	zram->disk->fops = &zram_wb_devops;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	if (bitmap)
		kvfree(bitmap);

	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}
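
/*
 * Usage (per the zram sysfs ABI): writing a block device path, e.g.
 * "echo /dev/sdX > /sys/block/zramX/backing_dev", must happen before the
 * disksize is set, otherwise the store fails with -EBUSY. Reading the
 * attribute returns the path, or "none" when no backing device is set.
 */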

static unsigned long alloc_block_bdev(struct zram *zram)
{
	unsigned long blk_idx = 1;
retry:
	/* skip bit 0, so a block index can't be confused with zram.handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
	return blk_idx;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
	int was_set;

	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
}

static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	page_endio(page, op_is_write(bio_op(bio)),
			blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio_set_dev(bio, zram->bdev);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
		bio_put(bio);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_READ;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	return 1;
}

#define PAGE_WB_SIG "page_index="

#define PAGE_WRITEBACK 0
#define HUGE_WRITEBACK 1
#define IDLE_WRITEBACK 2
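
/*
 * Usage (per the zram sysfs ABI): "echo idle > /sys/block/zramX/writeback"
 * writes back idle pages, "echo huge > ..." writes back incompressible
 * pages, and "echo page_index=<n> > ..." writes back the single page at
 * index <n>.
 */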

static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	unsigned long index = 0;
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	ssize_t ret = len;
	int mode, err;
	unsigned long blk_idx = 0;

	if (sysfs_streq(buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (sysfs_streq(buf, "huge"))
		mode = HUGE_WRITEBACK;
	else {
		if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
			return -EINVAL;

		if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
				index >= nr_pages)
			return -EINVAL;

		nr_pages = 1;
		mode = PAGE_WRITEBACK;
	}

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (!zram->backing_dev) {
		ret = -ENODEV;
		goto release_init_lock;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	for (; nr_pages != 0; index++, nr_pages--) {
		struct bio_vec bvec;

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;

		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);

		if (!blk_idx) {
			blk_idx = alloc_block_bdev(zram);
			if (!blk_idx) {
				ret = -ENOSPC;
				break;
			}
		}

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
				zram_test_flag(zram, index, ZRAM_SAME) ||
				zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode == IDLE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode == HUGE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		/*
		 * Clearing ZRAM_UNDER_WB is the caller's duty;
		 * IOW, zram_free_page() never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed for hugepage writeback racing */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		bio_init(&bio, &bio_vec, 1);
		bio_set_dev(&bio, zram->bdev);
		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;

		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset);
		/*
		 * XXX: A single page IO would be inefficient for write,
		 * but it is not bad as a starter.
		 */
		err = submit_bio_wait(&bio);
		if (err) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			/*
			 * Return the last IO error unless none of the
			 * IOs succeeded.
			 */
			ret = err;
			continue;
		}

		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released zram_slot_lock, so we need to check whether
		 * the slot was changed. If the slot was freed, we can catch
		 * that easily via zram_allocated.
		 * A subtle case is when the slot is freed/reallocated/marked
		 * as ZRAM_IDLE again. To close that race, idle_store doesn't
		 * mark ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB.
		 * Thus, we can close the race by checking the ZRAM_IDLE bit.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
next:
		zram_slot_unlock(zram, index);
	}

	if (blk_idx)
		free_block_bdev(zram, blk_idx);
	__free_page(page);
release_init_lock:
	up_read(&zram->init_lock);

	return ret;
}

struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
	struct bio_vec bvec;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &zw->bvec, entry, bio);
}

/*
 * The block layer wants one ->submit_bio to be active at a time, so if we
 * use chained IO with the parent IO in the same context, it's a deadlock.
 * To avoid that, use a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.bvec = *bvec;
	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	atomic64_inc(&zram->stats.bd_reads);
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif

#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
	zram->table[index].ac_time = ktime_get_boottime();
}

static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	kbuf = kvmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');

		if (count <= copied) {
			zram_slot_unlock(zram, index);
			break;
		}
		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}

static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};
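
/*
 * Output format (derived from read_block_state() above): one line per
 * allocated slot, "<index> <sec>.<usec> <flags>", where the four flag
 * characters mean s=same-filled, w=written back, h=huge, i=idle, with
 * '.' standing for a clear flag.
 */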

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}
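
/*
 * Column order (derived from io_stat_show() above):
 * failed_reads failed_writes invalid_io notify_free
 */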

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			atomic_long_read(&pool_stats.pages_compacted),
			(u64)atomic64_read(&zram->stats.huge_pages));
	up_read(&zram->init_lock);

	return ret;
}
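
/*
 * Column order (derived from mm_stat_show() above): orig_data_size
 * compr_data_size mem_used_total mem_limit mem_used_max same_pages
 * pages_compacted huge_pages; the page counters are converted to bytes
 * via the PAGE_SHIFT shifts.
 */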

#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
		"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);

	return ret;
}
#endif
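
/*
 * Column order (derived from bd_stat_show() above): bd_count bd_reads
 * bd_writes, each scaled by FOUR_K() so the values are in 4K-page units
 * regardless of PAGE_SIZE.
 */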

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu %8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall),
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);

	return ret;
}
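
/*
 * Format (derived from debug_stat_show() above): a "version:" line
 * followed by the writestall and miss_free counters.
 */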
1143*4882a593Smuzhiyun
1144*4882a593Smuzhiyun static DEVICE_ATTR_RO(io_stat);
1145*4882a593Smuzhiyun static DEVICE_ATTR_RO(mm_stat);
1146*4882a593Smuzhiyun #ifdef CONFIG_ZRAM_WRITEBACK
1147*4882a593Smuzhiyun static DEVICE_ATTR_RO(bd_stat);
1148*4882a593Smuzhiyun #endif
1149*4882a593Smuzhiyun static DEVICE_ATTR_RO(debug_stat);
1150*4882a593Smuzhiyun
static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);
	return true;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * must hold the entry's bit_spinlock in the table while accessing it.
 */
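/*
 * Typical caller pattern (see e.g. zram_bio_discard() below):
 *
 *	zram_slot_lock(zram, index);
 *	zram_free_page(zram, index);
 *	zram_slot_unlock(zram, index);
 */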
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

#ifdef CONFIG_ZRAM_MEMORY_TRACKING
	zram->table[index].ac_time = 0;
#endif
	if (zram_test_flag(zram, index, ZRAM_IDLE))
		zram_clear_flag(zram, index, ZRAM_IDLE);

	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
		zram_clear_flag(zram, index, ZRAM_HUGE);
		atomic64_dec(&zram->stats.huge_pages);
	}

	if (zram_test_flag(zram, index, ZRAM_WB)) {
		zram_clear_flag(zram, index, ZRAM_WB);
		free_block_bdev(zram, zram_get_element(zram, index));
		goto out;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		atomic64_dec(&zram->stats.same_pages);
		goto out;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
out:
	atomic64_dec(&zram->stats.pages_stored);
	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
	WARN_ON_ONCE(zram->table[index].flags &
		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}

static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
				struct bio *bio, bool partial_io)
{
	struct zcomp_strm *zstrm;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;
	int ret;

	zram_slot_lock(zram, index);
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		struct bio_vec bvec;

		zram_slot_unlock(zram, index);

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;
		return read_from_bdev(zram, &bvec,
				zram_get_element(zram, index),
				bio, partial_io);
	}

	handle = zram_get_handle(zram, index);
	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
		unsigned long value;
		void *mem;

		value = handle ? zram_get_element(zram, index) : 0;
		mem = kmap_atomic(page);
		zram_fill_page(mem, PAGE_SIZE, value);
		kunmap_atomic(mem);
		zram_slot_unlock(zram, index);
		return 0;
	}

	size = zram_get_obj_size(zram, index);

	if (size != PAGE_SIZE)
		zstrm = zcomp_stream_get(zram->comp);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (WARN_ON(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}

static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *bio)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;

	mem = kmap_atomic(page);
	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_atomic(mem);

compress_again:
	zstrm = zcomp_stream_get(zram->comp);
	src = kmap_atomic(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comp);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (comp_len >= huge_class_size)
		comp_len = PAGE_SIZE;
	/*
	 * Handle allocation has 2 paths:
	 * a) The fast path runs with preemption disabled (for
	 *    per-cpu streams) and has the __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) The slow path enables preemption and attempts to allocate
	 *    the page with the __GFP_DIRECT_RECLAIM bit set. We have to
	 *    put the per-cpu compression stream and, thus, re-do
	 *    the compression once the handle is allocated.
	 *
	 * If we have a non-null handle here then we are coming
	 * from the slow path and the handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE |
				__GFP_CMA);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE | __GFP_CMA);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comp);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (comp_len == PAGE_SIZE) {
		zram_set_flag(zram, index, ZRAM_HUGE);
		atomic64_inc(&zram->stats.huge_pages);
	}

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page = NULL;
	void *src;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = __zram_bvec_read(zram, page, index, bio, true);
		if (ret)
			goto out;

		src = kmap_atomic(bvec->bv_page);
		dst = kmap_atomic(page);
		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index, bio);
out:
	if (is_partial_io(bvec))
		__free_page(page);
	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
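/*
 * Worked example (4K pages): a discard of n = 8192 bytes starting at
 * offset 512 within page 'index' cannot free the partially covered
 * first page, so it skips its remaining 3584 bytes and advances to
 * index + 1, frees that whole page (n drops to 512), and then stops
 * because less than a full page remains.
 */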
static void zram_bio_discard(struct zram *zram, u32 index,
			int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

/*
 * Returns an errno if there is a problem; otherwise returns 0 or 1.
 * Returns 0 if the IO request was completed synchronously.
 * Returns 1 if the IO request was successfully submitted (asynchronously).
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, unsigned int op, struct bio *bio)
{
	int ret;

	if (!op_is_write(op)) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset, bio);
	}

	zram_slot_lock(zram, index);
	zram_accessed(zram, index);
	zram_slot_unlock(zram, index);

	if (unlikely(ret < 0)) {
		if (!op_is_write(op))
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned long start_time;

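	/*
	 * Split the starting sector into a page index and a byte offset
	 * within that page. Worked example with 4K pages (SECTORS_PER_PAGE
	 * is 8, SECTOR_SHIFT is 9): sector 9 maps to index 9 >> 3 = 1 and
	 * offset (9 & 7) << 9 = 512 bytes.
	 */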
	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

	start_time = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					bio_op(bio), bio) < 0) {
				bio->bi_status = BLK_STS_IOERR;
				break;
			}

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}
	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_submit_bio(struct bio *bio)
{
	struct zram *zram = bio->bi_disk->private_data;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	atomic64_inc(&zram->stats.notify_free);
	if (!zram_slot_trylock(zram, index)) {
		atomic64_inc(&zram->stats.miss_free);
		return;
	}

	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	int offset, ret;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;
	unsigned long start_time;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		ret = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op);
	ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
	disk_end_io_acct(bdev->bd_disk, op, start_time);
out:
	/*
	 * If I/O fails, just return the error (ie, non-zero) without
	 * calling page_endio.
	 * This causes the upper functions of rw_page (e.g., swap_readpage,
	 * __swap_writepage) to resubmit the I/O as a bio request, whose
	 * bio->bi_end_io then handles the error (e.g., SetPageError,
	 * set_page_dirty and extra work).
	 */
	if (unlikely(ret < 0))
		return ret;

	switch (ret) {
	case 0:
		page_endio(page, op_is_write(op), 0);
		break;
	case 1:
		ret = 0;
		break;
	default:
		WARN_ON(1);
	}
	return ret;
}

static void zram_reset_device(struct zram *zram)
{
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	comp = zram->comp;
	disksize = zram->disksize;
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(zram, disksize);
	memset(&zram->stats, 0, sizeof(zram->stats));
	zcomp_destroy(comp);
	reset_bdev(zram);
}

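/*
 * Illustrative sysfs usage (the device path assumes the first zram
 * device):
 *
 *	# echo 1G > /sys/block/zram0/disksize
 *
 * Sizes are parsed by memparse(), so suffixes such as K, M and G work.
 * The value is rounded up to a PAGE_SIZE multiple by PAGE_ALIGN() below.
 */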
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	revalidate_disk_size(zram->disk, true);
	up_write(&zram->init_lock);

	return len;

out_free_meta:
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}

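/*
 * Illustrative usage: writing any non-zero value resets the device,
 * e.g. "echo 1 > /sys/block/zram0/reset". The device must not be in
 * use (no openers) for the reset to succeed.
 */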
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all the pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	revalidate_disk_size(zram->disk, true);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}

static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed to reset so open request fails */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static const struct block_device_operations zram_wb_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_idle.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
	&dev_attr_writeback.attr,
	&dev_attr_writeback_limit.attr,
	&dev_attr_writeback_limit_enable.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_bd_stat.attr,
#endif
	&dev_attr_debug_stat.attr,
	NULL,
};

static const struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static const struct attribute_group *zram_disk_attr_groups[] = {
	&zram_disk_attr_group,
	NULL,
};

/*
 * Allocate and initialize a new zram device. The function returns
 * a '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
	spin_lock_init(&zram->wb_limit_lock);
#endif
	queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity is set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);

	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size (PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
	device_add_disk(NULL, zram->disk, zram_disk_attr_groups);

	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	zram_debugfs_register(zram);
	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}

static int zram_remove(struct zram *zram)
{
	struct block_device *bdev;

	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	zram_debugfs_unregister(zram);

	/* Make sure all the pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	bdput(bdev);

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	del_gendisk(zram->disk);
	blk_cleanup_queue(zram->disk->queue);
	put_disk(zram->disk);
	kfree(zram);
	return 0;
}

/* zram-control sysfs attributes */

/*
 * NOTE: hot_add is not the usual read-only sysfs attribute, in the sense
 * that reading from this file does alter the state of your system -- it
 * creates a new un-initialized zram device and returns that device's
 * device_id (or an error code if it fails to create a new device).
 */
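/*
 * Illustrative usage:
 *
 *	# cat /sys/class/zram-control/hot_add
 *	1
 *	# echo 1 > /sys/class/zram-control/hot_remove
 *
 * The number read from hot_add is the new device's id (here, zram1).
 */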
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static struct class_attribute class_attr_hot_add =
	__ATTR(hot_add, 0400, hot_add_show, NULL);

static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);

static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name = "zram-control",
	.owner = THIS_MODULE,
	.class_groups = zram_control_class_groups,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	zram_debugfs_destroy();
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}

static int __init zram_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_debugfs_create();
	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
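/*
 * Illustrative module load with four pre-created devices:
 *
 *	# modprobe zram num_devices=4
 */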

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");