/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"
/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers exceeds
 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;

	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	struct list_head global_list;
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned accessed;
	unsigned hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

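/*
 * Take the client lock, using a different lockdep subclass when called
 * from a device-mapper request routine (current->bio_list is set) so
 * that nested acquisition across stacked targets is not flagged as a
 * false deadlock by lockdep.
 */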
static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num = 0;

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;


#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

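/*
 * Like __find, but if the exact block is not cached, return the buffer
 * with the lowest block number greater than or equal to the requested
 * one (or NULL if no such buffer exists).
 */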
static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

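/*
 * Update the global allocation statistics for one buffer and link it to
 * (or unlink it from) the global LRU queue. If the cache has grown past
 * dm_bufio_cache_size, kick the replacement worker to evict old buffers.
 */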
static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	b->accessed = 1;

	if (!unlink) {
		list_add(&b->global_list, &global_queue);
		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	} else {
		list_del(&b->global_list);
		global_num--;
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
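/*
 * Example (a sketch assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): for a
 * 64 KiB block, sectors_per_block_bits is 7, so the order passed to
 * __get_free_pages() below is 7 - (12 - 9) = 4, i.e. 16 contiguous pages.
 */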
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;

	adjust_total_allocated(b, false);
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);

	adjust_total_allocated(b, true);
}

/*
 * Place the buffer at the head of the dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	b->accessed = 1;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough and it is not vmalloced, try using
 * the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->end_io, pretending that
 * the request was handled directly with the bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
		     unsigned n_sectors, unsigned offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = rw,
		.bi_op_flags = 0,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_put(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
		    unsigned n_sectors, unsigned offset)
{
	struct bio *bio;
	char *ptr;
	unsigned vec_size, len;

	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
	if (!bio) {
dmio:
		use_dmio(b, rw, sector, n_sectors, offset);
		return;
	}

	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, b->c->bdev);
	bio_set_op_attrs(bio, rw, 0);
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);

		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}

static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (rw != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
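		/*
		 * Round the dirty range out to DM_BUFIO_WRITE_ALIGN
		 * boundaries: offset is rounded down and end is rounded
		 * up, so e.g. a dirty range of [100, 700) is written out
		 * as the aligned range [0, 4096).
		 */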
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, rw, sector, n_sectors, offset);
	else
		use_dmio(b, rw, sector, n_sectors, offset);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
				blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

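/*
 * How a buffer is being obtained:
 *	NF_FRESH    - the caller will overwrite the buffer, don't read it
 *	NF_READ     - read the buffer from disk if it is not cached
 *	NF_GET      - only return the buffer if it is already cached
 *	NF_PREFETCH - start an asynchronous read, don't wait for it
 */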
enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Check if we're over the dirty-buffer watermark: if the number of dirty
 * buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the number of clean
 * buffers, start writing them back asynchronously.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if the dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

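/*
 * Get a buffer only if it is already in the cache. Never performs I/O;
 * returns NULL if the block is not cached or is still being read.
 */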
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

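/*
 * Read the block into the cache if necessary and return its data. On a
 * read error the buffer is released and an ERR_PTR is returned. Typical
 * use by a client (a sketch; "c" and "block" are assumed to be a valid
 * client and block number):
 *
 *	struct dm_buffer *b;
 *	void *data = dm_bufio_read(c, block, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... use the block-sized data ...
 *	dm_bufio_release(b);
 */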
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

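/*
 * Like dm_bufio_read, but don't read the block from disk; the caller is
 * expected to overwrite the whole buffer and mark it dirty.
 */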
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

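/*
 * Start reading the given range of blocks in the background. Buffers
 * that are already cached are skipped; the function does not wait for
 * the reads to complete.
 */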
dm_bufio_prefetch(struct dm_bufio_client * c,sector_t block,unsigned n_blocks)1138*4882a593Smuzhiyun void dm_bufio_prefetch(struct dm_bufio_client *c,
1139*4882a593Smuzhiyun 		       sector_t block, unsigned n_blocks)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun 	struct blk_plug plug;
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun 	LIST_HEAD(write_list);
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	BUG_ON(dm_bufio_in_request());
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	blk_start_plug(&plug);
1148*4882a593Smuzhiyun 	dm_bufio_lock(c);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	for (; n_blocks--; block++) {
1151*4882a593Smuzhiyun 		int need_submit;
1152*4882a593Smuzhiyun 		struct dm_buffer *b;
1153*4882a593Smuzhiyun 		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1154*4882a593Smuzhiyun 				&write_list);
1155*4882a593Smuzhiyun 		if (unlikely(!list_empty(&write_list))) {
1156*4882a593Smuzhiyun 			dm_bufio_unlock(c);
1157*4882a593Smuzhiyun 			blk_finish_plug(&plug);
1158*4882a593Smuzhiyun 			__flush_write_list(&write_list);
1159*4882a593Smuzhiyun 			blk_start_plug(&plug);
1160*4882a593Smuzhiyun 			dm_bufio_lock(c);
1161*4882a593Smuzhiyun 		}
1162*4882a593Smuzhiyun 		if (unlikely(b != NULL)) {
1163*4882a593Smuzhiyun 			dm_bufio_unlock(c);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 			if (need_submit)
1166*4882a593Smuzhiyun 				submit_io(b, REQ_OP_READ, read_endio);
1167*4882a593Smuzhiyun 			dm_bufio_release(b);
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 			cond_resched();
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 			if (!n_blocks)
1172*4882a593Smuzhiyun 				goto flush_plug;
1173*4882a593Smuzhiyun 			dm_bufio_lock(c);
1174*4882a593Smuzhiyun 		}
1175*4882a593Smuzhiyun 	}
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun flush_plug:
1180*4882a593Smuzhiyun 	blk_finish_plug(&plug);
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
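
/*
 * Illustrative sketch (an assumption, not in the original file): issue a
 * non-blocking prefetch for a window of blocks, then read them; the
 * subsequent reads typically find the data already cached.
 */
static void __maybe_unused example_prefetch_window(struct dm_bufio_client *c,
						   sector_t first,
						   unsigned n_blocks)
{
	sector_t block;

	dm_bufio_prefetch(c, first, n_blocks);	/* submits reads, does not wait */

	for (block = first; block < first + n_blocks; block++) {
		struct dm_buffer *bp;
		void *data = dm_bufio_read(c, block, &bp);

		if (!IS_ERR(data))
			dm_bufio_release(bp);
	}
}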
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun void dm_bufio_release(struct dm_buffer *b)
1185*4882a593Smuzhiyun {
1186*4882a593Smuzhiyun 	struct dm_bufio_client *c = b->c;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	dm_bufio_lock(c);
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	BUG_ON(!b->hold_count);
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	b->hold_count--;
1193*4882a593Smuzhiyun 	if (!b->hold_count) {
1194*4882a593Smuzhiyun 		wake_up(&c->free_buffer_wait);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 		/*
1197*4882a593Smuzhiyun 		 * If there were errors on the buffer, and the buffer is not
1198*4882a593Smuzhiyun 		 * to be written, free the buffer. There is no point in caching
1199*4882a593Smuzhiyun 		 * an invalid buffer.
1200*4882a593Smuzhiyun 		 */
1201*4882a593Smuzhiyun 		if ((b->read_error || b->write_error) &&
1202*4882a593Smuzhiyun 		    !test_bit(B_READING, &b->state) &&
1203*4882a593Smuzhiyun 		    !test_bit(B_WRITING, &b->state) &&
1204*4882a593Smuzhiyun 		    !test_bit(B_DIRTY, &b->state)) {
1205*4882a593Smuzhiyun 			__unlink_buffer(b);
1206*4882a593Smuzhiyun 			__free_buffer_wake(b);
1207*4882a593Smuzhiyun 		}
1208*4882a593Smuzhiyun 	}
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_release);
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
1215*4882a593Smuzhiyun 					unsigned start, unsigned end)
1216*4882a593Smuzhiyun {
1217*4882a593Smuzhiyun 	struct dm_bufio_client *c = b->c;
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	BUG_ON(start >= end);
1220*4882a593Smuzhiyun 	BUG_ON(end > b->c->block_size);
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	dm_bufio_lock(c);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	BUG_ON(test_bit(B_READING, &b->state));
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	if (!test_and_set_bit(B_DIRTY, &b->state)) {
1227*4882a593Smuzhiyun 		b->dirty_start = start;
1228*4882a593Smuzhiyun 		b->dirty_end = end;
1229*4882a593Smuzhiyun 		__relink_lru(b, LIST_DIRTY);
1230*4882a593Smuzhiyun 	} else {
1231*4882a593Smuzhiyun 		if (start < b->dirty_start)
1232*4882a593Smuzhiyun 			b->dirty_start = start;
1233*4882a593Smuzhiyun 		if (end > b->dirty_end)
1234*4882a593Smuzhiyun 			b->dirty_end = end;
1235*4882a593Smuzhiyun 	}
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1242*4882a593Smuzhiyun {
1243*4882a593Smuzhiyun 	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
1244*4882a593Smuzhiyun }
1245*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
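
/*
 * Illustrative sketch (assumption): modify a byte range inside a cached
 * block and mark only that range dirty, so writeback can submit a
 * smaller, aligned write.  The caller must guarantee len > 0 and
 * offset + len <= block size, matching the BUG_ON checks above.
 */
static int __maybe_unused example_update_bytes(struct dm_bufio_client *c,
					       sector_t block, unsigned offset,
					       unsigned len, const void *src)
{
	struct dm_buffer *bp;
	void *data = dm_bufio_read(c, block, &bp);

	if (IS_ERR(data))
		return PTR_ERR(data);

	memcpy((char *)data + offset, src, len);
	dm_bufio_mark_partial_buffer_dirty(bp, offset, offset + len);
	dm_bufio_release(bp);
	return 0;
}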
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1248*4882a593Smuzhiyun {
1249*4882a593Smuzhiyun 	LIST_HEAD(write_list);
1250*4882a593Smuzhiyun 
1251*4882a593Smuzhiyun 	BUG_ON(dm_bufio_in_request());
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	dm_bufio_lock(c);
1254*4882a593Smuzhiyun 	__write_dirty_buffers_async(c, 0, &write_list);
1255*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1256*4882a593Smuzhiyun 	__flush_write_list(&write_list);
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun /*
1261*4882a593Smuzhiyun  * For performance, it is essential that the buffers are written asynchronously
1262*4882a593Smuzhiyun  * and simultaneously (so that the block layer can merge the writes) and then
1263*4882a593Smuzhiyun  * waited upon.
1264*4882a593Smuzhiyun  *
1265*4882a593Smuzhiyun  * Finally, we flush the hardware disk cache.
1266*4882a593Smuzhiyun  */
1267*4882a593Smuzhiyun int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun 	int a, f;
1270*4882a593Smuzhiyun 	unsigned long buffers_processed = 0;
1271*4882a593Smuzhiyun 	struct dm_buffer *b, *tmp;
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	LIST_HEAD(write_list);
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	dm_bufio_lock(c);
1276*4882a593Smuzhiyun 	__write_dirty_buffers_async(c, 0, &write_list);
1277*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1278*4882a593Smuzhiyun 	__flush_write_list(&write_list);
1279*4882a593Smuzhiyun 	dm_bufio_lock(c);
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun again:
1282*4882a593Smuzhiyun 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1283*4882a593Smuzhiyun 		int dropped_lock = 0;
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 		if (buffers_processed < c->n_buffers[LIST_DIRTY])
1286*4882a593Smuzhiyun 			buffers_processed++;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 		BUG_ON(test_bit(B_READING, &b->state));
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 		if (test_bit(B_WRITING, &b->state)) {
1291*4882a593Smuzhiyun 			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1292*4882a593Smuzhiyun 				dropped_lock = 1;
1293*4882a593Smuzhiyun 				b->hold_count++;
1294*4882a593Smuzhiyun 				dm_bufio_unlock(c);
1295*4882a593Smuzhiyun 				wait_on_bit_io(&b->state, B_WRITING,
1296*4882a593Smuzhiyun 					       TASK_UNINTERRUPTIBLE);
1297*4882a593Smuzhiyun 				dm_bufio_lock(c);
1298*4882a593Smuzhiyun 				b->hold_count--;
1299*4882a593Smuzhiyun 			} else
1300*4882a593Smuzhiyun 				wait_on_bit_io(&b->state, B_WRITING,
1301*4882a593Smuzhiyun 					       TASK_UNINTERRUPTIBLE);
1302*4882a593Smuzhiyun 		}
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 		if (!test_bit(B_DIRTY, &b->state) &&
1305*4882a593Smuzhiyun 		    !test_bit(B_WRITING, &b->state))
1306*4882a593Smuzhiyun 			__relink_lru(b, LIST_CLEAN);
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 		cond_resched();
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 		/*
1311*4882a593Smuzhiyun 		 * If we dropped the lock, the list is no longer consistent,
1312*4882a593Smuzhiyun 		 * so we must restart the search.
1313*4882a593Smuzhiyun 		 *
1314*4882a593Smuzhiyun 		 * In the most common case, the buffer just processed is
1315*4882a593Smuzhiyun 		 * relinked to the clean list, so we won't loop scanning the
1316*4882a593Smuzhiyun 		 * same buffer again and again.
1317*4882a593Smuzhiyun 		 *
1318*4882a593Smuzhiyun 		 * This may livelock if there is another thread simultaneously
1319*4882a593Smuzhiyun 		 * dirtying buffers, so we count the number of buffers walked
1320*4882a593Smuzhiyun 		 * and if it exceeds the total number of buffers, it means that
1321*4882a593Smuzhiyun 		 * someone is doing some writes simultaneously with us.  In
1322*4882a593Smuzhiyun 		 * this case, we stop dropping the lock.
1323*4882a593Smuzhiyun 		 */
1324*4882a593Smuzhiyun 		if (dropped_lock)
1325*4882a593Smuzhiyun 			goto again;
1326*4882a593Smuzhiyun 	}
1327*4882a593Smuzhiyun 	wake_up(&c->free_buffer_wait);
1328*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	a = xchg(&c->async_write_error, 0);
1331*4882a593Smuzhiyun 	f = dm_bufio_issue_flush(c);
1332*4882a593Smuzhiyun 	if (a)
1333*4882a593Smuzhiyun 		return a;
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	return f;
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
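
/*
 * Illustrative sketch (assumption): a minimal "commit" helper.  Starting
 * the writes asynchronously first is optional; dm_bufio_write_dirty_buffers()
 * waits for all writes and already issues the final cache flush, so its
 * return value combines any write error with the flush result.
 */
static int __maybe_unused example_commit(struct dm_bufio_client *c)
{
	dm_bufio_write_dirty_buffers_async(c);
	return dm_bufio_write_dirty_buffers(c);
}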
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun /*
1340*4882a593Smuzhiyun  * Use dm-io to send an empty flush request (REQ_PREFLUSH) to flush the device.
1341*4882a593Smuzhiyun  */
1342*4882a593Smuzhiyun int dm_bufio_issue_flush(struct dm_bufio_client *c)
1343*4882a593Smuzhiyun {
1344*4882a593Smuzhiyun 	struct dm_io_request io_req = {
1345*4882a593Smuzhiyun 		.bi_op = REQ_OP_WRITE,
1346*4882a593Smuzhiyun 		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
1347*4882a593Smuzhiyun 		.mem.type = DM_IO_KMEM,
1348*4882a593Smuzhiyun 		.mem.ptr.addr = NULL,
1349*4882a593Smuzhiyun 		.client = c->dm_io,
1350*4882a593Smuzhiyun 	};
1351*4882a593Smuzhiyun 	struct dm_io_region io_reg = {
1352*4882a593Smuzhiyun 		.bdev = c->bdev,
1353*4882a593Smuzhiyun 		.sector = 0,
1354*4882a593Smuzhiyun 		.count = 0,
1355*4882a593Smuzhiyun 	};
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	BUG_ON(dm_bufio_in_request());
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 	return dm_io(&io_req, 1, &io_reg, NULL);
1360*4882a593Smuzhiyun }
1361*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun /*
1364*4882a593Smuzhiyun  * Use dm-io to send a discard request to the device.
1365*4882a593Smuzhiyun  */
1366*4882a593Smuzhiyun int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
1367*4882a593Smuzhiyun {
1368*4882a593Smuzhiyun 	struct dm_io_request io_req = {
1369*4882a593Smuzhiyun 		.bi_op = REQ_OP_DISCARD,
1370*4882a593Smuzhiyun 		.bi_op_flags = REQ_SYNC,
1371*4882a593Smuzhiyun 		.mem.type = DM_IO_KMEM,
1372*4882a593Smuzhiyun 		.mem.ptr.addr = NULL,
1373*4882a593Smuzhiyun 		.client = c->dm_io,
1374*4882a593Smuzhiyun 	};
1375*4882a593Smuzhiyun 	struct dm_io_region io_reg = {
1376*4882a593Smuzhiyun 		.bdev = c->bdev,
1377*4882a593Smuzhiyun 		.sector = block_to_sector(c, block),
1378*4882a593Smuzhiyun 		.count = block_to_sector(c, count),
1379*4882a593Smuzhiyun 	};
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	BUG_ON(dm_bufio_in_request());
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	return dm_io(&io_req, 1, &io_reg, NULL);
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
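
/*
 * Illustrative sketch (assumption): drop any cached copies of a range of
 * blocks and then discard it on the device.  Note that both arguments
 * are in blocks; dm_bufio_issue_discard() converts them to sectors.
 */
static int __maybe_unused example_discard_range(struct dm_bufio_client *c,
						sector_t block,
						sector_t n_blocks)
{
	dm_bufio_forget_buffers(c, block, n_blocks);
	return dm_bufio_issue_discard(c, block, n_blocks);
}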
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun /*
1388*4882a593Smuzhiyun  * We first delete any other buffer that may be at that new location.
1389*4882a593Smuzhiyun  *
1390*4882a593Smuzhiyun  * Then, we write the buffer to the original location if it was dirty.
1391*4882a593Smuzhiyun  *
1392*4882a593Smuzhiyun  * Then, if we are the only one who is holding the buffer, relink the buffer
1393*4882a593Smuzhiyun  * in the buffer tree for the new location.
1394*4882a593Smuzhiyun  *
1395*4882a593Smuzhiyun  * If someone else was holding the buffer, we write it to the new
1396*4882a593Smuzhiyun  * location but do not relink it, because that other user needs to have the
1397*4882a593Smuzhiyun  * buffer at the original location.
1398*4882a593Smuzhiyun  */
1399*4882a593Smuzhiyun void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1400*4882a593Smuzhiyun {
1401*4882a593Smuzhiyun 	struct dm_bufio_client *c = b->c;
1402*4882a593Smuzhiyun 	struct dm_buffer *new;
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	BUG_ON(dm_bufio_in_request());
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	dm_bufio_lock(c);
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun retry:
1409*4882a593Smuzhiyun 	new = __find(c, new_block);
1410*4882a593Smuzhiyun 	if (new) {
1411*4882a593Smuzhiyun 		if (new->hold_count) {
1412*4882a593Smuzhiyun 			__wait_for_free_buffer(c);
1413*4882a593Smuzhiyun 			goto retry;
1414*4882a593Smuzhiyun 		}
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 		/*
1417*4882a593Smuzhiyun 		 * FIXME: Is there any point waiting for a write that's going
1418*4882a593Smuzhiyun 		 * to be overwritten in a bit?
1419*4882a593Smuzhiyun 		 */
1420*4882a593Smuzhiyun 		__make_buffer_clean(new);
1421*4882a593Smuzhiyun 		__unlink_buffer(new);
1422*4882a593Smuzhiyun 		__free_buffer_wake(new);
1423*4882a593Smuzhiyun 	}
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	BUG_ON(!b->hold_count);
1426*4882a593Smuzhiyun 	BUG_ON(test_bit(B_READING, &b->state));
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	__write_dirty_buffer(b, NULL);
1429*4882a593Smuzhiyun 	if (b->hold_count == 1) {
1430*4882a593Smuzhiyun 		wait_on_bit_io(&b->state, B_WRITING,
1431*4882a593Smuzhiyun 			       TASK_UNINTERRUPTIBLE);
1432*4882a593Smuzhiyun 		set_bit(B_DIRTY, &b->state);
1433*4882a593Smuzhiyun 		b->dirty_start = 0;
1434*4882a593Smuzhiyun 		b->dirty_end = c->block_size;
1435*4882a593Smuzhiyun 		__unlink_buffer(b);
1436*4882a593Smuzhiyun 		__link_buffer(b, new_block, LIST_DIRTY);
1437*4882a593Smuzhiyun 	} else {
1438*4882a593Smuzhiyun 		sector_t old_block;
1439*4882a593Smuzhiyun 		wait_on_bit_lock_io(&b->state, B_WRITING,
1440*4882a593Smuzhiyun 				    TASK_UNINTERRUPTIBLE);
1441*4882a593Smuzhiyun 		/*
1442*4882a593Smuzhiyun 		 * Relink buffer to "new_block" so that write_callback
1443*4882a593Smuzhiyun 		 * sees "new_block" as a block number.
1444*4882a593Smuzhiyun 		 * After the write, link the buffer back to old_block.
1445*4882a593Smuzhiyun 		 * All this must be done under the bufio lock, so that the block
1446*4882a593Smuzhiyun 		 * number change isn't visible to other threads.
1447*4882a593Smuzhiyun 		 */
1448*4882a593Smuzhiyun 		old_block = b->block;
1449*4882a593Smuzhiyun 		__unlink_buffer(b);
1450*4882a593Smuzhiyun 		__link_buffer(b, new_block, b->list_mode);
1451*4882a593Smuzhiyun 		submit_io(b, REQ_OP_WRITE, write_endio);
1452*4882a593Smuzhiyun 		wait_on_bit_io(&b->state, B_WRITING,
1453*4882a593Smuzhiyun 			       TASK_UNINTERRUPTIBLE);
1454*4882a593Smuzhiyun 		__unlink_buffer(b);
1455*4882a593Smuzhiyun 		__link_buffer(b, old_block, b->list_mode);
1456*4882a593Smuzhiyun 	}
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1459*4882a593Smuzhiyun 	dm_bufio_release(b);
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_release_move);
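
/*
 * Illustrative sketch (assumption): relocate a block without copying in
 * the caller.  dm_bufio_release_move() writes the data at new_block and
 * releases the buffer itself, so no dm_bufio_release() follows.
 */
static int __maybe_unused example_move_block(struct dm_bufio_client *c,
					     sector_t old_block,
					     sector_t new_block)
{
	struct dm_buffer *bp;
	void *data = dm_bufio_read(c, old_block, &bp);

	if (IS_ERR(data))
		return PTR_ERR(data);

	dm_bufio_release_move(bp, new_block);
	return 0;
}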
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun static void forget_buffer_locked(struct dm_buffer *b)
1464*4882a593Smuzhiyun {
1465*4882a593Smuzhiyun 	if (likely(!b->hold_count) && likely(!b->state)) {
1466*4882a593Smuzhiyun 		__unlink_buffer(b);
1467*4882a593Smuzhiyun 		__free_buffer_wake(b);
1468*4882a593Smuzhiyun 	}
1469*4882a593Smuzhiyun }
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun /*
1472*4882a593Smuzhiyun  * Free the given buffer.
1473*4882a593Smuzhiyun  *
1474*4882a593Smuzhiyun  * This is just a hint; if the buffer is in use or dirty, this function
1475*4882a593Smuzhiyun  * does nothing.
1476*4882a593Smuzhiyun  */
1477*4882a593Smuzhiyun void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1478*4882a593Smuzhiyun {
1479*4882a593Smuzhiyun 	struct dm_buffer *b;
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	dm_bufio_lock(c);
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 	b = __find(c, block);
1484*4882a593Smuzhiyun 	if (b)
1485*4882a593Smuzhiyun 		forget_buffer_locked(b);
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1488*4882a593Smuzhiyun }
1489*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_forget);
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
1492*4882a593Smuzhiyun {
1493*4882a593Smuzhiyun 	struct dm_buffer *b;
1494*4882a593Smuzhiyun 	sector_t end_block = block + n_blocks;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	while (block < end_block) {
1497*4882a593Smuzhiyun 		dm_bufio_lock(c);
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 		b = __find_next(c, block);
1500*4882a593Smuzhiyun 		if (b) {
1501*4882a593Smuzhiyun 			block = b->block + 1;
1502*4882a593Smuzhiyun 			forget_buffer_locked(b);
1503*4882a593Smuzhiyun 		}
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 		dm_bufio_unlock(c);
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 		if (!b)
1508*4882a593Smuzhiyun 			break;
1509*4882a593Smuzhiyun 	}
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun }
1512*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1515*4882a593Smuzhiyun {
1516*4882a593Smuzhiyun 	c->minimum_buffers = n;
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun 	return c->block_size;
1523*4882a593Smuzhiyun }
1524*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1527*4882a593Smuzhiyun {
1528*4882a593Smuzhiyun 	sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
1529*4882a593Smuzhiyun 	if (s >= c->start)
1530*4882a593Smuzhiyun 		s -= c->start;
1531*4882a593Smuzhiyun 	else
1532*4882a593Smuzhiyun 		s = 0;
1533*4882a593Smuzhiyun 	if (likely(c->sectors_per_block_bits >= 0))
1534*4882a593Smuzhiyun 		s >>= c->sectors_per_block_bits;
1535*4882a593Smuzhiyun 	else
1536*4882a593Smuzhiyun 		sector_div(s, c->block_size >> SECTOR_SHIFT);
1537*4882a593Smuzhiyun 	return s;
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
1542*4882a593Smuzhiyun {
1543*4882a593Smuzhiyun 	return c->dm_io;
1544*4882a593Smuzhiyun }
1545*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1548*4882a593Smuzhiyun {
1549*4882a593Smuzhiyun 	return b->block;
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun void *dm_bufio_get_block_data(struct dm_buffer *b)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun 	return b->data;
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun void *dm_bufio_get_aux_data(struct dm_buffer *b)
1560*4882a593Smuzhiyun {
1561*4882a593Smuzhiyun 	return b + 1;
1562*4882a593Smuzhiyun }
1563*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1566*4882a593Smuzhiyun {
1567*4882a593Smuzhiyun 	return b->c;
1568*4882a593Smuzhiyun }
1569*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun static void drop_buffers(struct dm_bufio_client *c)
1572*4882a593Smuzhiyun {
1573*4882a593Smuzhiyun 	struct dm_buffer *b;
1574*4882a593Smuzhiyun 	int i;
1575*4882a593Smuzhiyun 	bool warned = false;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	BUG_ON(dm_bufio_in_request());
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	/*
1580*4882a593Smuzhiyun 	 * As an optimization, write the dirty buffers out in one batch rather than one-by-one.
1581*4882a593Smuzhiyun 	 */
1582*4882a593Smuzhiyun 	dm_bufio_write_dirty_buffers_async(c);
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	dm_bufio_lock(c);
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	while ((b = __get_unclaimed_buffer(c)))
1587*4882a593Smuzhiyun 		__free_buffer_wake(b);
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	for (i = 0; i < LIST_SIZE; i++)
1590*4882a593Smuzhiyun 		list_for_each_entry(b, &c->lru[i], lru_list) {
1591*4882a593Smuzhiyun 			WARN_ON(!warned);
1592*4882a593Smuzhiyun 			warned = true;
1593*4882a593Smuzhiyun 			DMERR("leaked buffer %llx, hold count %u, list %d",
1594*4882a593Smuzhiyun 			      (unsigned long long)b->block, b->hold_count, i);
1595*4882a593Smuzhiyun #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1596*4882a593Smuzhiyun 			stack_trace_print(b->stack_entries, b->stack_len, 1);
1597*4882a593Smuzhiyun 			/* mark unclaimed to avoid BUG_ON below */
1598*4882a593Smuzhiyun 			b->hold_count = 0;
1599*4882a593Smuzhiyun #endif
1600*4882a593Smuzhiyun 		}
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1603*4882a593Smuzhiyun 	while ((b = __get_unclaimed_buffer(c)))
1604*4882a593Smuzhiyun 		__free_buffer_wake(b);
1605*4882a593Smuzhiyun #endif
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	for (i = 0; i < LIST_SIZE; i++)
1608*4882a593Smuzhiyun 		BUG_ON(!list_empty(&c->lru[i]));
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1611*4882a593Smuzhiyun }
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun /*
1614*4882a593Smuzhiyun  * We may not be able to evict this buffer if I/O is pending or if the client
1615*4882a593Smuzhiyun  * is still using it.  The caller is expected to know the buffer is too old.
1616*4882a593Smuzhiyun  *
1617*4882a593Smuzhiyun  * And if GFP_NOFS is used, we must not do any I/O because we hold
1618*4882a593Smuzhiyun  * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1619*4882a593Smuzhiyun  * rerouted to a different bufio client.
1620*4882a593Smuzhiyun  */
1621*4882a593Smuzhiyun static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun 	if (!(gfp & __GFP_FS)) {
1624*4882a593Smuzhiyun 		if (test_bit(B_READING, &b->state) ||
1625*4882a593Smuzhiyun 		    test_bit(B_WRITING, &b->state) ||
1626*4882a593Smuzhiyun 		    test_bit(B_DIRTY, &b->state))
1627*4882a593Smuzhiyun 			return false;
1628*4882a593Smuzhiyun 	}
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	if (b->hold_count)
1631*4882a593Smuzhiyun 		return false;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	__make_buffer_clean(b);
1634*4882a593Smuzhiyun 	__unlink_buffer(b);
1635*4882a593Smuzhiyun 	__free_buffer_wake(b);
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	return true;
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1641*4882a593Smuzhiyun {
1642*4882a593Smuzhiyun 	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1643*4882a593Smuzhiyun 	if (likely(c->sectors_per_block_bits >= 0))
1644*4882a593Smuzhiyun 		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1645*4882a593Smuzhiyun 	else
1646*4882a593Smuzhiyun 		retain_bytes /= c->block_size;
1647*4882a593Smuzhiyun 	return retain_bytes;
1648*4882a593Smuzhiyun }
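
/*
 * Worked example (illustrative): with the default retain_bytes of 256 KiB
 * and a 4 KiB block size, sectors_per_block_bits is 3, so the retain
 * target is 262144 >> (3 + 9) = 64 buffers per client.
 */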
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun static void __scan(struct dm_bufio_client *c)
1651*4882a593Smuzhiyun {
1652*4882a593Smuzhiyun 	int l;
1653*4882a593Smuzhiyun 	struct dm_buffer *b, *tmp;
1654*4882a593Smuzhiyun 	unsigned long freed = 0;
1655*4882a593Smuzhiyun 	unsigned long count = c->n_buffers[LIST_CLEAN] +
1656*4882a593Smuzhiyun 			      c->n_buffers[LIST_DIRTY];
1657*4882a593Smuzhiyun 	unsigned long retain_target = get_retain_buffers(c);
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	for (l = 0; l < LIST_SIZE; l++) {
1660*4882a593Smuzhiyun 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1661*4882a593Smuzhiyun 			if (count - freed <= retain_target)
1662*4882a593Smuzhiyun 				atomic_long_set(&c->need_shrink, 0);
1663*4882a593Smuzhiyun 			if (!atomic_long_read(&c->need_shrink))
1664*4882a593Smuzhiyun 				return;
1665*4882a593Smuzhiyun 			if (__try_evict_buffer(b, GFP_KERNEL)) {
1666*4882a593Smuzhiyun 				atomic_long_dec(&c->need_shrink);
1667*4882a593Smuzhiyun 				freed++;
1668*4882a593Smuzhiyun 			}
1669*4882a593Smuzhiyun 			cond_resched();
1670*4882a593Smuzhiyun 		}
1671*4882a593Smuzhiyun 	}
1672*4882a593Smuzhiyun }
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun static void shrink_work(struct work_struct *w)
1675*4882a593Smuzhiyun {
1676*4882a593Smuzhiyun 	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	dm_bufio_lock(c);
1679*4882a593Smuzhiyun 	__scan(c);
1680*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1684*4882a593Smuzhiyun {
1685*4882a593Smuzhiyun 	struct dm_bufio_client *c;
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	c = container_of(shrink, struct dm_bufio_client, shrinker);
1688*4882a593Smuzhiyun 	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
1689*4882a593Smuzhiyun 	queue_work(dm_bufio_wq, &c->shrink_work);
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	return sc->nr_to_scan;
1692*4882a593Smuzhiyun }
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1695*4882a593Smuzhiyun {
1696*4882a593Smuzhiyun 	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1697*4882a593Smuzhiyun 	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1698*4882a593Smuzhiyun 			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
1699*4882a593Smuzhiyun 	unsigned long retain_target = get_retain_buffers(c);
1700*4882a593Smuzhiyun 	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	if (unlikely(count < retain_target))
1703*4882a593Smuzhiyun 		count = 0;
1704*4882a593Smuzhiyun 	else
1705*4882a593Smuzhiyun 		count -= retain_target;
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	if (unlikely(count < queued_for_cleanup))
1708*4882a593Smuzhiyun 		count = 0;
1709*4882a593Smuzhiyun 	else
1710*4882a593Smuzhiyun 		count -= queued_for_cleanup;
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	return count;
1713*4882a593Smuzhiyun }
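
/*
 * Worked example (illustrative): with 1000 cached buffers, a retain
 * target of 64 and 200 evictions already queued by previous scan calls,
 * the shrinker reports 1000 - 64 - 200 = 736 reclaimable objects.
 */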
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun /*
1716*4882a593Smuzhiyun  * Create the buffering interface
1717*4882a593Smuzhiyun  */
1718*4882a593Smuzhiyun struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1719*4882a593Smuzhiyun 					       unsigned reserved_buffers, unsigned aux_size,
1720*4882a593Smuzhiyun 					       void (*alloc_callback)(struct dm_buffer *),
1721*4882a593Smuzhiyun 					       void (*write_callback)(struct dm_buffer *))
1722*4882a593Smuzhiyun {
1723*4882a593Smuzhiyun 	int r;
1724*4882a593Smuzhiyun 	struct dm_bufio_client *c;
1725*4882a593Smuzhiyun 	unsigned i;
1726*4882a593Smuzhiyun 	char slab_name[27];
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
1729*4882a593Smuzhiyun 		DMERR("%s: block size not specified or is not a multiple of 512 bytes", __func__);
1730*4882a593Smuzhiyun 		r = -EINVAL;
1731*4882a593Smuzhiyun 		goto bad_client;
1732*4882a593Smuzhiyun 	}
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	c = kzalloc(sizeof(*c), GFP_KERNEL);
1735*4882a593Smuzhiyun 	if (!c) {
1736*4882a593Smuzhiyun 		r = -ENOMEM;
1737*4882a593Smuzhiyun 		goto bad_client;
1738*4882a593Smuzhiyun 	}
1739*4882a593Smuzhiyun 	c->buffer_tree = RB_ROOT;
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	c->bdev = bdev;
1742*4882a593Smuzhiyun 	c->block_size = block_size;
1743*4882a593Smuzhiyun 	if (is_power_of_2(block_size))
1744*4882a593Smuzhiyun 		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1745*4882a593Smuzhiyun 	else
1746*4882a593Smuzhiyun 		c->sectors_per_block_bits = -1;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	c->alloc_callback = alloc_callback;
1749*4882a593Smuzhiyun 	c->write_callback = write_callback;
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	for (i = 0; i < LIST_SIZE; i++) {
1752*4882a593Smuzhiyun 		INIT_LIST_HEAD(&c->lru[i]);
1753*4882a593Smuzhiyun 		c->n_buffers[i] = 0;
1754*4882a593Smuzhiyun 	}
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	mutex_init(&c->lock);
1757*4882a593Smuzhiyun 	INIT_LIST_HEAD(&c->reserved_buffers);
1758*4882a593Smuzhiyun 	c->need_reserved_buffers = reserved_buffers;
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	init_waitqueue_head(&c->free_buffer_wait);
1763*4882a593Smuzhiyun 	c->async_write_error = 0;
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	c->dm_io = dm_io_client_create();
1766*4882a593Smuzhiyun 	if (IS_ERR(c->dm_io)) {
1767*4882a593Smuzhiyun 		r = PTR_ERR(c->dm_io);
1768*4882a593Smuzhiyun 		goto bad_dm_io;
1769*4882a593Smuzhiyun 	}
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	if (block_size <= KMALLOC_MAX_SIZE &&
1772*4882a593Smuzhiyun 	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
1773*4882a593Smuzhiyun 		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
1774*4882a593Smuzhiyun 		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
1775*4882a593Smuzhiyun 		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
1776*4882a593Smuzhiyun 						  SLAB_RECLAIM_ACCOUNT, NULL);
1777*4882a593Smuzhiyun 		if (!c->slab_cache) {
1778*4882a593Smuzhiyun 			r = -ENOMEM;
1779*4882a593Smuzhiyun 			goto bad;
1780*4882a593Smuzhiyun 		}
1781*4882a593Smuzhiyun 	}
1782*4882a593Smuzhiyun 	if (aux_size)
1783*4882a593Smuzhiyun 		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
1784*4882a593Smuzhiyun 	else
1785*4882a593Smuzhiyun 		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
1786*4882a593Smuzhiyun 	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1787*4882a593Smuzhiyun 					   0, SLAB_RECLAIM_ACCOUNT, NULL);
1788*4882a593Smuzhiyun 	if (!c->slab_buffer) {
1789*4882a593Smuzhiyun 		r = -ENOMEM;
1790*4882a593Smuzhiyun 		goto bad;
1791*4882a593Smuzhiyun 	}
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 	while (c->need_reserved_buffers) {
1794*4882a593Smuzhiyun 		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 		if (!b) {
1797*4882a593Smuzhiyun 			r = -ENOMEM;
1798*4882a593Smuzhiyun 			goto bad;
1799*4882a593Smuzhiyun 		}
1800*4882a593Smuzhiyun 		__free_buffer_wake(b);
1801*4882a593Smuzhiyun 	}
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	INIT_WORK(&c->shrink_work, shrink_work);
1804*4882a593Smuzhiyun 	atomic_long_set(&c->need_shrink, 0);
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	c->shrinker.count_objects = dm_bufio_shrink_count;
1807*4882a593Smuzhiyun 	c->shrinker.scan_objects = dm_bufio_shrink_scan;
1808*4882a593Smuzhiyun 	c->shrinker.seeks = 1;
1809*4882a593Smuzhiyun 	c->shrinker.batch = 0;
1810*4882a593Smuzhiyun 	r = register_shrinker(&c->shrinker);
1811*4882a593Smuzhiyun 	if (r)
1812*4882a593Smuzhiyun 		goto bad;
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	mutex_lock(&dm_bufio_clients_lock);
1815*4882a593Smuzhiyun 	dm_bufio_client_count++;
1816*4882a593Smuzhiyun 	list_add(&c->client_list, &dm_bufio_all_clients);
1817*4882a593Smuzhiyun 	__cache_size_refresh();
1818*4882a593Smuzhiyun 	mutex_unlock(&dm_bufio_clients_lock);
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	return c;
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun bad:
1823*4882a593Smuzhiyun 	while (!list_empty(&c->reserved_buffers)) {
1824*4882a593Smuzhiyun 		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1825*4882a593Smuzhiyun 						 struct dm_buffer, lru_list);
1826*4882a593Smuzhiyun 		list_del(&b->lru_list);
1827*4882a593Smuzhiyun 		free_buffer(b);
1828*4882a593Smuzhiyun 	}
1829*4882a593Smuzhiyun 	kmem_cache_destroy(c->slab_cache);
1830*4882a593Smuzhiyun 	kmem_cache_destroy(c->slab_buffer);
1831*4882a593Smuzhiyun 	dm_io_client_destroy(c->dm_io);
1832*4882a593Smuzhiyun bad_dm_io:
1833*4882a593Smuzhiyun 	mutex_destroy(&c->lock);
1834*4882a593Smuzhiyun 	kfree(c);
1835*4882a593Smuzhiyun bad_client:
1836*4882a593Smuzhiyun 	return ERR_PTR(r);
1837*4882a593Smuzhiyun }
1838*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_client_create);
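
/*
 * Illustrative sketch (assumption): minimal client lifecycle for 4 KiB
 * blocks, one reserved buffer, no per-buffer aux data and no callbacks.
 * All buffers must be released before dm_bufio_client_destroy().
 */
static int __maybe_unused example_client_lifecycle(struct block_device *bdev)
{
	struct dm_bufio_client *c;

	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* ... dm_bufio_read()/dm_bufio_release() pairs go here ... */

	dm_bufio_client_destroy(c);
	return 0;
}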
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun /*
1841*4882a593Smuzhiyun  * Free the buffering interface.
1842*4882a593Smuzhiyun  * It is required that there are no references to any buffers.
1843*4882a593Smuzhiyun  */
1844*4882a593Smuzhiyun void dm_bufio_client_destroy(struct dm_bufio_client *c)
1845*4882a593Smuzhiyun {
1846*4882a593Smuzhiyun 	unsigned i;
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 	drop_buffers(c);
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	unregister_shrinker(&c->shrinker);
1851*4882a593Smuzhiyun 	flush_work(&c->shrink_work);
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	mutex_lock(&dm_bufio_clients_lock);
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 	list_del(&c->client_list);
1856*4882a593Smuzhiyun 	dm_bufio_client_count--;
1857*4882a593Smuzhiyun 	__cache_size_refresh();
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 	mutex_unlock(&dm_bufio_clients_lock);
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1862*4882a593Smuzhiyun 	BUG_ON(c->need_reserved_buffers);
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	while (!list_empty(&c->reserved_buffers)) {
1865*4882a593Smuzhiyun 		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1866*4882a593Smuzhiyun 						 struct dm_buffer, lru_list);
1867*4882a593Smuzhiyun 		list_del(&b->lru_list);
1868*4882a593Smuzhiyun 		free_buffer(b);
1869*4882a593Smuzhiyun 	}
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	for (i = 0; i < LIST_SIZE; i++)
1872*4882a593Smuzhiyun 		if (c->n_buffers[i])
1873*4882a593Smuzhiyun 			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 	for (i = 0; i < LIST_SIZE; i++)
1876*4882a593Smuzhiyun 		BUG_ON(c->n_buffers[i]);
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	kmem_cache_destroy(c->slab_cache);
1879*4882a593Smuzhiyun 	kmem_cache_destroy(c->slab_buffer);
1880*4882a593Smuzhiyun 	dm_io_client_destroy(c->dm_io);
1881*4882a593Smuzhiyun 	mutex_destroy(&c->lock);
1882*4882a593Smuzhiyun 	kfree(c);
1883*4882a593Smuzhiyun }
1884*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1887*4882a593Smuzhiyun {
1888*4882a593Smuzhiyun 	c->start = start;
1889*4882a593Smuzhiyun }
1890*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun static unsigned get_max_age_hz(void)
1893*4882a593Smuzhiyun {
1894*4882a593Smuzhiyun 	unsigned max_age = READ_ONCE(dm_bufio_max_age);
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 	if (max_age > UINT_MAX / HZ)
1897*4882a593Smuzhiyun 		max_age = UINT_MAX / HZ;
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	return max_age * HZ;
1900*4882a593Smuzhiyun }
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1903*4882a593Smuzhiyun {
1904*4882a593Smuzhiyun 	return time_after_eq(jiffies, b->last_accessed + age_hz);
1905*4882a593Smuzhiyun }
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1908*4882a593Smuzhiyun {
1909*4882a593Smuzhiyun 	struct dm_buffer *b, *tmp;
1910*4882a593Smuzhiyun 	unsigned long retain_target = get_retain_buffers(c);
1911*4882a593Smuzhiyun 	unsigned long count;
1912*4882a593Smuzhiyun 	LIST_HEAD(write_list);
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	dm_bufio_lock(c);
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	__check_watermark(c, &write_list);
1917*4882a593Smuzhiyun 	if (unlikely(!list_empty(&write_list))) {
1918*4882a593Smuzhiyun 		dm_bufio_unlock(c);
1919*4882a593Smuzhiyun 		__flush_write_list(&write_list);
1920*4882a593Smuzhiyun 		dm_bufio_lock(c);
1921*4882a593Smuzhiyun 	}
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1924*4882a593Smuzhiyun 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1925*4882a593Smuzhiyun 		if (count <= retain_target)
1926*4882a593Smuzhiyun 			break;
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 		if (!older_than(b, age_hz))
1929*4882a593Smuzhiyun 			break;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 		if (__try_evict_buffer(b, 0))
1932*4882a593Smuzhiyun 			count--;
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 		cond_resched();
1935*4882a593Smuzhiyun 	}
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	dm_bufio_unlock(c);
1938*4882a593Smuzhiyun }
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun static void do_global_cleanup(struct work_struct *w)
1941*4882a593Smuzhiyun {
1942*4882a593Smuzhiyun 	struct dm_bufio_client *locked_client = NULL;
1943*4882a593Smuzhiyun 	struct dm_bufio_client *current_client;
1944*4882a593Smuzhiyun 	struct dm_buffer *b;
1945*4882a593Smuzhiyun 	unsigned spinlock_hold_count;
1946*4882a593Smuzhiyun 	unsigned long threshold = dm_bufio_cache_size -
1947*4882a593Smuzhiyun 		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1948*4882a593Smuzhiyun 	unsigned long loops = global_num * 2;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	mutex_lock(&dm_bufio_clients_lock);
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	while (1) {
1953*4882a593Smuzhiyun 		cond_resched();
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun 		spin_lock(&global_spinlock);
1956*4882a593Smuzhiyun 		if (unlikely(dm_bufio_current_allocated <= threshold))
1957*4882a593Smuzhiyun 			break;
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun 		spinlock_hold_count = 0;
1960*4882a593Smuzhiyun get_next:
1961*4882a593Smuzhiyun 		if (!loops--)
1962*4882a593Smuzhiyun 			break;
1963*4882a593Smuzhiyun 		if (unlikely(list_empty(&global_queue)))
1964*4882a593Smuzhiyun 			break;
1965*4882a593Smuzhiyun 		b = list_entry(global_queue.prev, struct dm_buffer, global_list);
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 		if (b->accessed) {
1968*4882a593Smuzhiyun 			b->accessed = 0;
1969*4882a593Smuzhiyun 			list_move(&b->global_list, &global_queue);
1970*4882a593Smuzhiyun 			if (likely(++spinlock_hold_count < 16))
1971*4882a593Smuzhiyun 				goto get_next;
1972*4882a593Smuzhiyun 			spin_unlock(&global_spinlock);
1973*4882a593Smuzhiyun 			continue;
1974*4882a593Smuzhiyun 		}
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 		current_client = b->c;
1977*4882a593Smuzhiyun 		if (unlikely(current_client != locked_client)) {
1978*4882a593Smuzhiyun 			if (locked_client)
1979*4882a593Smuzhiyun 				dm_bufio_unlock(locked_client);
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun 			if (!dm_bufio_trylock(current_client)) {
1982*4882a593Smuzhiyun 				spin_unlock(&global_spinlock);
1983*4882a593Smuzhiyun 				dm_bufio_lock(current_client);
1984*4882a593Smuzhiyun 				locked_client = current_client;
1985*4882a593Smuzhiyun 				continue;
1986*4882a593Smuzhiyun 			}
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 			locked_client = current_client;
1989*4882a593Smuzhiyun 		}
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 		spin_unlock(&global_spinlock);
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 		if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
1994*4882a593Smuzhiyun 			spin_lock(&global_spinlock);
1995*4882a593Smuzhiyun 			list_move(&b->global_list, &global_queue);
1996*4882a593Smuzhiyun 			spin_unlock(&global_spinlock);
1997*4882a593Smuzhiyun 		}
1998*4882a593Smuzhiyun 	}
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	spin_unlock(&global_spinlock);
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 	if (locked_client)
2003*4882a593Smuzhiyun 		dm_bufio_unlock(locked_client);
2004*4882a593Smuzhiyun 
2005*4882a593Smuzhiyun 	mutex_unlock(&dm_bufio_clients_lock);
2006*4882a593Smuzhiyun }
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun static void cleanup_old_buffers(void)
2009*4882a593Smuzhiyun {
2010*4882a593Smuzhiyun 	unsigned long max_age_hz = get_max_age_hz();
2011*4882a593Smuzhiyun 	struct dm_bufio_client *c;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	mutex_lock(&dm_bufio_clients_lock);
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 	__cache_size_refresh();
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2018*4882a593Smuzhiyun 		__evict_old_buffers(c, max_age_hz);
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun 	mutex_unlock(&dm_bufio_clients_lock);
2021*4882a593Smuzhiyun }
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun static void work_fn(struct work_struct *w)
2024*4882a593Smuzhiyun {
2025*4882a593Smuzhiyun 	cleanup_old_buffers();
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2028*4882a593Smuzhiyun 			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2029*4882a593Smuzhiyun }
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun /*----------------------------------------------------------------
2032*4882a593Smuzhiyun  * Module setup
2033*4882a593Smuzhiyun  *--------------------------------------------------------------*/
2034*4882a593Smuzhiyun 
2035*4882a593Smuzhiyun /*
2036*4882a593Smuzhiyun  * This is called only once for the whole dm_bufio module.
2037*4882a593Smuzhiyun  * It initializes the memory limit.
2038*4882a593Smuzhiyun  */
2039*4882a593Smuzhiyun static int __init dm_bufio_init(void)
2040*4882a593Smuzhiyun {
2041*4882a593Smuzhiyun 	__u64 mem;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	dm_bufio_allocated_kmem_cache = 0;
2044*4882a593Smuzhiyun 	dm_bufio_allocated_get_free_pages = 0;
2045*4882a593Smuzhiyun 	dm_bufio_allocated_vmalloc = 0;
2046*4882a593Smuzhiyun 	dm_bufio_current_allocated = 0;
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2049*4882a593Smuzhiyun 			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 	if (mem > ULONG_MAX)
2052*4882a593Smuzhiyun 		mem = ULONG_MAX;
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun #ifdef CONFIG_MMU
2055*4882a593Smuzhiyun 	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2056*4882a593Smuzhiyun 		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2057*4882a593Smuzhiyun #endif
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 	dm_bufio_default_cache_size = mem;
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	mutex_lock(&dm_bufio_clients_lock);
2062*4882a593Smuzhiyun 	__cache_size_refresh();
2063*4882a593Smuzhiyun 	mutex_unlock(&dm_bufio_clients_lock);
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2066*4882a593Smuzhiyun 	if (!dm_bufio_wq)
2067*4882a593Smuzhiyun 		return -ENOMEM;
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2070*4882a593Smuzhiyun 	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2071*4882a593Smuzhiyun 	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2072*4882a593Smuzhiyun 			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun 	return 0;
2075*4882a593Smuzhiyun }
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun /*
2078*4882a593Smuzhiyun  * This is called once when unloading the dm_bufio module.
2079*4882a593Smuzhiyun  */
2080*4882a593Smuzhiyun static void __exit dm_bufio_exit(void)
2081*4882a593Smuzhiyun {
2082*4882a593Smuzhiyun 	int bug = 0;
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2085*4882a593Smuzhiyun 	flush_workqueue(dm_bufio_wq);
2086*4882a593Smuzhiyun 	destroy_workqueue(dm_bufio_wq);
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun 	if (dm_bufio_client_count) {
2089*4882a593Smuzhiyun 		DMCRIT("%s: dm_bufio_client_count leaked: %d",
2090*4882a593Smuzhiyun 			__func__, dm_bufio_client_count);
2091*4882a593Smuzhiyun 		bug = 1;
2092*4882a593Smuzhiyun 	}
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	if (dm_bufio_current_allocated) {
2095*4882a593Smuzhiyun 		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2096*4882a593Smuzhiyun 			__func__, dm_bufio_current_allocated);
2097*4882a593Smuzhiyun 		bug = 1;
2098*4882a593Smuzhiyun 	}
2099*4882a593Smuzhiyun 
2100*4882a593Smuzhiyun 	if (dm_bufio_allocated_get_free_pages) {
2101*4882a593Smuzhiyun 		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2102*4882a593Smuzhiyun 		       __func__, dm_bufio_allocated_get_free_pages);
2103*4882a593Smuzhiyun 		bug = 1;
2104*4882a593Smuzhiyun 	}
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	if (dm_bufio_allocated_vmalloc) {
2107*4882a593Smuzhiyun 		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
2108*4882a593Smuzhiyun 		       __func__, dm_bufio_allocated_vmalloc);
2109*4882a593Smuzhiyun 		bug = 1;
2110*4882a593Smuzhiyun 	}
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 	BUG_ON(bug);
2113*4882a593Smuzhiyun }
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun module_init(dm_bufio_init)
2116*4882a593Smuzhiyun module_exit(dm_bufio_exit)
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
2119*4882a593Smuzhiyun MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
2122*4882a593Smuzhiyun MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
2125*4882a593Smuzhiyun MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
2128*4882a593Smuzhiyun MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
2131*4882a593Smuzhiyun MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
2134*4882a593Smuzhiyun MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
2137*4882a593Smuzhiyun MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
2140*4882a593Smuzhiyun MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2143*4882a593Smuzhiyun MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2144*4882a593Smuzhiyun MODULE_LICENSE("GPL");
2145