xref: /OK3568_Linux_fs/kernel/drivers/md/dm-kcopyd.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
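
/*
 * Example usage (an illustrative sketch, not code from this file): create
 * a client, submit one copy and wait for its completion callback.  The
 * source/destination block devices and the "copy_done" helper are
 * hypothetical, and error handling (IS_ERR() on the client) is elided.
 *
 *	static void copy_done(int read_err, unsigned long write_err,
 *			      void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	struct dm_kcopyd_client *kc = dm_kcopyd_client_create(NULL);
 *	struct dm_io_region from = { .bdev = src_bdev, .sector = 0,
 *				     .count = 1024 };
 *	struct dm_io_region to = { .bdev = dst_bdev, .sector = 0,
 *				   .count = 1024 };
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, &done);
 *	wait_for_completion(&done);
 *	dm_kcopyd_client_destroy(kc);
 */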

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm-core.h"

#define SPLIT_COUNT	8
#define MIN_JOBS	8

#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB     1024

static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;

module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
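
/*
 * The S_IRUGO | S_IWUSR permissions make this tunable at runtime via
 * /sys/module/<module>/parameters/kcopyd_subjob_size_kb; the exact
 * module directory (typically dm_mod when kcopyd is built into
 * device-mapper) depends on the kernel configuration.  Reads of the
 * parameter are clamped to MAX_SUB_JOB_SIZE_KB by
 * dm_get_kcopyd_subjob_size() below.
 */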

static bool rsm_enabled;
static phys_addr_t rsm_mem_base, rsm_mem_size;

#ifndef MODULE
static DEFINE_SPINLOCK(rsm_lock);
static int *rsm_mem;
static int rsm_page_cnt;
static int rsm_tbl_idx;
static struct reserved_mem *rmem;

static void __init kcopyd_rsm_init(void)
{
	static struct device_node *rsm_node;
	int ret = 0;

	if (!rsm_enabled)
		return;

	rsm_node = of_find_compatible_node(NULL, NULL, "mediatek,dm_ota");
	if (!rsm_node) {
		ret = -ENODEV;
		goto out;
	}

	rmem = of_reserved_mem_lookup(rsm_node);
	if (!rmem) {
		ret = -EINVAL;
		goto out_put_node;
	}

	rsm_mem_base = rmem->base;
	rsm_mem_size = rmem->size;
	rsm_page_cnt = rsm_mem_size / PAGE_SIZE;
	rsm_mem = kcalloc(rsm_page_cnt, sizeof(int), GFP_KERNEL);
	if (!rsm_mem)
		ret = -ENOMEM;

out_put_node:
	of_node_put(rsm_node);
out:
	if (ret)
		pr_warn("kcopyd: failed to init rsm: %d\n", ret);
}

static int __init kcopyd_rsm_enable(char *str)
{
	rsm_enabled = true;

	return 0;
}
early_param("mtk_kcopyd_quirk", kcopyd_rsm_enable);

static void kcopyd_rsm_get_page(struct page **p)
{
	int i;
	unsigned long flags;

	*p = NULL;
	spin_lock_irqsave(&rsm_lock, flags);
	for (i = 0; i < rsm_page_cnt; i++) {
		rsm_tbl_idx = (rsm_tbl_idx + 1 == rsm_page_cnt) ? 0 : rsm_tbl_idx + 1;

		if (rsm_mem[rsm_tbl_idx] == 0) {
			rsm_mem[rsm_tbl_idx] = 1;
			*p = virt_to_page(phys_to_virt(rsm_mem_base + PAGE_SIZE
						       * rsm_tbl_idx));
			break;
		}
	}
	spin_unlock_irqrestore(&rsm_lock, flags);
}

static void kcopyd_rsm_drop_page(struct page **p)
{
	u64 off;
	unsigned long flags;

	if (*p) {
		off = page_to_phys(*p) - rsm_mem_base;
		spin_lock_irqsave(&rsm_lock, flags);
		rsm_mem[off >> PAGE_SHIFT] = 0;
		spin_unlock_irqrestore(&rsm_lock, flags);
		*p = NULL;
	}
}

static void kcopyd_rsm_destroy(void)
{
	if (rsm_enabled)
		kfree(rsm_mem);
}

#else
#define kcopyd_rsm_destroy(...)
#define kcopyd_rsm_drop_page(...)
#define kcopyd_rsm_get_page(...)
#define kcopyd_rsm_init(...)
#endif

static unsigned dm_get_kcopyd_subjob_size(void)
{
	unsigned sub_job_size_kb;

	sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
						DEFAULT_SUB_JOB_SIZE_KB,
						MAX_SUB_JOB_SIZE_KB);

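	/*
	 * kcopyd works in 512-byte sectors; shifting left by one converts
	 * KiB to sectors (the default 512 KiB becomes 1024 sectors).
	 */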
	return sub_job_size_kb << 1;
}

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	struct page_list *pages;
	unsigned nr_reserved_pages;
	unsigned nr_free_pages;
	unsigned sub_job_size;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;

	mempool_t job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	struct dm_kcopyd_throttle *throttle;

	atomic_t nr_jobs;

/*
 * We maintain four lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that don't need to do any IO and just run a callback
 * iv)  jobs that have completed.
 *
 * All four of these are protected by job_lock.
 */
	spinlock_t job_lock;
	struct list_head callback_jobs;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};

static struct page_list zero_page_list;

static DEFINE_SPINLOCK(throttle_spinlock);

/*
 * IO/IDLE accounting slowly decays after (1 << ACCOUNT_INTERVAL_SHIFT) period.
 * When total_period >= (1 << ACCOUNT_INTERVAL_SHIFT) the counters are divided
 * by 2.
 */
#define ACCOUNT_INTERVAL_SHIFT		SHIFT_HZ

/*
 * Sleep this number of milliseconds.
 *
 * The value was decided experimentally.
 * Smaller values seem to cause an increased copy rate above the limit.
 * The reason for this is unknown but possibly due to jiffies rounding errors
 * or read/write cache inside the disk.
 */
#define SLEEP_MSEC			100

/*
 * Maximum number of sleep events.  There is a theoretical livelock if
 * multiple kcopyd clients do work simultaneously; this limit avoids it.
 */
#define MAX_SLEEPS			10
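
/*
 * Worked example of the throttle accounting in io_job_start() below:
 * with t->throttle == 20, a sleep is triggered whenever io_period
 * exceeds 20% of total_period (skew > 0), in SLEEP_MSEC steps up to
 * MAX_SLEEPS times.  Both periods are counted in jiffies and are scaled
 * down once total_period reaches 1 << ACCOUNT_INTERVAL_SHIFT (roughly
 * one second, since SHIFT_HZ approximates log2(HZ)), so the ratio
 * reflects recent activity rather than the whole history.
 */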

static void io_job_start(struct dm_kcopyd_throttle *t)
{
	unsigned throttle, now, difference;
	int slept = 0, skew;

	if (unlikely(!t))
		return;

try_again:
	spin_lock_irq(&throttle_spinlock);

	throttle = READ_ONCE(t->throttle);

	if (likely(throttle >= 100))
		goto skip_limit;

	now = jiffies;
	difference = now - t->last_jiffies;
	t->last_jiffies = now;
	if (t->num_io_jobs)
		t->io_period += difference;
	t->total_period += difference;

	/*
	 * Maintain sane values if we got a temporary overflow.
	 */
	if (unlikely(t->io_period > t->total_period))
		t->io_period = t->total_period;

	if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
		int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
		t->total_period >>= shift;
		t->io_period >>= shift;
	}

	skew = t->io_period - throttle * t->total_period / 100;

	if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
		slept++;
		spin_unlock_irq(&throttle_spinlock);
		msleep(SLEEP_MSEC);
		goto try_again;
	}

skip_limit:
	t->num_io_jobs++;

	spin_unlock_irq(&throttle_spinlock);
}

static void io_job_finish(struct dm_kcopyd_throttle *t)
{
	unsigned long flags;

	if (unlikely(!t))
		return;

	spin_lock_irqsave(&throttle_spinlock, flags);

	t->num_io_jobs--;

	if (likely(READ_ONCE(t->throttle) >= 100))
		goto skip_limit;

	if (!t->num_io_jobs) {
		unsigned now, difference;

		now = jiffies;
		difference = now - t->last_jiffies;
		t->last_jiffies = now;

		t->io_period += difference;
		t->total_period += difference;

		/*
		 * Maintain sane values if we got a temporary overflow.
		 */
		if (unlikely(t->io_period > t->total_period))
			t->io_period = t->total_period;
	}

skip_limit:
	spin_unlock_irqrestore(&throttle_spinlock, flags);
}


static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

/*
 * Obtain one page for the use of kcopyd.
 */
static struct page_list *alloc_pl(gfp_t gfp, unsigned long job_flags)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), gfp);
	if (!pl)
		return NULL;

	if (rsm_enabled && test_bit(DM_KCOPYD_SNAP_MERGE, &job_flags))
		kcopyd_rsm_get_page(&pl->page);
	else
		pl->page = alloc_page(gfp);

	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	struct page *p = pl->page;
	phys_addr_t pa = page_to_phys(p);

	if (rsm_enabled && pa >= rsm_mem_base && pa < rsm_mem_base + rsm_mem_size)
		kcopyd_rsm_drop_page(&pl->page);
	else
		__free_page(pl->page);

	kfree(pl);
}

/*
 * Add the provided pages to a client's free page list, releasing
 * back to the system any beyond the reserved_pages limit.
 */
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *next;

	do {
		next = pl->next;

		if (kc->nr_free_pages >= kc->nr_reserved_pages)
			free_pl(pl);
		else {
			pl->next = kc->pages;
			kc->pages = pl;
			kc->nr_free_pages++;
		}

		pl = next;
	} while (pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages,
			    unsigned long job_flags)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM, job_flags);
		if (unlikely(!pl)) {
			/* Use reserved pages */
			pl = kc->pages;
			if (unlikely(!pl))
				goto out_of_memory;
			kc->pages = pl->next;
			kc->nr_free_pages--;
		}
		pl->next = *pages;
		*pages = pl;
	} while (--nr);

	return 0;

out_of_memory:
	if (*pages)
		kcopyd_put_pages(kc, *pages);
	return -ENOMEM;
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

/*
 * Allocate and reserve nr_pages for the use of a specific client.
 */
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
	unsigned i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL, 0);
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kc->nr_reserved_pages += nr_pages;
	kcopyd_put_pages(kc, pl);

	return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_reserved_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd;
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
	sector_t write_offset;

	struct kcopyd_job *master_job;
};

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
	_job_cache = kmem_cache_create("kcopyd_job",
				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
				__alignof__(struct kcopyd_job), 0, NULL);
	if (!_job_cache)
		return -ENOMEM;

	zero_page_list.next = &zero_page_list;
	zero_page_list.page = ZERO_PAGE(0);

	kcopyd_rsm_init();

	return 0;
}

void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
	kcopyd_rsm_destroy();
}

/*
 * Functions to push a job onto the head or tail of a given job list,
 * and to pop the next runnable job off the head.
 */
static struct kcopyd_job *pop_io_job(struct list_head *jobs,
				     struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job;

	/*
	 * For I/O jobs, pop any read, any write without sequential write
	 * constraint and sequential writes that are at the right position.
	 */
	list_for_each_entry(job, jobs, list) {
		if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
			list_del(&job->list);
			return job;
		}

		if (job->write_offset == job->master_job->write_offset) {
			job->master_job->write_offset += job->source.count;
			list_del(&job->list);
			return job;
		}
	}

	return NULL;
}

static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		if (jobs == &kc->io_jobs)
			job = pop_io_job(jobs, kc);
		else {
			job = list_entry(jobs->next, struct kcopyd_job, list);
			list_del(&job->list);
		}
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}


static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages && job->pages != &zero_page_list)
		kcopyd_put_pages(kc, job->pages);
	/*
	 * If this is the master job, the sub jobs have already
	 * completed so we can free everything.
	 */
	if (job->master_job == job) {
		mutex_destroy(&job->lock);
		mempool_free(job, &kc->job_pool);
	}
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	cond_resched();

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	io_job_finish(kc->throttle);

	if (error) {
		if (op_is_write(job->rw))
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (op_is_write(job->rw))
		push(&kc->complete_jobs, job);

	else {
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}

/*
 * Request I/O on this job's pages: a single dm_io for the read, or one
 * spanning all destinations for the write.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = job->rw,
		.bi_op_flags = 0,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	/*
	 * If we need to write sequentially and some reads or writes failed,
	 * no point in continuing.
	 */
	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    job->master_job->write_err) {
		job->write_err = job->master_job->write_err;
		return -EIO;
	}

	io_job_start(job->kc->throttle);

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (op_is_write(job->rw))
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			wake(kc);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;
	unsigned long flags;

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	spin_lock_irqsave(&kc->job_lock, flags);
	list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);

	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;
	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->callback_jobs, job);
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}

static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
	struct kcopyd_job *job = sub_job->master_job;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > kc->sub_job_size)
				count = kc->sub_job_size;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;

		*sub_job = *job;
		sub_job->write_offset = progress;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}

/*
 * Create some sub jobs to share the work between them.
 */
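/*
 * Each of the SPLIT_COUNT sub jobs is seeded through segment_complete()
 * with itself as context; every completion claims the next chunk of up
 * to kc->sub_job_size sectors (tracked by the master's job->progress)
 * and re-dispatches itself, so up to SPLIT_COUNT chunks are in flight
 * until progress reaches source.count and the final sub job queues the
 * master's completion callback.
 */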
static void split_job(struct kcopyd_job *master_job)
{
	int i;

	atomic_inc(&master_job->kc->nr_jobs);

	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++) {
		master_job[i + 1].master_job = master_job;
		segment_complete(0, 0u, &master_job[i + 1]);
	}
}

void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		    unsigned int num_dests, struct dm_io_region *dests,
		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;
	int i;

	/*
	 * Allocate an array of jobs consisting of one master job
	 * followed by SPLIT_COUNT sub jobs.
	 */
	job = mempool_alloc(&kc->job_pool, GFP_NOIO);
	mutex_init(&job->lock);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	/*
	 * If one of the destinations is a host-managed zoned block device,
	 * we need to write sequentially. If one of the destinations is a
	 * host-aware device, then leave it to the caller to choose what to do.
	 */
	if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
		for (i = 0; i < job->num_dests; i++) {
			if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
				set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
				break;
			}
		}
	}

	/*
	 * If we need to write sequentially, errors cannot be ignored.
	 */
	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
		clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);

	if (from) {
		job->source = *from;
		job->pages = NULL;
		job->rw = READ;
	} else {
		memset(&job->source, 0, sizeof(job->source));
		job->source.count = job->dests[0].count;
		job->pages = &zero_page_list;

		/*
		 * Use WRITE ZEROES to optimize zeroing if all dests support it.
		 */
		job->rw = REQ_OP_WRITE_ZEROES;
		for (i = 0; i < job->num_dests; i++)
			if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
				job->rw = WRITE;
				break;
			}
	}

	job->fn = fn;
	job->context = context;
	job->master_job = job;
	job->write_offset = 0;

	if (job->source.count <= kc->sub_job_size)
		dispatch_job(job);
	else {
		job->progress = 0;
		split_job(job);
	}
}
EXPORT_SYMBOL(dm_kcopyd_copy);

void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
		    unsigned num_dests, struct dm_io_region *dests,
		    unsigned flags, dm_kcopyd_notify_fn fn, void *context)
{
	dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
EXPORT_SYMBOL(dm_kcopyd_zero);

void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
				 dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	job = mempool_alloc(&kc->job_pool, GFP_NOIO);

	memset(job, 0, sizeof(struct kcopyd_job));
	job->kc = kc;
	job->fn = fn;
	job->context = context;
	job->master_job = job;

	atomic_inc(&kc->nr_jobs);

	return job;
}
EXPORT_SYMBOL(dm_kcopyd_prepare_callback);

void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
{
	struct kcopyd_job *job = j;
	struct dm_kcopyd_client *kc = job->kc;

	job->read_err = read_err;
	job->write_err = write_err;

	push(&kc->callback_jobs, job);
	wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);
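
/*
 * A minimal sketch of the two-step callback API above (illustrative
 * only; "my_fn" and "my_ctx" are hypothetical).  Because the job is
 * taken from the mempool up front, firing the callback later cannot
 * fail with -ENOMEM:
 *
 *	void *j = dm_kcopyd_prepare_callback(kc, my_fn, my_ctx);
 *	...			// work that must not allocate
 *	dm_kcopyd_do_callback(j, 0, 0);
 */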

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
	int r;
	unsigned reserve_pages;
	struct dm_kcopyd_client *kc;

	kc = kzalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->callback_jobs);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);
	kc->throttle = throttle;

	r = mempool_init_slab_pool(&kc->job_pool, MIN_JOBS, _job_cache);
	if (r)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
	if (!kc->kcopyd_wq) {
		r = -ENOMEM;
		goto bad_workqueue;
	}

	kc->sub_job_size = dm_get_kcopyd_subjob_size();
	reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);

	kc->pages = NULL;
	kc->nr_reserved_pages = kc->nr_free_pages = 0;
	r = client_reserve_pages(kc, reserve_pages);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create();
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	return kc;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_exit(&kc->job_pool);
bad_slab:
	kfree(kc);

	return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->callback_jobs));
	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_exit(&kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);
1102*4882a593Smuzhiyun EXPORT_SYMBOL(dm_kcopyd_client_destroy);
1103