/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define	DM_MSG_PREFIX	"region hash"

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table, a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  dm_rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  dm_rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  dm_rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
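
/*
 * Illustrative usage sketch (editorial addition, not part of the API
 * here; the real caller is dm-raid1.c).  "ms", "dispatch_bios",
 * "wakeup_mirrord" and "MAX_RECOVERY" are placeholders for the
 * caller's context, callbacks and recovery limit:
 *
 *	rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
 *				   wakeup_all_recovery_waiters, ti->begin,
 *				   MAX_RECOVERY, log, region_size,
 *				   nr_regions);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 *
 * Writes then flow through dm_rh_inc_pending()/dm_rh_dec(), and the
 * caller's worker periodically calls dm_rh_update_states() to retire
 * clean and recovered regions.
 */
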
struct dm_region_hash {
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	unsigned mask;
	unsigned nr_buckets;
	unsigned prime;
	unsigned shift;
	struct list_head *buckets;

	/*
	 * If there was a flush failure no regions can be marked clean.
	 */
	int flush_failure;

	unsigned max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
	struct semaphore recovery_count;

	mempool_t region_pool;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule writes of delayed bios */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wake up the caller's worker thread. */
	void (*wakeup_workers)(void *context);

	/* Callback function to wake up the caller's recovery waiters. */
	void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
				      rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);

void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);

/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()?
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	size_t i;
	int ret;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh = kzalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = __ffs(region_size);
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->flush_failure = 0;

	ret = mempool_init_kmalloc_pool(&rh->region_pool, MIN_REGIONS,
					sizeof(struct dm_region));
	if (ret) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);

void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, &rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	mempool_exit(&rh->region_pool);
	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);

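/*
 * Multiplicative hash: multiply the region number by a large odd
 * constant (in the spirit of Knuth's multiplicative method) and
 * discard the low RH_HASH_SHIFT bits of the product, which depend
 * only on the low bits of the key, before masking down to the number
 * of buckets.
 */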
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(&rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, &rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}

static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (e.g. -EWOULDBLOCK) is
	 * treated as DM_RH_NOSYNC.
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed.  If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}

/* dm_rh_mark_nosync
 * @rh
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio->bi_opf & REQ_PREFLUSH) {
		rh->flush_failure = 1;
		return;
	}

	if (bio_op(bio) == REQ_OP_DISCARD)
		return;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In either case, the region should not be connected to a list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);

void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, &rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, &rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, &rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);

static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

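/*
 * Sketch of the expected write-path ordering (editorial illustration,
 * modelled on how dm-raid1.c drives these helpers; "writes" and "bio"
 * are placeholders):
 *
 *	dm_rh_inc_pending(rh, &writes);			marks regions dirty
 *	... submit the bios ...
 *	dm_rh_dec(rh, dm_rh_bio_to_region(rh, bio));	from write endio
 *	dm_rh_update_states(rh, errors_handled);	from the worker
 */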
void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for
		 * the next action.  At this point, the region is not
		 * yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be
		 * kept off the clean list.  The hash entry for
		 * DM_RH_NOSYNC will remain in memory until the region
		 * is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

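/*
 * Recovery flow at a glance (editorial illustration; in dm-raid1.c the
 * loop below runs in the kmirrord worker and the completion callback
 * comes from kcopyd):
 *
 *	dm_rh_recovery_prepare(rh);
 *	while ((reg = dm_rh_recovery_start(rh)))
 *		... hand reg to kcopyd ...
 *	dm_rh_recovery_end(reg, success);	per completed region
 *	dm_rh_update_states(rh, errors_handled);
 */
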
/*
 * Return a quiesced region, if any.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);  /* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);

	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);

void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");