xref: /OK3568_Linux_fs/kernel/drivers/md/dm-bio-prison-v1.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (C) 2011-2017 Red Hat, Inc.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * This file is released under the GPL.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #ifndef DM_BIO_PRISON_H
8*4882a593Smuzhiyun #define DM_BIO_PRISON_H
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
11*4882a593Smuzhiyun #include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/bio.h>
14*4882a593Smuzhiyun #include <linux/rbtree.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun /*----------------------------------------------------------------*/
17*4882a593Smuzhiyun 
/*
 * Sometimes we can't deal with a bio straight away.  We put them in prison
 * where they can't cause any mischief.  Bios are put in a cell identified
 * by a key, multiple bios can be in the same cell.  When the cell is
 * subsequently unlocked the bios become available.
 */
struct dm_bio_prison;	/* opaque; defined in the implementation file */
25*4882a593Smuzhiyun 
/*
 * Keys define a range of blocks within either a virtual or physical
 * device.
 */
struct dm_cell_key {
	int virtual;		/* flag: non-zero when the range is on the virtual (thin) device rather than the physical one */
	dm_thin_id dev;		/* device the block range belongs to */
	dm_block_t block_begin, block_end;	/* block range; NOTE(review): presumably half-open [begin, end) — confirm in the implementation */
};
35*4882a593Smuzhiyun 
/*
 * Treat this as opaque, only in header so callers can manage allocation
 * themselves.
 */
struct dm_bio_prison_cell {
	struct list_head user_list;	/* for client use */
	struct rb_node node;		/* presumably links the cell into the prison's rbtree of held cells — confirm in implementation */

	struct dm_cell_key key;		/* range of blocks this cell locks */
	struct bio *holder;		/* the bio holding the cell lock (see dm_bio_detain()) */
	struct bio_list bios;		/* other bios detained behind the holder */
};
48*4882a593Smuzhiyun 
/* Constructor and destructor for a bio prison instance. */
struct dm_bio_prison *dm_bio_prison_create(void);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
51*4882a593Smuzhiyun 
/*
 * These two functions just wrap a mempool.  This is a transitory step:
 * Eventually all bio prison clients should manage their own cell memory.
 *
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT.
 *
 * @gfp: allocation flags handed down to the underlying mempool.
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);
63*4882a593Smuzhiyun 
/*
 * Creates, or retrieves a cell that overlaps the given key.
 *
 * Returns 1 if pre-existing cell returned, zero if new cell created using
 * @cell_prealloc.
 *
 * NOTE(review): when 1 is returned, @cell_prealloc is presumably not
 * consumed and remains the caller's responsibility to free — confirm
 * against the implementation.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result);
74*4882a593Smuzhiyun 
/*
 * An atomic op that combines retrieving or creating a cell, and adding a
 * bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 * In either case *@cell_result presumably points at the cell holding
 * @inmate — confirm against the implementation.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);
86*4882a593Smuzhiyun 
/*
 * dm_cell_release() unlocks @cell, handing all detained bios (holder
 * included, judging by the _no_holder variant below) back via @bios.
 * dm_cell_release_no_holder() transfers only the waiting inmates,
 * leaving the holder with the caller.
 * dm_cell_error() presumably ends every bio in @cell with @error —
 * confirm against the implementation.
 */
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error);
95*4882a593Smuzhiyun 
/*
 * Visits the cell and then releases.  Guarantees no new inmates are
 * inserted between the visit and release.
 *
 * NOTE(review): the no-new-inmates guarantee implies @visit_fn runs with
 * the prison lock held, so it should not block — confirm against the
 * implementation.
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context, struct dm_bio_prison_cell *cell);
103*4882a593Smuzhiyun 
/*
 * Rather than always releasing the prisoners in a cell, the client may
 * want to promote one of them to be the new holder.  There is a race here
 * though between releasing an empty cell, and other threads adding new
 * inmates.  So this function makes the decision with its lock held.
 *
 * This function can have two outcomes:
 * i) An inmate is promoted to be the holder of the cell (return value of 0).
 * ii) The cell has no inmate for promotion and is released (return value of 1).
 */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell);
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun /*----------------------------------------------------------------*/
118*4882a593Smuzhiyun 
/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;		/* opaque */
struct dm_deferred_entry;	/* opaque handle returned by dm_deferred_entry_inc() */

/* Constructor and destructor for a deferred set. */
struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

/* Register an in-flight operation; pair each inc with a later dec. */
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
/* Retire @entry; work that became runnable is presumably moved onto
 * @head for the caller to issue — confirm against the implementation. */
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
/* Queue @work to run once currently registered entries complete.
 * NOTE(review): the meaning of the int return (e.g. "work may run
 * immediately") is not visible here — check the implementation. */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun /*----------------------------------------------------------------*/
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun #endif
139