xref: /OK3568_Linux_fs/kernel/drivers/md/persistent-data/dm-space-map-metadata.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (C) 2011 Red Hat, Inc.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * This file is released under the GPL.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include "dm-space-map.h"
8*4882a593Smuzhiyun #include "dm-space-map-common.h"
9*4882a593Smuzhiyun #include "dm-space-map-metadata.h"
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/list.h>
12*4882a593Smuzhiyun #include <linux/slab.h>
13*4882a593Smuzhiyun #include <linux/device-mapper.h>
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #define DM_MSG_PREFIX "space map metadata"
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun /*----------------------------------------------------------------*/
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /*
21*4882a593Smuzhiyun  * An edge triggered threshold.
22*4882a593Smuzhiyun  */
/*
 * State for an edge-triggered low-space threshold.
 *
 * The callback fires only when the observed value crosses from above
 * the threshold to at-or-below it (see check_threshold()).
 */
struct threshold {
	bool threshold_set;		/* has set_threshold() armed us? */
	bool value_set;			/* is current_value valid? */
	dm_block_t threshold;		/* trigger point (inclusive) */
	dm_block_t current_value;	/* last value seen by check_threshold() */
	dm_sm_threshold_fn fn;		/* callback invoked on the falling edge */
	void *context;			/* opaque argument passed to fn */
};
31*4882a593Smuzhiyun 
threshold_init(struct threshold * t)32*4882a593Smuzhiyun static void threshold_init(struct threshold *t)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun 	t->threshold_set = false;
35*4882a593Smuzhiyun 	t->value_set = false;
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun 
/*
 * Arm the threshold: once a checked value drops to 'value' or below,
 * 'fn' will be invoked with 'context'.
 */
static void set_threshold(struct threshold *t, dm_block_t value,
			  dm_sm_threshold_fn fn, void *context)
{
	t->threshold = value;
	t->fn = fn;
	t->context = context;
	t->threshold_set = true;
}
46*4882a593Smuzhiyun 
below_threshold(struct threshold * t,dm_block_t value)47*4882a593Smuzhiyun static bool below_threshold(struct threshold *t, dm_block_t value)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun 	return t->threshold_set && value <= t->threshold;
50*4882a593Smuzhiyun }
51*4882a593Smuzhiyun 
threshold_already_triggered(struct threshold * t)52*4882a593Smuzhiyun static bool threshold_already_triggered(struct threshold *t)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun 	return t->value_set && below_threshold(t, t->current_value);
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
/*
 * Record a new value, firing the callback on the falling edge, i.e.
 * when the value first drops to or below the threshold.
 */
static void check_threshold(struct threshold *t, dm_block_t value)
{
	bool fire = below_threshold(t, value) &&
		    !threshold_already_triggered(t);

	if (fire)
		t->fn(t->context);

	t->current_value = value;
	t->value_set = true;
}
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun /*----------------------------------------------------------------*/
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun /*
70*4882a593Smuzhiyun  * Space map interface.
71*4882a593Smuzhiyun  *
72*4882a593Smuzhiyun  * The low level disk format is written using the standard btree and
73*4882a593Smuzhiyun  * transaction manager.  This means that performing disk operations may
74*4882a593Smuzhiyun  * cause us to recurse into the space map in order to allocate new blocks.
75*4882a593Smuzhiyun  * For this reason we have a pool of pre-allocated blocks large enough to
76*4882a593Smuzhiyun  * service any metadata_ll_disk operation.
77*4882a593Smuzhiyun  */
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun /*
80*4882a593Smuzhiyun  * FIXME: we should calculate this based on the size of the device.
81*4882a593Smuzhiyun  * Only the metadata space map needs this functionality.
82*4882a593Smuzhiyun  */
83*4882a593Smuzhiyun #define MAX_RECURSIVE_ALLOCATIONS 1024
84*4882a593Smuzhiyun 
/* Reference count adjustments that can be queued for later replay. */
enum block_op_type {
	BOP_INC,
	BOP_DEC
};

/* A single queued adjustment against one metadata block. */
struct block_op {
	enum block_op_type type;
	dm_block_t block;
};

/*
 * Fixed-size FIFO of pending block ops.  One slot is sacrificed so
 * that begin == end always means "empty" (see brb_push()).
 */
struct bop_ring_buffer {
	unsigned begin;		/* index of oldest entry */
	unsigned end;		/* index one past newest entry */
	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
};
100*4882a593Smuzhiyun 
brb_init(struct bop_ring_buffer * brb)101*4882a593Smuzhiyun static void brb_init(struct bop_ring_buffer *brb)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun 	brb->begin = 0;
104*4882a593Smuzhiyun 	brb->end = 0;
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun 
brb_empty(struct bop_ring_buffer * brb)107*4882a593Smuzhiyun static bool brb_empty(struct bop_ring_buffer *brb)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun 	return brb->begin == brb->end;
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun 
brb_next(struct bop_ring_buffer * brb,unsigned old)112*4882a593Smuzhiyun static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun 	unsigned r = old + 1;
115*4882a593Smuzhiyun 	return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
116*4882a593Smuzhiyun }
117*4882a593Smuzhiyun 
/*
 * Append a block op to the tail of the ring.
 * Returns -ENOMEM if the buffer is full.
 */
static int brb_push(struct bop_ring_buffer *brb,
		    enum block_op_type type, dm_block_t b)
{
	unsigned next = brb_next(brb, brb->end);
	struct block_op *bop;

	/*
	 * The last slot is never filled; this is how we distinguish a
	 * full buffer from an empty one (begin == end means empty).
	 */
	if (next == brb->begin)
		return -ENOMEM;

	bop = &brb->bops[brb->end];
	bop->type = type;
	bop->block = b;
	brb->end = next;

	return 0;
}
139*4882a593Smuzhiyun 
brb_peek(struct bop_ring_buffer * brb,struct block_op * result)140*4882a593Smuzhiyun static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	struct block_op *bop;
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	if (brb_empty(brb))
145*4882a593Smuzhiyun 		return -ENODATA;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	bop = brb->bops + brb->begin;
148*4882a593Smuzhiyun 	result->type = bop->type;
149*4882a593Smuzhiyun 	result->block = bop->block;
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	return 0;
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun 
brb_pop(struct bop_ring_buffer * brb)154*4882a593Smuzhiyun static int brb_pop(struct bop_ring_buffer *brb)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun 	if (brb_empty(brb))
157*4882a593Smuzhiyun 		return -ENODATA;
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 	brb->begin = brb_next(brb, brb->begin);
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	return 0;
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun /*----------------------------------------------------------------*/
165*4882a593Smuzhiyun 
struct sm_metadata {
	struct dm_space_map sm;		/* embedded ops vtable; container_of() recovers us */

	struct ll_disk ll;		/* current low-level disk state */
	struct ll_disk old_ll;		/* snapshot taken at last commit (see sm_metadata_commit()) */

	dm_block_t begin;		/* cursor where the next free-block search starts */

	unsigned recursion_count;	/* nesting depth maintained by in()/out() */
	unsigned allocated_this_transaction; /* blocks handed out since last commit */
	struct bop_ring_buffer uncommitted;  /* inc/dec ops queued while recursing */

	struct threshold threshold;	/* low-free-space callback, checked on allocation */
};
180*4882a593Smuzhiyun 
/*
 * Queue an uncommitted block op for later replay, logging an error if
 * the ring buffer has no room left.
 */
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
	if (brb_push(&smm->uncommitted, type, b)) {
		DMERR("too many recursive allocations");
		return -ENOMEM;
	}

	return 0;
}
192*4882a593Smuzhiyun 
commit_bop(struct sm_metadata * smm,struct block_op * op)193*4882a593Smuzhiyun static int commit_bop(struct sm_metadata *smm, struct block_op *op)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun 	int r = 0;
196*4882a593Smuzhiyun 	enum allocation_event ev;
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	switch (op->type) {
199*4882a593Smuzhiyun 	case BOP_INC:
200*4882a593Smuzhiyun 		r = sm_ll_inc(&smm->ll, op->block, &ev);
201*4882a593Smuzhiyun 		break;
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	case BOP_DEC:
204*4882a593Smuzhiyun 		r = sm_ll_dec(&smm->ll, op->block, &ev);
205*4882a593Smuzhiyun 		break;
206*4882a593Smuzhiyun 	}
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	return r;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun 
in(struct sm_metadata * smm)211*4882a593Smuzhiyun static void in(struct sm_metadata *smm)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	smm->recursion_count++;
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun 
apply_bops(struct sm_metadata * smm)216*4882a593Smuzhiyun static int apply_bops(struct sm_metadata *smm)
217*4882a593Smuzhiyun {
218*4882a593Smuzhiyun 	int r = 0;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	while (!brb_empty(&smm->uncommitted)) {
221*4882a593Smuzhiyun 		struct block_op bop;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 		r = brb_peek(&smm->uncommitted, &bop);
224*4882a593Smuzhiyun 		if (r) {
225*4882a593Smuzhiyun 			DMERR("bug in bop ring buffer");
226*4882a593Smuzhiyun 			break;
227*4882a593Smuzhiyun 		}
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 		r = commit_bop(smm, &bop);
230*4882a593Smuzhiyun 		if (r)
231*4882a593Smuzhiyun 			break;
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 		brb_pop(&smm->uncommitted);
234*4882a593Smuzhiyun 	}
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	return r;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun 
/*
 * Leave a recursive region, draining any queued block ops once the
 * outermost level is reached.  Paired with in().
 */
static int out(struct sm_metadata *smm)
{
	int r = 0;

	/*
	 * If we're not recursing then very bad things are happening.
	 */
	if (!smm->recursion_count) {
		DMERR("lost track of recursion depth");
		return -ENOMEM;
	}

	/* Only the outermost out() replays the deferred ops. */
	if (smm->recursion_count == 1)
		r = apply_bops(smm);

	smm->recursion_count--;

	return r;
}
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun /*
260*4882a593Smuzhiyun  * When using the out() function above, we often want to combine an error
261*4882a593Smuzhiyun  * code for the operation run in the recursive context with that from
262*4882a593Smuzhiyun  * out().
263*4882a593Smuzhiyun  */
/*
 * When using the out() function above, we often want to combine an error
 * code for the operation run in the recursive context with that from
 * out().  The first error wins.
 */
static int combine_errors(int r1, int r2)
{
	if (r1)
		return r1;

	return r2;
}
268*4882a593Smuzhiyun 
recursing(struct sm_metadata * smm)269*4882a593Smuzhiyun static int recursing(struct sm_metadata *smm)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun 	return smm->recursion_count;
272*4882a593Smuzhiyun }
273*4882a593Smuzhiyun 
sm_metadata_destroy(struct dm_space_map * sm)274*4882a593Smuzhiyun static void sm_metadata_destroy(struct dm_space_map *sm)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	kfree(smm);
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun 
/* Report the total number of blocks managed by this space map. */
static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks;
	return 0;
}
289*4882a593Smuzhiyun 
/*
 * Report how many blocks are still free.  Computed against the state
 * at the last commit (old_ll) minus anything handed out in the current
 * transaction, rather than against the live ll.
 */
static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated -
		 smm->allocated_this_transaction;

	return 0;
}
299*4882a593Smuzhiyun 
/*
 * Look up the reference count of block b, folding in any inc/dec ops
 * still queued in the uncommitted ring buffer.
 */
static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
				 uint32_t *result)
{
	int r;
	unsigned i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	unsigned adjustment = 0;

	/*
	 * We may have some uncommitted adjustments to add.  This list
	 * should always be really short.
	 */
	for (i = smm->uncommitted.begin;
	     i != smm->uncommitted.end;
	     i = brb_next(&smm->uncommitted, i)) {
		struct block_op *op = smm->uncommitted.bops + i;

		if (op->block != b)
			continue;

		switch (op->type) {
		case BOP_INC:
			adjustment++;
			break;

		case BOP_DEC:
			/*
			 * May wrap below zero; the unsigned modular
			 * arithmetic still makes the final sum correct.
			 */
			adjustment--;
			break;
		}
	}

	r = sm_ll_lookup(&smm->ll, b, result);
	if (r)
		return r;

	*result += adjustment;

	return 0;
}
339*4882a593Smuzhiyun 
/*
 * Cheaper variant of get_count() that only answers "is the reference
 * count greater than one?".  Avoids the full lookup when the queued
 * adjustments alone already push the count past one.
 */
static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
					      dm_block_t b, int *result)
{
	int r, adjustment = 0;
	unsigned i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	uint32_t rc;

	/*
	 * We may have some uncommitted adjustments to add.  This list
	 * should always be really short.
	 */
	for (i = smm->uncommitted.begin;
	     i != smm->uncommitted.end;
	     i = brb_next(&smm->uncommitted, i)) {

		struct block_op *op = smm->uncommitted.bops + i;

		if (op->block != b)
			continue;

		switch (op->type) {
		case BOP_INC:
			adjustment++;
			break;

		case BOP_DEC:
			adjustment--;
			break;
		}
	}

	if (adjustment > 1) {
		*result = 1;
		return 0;
	}

	/* Only the bitmap is consulted here, not the overflow ref counts. */
	r = sm_ll_lookup_bitmap(&smm->ll, b, &rc);
	if (r)
		return r;

	/*
	 * NOTE(review): rc == 3 looks like the bitmap's saturation value,
	 * with larger counts presumably stored elsewhere - confirm against
	 * sm_ll_lookup_bitmap().
	 */
	if (rc == 3)
		/*
		 * We err on the side of caution, and always return true.
		 */
		*result = 1;
	else
		*result = rc + adjustment > 1;

	return 0;
}
391*4882a593Smuzhiyun 
/*
 * Set the reference count of block b to an absolute value.  Not
 * permitted while a recursive operation is in flight.
 */
static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
				 uint32_t count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	enum allocation_event ev;
	int r, r2;

	if (smm->recursion_count) {
		DMERR("cannot recurse set_count()");
		return -EINVAL;
	}

	in(smm);
	r = sm_ll_insert(&smm->ll, b, count, &ev);
	r2 = out(smm);

	return combine_errors(r, r2);
}
410*4882a593Smuzhiyun 
/*
 * Increment the reference count of block b.  If we're already inside a
 * recursive operation the inc is queued for the outermost caller to
 * replay; otherwise it is applied immediately.
 */
static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	enum allocation_event ev;
	int r2 = 0;
	int r;

	if (recursing(smm)) {
		r = add_bop(smm, BOP_INC, b);
	} else {
		in(smm);
		r = sm_ll_inc(&smm->ll, b, &ev);
		r2 = out(smm);
	}

	return combine_errors(r, r2);
}
427*4882a593Smuzhiyun 
/*
 * Decrement the reference count of block b, deferring via the ring
 * buffer when called recursively (mirror of sm_metadata_inc_block()).
 */
static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	enum allocation_event ev;
	int r2 = 0;
	int r;

	if (recursing(smm)) {
		r = add_bop(smm, BOP_DEC, b);
	} else {
		in(smm);
		r = sm_ll_dec(&smm->ll, b, &ev);
		r2 = out(smm);
	}

	return combine_errors(r, r2);
}
444*4882a593Smuzhiyun 
/*
 * Allocate a fresh metadata block: find a block free in both the
 * committed and current state, then take a reference on it.
 */
static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	/*
	 * Any block we allocate has to be free in both the old and current ll.
	 */
	r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
	if (r == -ENOSPC) {
		/*
		 * There's no free block between smm->begin and the end of the metadata device.
		 * We search before smm->begin in case something has been freed.
		 */
		r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b);
	}

	if (r)
		return r;

	/* Next search starts just past the block we're handing out. */
	smm->begin = *b + 1;

	/* Take the reference now, or defer it if we're recursing. */
	if (recursing(smm))
		r = add_bop(smm, BOP_INC, *b);
	else {
		in(smm);
		r = sm_ll_inc(&smm->ll, *b, &ev);
		r2 = out(smm);
	}

	/* Only count the allocation if the inc (or its queueing) succeeded. */
	if (!r)
		smm->allocated_this_transaction++;

	return combine_errors(r, r2);
}
481*4882a593Smuzhiyun 
/*
 * Public allocation entry point: allocate a block, then feed the new
 * free count into the low-space threshold machinery.
 */
static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	dm_block_t count;
	int r;

	r = sm_metadata_new_block_(sm, b);
	if (r) {
		DMERR_LIMIT("unable to allocate new metadata block");
		return r;
	}

	r = sm_metadata_get_nr_free(sm, &count);
	if (r) {
		DMERR_LIMIT("couldn't get free block count");
		return r;
	}

	/* May fire the registered threshold callback. */
	check_threshold(&smm->threshold, count);

	return r;
}
503*4882a593Smuzhiyun 
sm_metadata_commit(struct dm_space_map * sm)504*4882a593Smuzhiyun static int sm_metadata_commit(struct dm_space_map *sm)
505*4882a593Smuzhiyun {
506*4882a593Smuzhiyun 	int r;
507*4882a593Smuzhiyun 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun 	r = sm_ll_commit(&smm->ll);
510*4882a593Smuzhiyun 	if (r)
511*4882a593Smuzhiyun 		return r;
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
514*4882a593Smuzhiyun 	smm->allocated_this_transaction = 0;
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	return 0;
517*4882a593Smuzhiyun }
518*4882a593Smuzhiyun 
/* Arm the low-free-space threshold; fn fires on the falling edge. */
static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
						   dm_block_t threshold,
						   dm_sm_threshold_fn fn,
						   void *context)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	set_threshold(&smm->threshold, threshold, fn, context);
	return 0;
}
530*4882a593Smuzhiyun 
/* The on-disk root is a fixed-size struct disk_sm_root. */
static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
{
	*result = sizeof(struct disk_sm_root);
	return 0;
}
537*4882a593Smuzhiyun 
/*
 * Serialise the space map root (little endian) into caller-supplied
 * storage.  Returns -ENOSPC if 'max' is too small.
 */
static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	struct disk_sm_root root_le = {
		.nr_blocks = cpu_to_le64(smm->ll.nr_blocks),
		.nr_allocated = cpu_to_le64(smm->ll.nr_allocated),
		.bitmap_root = cpu_to_le64(smm->ll.bitmap_root),
		.ref_count_root = cpu_to_le64(smm->ll.ref_count_root),
	};

	if (max < sizeof(root_le))
		return -ENOSPC;

	memcpy(where_le, &root_le, sizeof(root_le));

	return 0;
}
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
557*4882a593Smuzhiyun 
/* vtable for a fully-initialised metadata space map */
static const struct dm_space_map ops = {
	.destroy = sm_metadata_destroy,
	.extend = sm_metadata_extend,
	.get_nr_blocks = sm_metadata_get_nr_blocks,
	.get_nr_free = sm_metadata_get_nr_free,
	.get_count = sm_metadata_get_count,
	.count_is_more_than_one = sm_metadata_count_is_more_than_one,
	.set_count = sm_metadata_set_count,
	.inc_block = sm_metadata_inc_block,
	.dec_block = sm_metadata_dec_block,
	.new_block = sm_metadata_new_block,
	.commit = sm_metadata_commit,
	.root_size = sm_metadata_root_size,
	.copy_root = sm_metadata_copy_root,
	.register_threshold_callback = sm_metadata_register_threshold_callback
};
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun /*----------------------------------------------------------------*/
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun /*
578*4882a593Smuzhiyun  * When a new space map is created that manages its own space.  We use
579*4882a593Smuzhiyun  * this tiny bootstrap allocator.
580*4882a593Smuzhiyun  */
/*
 * Nothing to free: the bootstrap ops are installed over the embedded
 * sm of an existing sm_metadata, which owns the memory.
 */
static void sm_bootstrap_destroy(struct dm_space_map *sm)
{
}
584*4882a593Smuzhiyun 
/* Extending is meaningless while bootstrapping. */
static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	DMERR("bootstrap doesn't support extend");
	return -EINVAL;
}
591*4882a593Smuzhiyun 
/* Total size of the device being bootstrapped. */
static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks;
	return 0;
}
600*4882a593Smuzhiyun 
/* Everything from the allocation cursor to the end is still free. */
static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks - smm->begin;
	return 0;
}
609*4882a593Smuzhiyun 
/*
 * Bootstrap allocates sequentially from block 0, so anything below the
 * cursor has a count of exactly one, and everything above it zero.
 */
static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t *result)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*result = b < smm->begin ? 1 : 0;
	return 0;
}
619*4882a593Smuzhiyun 
/* No block is ever referenced more than once during bootstrap. */
static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
					       dm_block_t b, int *result)
{
	*result = 0;
	return 0;
}
627*4882a593Smuzhiyun 
/* Absolute counts can't be expressed while bootstrapping. */
static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t count)
{
	DMERR("bootstrap doesn't support set_count");
	return -EINVAL;
}
635*4882a593Smuzhiyun 
/*
 * Hand out the next block in sequence.  We know the entire device is
 * unused, so a simple advancing cursor suffices.
 */
static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (smm->begin == smm->ll.nr_blocks)
		return -ENOSPC;

	*b = smm->begin;
	smm->begin++;

	return 0;
}
650*4882a593Smuzhiyun 
/* Queue the inc for replay once the real space map takes over. */
static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	return add_bop(container_of(sm, struct sm_metadata, sm), BOP_INC, b);
}
657*4882a593Smuzhiyun 
/* Queue the dec for replay once the real space map takes over. */
static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	return add_bop(container_of(sm, struct sm_metadata, sm), BOP_DEC, b);
}
664*4882a593Smuzhiyun 
/* Commit is a no-op while bootstrapping. */
static int sm_bootstrap_commit(struct dm_space_map *sm)
{
	return 0;
}
669*4882a593Smuzhiyun 
/* The bootstrap map has no persistent root. */
static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
{
	DMERR("bootstrap doesn't support root_size");
	return -EINVAL;
}
676*4882a593Smuzhiyun 
/* There is no root to serialise while bootstrapping. */
static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
				  size_t max)
{
	DMERR("bootstrap doesn't support copy_root");
	return -EINVAL;
}
684*4882a593Smuzhiyun 
/*
 * vtable used while the space map is bootstrapping itself (e.g. during
 * sm_metadata_extend(), which memcpy()s these ops over the live sm).
 */
static const struct dm_space_map bootstrap_ops = {
	.destroy = sm_bootstrap_destroy,
	.extend = sm_bootstrap_extend,
	.get_nr_blocks = sm_bootstrap_get_nr_blocks,
	.get_nr_free = sm_bootstrap_get_nr_free,
	.get_count = sm_bootstrap_get_count,
	.count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
	.set_count = sm_bootstrap_set_count,
	.inc_block = sm_bootstrap_inc_block,
	.dec_block = sm_bootstrap_dec_block,
	.new_block = sm_bootstrap_new_block,
	.commit = sm_bootstrap_commit,
	.root_size = sm_bootstrap_root_size,
	.copy_root = sm_bootstrap_copy_root,
	.register_threshold_callback = NULL	/* bootstrap never thresholds */
};
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun /*----------------------------------------------------------------*/
703*4882a593Smuzhiyun 
/*
 * Grow the space map by extra_blocks.
 *
 * Extending may itself consume fresh metadata blocks, so we temporarily
 * swap in the bootstrap ops: allocations then come sequentially from the
 * new area (smm->begin onwards) and refcount changes are queued as block
 * ops.  We then repeatedly account for the consumed blocks and commit
 * until a commit allocates nothing further, at which point the map is
 * self-consistent and the normal ops are restored.
 *
 * Fix vs. original: the loop index was 'int i' while it iterates over
 * dm_block_t block numbers (and the sibling loop in dm_sm_metadata_create
 * already uses dm_block_t) — use dm_block_t to avoid truncation for very
 * large maps and to keep the two loops consistent.
 */
static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	dm_block_t old_len = smm->ll.nr_blocks;

	/*
	 * Flick into a mode where all blocks get allocated in the new area.
	 */
	smm->begin = old_len;
	memcpy(sm, &bootstrap_ops, sizeof(*sm));

	/*
	 * Extend.
	 */
	r = sm_ll_extend(&smm->ll, extra_blocks);
	if (r)
		goto out;

	/*
	 * We repeatedly increment then commit until the commit doesn't
	 * allocate any new blocks.
	 */
	do {
		/* Account for every block the extend/commit handed out. */
		for (i = old_len; !r && i < smm->begin; i++)
			r = add_bop(smm, BOP_INC, i);

		if (r)
			goto out;

		old_len = smm->begin;

		r = apply_bops(smm);
		if (r) {
			DMERR("%s: apply_bops failed", __func__);
			goto out;
		}

		r = sm_ll_commit(&smm->ll);
		if (r)
			goto out;

	} while (old_len != smm->begin);

out:
	/*
	 * Switch back to normal behaviour.
	 */
	memcpy(sm, &ops, sizeof(*sm));
	return r;
}
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun /*----------------------------------------------------------------*/
757*4882a593Smuzhiyun 
dm_sm_metadata_init(void)758*4882a593Smuzhiyun struct dm_space_map *dm_sm_metadata_init(void)
759*4882a593Smuzhiyun {
760*4882a593Smuzhiyun 	struct sm_metadata *smm;
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	smm = kmalloc(sizeof(*smm), GFP_KERNEL);
763*4882a593Smuzhiyun 	if (!smm)
764*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 	memcpy(&smm->sm, &ops, sizeof(smm->sm));
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	return &smm->sm;
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun 
/*
 * Format a brand new metadata space map covering nr_blocks (capped at
 * DM_SM_METADATA_MAX_BLOCKS), reserving everything up to and including
 * the superblock.
 *
 * The map's own btrees are built while the bootstrap ops are swapped in,
 * so the blocks they consume are only recorded as pending BOP_INCs; once
 * the normal ops are restored these are replayed and the result
 * committed.  Returns 0 on success or a negative errno from the
 * lower-level calls.
 */
int dm_sm_metadata_create(struct dm_space_map *sm,
			  struct dm_transaction_manager *tm,
			  dm_block_t nr_blocks,
			  dm_block_t superblock)
{
	int r;
	dm_block_t i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	/* First allocatable block is the one after the superblock. */
	smm->begin = superblock + 1;
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	brb_init(&smm->uncommitted);
	threshold_init(&smm->threshold);

	/* Bootstrap mode: allocations come from smm->begin onwards. */
	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

	r = sm_ll_new_metadata(&smm->ll, tm);
	if (!r) {
		if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
			nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
		r = sm_ll_extend(&smm->ll, nr_blocks);
	}
	/* Back to normal ops before replaying the queued refcounts. */
	memcpy(&smm->sm, &ops, sizeof(smm->sm));
	if (r)
		return r;

	/*
	 * Now we need to update the newly created data structures with the
	 * allocated blocks that they were built from.
	 */
	for (i = superblock; !r && i < smm->begin; i++)
		r = add_bop(smm, BOP_INC, i);

	if (r)
		return r;

	r = apply_bops(smm);
	if (r) {
		DMERR("%s: apply_bops failed", __func__);
		return r;
	}

	return sm_metadata_commit(sm);
}
816*4882a593Smuzhiyun 
/*
 * Open an existing metadata space map from its on-disk root (root_le,
 * len bytes) and reset the in-core transaction state.  old_ll keeps a
 * snapshot of the on-disk state as of the last commit.
 */
int dm_sm_metadata_open(struct dm_space_map *sm,
			struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	int ret = sm_ll_open_metadata(&smm->ll, tm, root_le, len);

	if (ret)
		return ret;

	smm->begin = 0;
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	brb_init(&smm->uncommitted);
	threshold_init(&smm->threshold);

	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
	return 0;
}
837