xref: /OK3568_Linux_fs/kernel/drivers/md/bcache/movinggc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

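/*
 * State for copying one key's worth of data: the closure sequencing the
 * read/insert steps, the keybuf entry being moved, the insert op that
 * rewrites the key, and an embedded bbio. The bbio must stay last:
 * read_moving() allocates the bio's inline bvecs off the end of the
 * struct via struct_size().
 */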
struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};

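/*
 * Keybuf predicate: a key is a candidate for copying if any of its
 * pointers lands in a bucket that bch_moving_gc() flagged with GC_MOVE.
 */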
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    GC_MOVE(PTR_BUCKET(c, k, i)))
			return true;

	return false;
}

/* Moving GC - IO loop */

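/* Closure destructor: frees the moving_io once the copy is finished. */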
static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	kfree(io);
}

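/*
 * Runs after the insert completes: frees the data pages, drops the key
 * from the keybuf, and releases one of the moving_in_flight slots
 * before the closure destructor frees the moving_io itself.
 */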
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bio_free_pages(bio);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}

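/*
 * Read completion: propagate any I/O error into the insert op. A clean
 * (non-dirty) key whose pointer went stale while the read was in
 * flight is also treated as an error, so write_moving() skips the
 * reinsert.
 */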
static void read_moving_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (bio->bi_status)
		io->op.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(io->op.c, &b->key, 0)) {
		io->op.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
}

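/*
 * (Re)initialize the embedded bio to cover the key being moved; called
 * once for the read and again from write_moving() for the write.
 */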
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
	bio->bi_private		= &io->cl;
	bch_bio_map(bio, NULL);
}

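/*
 * Write phase: if the read succeeded, reinsert the data with
 * op->replace set, so the insert succeeds only if the original key is
 * still present. A collision just means the data was moved or
 * invalidated under us; it is traced, not treated as an error.
 */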
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->status) {
		moving_init(io);

		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
		op->write_prio		= 1;
		op->bio			= &io->bio.bio;

		op->writeback		= KEY_DIRTY(&io->w->key);
		op->csum		= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace		= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, op->wq);
}

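/* Submit the read for one key, then continue into the write phase. */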
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, io->op.wq);
}

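/*
 * Main copy loop: rescan moving_gc_keys for keys matching
 * moving_pred(), read each key's data into freshly allocated pages,
 * and chain into write_moving() via read_moving_submit(). The
 * moving_in_flight semaphore bounds how many copies are outstanding.
 */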
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		if (ptr_stale(c, &w->key, 0)) {
			bch_keybuf_del(&c->moving_gc_keys, w);
			continue;
		}

		io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
					 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;
		io->op.wq	= c->moving_gc_wq;

		moving_init(io);
		bio = &io->bio.bio;

		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_end_io	= read_moving_endio;

		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

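	/*
	 * Error path for the allocation failures above; the if (0) keeps
	 * the label out of normal control flow. On the kzalloc() failure
	 * w->private has not been pointed at a moving_io yet, hence the
	 * IS_ERR_OR_NULL() check before freeing.
	 */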
	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}

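/*
 * Heap comparator; with bcache's heap macros this behaves as a
 * max-heap on GC_SECTORS_USED(), so the fullest (most expensive to
 * copy) candidate bucket sits on top.
 */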
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

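/* GC_SECTORS_USED() of the bucket on top of the heap, or 0 if empty. */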
static unsigned int bucket_heap_top(struct cache *ca)
{
	struct bucket *b;

	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

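/*
 * Entry point, run after a garbage collection pass has refreshed each
 * bucket's GC_SECTORS_USED(). Picks the set of least-occupied buckets
 * whose live data fits in the movinggc reserve, flags them GC_MOVE,
 * and lets read_moving() copy their live data elsewhere so the buckets
 * can be reclaimed.
 */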
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca = c->cache;
	struct bucket *b;
	unsigned long sectors_to_move, reserve_sectors;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	sectors_to_move = 0;
	reserve_sectors = ca->sb.bucket_size *
			     fifo_used(&ca->free[RESERVE_MOVINGGC]);

	ca->heap.used = 0;

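	/*
	 * Consider every bucket holding reclaimable data: skip metadata
	 * buckets, empty buckets, completely full buckets (copying one
	 * frees nothing on balance), and pinned buckets. The heap keeps
	 * the least-occupied candidates seen so far.
	 */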
	for_each_bucket(b, ca) {
		if (GC_MARK(b) == GC_MARK_METADATA ||
		    !GC_SECTORS_USED(b) ||
		    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
		    atomic_read(&b->pin))
			continue;

		if (!heap_full(&ca->heap)) {
			sectors_to_move += GC_SECTORS_USED(b);
			heap_add(&ca->heap, b, bucket_cmp);
		} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
			sectors_to_move -= bucket_heap_top(ca);
			sectors_to_move += GC_SECTORS_USED(b);

			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_cmp);
		}
	}

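	/*
	 * Evict the fullest candidates until the data left to move fits
	 * in the reserved buckets; everything still in the heap is then
	 * flagged for moving_pred() to pick up.
	 */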
	while (sectors_to_move > reserve_sectors) {
		heap_pop(&ca->heap, b, bucket_cmp);
		sectors_to_move -= GC_SECTORS_USED(b);
	}

	while (heap_pop(&ca->heap, b, bucket_cmp))
		SET_GC_MOVE(b, 1);

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}

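/*
 * Set-up hook: initializes the keybuf and allows up to 64 copy
 * operations in flight at once.
 */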
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}