// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72
#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

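/*
 * Hash-table identity for an in-memory btree node: roughly, the bucket the
 * node's first pointer lives in (offset shifted down by bucket_bits) combined
 * with that pointer's generation. Used by mca_hash()/mca_find() below both to
 * pick a hash chain and to compare nodes for equality.
 */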
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

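/*
 * True when btree node b must be write-locked for the operation described by
 * s: (s)->lock holds the lowest btree level the traversal takes write locks
 * at, so any node at or below that level is locked for writing.
 */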
#define insert_lock(s, b)	((b)->level <= (s)->lock)


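/*
 * write_block() points at the first bset in the node's buffer that has not
 * yet been written out: b->written counts the blocks already on disk.
 */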
static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

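/*
 * Called after a node write: sort what is in memory (interior nodes are
 * always fully sorted, leaves lazily) and, if there is still unwritten space
 * in the node, start a fresh bset at write_block() for subsequent insertions.
 */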
static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

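/*
 * The on-disk checksum is seeded with the node's first pointer (so identical
 * contents at different locations checksum differently) and covers the bset
 * minus its leading 8-byte csum field (hence the "(void *) i + 8" below).
 */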
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->cache->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c->cache));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c->cache))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));
out:
	mempool_free(iter, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io = btree_node_read_endio;
	bio->bi_private = &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked = 0;
	w->journal = NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}

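/*
 * The write below tries to bounce the bset into freshly allocated pages
 * (GFP_NOWAIT): if that succeeds, the copy is submitted and completes
 * asynchronously via btree_node_write_done(), which frees the bounce pages.
 * If the allocation fails, the bio maps the node's memory directly and the
 * write is done synchronously, presumably so nothing modifies the bset while
 * it is in flight.
 */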
static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version = BCACHE_BSET_VERSION;
	i->csum = btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io = btree_node_write_endio;
	b->bio->bi_private = cl;
	b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
	b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		struct bio_vec *bv;
		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, b->bio, iter_all) {
			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
			addr += PAGE_SIZE;
		}

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		/*
		 * No problem for multipage bvec since the bio is
		 * just allocated
		 */
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty, &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c->cache));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	/*
	 * w->journal is always the oldest journal pin of all bkeys
	 * in the leaf node, to make sure the oldest jset seq won't
	 * be increased before this btree node is flushed.
	 */
	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

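/*
 * The reserve scales with tree depth: 8 nodes per level plus 16. With a
 * 3-level tree, for example, 3 * 8 + 16 = 40 in-memory nodes are held back;
 * mca_can_free() is whatever the cache currently holds beyond that reserve.
 */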
#define mca_reserve(c)	(((!IS_ERR_OR_NULL(c->root) && c->root->level)	\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

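/*
 * Allocation order (log2 of pages) needed to hold a node of KEY_SIZE(k)
 * sectors. For example, with 4 KiB pages (PAGE_SECTORS == 8) a 512-sector
 * node needs 64 pages, i.e. order 6.
 */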
static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	/*
	 * kzalloc() is necessary here for initialization,
	 * see code comments in bch_btree_keys_init().
	 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

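/*
 * Try to reclaim a cached node for reuse. Returns 0 with b->lock held for
 * writing once the node is clean and idle; returns -ENOMEM if the lock can't
 * be taken, the node's buffer is smaller than min_order, or the node is
 * dirty or busy and flushing wasn't requested.
 */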
static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking the BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and the BTREE_NODE_journal_flush bit is cleared by btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry\n", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
		i++;
	}

	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		if (nr <= 0 || i >= btree_cache_used)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}

		nr--;
		i++;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		/*
		 * This function is called by cache_set_free(), no I/O
		 * request on cache now, it is unnecessary to acquire
		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP,
				 ilog2(meta_bucket_pages(&c->cache->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_reserve() buckets
		 * allocated in the previous for-loop, they will be
		 * handled properly in bch_cache_set_unregister().
		 */
		return -ENOMEM;
	}

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;

	if (register_shrinker(&c->shrink))
		pr_warn("bcache: %s: could not register shrinker\n",
			__func__);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (likely(c->btree_cache_alloc_lock == NULL)) {
		c->btree_cache_alloc_lock = current;
	} else if (c->btree_cache_alloc_lock != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock(&c->btree_cannibalize_lock);
		return -EINTR;
	}
	spin_unlock(&c->btree_cannibalize_lock);

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a
 * time, or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize_lock() takes. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
	spin_unlock(&c->btree_cannibalize_lock);
}

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent = (void *) ~0UL;
	b->flags = 0;
	b->written = 0;
	b->level = level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	b->parent = parent;

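	/*
	 * Warm the caches: first prefetch the auxiliary search trees and data
	 * of the sets that have been initialized, then the data of any
	 * remaining sets.
	 */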
	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

retry:
	mutex_lock(&b->write_lock);
	/*
	 * If the btree node is selected and being flushed in
	 * btree_flush_write(), delay and retry until the
	 * BTREE_NODE_journal_flush bit is cleared; only then is it safe to
	 * free the btree node here. Otherwise freeing it would race with
	 * that flush.
	 */
	if (btree_node_journal_flush(b)) {
		mutex_unlock(&b->write_lock);
		pr_debug("bnode %p journal_flush set, retry\n", b);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b)) {
		btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);
	}

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			  "Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);

	if (!IS_ERR_OR_NULL(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

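/*
 * Build the key that retires the old node: a copy of its key with the key
 * field zeroed and each pointer's generation bumped to the freshly
 * incremented bucket gen, so that once this key is inserted the old node's
 * buckets can be reused and stale pointers to it are recognized as bad.
 */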
static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned int i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

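/*
 * An insertion at level b->level may split nodes all the way up to the root,
 * so require (root level - node level) * 2 + 1 free buckets in the btree
 * reserve before proceeding. If they aren't there, park the caller on
 * btree_cache_wait and return -EINTR so the operation can be retried.
 */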
static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca = c->cache;
	unsigned int reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		mutex_unlock(&c->bucket_lock);
		return -EINTR;
	}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */

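/*
 * Mark the buckets a key points into for garbage collection and return the
 * maximum staleness (generation distance) among its pointers: buckets are
 * marked METADATA for interior levels, DIRTY or RECLAIMABLE for leaf data,
 * and their GC_SECTORS_USED count is accumulated.
 */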
static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned int i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes
	 * as freed, but since ptr_bad() returns true we'll never actually use
	 * them for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

1247*4882a593Smuzhiyun void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1248*4882a593Smuzhiyun {
1249*4882a593Smuzhiyun unsigned int i;
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun for (i = 0; i < KEY_PTRS(k); i++)
1252*4882a593Smuzhiyun if (ptr_available(c, k, i) &&
1253*4882a593Smuzhiyun !ptr_stale(c, k, i)) {
1254*4882a593Smuzhiyun struct bucket *b = PTR_BUCKET(c, k, i);
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun b->gen = PTR_GEN(k, i);
1257*4882a593Smuzhiyun
1258*4882a593Smuzhiyun if (level && bkey_cmp(k, &ZERO_KEY))
1259*4882a593Smuzhiyun b->prio = BTREE_PRIO;
1260*4882a593Smuzhiyun else if (!level && b->prio == BTREE_PRIO)
1261*4882a593Smuzhiyun b->prio = INITIAL_PRIO;
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun __bch_btree_mark_key(c, level, k);
1265*4882a593Smuzhiyun }
1266*4882a593Smuzhiyun
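/*
 * Recompute the percentage of buckets that are not available for
 * allocation, e.g. 750 of 1000 buckets unavailable gives in_use = 75.
 */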
1267*4882a593Smuzhiyun void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1273*4882a593Smuzhiyun {
1274*4882a593Smuzhiyun uint8_t stale = 0;
1275*4882a593Smuzhiyun unsigned int keys = 0, good_keys = 0;
1276*4882a593Smuzhiyun struct bkey *k;
1277*4882a593Smuzhiyun struct btree_iter iter;
1278*4882a593Smuzhiyun struct bset_tree *t;
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun gc->nodes++;
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1283*4882a593Smuzhiyun stale = max(stale, btree_mark_key(b, k));
1284*4882a593Smuzhiyun keys++;
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun if (bch_ptr_bad(&b->keys, k))
1287*4882a593Smuzhiyun continue;
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun gc->key_bytes += bkey_u64s(k);
1290*4882a593Smuzhiyun gc->nkeys++;
1291*4882a593Smuzhiyun good_keys++;
1292*4882a593Smuzhiyun
1293*4882a593Smuzhiyun gc->data += KEY_SIZE(k);
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1297*4882a593Smuzhiyun btree_bug_on(t->size &&
1298*4882a593Smuzhiyun bset_written(&b->keys, t) &&
1299*4882a593Smuzhiyun bkey_cmp(&b->key, &t->end) < 0,
1300*4882a593Smuzhiyun b, "found short btree key in gc");
1301*4882a593Smuzhiyun
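/*
 * Decide whether this node should be rewritten: always when
 * gc_always_rewrite is set, when keys point at buckets that have gone
 * noticeably stale (> 10 generations), or when more than half of the
 * keys in the node are bad.
 */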
1302*4882a593Smuzhiyun if (b->c->gc_always_rewrite)
1303*4882a593Smuzhiyun return true;
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun if (stale > 10)
1306*4882a593Smuzhiyun return true;
1307*4882a593Smuzhiyun
1308*4882a593Smuzhiyun if ((keys - good_keys) * 2 > keys)
1309*4882a593Smuzhiyun return true;
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun return false;
1312*4882a593Smuzhiyun }
1313*4882a593Smuzhiyun
1314*4882a593Smuzhiyun #define GC_MERGE_NODES 4U
1315*4882a593Smuzhiyun
1316*4882a593Smuzhiyun struct gc_merge_info {
1317*4882a593Smuzhiyun struct btree *b;
1318*4882a593Smuzhiyun unsigned int keys;
1319*4882a593Smuzhiyun };
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1322*4882a593Smuzhiyun struct keylist *insert_keys,
1323*4882a593Smuzhiyun atomic_t *journal_ref,
1324*4882a593Smuzhiyun struct bkey *replace_key);
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1327*4882a593Smuzhiyun struct gc_stat *gc, struct gc_merge_info *r)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun unsigned int i, nodes = 0, keys = 0, blocks;
1330*4882a593Smuzhiyun struct btree *new_nodes[GC_MERGE_NODES];
1331*4882a593Smuzhiyun struct keylist keylist;
1332*4882a593Smuzhiyun struct closure cl;
1333*4882a593Smuzhiyun struct bkey *k;
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun bch_keylist_init(&keylist);
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun if (btree_check_reserve(b, NULL))
1338*4882a593Smuzhiyun return 0;
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun memset(new_nodes, 0, sizeof(new_nodes));
1341*4882a593Smuzhiyun closure_init_stack(&cl);
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1344*4882a593Smuzhiyun keys += r[nodes++].keys;
1345*4882a593Smuzhiyun
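/*
 * Only coalesce if all the keys would fit in one fewer node than we
 * have, with each new node filled to at most ~2/3 of the default
 * node size.
 */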
1346*4882a593Smuzhiyun blocks = btree_default_blocks(b->c) * 2 / 3;
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun if (nodes < 2 ||
1349*4882a593Smuzhiyun __set_blocks(b->keys.set[0].data, keys,
1350*4882a593Smuzhiyun block_bytes(b->c->cache)) > blocks * (nodes - 1))
1351*4882a593Smuzhiyun return 0;
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun for (i = 0; i < nodes; i++) {
1354*4882a593Smuzhiyun new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1355*4882a593Smuzhiyun if (IS_ERR_OR_NULL(new_nodes[i]))
1356*4882a593Smuzhiyun goto out_nocoalesce;
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun /*
1360*4882a593Smuzhiyun * We have to check the reserve here, after we've allocated our new
1361*4882a593Smuzhiyun * nodes, to make sure the insert below will succeed - we also check
1362*4882a593Smuzhiyun * before as an optimization to potentially avoid a bunch of expensive
1363*4882a593Smuzhiyun * allocs/sorts
1364*4882a593Smuzhiyun */
1365*4882a593Smuzhiyun if (btree_check_reserve(b, NULL))
1366*4882a593Smuzhiyun goto out_nocoalesce;
1367*4882a593Smuzhiyun
1368*4882a593Smuzhiyun for (i = 0; i < nodes; i++)
1369*4882a593Smuzhiyun mutex_lock(&new_nodes[i]->write_lock);
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun for (i = nodes - 1; i > 0; --i) {
1372*4882a593Smuzhiyun struct bset *n1 = btree_bset_first(new_nodes[i]);
1373*4882a593Smuzhiyun struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1374*4882a593Smuzhiyun struct bkey *k, *last = NULL;
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun keys = 0;
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun if (i > 1) {
1379*4882a593Smuzhiyun for (k = n2->start;
1380*4882a593Smuzhiyun k < bset_bkey_last(n2);
1381*4882a593Smuzhiyun k = bkey_next(k)) {
1382*4882a593Smuzhiyun if (__set_blocks(n1, n1->keys + keys +
1383*4882a593Smuzhiyun bkey_u64s(k),
1384*4882a593Smuzhiyun block_bytes(b->c->cache)) > blocks)
1385*4882a593Smuzhiyun break;
1386*4882a593Smuzhiyun
1387*4882a593Smuzhiyun last = k;
1388*4882a593Smuzhiyun keys += bkey_u64s(k);
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun } else {
1391*4882a593Smuzhiyun /*
1392*4882a593Smuzhiyun * Last node we're not getting rid of - we're getting
1393*4882a593Smuzhiyun * rid of the node at r[0]. Have to try and fit all of
1394*4882a593Smuzhiyun * the remaining keys into this node; we can't ensure
1395*4882a593Smuzhiyun * they will always fit due to rounding and variable
1396*4882a593Smuzhiyun * length keys (shouldn't be possible in practice,
1397*4882a593Smuzhiyun * though)
1398*4882a593Smuzhiyun */
1399*4882a593Smuzhiyun if (__set_blocks(n1, n1->keys + n2->keys,
1400*4882a593Smuzhiyun block_bytes(b->c->cache)) >
1401*4882a593Smuzhiyun btree_blocks(new_nodes[i]))
1402*4882a593Smuzhiyun goto out_unlock_nocoalesce;
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun keys = n2->keys;
1405*4882a593Smuzhiyun /* Take the key of the node we're getting rid of */
1406*4882a593Smuzhiyun last = &r->b->key;
1407*4882a593Smuzhiyun }
1408*4882a593Smuzhiyun
1409*4882a593Smuzhiyun BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1410*4882a593Smuzhiyun btree_blocks(new_nodes[i]));
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun if (last)
1413*4882a593Smuzhiyun bkey_copy_key(&new_nodes[i]->key, last);
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun memcpy(bset_bkey_last(n1),
1416*4882a593Smuzhiyun n2->start,
1417*4882a593Smuzhiyun (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun n1->keys += keys;
1420*4882a593Smuzhiyun r[i].keys = n1->keys;
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun memmove(n2->start,
1423*4882a593Smuzhiyun bset_bkey_idx(n2, keys),
1424*4882a593Smuzhiyun (void *) bset_bkey_last(n2) -
1425*4882a593Smuzhiyun (void *) bset_bkey_idx(n2, keys));
1426*4882a593Smuzhiyun
1427*4882a593Smuzhiyun n2->keys -= keys;
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun if (__bch_keylist_realloc(&keylist,
1430*4882a593Smuzhiyun bkey_u64s(&new_nodes[i]->key)))
1431*4882a593Smuzhiyun goto out_unlock_nocoalesce;
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun bch_btree_node_write(new_nodes[i], &cl);
1434*4882a593Smuzhiyun bch_keylist_add(&keylist, &new_nodes[i]->key);
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun for (i = 0; i < nodes; i++)
1438*4882a593Smuzhiyun mutex_unlock(&new_nodes[i]->write_lock);
1439*4882a593Smuzhiyun
1440*4882a593Smuzhiyun closure_sync(&cl);
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun /* We emptied out this node */
1443*4882a593Smuzhiyun BUG_ON(btree_bset_first(new_nodes[0])->keys);
1444*4882a593Smuzhiyun btree_node_free(new_nodes[0]);
1445*4882a593Smuzhiyun rw_unlock(true, new_nodes[0]);
1446*4882a593Smuzhiyun new_nodes[0] = NULL;
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun for (i = 0; i < nodes; i++) {
1449*4882a593Smuzhiyun if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1450*4882a593Smuzhiyun goto out_nocoalesce;
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun make_btree_freeing_key(r[i].b, keylist.top);
1453*4882a593Smuzhiyun bch_keylist_push(&keylist);
1454*4882a593Smuzhiyun }
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1457*4882a593Smuzhiyun BUG_ON(!bch_keylist_empty(&keylist));
1458*4882a593Smuzhiyun
1459*4882a593Smuzhiyun for (i = 0; i < nodes; i++) {
1460*4882a593Smuzhiyun btree_node_free(r[i].b);
1461*4882a593Smuzhiyun rw_unlock(true, r[i].b);
1462*4882a593Smuzhiyun
1463*4882a593Smuzhiyun r[i].b = new_nodes[i];
1464*4882a593Smuzhiyun }
1465*4882a593Smuzhiyun
1466*4882a593Smuzhiyun memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1467*4882a593Smuzhiyun r[nodes - 1].b = ERR_PTR(-EINTR);
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun trace_bcache_btree_gc_coalesce(nodes);
1470*4882a593Smuzhiyun gc->nodes--;
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun bch_keylist_free(&keylist);
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun /* Invalidated our iterator */
1475*4882a593Smuzhiyun return -EINTR;
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun out_unlock_nocoalesce:
1478*4882a593Smuzhiyun for (i = 0; i < nodes; i++)
1479*4882a593Smuzhiyun mutex_unlock(&new_nodes[i]->write_lock);
1480*4882a593Smuzhiyun
1481*4882a593Smuzhiyun out_nocoalesce:
1482*4882a593Smuzhiyun closure_sync(&cl);
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun while ((k = bch_keylist_pop(&keylist)))
1485*4882a593Smuzhiyun if (!bkey_cmp(k, &ZERO_KEY))
1486*4882a593Smuzhiyun atomic_dec(&b->c->prio_blocked);
1487*4882a593Smuzhiyun bch_keylist_free(&keylist);
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun for (i = 0; i < nodes; i++)
1490*4882a593Smuzhiyun if (!IS_ERR_OR_NULL(new_nodes[i])) {
1491*4882a593Smuzhiyun btree_node_free(new_nodes[i]);
1492*4882a593Smuzhiyun rw_unlock(true, new_nodes[i]);
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun return 0;
1495*4882a593Smuzhiyun }
1496*4882a593Smuzhiyun
1497*4882a593Smuzhiyun static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1498*4882a593Smuzhiyun struct btree *replace)
1499*4882a593Smuzhiyun {
1500*4882a593Smuzhiyun struct keylist keys;
1501*4882a593Smuzhiyun struct btree *n;
1502*4882a593Smuzhiyun
1503*4882a593Smuzhiyun if (btree_check_reserve(b, NULL))
1504*4882a593Smuzhiyun return 0;
1505*4882a593Smuzhiyun
1506*4882a593Smuzhiyun n = btree_node_alloc_replacement(replace, NULL);
1507*4882a593Smuzhiyun
1508*4882a593Smuzhiyun /* recheck reserve after allocating replacement node */
1509*4882a593Smuzhiyun if (btree_check_reserve(b, NULL)) {
1510*4882a593Smuzhiyun btree_node_free(n);
1511*4882a593Smuzhiyun rw_unlock(true, n);
1512*4882a593Smuzhiyun return 0;
1513*4882a593Smuzhiyun }
1514*4882a593Smuzhiyun
1515*4882a593Smuzhiyun bch_btree_node_write_sync(n);
1516*4882a593Smuzhiyun
1517*4882a593Smuzhiyun bch_keylist_init(&keys);
1518*4882a593Smuzhiyun bch_keylist_add(&keys, &n->key);
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun make_btree_freeing_key(replace, keys.top);
1521*4882a593Smuzhiyun bch_keylist_push(&keys);
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun bch_btree_insert_node(b, op, &keys, NULL, NULL);
1524*4882a593Smuzhiyun BUG_ON(!bch_keylist_empty(&keys));
1525*4882a593Smuzhiyun
1526*4882a593Smuzhiyun btree_node_free(replace);
1527*4882a593Smuzhiyun rw_unlock(true, n);
1528*4882a593Smuzhiyun
1529*4882a593Smuzhiyun /* Invalidated our iterator */
1530*4882a593Smuzhiyun return -EINTR;
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun static unsigned int btree_gc_count_keys(struct btree *b)
1534*4882a593Smuzhiyun {
1535*4882a593Smuzhiyun struct bkey *k;
1536*4882a593Smuzhiyun struct btree_iter iter;
1537*4882a593Smuzhiyun unsigned int ret = 0;
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1540*4882a593Smuzhiyun ret += bkey_u64s(k);
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun return ret;
1543*4882a593Smuzhiyun }
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun static size_t btree_gc_min_nodes(struct cache_set *c)
1546*4882a593Smuzhiyun {
1547*4882a593Smuzhiyun size_t min_nodes;
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun /*
1550*4882a593Smuzhiyun * Incremental GC pauses for 100ms whenever front-side
1551*4882a593Smuzhiyun * I/O arrives. If GC only processed a constant number
1552*4882a593Smuzhiyun * of nodes (say 100) per batch, GC on a large btree
1553*4882a593Smuzhiyun * would take a very long time, and meanwhile the
1554*4882a593Smuzhiyun * front-side I/Os could run out of buckets (no new
1555*4882a593Smuzhiyun * bucket can be allocated while GC is running) and be
1556*4882a593Smuzhiyun * blocked again.
1557*4882a593Smuzhiyun * So instead of a constant batch size, scale the batch
1558*4882a593Smuzhiyun * with the total number of btree nodes by splitting GC
1559*4882a593Smuzhiyun * into at most MAX_GC_TIMES batches: with many btree
1560*4882a593Smuzhiyun * nodes GC processes more nodes per batch, with few it
1561*4882a593Smuzhiyun * processes fewer (but never fewer than MIN_GC_NODES).
1562*4882a593Smuzhiyun */
1563*4882a593Smuzhiyun min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1564*4882a593Smuzhiyun if (min_nodes < MIN_GC_NODES)
1565*4882a593Smuzhiyun min_nodes = MIN_GC_NODES;
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun return min_nodes;
1568*4882a593Smuzhiyun }
1569*4882a593Smuzhiyun
1570*4882a593Smuzhiyun
1571*4882a593Smuzhiyun static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1572*4882a593Smuzhiyun struct closure *writes, struct gc_stat *gc)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun int ret = 0;
1575*4882a593Smuzhiyun bool should_rewrite;
1576*4882a593Smuzhiyun struct bkey *k;
1577*4882a593Smuzhiyun struct btree_iter iter;
1578*4882a593Smuzhiyun struct gc_merge_info r[GC_MERGE_NODES];
1579*4882a593Smuzhiyun struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1580*4882a593Smuzhiyun
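/*
 * r[] is a sliding window over the most recently read child nodes:
 * each pass reads the next child into r[0], tries to coalesce the
 * window, then marks, recurses into and writes out the oldest entry
 * before shifting the window down by one.
 */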
1581*4882a593Smuzhiyun bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1582*4882a593Smuzhiyun
1583*4882a593Smuzhiyun for (i = r; i < r + ARRAY_SIZE(r); i++)
1584*4882a593Smuzhiyun i->b = ERR_PTR(-EINTR);
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun while (1) {
1587*4882a593Smuzhiyun k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1588*4882a593Smuzhiyun if (k) {
1589*4882a593Smuzhiyun r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1590*4882a593Smuzhiyun true, b);
1591*4882a593Smuzhiyun if (IS_ERR(r->b)) {
1592*4882a593Smuzhiyun ret = PTR_ERR(r->b);
1593*4882a593Smuzhiyun break;
1594*4882a593Smuzhiyun }
1595*4882a593Smuzhiyun
1596*4882a593Smuzhiyun r->keys = btree_gc_count_keys(r->b);
1597*4882a593Smuzhiyun
1598*4882a593Smuzhiyun ret = btree_gc_coalesce(b, op, gc, r);
1599*4882a593Smuzhiyun if (ret)
1600*4882a593Smuzhiyun break;
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun if (!last->b)
1604*4882a593Smuzhiyun break;
1605*4882a593Smuzhiyun
1606*4882a593Smuzhiyun if (!IS_ERR(last->b)) {
1607*4882a593Smuzhiyun should_rewrite = btree_gc_mark_node(last->b, gc);
1608*4882a593Smuzhiyun if (should_rewrite) {
1609*4882a593Smuzhiyun ret = btree_gc_rewrite_node(b, op, last->b);
1610*4882a593Smuzhiyun if (ret)
1611*4882a593Smuzhiyun break;
1612*4882a593Smuzhiyun }
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun if (last->b->level) {
1615*4882a593Smuzhiyun ret = btree_gc_recurse(last->b, op, writes, gc);
1616*4882a593Smuzhiyun if (ret)
1617*4882a593Smuzhiyun break;
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun bkey_copy_key(&b->c->gc_done, &last->b->key);
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyun /*
1623*4882a593Smuzhiyun * Must flush leaf nodes before gc ends, since replace
1624*4882a593Smuzhiyun * operations aren't journalled
1625*4882a593Smuzhiyun */
1626*4882a593Smuzhiyun mutex_lock(&last->b->write_lock);
1627*4882a593Smuzhiyun if (btree_node_dirty(last->b))
1628*4882a593Smuzhiyun bch_btree_node_write(last->b, writes);
1629*4882a593Smuzhiyun mutex_unlock(&last->b->write_lock);
1630*4882a593Smuzhiyun rw_unlock(true, last->b);
1631*4882a593Smuzhiyun }
1632*4882a593Smuzhiyun
1633*4882a593Smuzhiyun memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1634*4882a593Smuzhiyun r->b = NULL;
1635*4882a593Smuzhiyun
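/*
 * Incremental GC: when front-side requests are in flight and this
 * pass has already covered its share of nodes (see
 * btree_gc_min_nodes()), return -EAGAIN so the caller can sleep
 * briefly and let the I/O through.
 */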
1636*4882a593Smuzhiyun if (atomic_read(&b->c->search_inflight) &&
1637*4882a593Smuzhiyun gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1638*4882a593Smuzhiyun gc->nodes_pre = gc->nodes;
1639*4882a593Smuzhiyun ret = -EAGAIN;
1640*4882a593Smuzhiyun break;
1641*4882a593Smuzhiyun }
1642*4882a593Smuzhiyun
1643*4882a593Smuzhiyun if (need_resched()) {
1644*4882a593Smuzhiyun ret = -EAGAIN;
1645*4882a593Smuzhiyun break;
1646*4882a593Smuzhiyun }
1647*4882a593Smuzhiyun }
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun for (i = r; i < r + ARRAY_SIZE(r); i++)
1650*4882a593Smuzhiyun if (!IS_ERR_OR_NULL(i->b)) {
1651*4882a593Smuzhiyun mutex_lock(&i->b->write_lock);
1652*4882a593Smuzhiyun if (btree_node_dirty(i->b))
1653*4882a593Smuzhiyun bch_btree_node_write(i->b, writes);
1654*4882a593Smuzhiyun mutex_unlock(&i->b->write_lock);
1655*4882a593Smuzhiyun rw_unlock(true, i->b);
1656*4882a593Smuzhiyun }
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun return ret;
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun
1661*4882a593Smuzhiyun static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1662*4882a593Smuzhiyun struct closure *writes, struct gc_stat *gc)
1663*4882a593Smuzhiyun {
1664*4882a593Smuzhiyun struct btree *n = NULL;
1665*4882a593Smuzhiyun int ret = 0;
1666*4882a593Smuzhiyun bool should_rewrite;
1667*4882a593Smuzhiyun
1668*4882a593Smuzhiyun should_rewrite = btree_gc_mark_node(b, gc);
1669*4882a593Smuzhiyun if (should_rewrite) {
1670*4882a593Smuzhiyun n = btree_node_alloc_replacement(b, NULL);
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun if (!IS_ERR_OR_NULL(n)) {
1673*4882a593Smuzhiyun bch_btree_node_write_sync(n);
1674*4882a593Smuzhiyun
1675*4882a593Smuzhiyun bch_btree_set_root(n);
1676*4882a593Smuzhiyun btree_node_free(b);
1677*4882a593Smuzhiyun rw_unlock(true, n);
1678*4882a593Smuzhiyun
1679*4882a593Smuzhiyun return -EINTR;
1680*4882a593Smuzhiyun }
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun
1683*4882a593Smuzhiyun __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1684*4882a593Smuzhiyun
1685*4882a593Smuzhiyun if (b->level) {
1686*4882a593Smuzhiyun ret = btree_gc_recurse(b, op, writes, gc);
1687*4882a593Smuzhiyun if (ret)
1688*4882a593Smuzhiyun return ret;
1689*4882a593Smuzhiyun }
1690*4882a593Smuzhiyun
1691*4882a593Smuzhiyun bkey_copy_key(&b->c->gc_done, &b->key);
1692*4882a593Smuzhiyun
1693*4882a593Smuzhiyun return ret;
1694*4882a593Smuzhiyun }
1695*4882a593Smuzhiyun
1696*4882a593Smuzhiyun static void btree_gc_start(struct cache_set *c)
1697*4882a593Smuzhiyun {
1698*4882a593Smuzhiyun struct cache *ca;
1699*4882a593Smuzhiyun struct bucket *b;
1700*4882a593Smuzhiyun
1701*4882a593Smuzhiyun if (!c->gc_mark_valid)
1702*4882a593Smuzhiyun return;
1703*4882a593Smuzhiyun
1704*4882a593Smuzhiyun mutex_lock(&c->bucket_lock);
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun c->gc_mark_valid = 0;
1707*4882a593Smuzhiyun c->gc_done = ZERO_KEY;
1708*4882a593Smuzhiyun
1709*4882a593Smuzhiyun ca = c->cache;
1710*4882a593Smuzhiyun for_each_bucket(b, ca) {
1711*4882a593Smuzhiyun b->last_gc = b->gen;
1712*4882a593Smuzhiyun if (!atomic_read(&b->pin)) {
1713*4882a593Smuzhiyun SET_GC_MARK(b, 0);
1714*4882a593Smuzhiyun SET_GC_SECTORS_USED(b, 0);
1715*4882a593Smuzhiyun }
1716*4882a593Smuzhiyun }
1717*4882a593Smuzhiyun
1718*4882a593Smuzhiyun mutex_unlock(&c->bucket_lock);
1719*4882a593Smuzhiyun }
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun static void bch_btree_gc_finish(struct cache_set *c)
1722*4882a593Smuzhiyun {
1723*4882a593Smuzhiyun struct bucket *b;
1724*4882a593Smuzhiyun struct cache *ca;
1725*4882a593Smuzhiyun unsigned int i, j;
1726*4882a593Smuzhiyun uint64_t *k;
1727*4882a593Smuzhiyun
1728*4882a593Smuzhiyun mutex_lock(&c->bucket_lock);
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun set_gc_sectors(c);
1731*4882a593Smuzhiyun c->gc_mark_valid = 1;
1732*4882a593Smuzhiyun c->need_gc = 0;
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1735*4882a593Smuzhiyun SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1736*4882a593Smuzhiyun GC_MARK_METADATA);
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun /* don't reclaim buckets to which writeback keys point */
1739*4882a593Smuzhiyun rcu_read_lock();
1740*4882a593Smuzhiyun for (i = 0; i < c->devices_max_used; i++) {
1741*4882a593Smuzhiyun struct bcache_device *d = c->devices[i];
1742*4882a593Smuzhiyun struct cached_dev *dc;
1743*4882a593Smuzhiyun struct keybuf_key *w, *n;
1744*4882a593Smuzhiyun
1745*4882a593Smuzhiyun if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1746*4882a593Smuzhiyun continue;
1747*4882a593Smuzhiyun dc = container_of(d, struct cached_dev, disk);
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun spin_lock(&dc->writeback_keys.lock);
1750*4882a593Smuzhiyun rbtree_postorder_for_each_entry_safe(w, n,
1751*4882a593Smuzhiyun &dc->writeback_keys.keys, node)
1752*4882a593Smuzhiyun for (j = 0; j < KEY_PTRS(&w->key); j++)
1753*4882a593Smuzhiyun SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1754*4882a593Smuzhiyun GC_MARK_DIRTY);
1755*4882a593Smuzhiyun spin_unlock(&dc->writeback_keys.lock);
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun rcu_read_unlock();
1758*4882a593Smuzhiyun
1759*4882a593Smuzhiyun c->avail_nbuckets = 0;
1760*4882a593Smuzhiyun
1761*4882a593Smuzhiyun ca = c->cache;
1762*4882a593Smuzhiyun ca->invalidate_needs_gc = 0;
1763*4882a593Smuzhiyun
1764*4882a593Smuzhiyun for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1765*4882a593Smuzhiyun SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun for (k = ca->prio_buckets;
1768*4882a593Smuzhiyun k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1769*4882a593Smuzhiyun SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1770*4882a593Smuzhiyun
1771*4882a593Smuzhiyun for_each_bucket(b, ca) {
1772*4882a593Smuzhiyun c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1773*4882a593Smuzhiyun
1774*4882a593Smuzhiyun if (atomic_read(&b->pin))
1775*4882a593Smuzhiyun continue;
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1780*4882a593Smuzhiyun c->avail_nbuckets++;
1781*4882a593Smuzhiyun }
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun mutex_unlock(&c->bucket_lock);
1784*4882a593Smuzhiyun }
1785*4882a593Smuzhiyun
1786*4882a593Smuzhiyun static void bch_btree_gc(struct cache_set *c)
1787*4882a593Smuzhiyun {
1788*4882a593Smuzhiyun int ret;
1789*4882a593Smuzhiyun struct gc_stat stats;
1790*4882a593Smuzhiyun struct closure writes;
1791*4882a593Smuzhiyun struct btree_op op;
1792*4882a593Smuzhiyun uint64_t start_time = local_clock();
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun trace_bcache_gc_start(c);
1795*4882a593Smuzhiyun
1796*4882a593Smuzhiyun memset(&stats, 0, sizeof(struct gc_stat));
1797*4882a593Smuzhiyun closure_init_stack(&writes);
1798*4882a593Smuzhiyun bch_btree_op_init(&op, SHRT_MAX);
1799*4882a593Smuzhiyun
1800*4882a593Smuzhiyun btree_gc_start(c);
1801*4882a593Smuzhiyun
1802*4882a593Smuzhiyun /* if CACHE_SET_IO_DISABLE is set, the gc thread should stop too */
1803*4882a593Smuzhiyun do {
1804*4882a593Smuzhiyun ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1805*4882a593Smuzhiyun closure_sync(&writes);
1806*4882a593Smuzhiyun cond_resched();
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun if (ret == -EAGAIN)
1809*4882a593Smuzhiyun schedule_timeout_interruptible(msecs_to_jiffies
1810*4882a593Smuzhiyun (GC_SLEEP_MS));
1811*4882a593Smuzhiyun else if (ret)
1812*4882a593Smuzhiyun pr_warn("gc failed!\n");
1813*4882a593Smuzhiyun } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1814*4882a593Smuzhiyun
1815*4882a593Smuzhiyun bch_btree_gc_finish(c);
1816*4882a593Smuzhiyun wake_up_allocators(c);
1817*4882a593Smuzhiyun
1818*4882a593Smuzhiyun bch_time_stats_update(&c->btree_gc_time, start_time);
1819*4882a593Smuzhiyun
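/* key_bytes was counted in u64s and data in 512-byte sectors; convert both to bytes */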
1820*4882a593Smuzhiyun stats.key_bytes *= sizeof(uint64_t);
1821*4882a593Smuzhiyun stats.data <<= 9;
1822*4882a593Smuzhiyun bch_update_bucket_in_use(c, &stats);
1823*4882a593Smuzhiyun memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1824*4882a593Smuzhiyun
1825*4882a593Smuzhiyun trace_bcache_gc_end(c);
1826*4882a593Smuzhiyun
1827*4882a593Smuzhiyun bch_moving_gc(c);
1828*4882a593Smuzhiyun }
1829*4882a593Smuzhiyun
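/*
 * GC should run when the allocator needs buckets invalidated or when
 * c->sectors_to_gc has counted down below zero.
 */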
1830*4882a593Smuzhiyun static bool gc_should_run(struct cache_set *c)
1831*4882a593Smuzhiyun {
1832*4882a593Smuzhiyun struct cache *ca = c->cache;
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun if (ca->invalidate_needs_gc)
1835*4882a593Smuzhiyun return true;
1836*4882a593Smuzhiyun
1837*4882a593Smuzhiyun if (atomic_read(&c->sectors_to_gc) < 0)
1838*4882a593Smuzhiyun return true;
1839*4882a593Smuzhiyun
1840*4882a593Smuzhiyun return false;
1841*4882a593Smuzhiyun }
1842*4882a593Smuzhiyun
1843*4882a593Smuzhiyun static int bch_gc_thread(void *arg)
1844*4882a593Smuzhiyun {
1845*4882a593Smuzhiyun struct cache_set *c = arg;
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun while (1) {
1848*4882a593Smuzhiyun wait_event_interruptible(c->gc_wait,
1849*4882a593Smuzhiyun kthread_should_stop() ||
1850*4882a593Smuzhiyun test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1851*4882a593Smuzhiyun gc_should_run(c));
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun if (kthread_should_stop() ||
1854*4882a593Smuzhiyun test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1855*4882a593Smuzhiyun break;
1856*4882a593Smuzhiyun
1857*4882a593Smuzhiyun set_gc_sectors(c);
1858*4882a593Smuzhiyun bch_btree_gc(c);
1859*4882a593Smuzhiyun }
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun wait_for_kthread_stop();
1862*4882a593Smuzhiyun return 0;
1863*4882a593Smuzhiyun }
1864*4882a593Smuzhiyun
1865*4882a593Smuzhiyun int bch_gc_thread_start(struct cache_set *c)
1866*4882a593Smuzhiyun {
1867*4882a593Smuzhiyun c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1868*4882a593Smuzhiyun return PTR_ERR_OR_ZERO(c->gc_thread);
1869*4882a593Smuzhiyun }
1870*4882a593Smuzhiyun
1871*4882a593Smuzhiyun /* Initial partial gc */
1872*4882a593Smuzhiyun
1873*4882a593Smuzhiyun static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1874*4882a593Smuzhiyun {
1875*4882a593Smuzhiyun int ret = 0;
1876*4882a593Smuzhiyun struct bkey *k, *p = NULL;
1877*4882a593Smuzhiyun struct btree_iter iter;
1878*4882a593Smuzhiyun
1879*4882a593Smuzhiyun for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1880*4882a593Smuzhiyun bch_initial_mark_key(b->c, b->level, k);
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun bch_initial_mark_key(b->c, b->level + 1, &b->key);
1883*4882a593Smuzhiyun
1884*4882a593Smuzhiyun if (b->level) {
1885*4882a593Smuzhiyun bch_btree_iter_init(&b->keys, &iter, NULL);
1886*4882a593Smuzhiyun
1887*4882a593Smuzhiyun do {
1888*4882a593Smuzhiyun k = bch_btree_iter_next_filter(&iter, &b->keys,
1889*4882a593Smuzhiyun bch_ptr_bad);
1890*4882a593Smuzhiyun if (k) {
1891*4882a593Smuzhiyun btree_node_prefetch(b, k);
1892*4882a593Smuzhiyun /*
1893*4882a593Smuzhiyun * initialize c->gc_stats.nodes
1894*4882a593Smuzhiyun * for incremental GC
1895*4882a593Smuzhiyun */
1896*4882a593Smuzhiyun b->c->gc_stats.nodes++;
1897*4882a593Smuzhiyun }
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun if (p)
1900*4882a593Smuzhiyun ret = bcache_btree(check_recurse, p, b, op);
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun p = k;
1903*4882a593Smuzhiyun } while (p && !ret);
1904*4882a593Smuzhiyun }
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun return ret;
1907*4882a593Smuzhiyun }
1908*4882a593Smuzhiyun
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun static int bch_btree_check_thread(void *arg)
1911*4882a593Smuzhiyun {
1912*4882a593Smuzhiyun int ret;
1913*4882a593Smuzhiyun struct btree_check_info *info = arg;
1914*4882a593Smuzhiyun struct btree_check_state *check_state = info->state;
1915*4882a593Smuzhiyun struct cache_set *c = check_state->c;
1916*4882a593Smuzhiyun struct btree_iter iter;
1917*4882a593Smuzhiyun struct bkey *k, *p;
1918*4882a593Smuzhiyun int cur_idx, prev_idx, skip_nr;
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun k = p = NULL;
1921*4882a593Smuzhiyun cur_idx = prev_idx = 0;
1922*4882a593Smuzhiyun ret = 0;
1923*4882a593Smuzhiyun
1924*4882a593Smuzhiyun /* root node keys are checked before this thread is created */
1925*4882a593Smuzhiyun bch_btree_iter_init(&c->root->keys, &iter, NULL);
1926*4882a593Smuzhiyun k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1927*4882a593Smuzhiyun BUG_ON(!k);
1928*4882a593Smuzhiyun
1929*4882a593Smuzhiyun p = k;
1930*4882a593Smuzhiyun while (k) {
1931*4882a593Smuzhiyun /*
1932*4882a593Smuzhiyun * Fetch a root node key index, skip the keys which
1933*4882a593Smuzhiyun * should be fetched by other threads, then check the
1934*4882a593Smuzhiyun * sub-tree indexed by the fetched key.
1935*4882a593Smuzhiyun */
1936*4882a593Smuzhiyun spin_lock(&check_state->idx_lock);
1937*4882a593Smuzhiyun cur_idx = check_state->key_idx;
1938*4882a593Smuzhiyun check_state->key_idx++;
1939*4882a593Smuzhiyun spin_unlock(&check_state->idx_lock);
1940*4882a593Smuzhiyun
1941*4882a593Smuzhiyun skip_nr = cur_idx - prev_idx;
1942*4882a593Smuzhiyun
1943*4882a593Smuzhiyun while (skip_nr) {
1944*4882a593Smuzhiyun k = bch_btree_iter_next_filter(&iter,
1945*4882a593Smuzhiyun &c->root->keys,
1946*4882a593Smuzhiyun bch_ptr_bad);
1947*4882a593Smuzhiyun if (k)
1948*4882a593Smuzhiyun p = k;
1949*4882a593Smuzhiyun else {
1950*4882a593Smuzhiyun /*
1951*4882a593Smuzhiyun * No more keys to check in root node,
1952*4882a593Smuzhiyun * current checking threads are enough,
1953*4882a593Smuzhiyun * stop creating more.
1954*4882a593Smuzhiyun */
1955*4882a593Smuzhiyun atomic_set(&check_state->enough, 1);
1956*4882a593Smuzhiyun /* Update check_state->enough earlier */
1957*4882a593Smuzhiyun smp_mb__after_atomic();
1958*4882a593Smuzhiyun goto out;
1959*4882a593Smuzhiyun }
1960*4882a593Smuzhiyun skip_nr--;
1961*4882a593Smuzhiyun cond_resched();
1962*4882a593Smuzhiyun }
1963*4882a593Smuzhiyun
1964*4882a593Smuzhiyun if (p) {
1965*4882a593Smuzhiyun struct btree_op op;
1966*4882a593Smuzhiyun
1967*4882a593Smuzhiyun btree_node_prefetch(c->root, p);
1968*4882a593Smuzhiyun c->gc_stats.nodes++;
1969*4882a593Smuzhiyun bch_btree_op_init(&op, 0);
1970*4882a593Smuzhiyun ret = bcache_btree(check_recurse, p, c->root, &op);
1971*4882a593Smuzhiyun if (ret)
1972*4882a593Smuzhiyun goto out;
1973*4882a593Smuzhiyun }
1974*4882a593Smuzhiyun p = NULL;
1975*4882a593Smuzhiyun prev_idx = cur_idx;
1976*4882a593Smuzhiyun cond_resched();
1977*4882a593Smuzhiyun }
1978*4882a593Smuzhiyun
1979*4882a593Smuzhiyun out:
1980*4882a593Smuzhiyun info->result = ret;
1981*4882a593Smuzhiyun /* update check_state->started among all CPUs */
1982*4882a593Smuzhiyun smp_mb__before_atomic();
1983*4882a593Smuzhiyun if (atomic_dec_and_test(&check_state->started))
1984*4882a593Smuzhiyun wake_up(&check_state->wait);
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun return ret;
1987*4882a593Smuzhiyun }
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun
1990*4882a593Smuzhiyun
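/* Use half the online CPUs for check threads, clamped to [1, BCH_BTR_CHKTHREAD_MAX]. */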
1991*4882a593Smuzhiyun static int bch_btree_chkthread_nr(void)
1992*4882a593Smuzhiyun {
1993*4882a593Smuzhiyun int n = num_online_cpus()/2;
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun if (n == 0)
1996*4882a593Smuzhiyun n = 1;
1997*4882a593Smuzhiyun else if (n > BCH_BTR_CHKTHREAD_MAX)
1998*4882a593Smuzhiyun n = BCH_BTR_CHKTHREAD_MAX;
1999*4882a593Smuzhiyun
2000*4882a593Smuzhiyun return n;
2001*4882a593Smuzhiyun }
2002*4882a593Smuzhiyun
2003*4882a593Smuzhiyun int bch_btree_check(struct cache_set *c)
2004*4882a593Smuzhiyun {
2005*4882a593Smuzhiyun int ret = 0;
2006*4882a593Smuzhiyun int i;
2007*4882a593Smuzhiyun struct bkey *k = NULL;
2008*4882a593Smuzhiyun struct btree_iter iter;
2009*4882a593Smuzhiyun struct btree_check_state check_state;
2010*4882a593Smuzhiyun
2011*4882a593Smuzhiyun /* check and mark root node keys */
2012*4882a593Smuzhiyun for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2013*4882a593Smuzhiyun bch_initial_mark_key(c, c->root->level, k);
2014*4882a593Smuzhiyun
2015*4882a593Smuzhiyun bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
2016*4882a593Smuzhiyun
2017*4882a593Smuzhiyun if (c->root->level == 0)
2018*4882a593Smuzhiyun return 0;
2019*4882a593Smuzhiyun
2020*4882a593Smuzhiyun memset(&check_state, 0, sizeof(struct btree_check_state));
2021*4882a593Smuzhiyun check_state.c = c;
2022*4882a593Smuzhiyun check_state.total_threads = bch_btree_chkthread_nr();
2023*4882a593Smuzhiyun check_state.key_idx = 0;
2024*4882a593Smuzhiyun spin_lock_init(&check_state.idx_lock);
2025*4882a593Smuzhiyun atomic_set(&check_state.started, 0);
2026*4882a593Smuzhiyun atomic_set(&check_state.enough, 0);
2027*4882a593Smuzhiyun init_waitqueue_head(&check_state.wait);
2028*4882a593Smuzhiyun
2029*4882a593Smuzhiyun rw_lock(0, c->root, c->root->level);
2030*4882a593Smuzhiyun /*
2031*4882a593Smuzhiyun * Run multiple threads to check btree nodes in parallel.
2032*4882a593Smuzhiyun * If check_state.enough becomes non-zero, the check threads
2033*4882a593Smuzhiyun * already running are sufficient and it is unnecessary to
2034*4882a593Smuzhiyun * create more.
2035*4882a593Smuzhiyun */
2036*4882a593Smuzhiyun for (i = 0; i < check_state.total_threads; i++) {
2037*4882a593Smuzhiyun /* fetch latest check_state.enough earlier */
2038*4882a593Smuzhiyun smp_mb__before_atomic();
2039*4882a593Smuzhiyun if (atomic_read(&check_state.enough))
2040*4882a593Smuzhiyun break;
2041*4882a593Smuzhiyun
2042*4882a593Smuzhiyun check_state.infos[i].result = 0;
2043*4882a593Smuzhiyun check_state.infos[i].state = &check_state;
2044*4882a593Smuzhiyun
2045*4882a593Smuzhiyun check_state.infos[i].thread =
2046*4882a593Smuzhiyun kthread_run(bch_btree_check_thread,
2047*4882a593Smuzhiyun &check_state.infos[i],
2048*4882a593Smuzhiyun "bch_btrchk[%d]", i);
2049*4882a593Smuzhiyun if (IS_ERR(check_state.infos[i].thread)) {
2050*4882a593Smuzhiyun pr_err("fails to run thread bch_btrchk[%d]\n", i);
2051*4882a593Smuzhiyun for (--i; i >= 0; i--)
2052*4882a593Smuzhiyun kthread_stop(check_state.infos[i].thread);
2053*4882a593Smuzhiyun ret = -ENOMEM;
2054*4882a593Smuzhiyun goto out;
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun atomic_inc(&check_state.started);
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun
2059*4882a593Smuzhiyun /*
2060*4882a593Smuzhiyun * Must wait for all threads to stop.
2061*4882a593Smuzhiyun */
2062*4882a593Smuzhiyun wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
2063*4882a593Smuzhiyun
2064*4882a593Smuzhiyun for (i = 0; i < check_state.total_threads; i++) {
2065*4882a593Smuzhiyun if (check_state.infos[i].result) {
2066*4882a593Smuzhiyun ret = check_state.infos[i].result;
2067*4882a593Smuzhiyun goto out;
2068*4882a593Smuzhiyun }
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun out:
2072*4882a593Smuzhiyun rw_unlock(0, c->root);
2073*4882a593Smuzhiyun return ret;
2074*4882a593Smuzhiyun }
2075*4882a593Smuzhiyun
2076*4882a593Smuzhiyun void bch_initial_gc_finish(struct cache_set *c)
2077*4882a593Smuzhiyun {
2078*4882a593Smuzhiyun struct cache *ca = c->cache;
2079*4882a593Smuzhiyun struct bucket *b;
2080*4882a593Smuzhiyun
2081*4882a593Smuzhiyun bch_btree_gc_finish(c);
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun mutex_lock(&c->bucket_lock);
2084*4882a593Smuzhiyun
2085*4882a593Smuzhiyun /*
2086*4882a593Smuzhiyun * We need to put some unused buckets directly on the prio freelist in
2087*4882a593Smuzhiyun * order to get the allocator thread started - it needs freed buckets in
2088*4882a593Smuzhiyun * order to rewrite the prios and gens, and it needs to rewrite prios
2089*4882a593Smuzhiyun * and gens in order to free buckets.
2090*4882a593Smuzhiyun *
2091*4882a593Smuzhiyun * This is only safe for buckets that have no live data in them, which
2092*4882a593Smuzhiyun * there should always be some of.
2093*4882a593Smuzhiyun */
2094*4882a593Smuzhiyun for_each_bucket(b, ca) {
2095*4882a593Smuzhiyun if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2096*4882a593Smuzhiyun fifo_full(&ca->free[RESERVE_BTREE]))
2097*4882a593Smuzhiyun break;
2098*4882a593Smuzhiyun
2099*4882a593Smuzhiyun if (bch_can_invalidate_bucket(ca, b) &&
2100*4882a593Smuzhiyun !GC_MARK(b)) {
2101*4882a593Smuzhiyun __bch_invalidate_one_bucket(ca, b);
2102*4882a593Smuzhiyun if (!fifo_push(&ca->free[RESERVE_PRIO],
2103*4882a593Smuzhiyun b - ca->buckets))
2104*4882a593Smuzhiyun fifo_push(&ca->free[RESERVE_BTREE],
2105*4882a593Smuzhiyun b - ca->buckets);
2106*4882a593Smuzhiyun }
2107*4882a593Smuzhiyun }
2108*4882a593Smuzhiyun
2109*4882a593Smuzhiyun mutex_unlock(&c->bucket_lock);
2110*4882a593Smuzhiyun }
2111*4882a593Smuzhiyun
2112*4882a593Smuzhiyun /* Btree insertion */
2113*4882a593Smuzhiyun
2114*4882a593Smuzhiyun static bool btree_insert_key(struct btree *b, struct bkey *k,
2115*4882a593Smuzhiyun struct bkey *replace_key)
2116*4882a593Smuzhiyun {
2117*4882a593Smuzhiyun unsigned int status;
2118*4882a593Smuzhiyun
2119*4882a593Smuzhiyun BUG_ON(bkey_cmp(k, &b->key) > 0);
2120*4882a593Smuzhiyun
2121*4882a593Smuzhiyun status = bch_btree_insert_key(&b->keys, k, replace_key);
2122*4882a593Smuzhiyun if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2123*4882a593Smuzhiyun bch_check_keys(&b->keys, "%u for %s", status,
2124*4882a593Smuzhiyun replace_key ? "replace" : "insert");
2125*4882a593Smuzhiyun
2126*4882a593Smuzhiyun trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2127*4882a593Smuzhiyun status);
2128*4882a593Smuzhiyun return true;
2129*4882a593Smuzhiyun } else
2130*4882a593Smuzhiyun return false;
2131*4882a593Smuzhiyun }
2132*4882a593Smuzhiyun
2133*4882a593Smuzhiyun static size_t insert_u64s_remaining(struct btree *b)
2134*4882a593Smuzhiyun {
2135*4882a593Smuzhiyun long ret = bch_btree_keys_u64s_remaining(&b->keys);
2136*4882a593Smuzhiyun
2137*4882a593Smuzhiyun /*
2138*4882a593Smuzhiyun * Might land in the middle of an existing extent and have to split it
2139*4882a593Smuzhiyun */
2140*4882a593Smuzhiyun if (b->keys.ops->is_extents)
2141*4882a593Smuzhiyun ret -= KEY_MAX_U64S;
2142*4882a593Smuzhiyun
2143*4882a593Smuzhiyun return max(ret, 0L);
2144*4882a593Smuzhiyun }
2145*4882a593Smuzhiyun
2146*4882a593Smuzhiyun static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2147*4882a593Smuzhiyun struct keylist *insert_keys,
2148*4882a593Smuzhiyun struct bkey *replace_key)
2149*4882a593Smuzhiyun {
2150*4882a593Smuzhiyun bool ret = false;
2151*4882a593Smuzhiyun int oldsize = bch_count_data(&b->keys);
2152*4882a593Smuzhiyun
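/*
 * Insert as many keys as fit entirely within this node; a key that
 * straddles the node's end key is split, with the front half inserted
 * here and the remainder left on the keylist for the next node.
 */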
2153*4882a593Smuzhiyun while (!bch_keylist_empty(insert_keys)) {
2154*4882a593Smuzhiyun struct bkey *k = insert_keys->keys;
2155*4882a593Smuzhiyun
2156*4882a593Smuzhiyun if (bkey_u64s(k) > insert_u64s_remaining(b))
2157*4882a593Smuzhiyun break;
2158*4882a593Smuzhiyun
2159*4882a593Smuzhiyun if (bkey_cmp(k, &b->key) <= 0) {
2160*4882a593Smuzhiyun if (!b->level)
2161*4882a593Smuzhiyun bkey_put(b->c, k);
2162*4882a593Smuzhiyun
2163*4882a593Smuzhiyun ret |= btree_insert_key(b, k, replace_key);
2164*4882a593Smuzhiyun bch_keylist_pop_front(insert_keys);
2165*4882a593Smuzhiyun } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2166*4882a593Smuzhiyun BKEY_PADDED(key) temp;
2167*4882a593Smuzhiyun bkey_copy(&temp.key, insert_keys->keys);
2168*4882a593Smuzhiyun
2169*4882a593Smuzhiyun bch_cut_back(&b->key, &temp.key);
2170*4882a593Smuzhiyun bch_cut_front(&b->key, insert_keys->keys);
2171*4882a593Smuzhiyun
2172*4882a593Smuzhiyun ret |= btree_insert_key(b, &temp.key, replace_key);
2173*4882a593Smuzhiyun break;
2174*4882a593Smuzhiyun } else {
2175*4882a593Smuzhiyun break;
2176*4882a593Smuzhiyun }
2177*4882a593Smuzhiyun }
2178*4882a593Smuzhiyun
2179*4882a593Smuzhiyun if (!ret)
2180*4882a593Smuzhiyun op->insert_collision = true;
2181*4882a593Smuzhiyun
2182*4882a593Smuzhiyun BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2183*4882a593Smuzhiyun
2184*4882a593Smuzhiyun BUG_ON(bch_count_data(&b->keys) < oldsize);
2185*4882a593Smuzhiyun return ret;
2186*4882a593Smuzhiyun }
2187*4882a593Smuzhiyun
2188*4882a593Smuzhiyun static int btree_split(struct btree *b, struct btree_op *op,
2189*4882a593Smuzhiyun struct keylist *insert_keys,
2190*4882a593Smuzhiyun struct bkey *replace_key)
2191*4882a593Smuzhiyun {
2192*4882a593Smuzhiyun bool split;
2193*4882a593Smuzhiyun struct btree *n1, *n2 = NULL, *n3 = NULL;
2194*4882a593Smuzhiyun uint64_t start_time = local_clock();
2195*4882a593Smuzhiyun struct closure cl;
2196*4882a593Smuzhiyun struct keylist parent_keys;
2197*4882a593Smuzhiyun
2198*4882a593Smuzhiyun closure_init_stack(&cl);
2199*4882a593Smuzhiyun bch_keylist_init(&parent_keys);
2200*4882a593Smuzhiyun
2201*4882a593Smuzhiyun if (btree_check_reserve(b, op)) {
2202*4882a593Smuzhiyun if (!b->level)
2203*4882a593Smuzhiyun return -EINTR;
2204*4882a593Smuzhiyun else
2205*4882a593Smuzhiyun WARN(1, "insufficient reserve for split\n");
2206*4882a593Smuzhiyun }
2207*4882a593Smuzhiyun
2208*4882a593Smuzhiyun n1 = btree_node_alloc_replacement(b, op);
2209*4882a593Smuzhiyun if (IS_ERR(n1))
2210*4882a593Smuzhiyun goto err;
2211*4882a593Smuzhiyun
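/*
 * Only split if the keys would fill the replacement node to more than
 * ~4/5 of its blocks; otherwise a plain compaction into n1 is enough.
 */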
2212*4882a593Smuzhiyun split = set_blocks(btree_bset_first(n1),
2213*4882a593Smuzhiyun block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2214*4882a593Smuzhiyun
2215*4882a593Smuzhiyun if (split) {
2216*4882a593Smuzhiyun unsigned int keys = 0;
2217*4882a593Smuzhiyun
2218*4882a593Smuzhiyun trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2219*4882a593Smuzhiyun
2220*4882a593Smuzhiyun n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2221*4882a593Smuzhiyun if (IS_ERR(n2))
2222*4882a593Smuzhiyun goto err_free1;
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun if (!b->parent) {
2225*4882a593Smuzhiyun n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2226*4882a593Smuzhiyun if (IS_ERR(n3))
2227*4882a593Smuzhiyun goto err_free2;
2228*4882a593Smuzhiyun }
2229*4882a593Smuzhiyun
2230*4882a593Smuzhiyun mutex_lock(&n1->write_lock);
2231*4882a593Smuzhiyun mutex_lock(&n2->write_lock);
2232*4882a593Smuzhiyun
2233*4882a593Smuzhiyun bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2234*4882a593Smuzhiyun
2235*4882a593Smuzhiyun /*
2236*4882a593Smuzhiyun * Has to be a linear search because we don't have an auxiliary
2237*4882a593Smuzhiyun * search tree yet
2238*4882a593Smuzhiyun */
2239*4882a593Smuzhiyun
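/* Walk to roughly 3/5 of the keys: n1 keeps that prefix, n2 takes the rest. */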
2240*4882a593Smuzhiyun while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2241*4882a593Smuzhiyun keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2242*4882a593Smuzhiyun keys));
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun bkey_copy_key(&n1->key,
2245*4882a593Smuzhiyun bset_bkey_idx(btree_bset_first(n1), keys));
2246*4882a593Smuzhiyun keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2247*4882a593Smuzhiyun
2248*4882a593Smuzhiyun btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2249*4882a593Smuzhiyun btree_bset_first(n1)->keys = keys;
2250*4882a593Smuzhiyun
2251*4882a593Smuzhiyun memcpy(btree_bset_first(n2)->start,
2252*4882a593Smuzhiyun bset_bkey_last(btree_bset_first(n1)),
2253*4882a593Smuzhiyun btree_bset_first(n2)->keys * sizeof(uint64_t));
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun bkey_copy_key(&n2->key, &b->key);
2256*4882a593Smuzhiyun
2257*4882a593Smuzhiyun bch_keylist_add(&parent_keys, &n2->key);
2258*4882a593Smuzhiyun bch_btree_node_write(n2, &cl);
2259*4882a593Smuzhiyun mutex_unlock(&n2->write_lock);
2260*4882a593Smuzhiyun rw_unlock(true, n2);
2261*4882a593Smuzhiyun } else {
2262*4882a593Smuzhiyun trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2263*4882a593Smuzhiyun
2264*4882a593Smuzhiyun mutex_lock(&n1->write_lock);
2265*4882a593Smuzhiyun bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2266*4882a593Smuzhiyun }
2267*4882a593Smuzhiyun
2268*4882a593Smuzhiyun bch_keylist_add(&parent_keys, &n1->key);
2269*4882a593Smuzhiyun bch_btree_node_write(n1, &cl);
2270*4882a593Smuzhiyun mutex_unlock(&n1->write_lock);
2271*4882a593Smuzhiyun
2272*4882a593Smuzhiyun if (n3) {
2273*4882a593Smuzhiyun /* Depth increases, make a new root */
2274*4882a593Smuzhiyun mutex_lock(&n3->write_lock);
2275*4882a593Smuzhiyun bkey_copy_key(&n3->key, &MAX_KEY);
2276*4882a593Smuzhiyun bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2277*4882a593Smuzhiyun bch_btree_node_write(n3, &cl);
2278*4882a593Smuzhiyun mutex_unlock(&n3->write_lock);
2279*4882a593Smuzhiyun
2280*4882a593Smuzhiyun closure_sync(&cl);
2281*4882a593Smuzhiyun bch_btree_set_root(n3);
2282*4882a593Smuzhiyun rw_unlock(true, n3);
2283*4882a593Smuzhiyun } else if (!b->parent) {
2284*4882a593Smuzhiyun /* Root filled up but didn't need to be split */
2285*4882a593Smuzhiyun closure_sync(&cl);
2286*4882a593Smuzhiyun bch_btree_set_root(n1);
2287*4882a593Smuzhiyun } else {
2288*4882a593Smuzhiyun /* Split a non root node */
2289*4882a593Smuzhiyun closure_sync(&cl);
2290*4882a593Smuzhiyun make_btree_freeing_key(b, parent_keys.top);
2291*4882a593Smuzhiyun bch_keylist_push(&parent_keys);
2292*4882a593Smuzhiyun
2293*4882a593Smuzhiyun bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2294*4882a593Smuzhiyun BUG_ON(!bch_keylist_empty(&parent_keys));
2295*4882a593Smuzhiyun }
2296*4882a593Smuzhiyun
2297*4882a593Smuzhiyun btree_node_free(b);
2298*4882a593Smuzhiyun rw_unlock(true, n1);
2299*4882a593Smuzhiyun
2300*4882a593Smuzhiyun bch_time_stats_update(&b->c->btree_split_time, start_time);
2301*4882a593Smuzhiyun
2302*4882a593Smuzhiyun return 0;
2303*4882a593Smuzhiyun err_free2:
2304*4882a593Smuzhiyun bkey_put(b->c, &n2->key);
2305*4882a593Smuzhiyun btree_node_free(n2);
2306*4882a593Smuzhiyun rw_unlock(true, n2);
2307*4882a593Smuzhiyun err_free1:
2308*4882a593Smuzhiyun bkey_put(b->c, &n1->key);
2309*4882a593Smuzhiyun btree_node_free(n1);
2310*4882a593Smuzhiyun rw_unlock(true, n1);
2311*4882a593Smuzhiyun err:
2312*4882a593Smuzhiyun WARN(1, "bcache: btree split failed (level %u)", b->level);
2313*4882a593Smuzhiyun
2314*4882a593Smuzhiyun if (n3 == ERR_PTR(-EAGAIN) ||
2315*4882a593Smuzhiyun n2 == ERR_PTR(-EAGAIN) ||
2316*4882a593Smuzhiyun n1 == ERR_PTR(-EAGAIN))
2317*4882a593Smuzhiyun return -EAGAIN;
2318*4882a593Smuzhiyun
2319*4882a593Smuzhiyun return -ENOMEM;
2320*4882a593Smuzhiyun }
2321*4882a593Smuzhiyun
2322*4882a593Smuzhiyun static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2323*4882a593Smuzhiyun struct keylist *insert_keys,
2324*4882a593Smuzhiyun atomic_t *journal_ref,
2325*4882a593Smuzhiyun struct bkey *replace_key)
2326*4882a593Smuzhiyun {
2327*4882a593Smuzhiyun struct closure cl;
2328*4882a593Smuzhiyun
2329*4882a593Smuzhiyun BUG_ON(b->level && replace_key);
2330*4882a593Smuzhiyun
2331*4882a593Smuzhiyun closure_init_stack(&cl);
2332*4882a593Smuzhiyun
2333*4882a593Smuzhiyun mutex_lock(&b->write_lock);
2334*4882a593Smuzhiyun
2335*4882a593Smuzhiyun if (write_block(b) != btree_bset_last(b) &&
2336*4882a593Smuzhiyun b->keys.last_set_unwritten)
2337*4882a593Smuzhiyun bch_btree_init_next(b); /* just wrote a set */
2338*4882a593Smuzhiyun
2339*4882a593Smuzhiyun if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2340*4882a593Smuzhiyun mutex_unlock(&b->write_lock);
2341*4882a593Smuzhiyun goto split;
2342*4882a593Smuzhiyun }
2343*4882a593Smuzhiyun
2344*4882a593Smuzhiyun BUG_ON(write_block(b) != btree_bset_last(b));
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2347*4882a593Smuzhiyun if (!b->level)
2348*4882a593Smuzhiyun bch_btree_leaf_dirty(b, journal_ref);
2349*4882a593Smuzhiyun else
2350*4882a593Smuzhiyun bch_btree_node_write(b, &cl);
2351*4882a593Smuzhiyun }
2352*4882a593Smuzhiyun
2353*4882a593Smuzhiyun mutex_unlock(&b->write_lock);
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun /* wait for btree node write if necessary, after unlock */
2356*4882a593Smuzhiyun closure_sync(&cl);
2357*4882a593Smuzhiyun
2358*4882a593Smuzhiyun return 0;
2359*4882a593Smuzhiyun split:
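/*
 * A split is not attempted from inside bio submission
 * (current->bio_list set) or without write locks up to the root;
 * instead raise op->lock to above the root and return -EAGAIN/-EINTR
 * so the caller retries once it can take those locks.
 */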
2360*4882a593Smuzhiyun if (current->bio_list) {
2361*4882a593Smuzhiyun op->lock = b->c->root->level + 1;
2362*4882a593Smuzhiyun return -EAGAIN;
2363*4882a593Smuzhiyun } else if (op->lock <= b->c->root->level) {
2364*4882a593Smuzhiyun op->lock = b->c->root->level + 1;
2365*4882a593Smuzhiyun return -EINTR;
2366*4882a593Smuzhiyun } else {
2367*4882a593Smuzhiyun /* Invalidated all iterators */
2368*4882a593Smuzhiyun int ret = btree_split(b, op, insert_keys, replace_key);
2369*4882a593Smuzhiyun
2370*4882a593Smuzhiyun if (bch_keylist_empty(insert_keys))
2371*4882a593Smuzhiyun return 0;
2372*4882a593Smuzhiyun else if (!ret)
2373*4882a593Smuzhiyun return -EINTR;
2374*4882a593Smuzhiyun return ret;
2375*4882a593Smuzhiyun }
2376*4882a593Smuzhiyun }
2377*4882a593Smuzhiyun
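/*
 * Insert @check_key as a placeholder with a single random pointer on
 * PTR_CHECK_DEV. The caller may hold only a read lock (op->lock == -1);
 * in that case upgrade to a write lock and give up if the node was
 * rewritten underneath us (pointer or sequence number changed).
 */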
2378*4882a593Smuzhiyun int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2379*4882a593Smuzhiyun struct bkey *check_key)
2380*4882a593Smuzhiyun {
2381*4882a593Smuzhiyun int ret = -EINTR;
2382*4882a593Smuzhiyun uint64_t btree_ptr = b->key.ptr[0];
2383*4882a593Smuzhiyun unsigned long seq = b->seq;
2384*4882a593Smuzhiyun struct keylist insert;
2385*4882a593Smuzhiyun bool upgrade = op->lock == -1;
2386*4882a593Smuzhiyun
2387*4882a593Smuzhiyun bch_keylist_init(&insert);
2388*4882a593Smuzhiyun
2389*4882a593Smuzhiyun if (upgrade) {
2390*4882a593Smuzhiyun rw_unlock(false, b);
2391*4882a593Smuzhiyun rw_lock(true, b, b->level);
2392*4882a593Smuzhiyun
2393*4882a593Smuzhiyun if (b->key.ptr[0] != btree_ptr ||
2394*4882a593Smuzhiyun b->seq != seq + 1) {
2395*4882a593Smuzhiyun op->lock = b->level;
2396*4882a593Smuzhiyun goto out;
2397*4882a593Smuzhiyun }
2398*4882a593Smuzhiyun }
2399*4882a593Smuzhiyun
2400*4882a593Smuzhiyun SET_KEY_PTRS(check_key, 1);
2401*4882a593Smuzhiyun get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2402*4882a593Smuzhiyun
2403*4882a593Smuzhiyun SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2404*4882a593Smuzhiyun
2405*4882a593Smuzhiyun bch_keylist_add(&insert, check_key);
2406*4882a593Smuzhiyun
2407*4882a593Smuzhiyun ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun BUG_ON(!ret && !bch_keylist_empty(&insert));
2410*4882a593Smuzhiyun out:
2411*4882a593Smuzhiyun if (upgrade)
2412*4882a593Smuzhiyun downgrade_write(&b->lock);
2413*4882a593Smuzhiyun return ret;
2414*4882a593Smuzhiyun }
2415*4882a593Smuzhiyun
2416*4882a593Smuzhiyun struct btree_insert_op {
2417*4882a593Smuzhiyun struct btree_op op;
2418*4882a593Smuzhiyun struct keylist *keys;
2419*4882a593Smuzhiyun atomic_t *journal_ref;
2420*4882a593Smuzhiyun struct bkey *replace_key;
2421*4882a593Smuzhiyun };
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2424*4882a593Smuzhiyun {
2425*4882a593Smuzhiyun struct btree_insert_op *op = container_of(b_op,
2426*4882a593Smuzhiyun struct btree_insert_op, op);
2427*4882a593Smuzhiyun
2428*4882a593Smuzhiyun int ret = bch_btree_insert_node(b, &op->op, op->keys,
2429*4882a593Smuzhiyun op->journal_ref, op->replace_key);
2430*4882a593Smuzhiyun if (ret && !bch_keylist_empty(op->keys))
2431*4882a593Smuzhiyun return ret;
2432*4882a593Smuzhiyun else
2433*4882a593Smuzhiyun return MAP_DONE;
2434*4882a593Smuzhiyun }
2435*4882a593Smuzhiyun
2436*4882a593Smuzhiyun int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2437*4882a593Smuzhiyun atomic_t *journal_ref, struct bkey *replace_key)
2438*4882a593Smuzhiyun {
2439*4882a593Smuzhiyun struct btree_insert_op op;
2440*4882a593Smuzhiyun int ret = 0;
2441*4882a593Smuzhiyun
2442*4882a593Smuzhiyun BUG_ON(current->bio_list);
2443*4882a593Smuzhiyun BUG_ON(bch_keylist_empty(keys));
2444*4882a593Smuzhiyun
2445*4882a593Smuzhiyun bch_btree_op_init(&op.op, 0);
2446*4882a593Smuzhiyun op.keys = keys;
2447*4882a593Smuzhiyun op.journal_ref = journal_ref;
2448*4882a593Smuzhiyun op.replace_key = replace_key;
2449*4882a593Smuzhiyun
2450*4882a593Smuzhiyun while (!ret && !bch_keylist_empty(keys)) {
2451*4882a593Smuzhiyun op.op.lock = 0;
2452*4882a593Smuzhiyun ret = bch_btree_map_leaf_nodes(&op.op, c,
2453*4882a593Smuzhiyun &START_KEY(keys->keys),
2454*4882a593Smuzhiyun btree_insert_fn);
2455*4882a593Smuzhiyun }
2456*4882a593Smuzhiyun
2457*4882a593Smuzhiyun if (ret) {
2458*4882a593Smuzhiyun struct bkey *k;
2459*4882a593Smuzhiyun
2460*4882a593Smuzhiyun pr_err("error %i\n", ret);
2461*4882a593Smuzhiyun
2462*4882a593Smuzhiyun while ((k = bch_keylist_pop(keys)))
2463*4882a593Smuzhiyun bkey_put(c, k);
2464*4882a593Smuzhiyun } else if (op.op.insert_collision)
2465*4882a593Smuzhiyun ret = -ESRCH;
2466*4882a593Smuzhiyun
2467*4882a593Smuzhiyun return ret;
2468*4882a593Smuzhiyun }
2469*4882a593Smuzhiyun
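/*
 * Make @b the new root of its cache set.  The node must already have been
 * written and its buckets must carry BTREE_PRIO; the change is made
 * persistent via a journal meta entry before returning.
 */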
void bch_btree_set_root(struct btree *b)
{
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

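/*
 * Depth-first walk over btree nodes starting at @from.  Interior nodes
 * recurse into each child; @fn is called on every leaf, and on interior
 * nodes as well when MAP_ALL_NODES is passed.
 */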
static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = bcache_btree(map_nodes_recurse, k, b,
					   op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

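/*
 * As above, but @fn is called on every key in leaf nodes rather than on the
 * nodes themselves; with MAP_END_KEY a final zero-size key at the end of
 * each leaf is passed to @fn as well.
 */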
int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
			       struct bkey *from, btree_map_keys_fn *fn,
			       int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: bcache_btree(map_keys_recurse, k,
				       b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

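/*
 * Context for refilling a keybuf: @pred selects which keys are copied into
 * @buf, @end bounds the scan, and @nr_found counts the keys actually added
 * (used for tracing).
 */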
struct refill {
	struct btree_op	op;
	unsigned int	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

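/*
 * Map callback for bch_refill_keybuf(): keys matching the predicate are
 * copied into the keybuf's rbtree; the scan stops once the end key is
 * passed or the buffer's freelist is exhausted.
 */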
static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) > 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

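/*
 * Scan the btree from buf->last_scanned towards @end, buffering keys that
 * match @pred, then record the range currently held in the buffer in
 * buf->start and buf->end (both MAX_KEY when the buffer is empty).
 */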
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found = 0;
	refill.buf = buf;
	refill.end = end;
	refill.pred = pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start = START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end = w->key;
	} else {
		buf->start = MAX_KEY;
		buf->end = MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

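/*
 * Check whether any buffered key overlaps [start, end).  Overlapping keys
 * that are not in use (w->private == NULL) are dropped from the buffer;
 * returns true only if an in-use key overlaps the range.
 */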
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

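/*
 * Claim the first buffered key that is not already in use, marking it with
 * ERR_PTR(-EINTR) in w->private; returns NULL when every buffered key is
 * already claimed.
 */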
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

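/*
 * Like bch_keybuf_next(), but refill the buffer from the btree when it runs
 * dry, stopping once last_scanned has passed @end.
 */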
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished\n");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned = MAX_KEY;
	buf->keys = RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}

void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
}

int __init bch_btree_init(void)
{
	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
	if (!btree_io_wq)
		return -ENOMEM;

	return 0;
}