// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */
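
/*
 * For illustration of the generation counter (hypothetical values, not
 * upstream documentation): a pointer records the bucket's gen at the time it
 * was created. If a bucket's gen is 4, a pointer with gen 4 is live; once the
 * bucket is freed the counter is bumped to 5, and the old pointer with gen 4
 * compares stale and is discarded by garbage collection rather than followed.
 */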

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"

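/*
 * Advance iterator entry @i to its next key; if that set is exhausted,
 * replace it with the iterator's last entry (shrinking the heap by one).
 */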
static void sort_key_next(struct btree_iter *iter,
                          struct btree_iter_set *i)
{
        i->k = bkey_next(i->k);

        if (i->k == i->end)
                *i = iter->data[--iter->used];
}

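/*
 * Heap comparison function for sorting btree node keys: returns true if
 * l > r; ties between equal keys from different sets are broken by address
 * so duplicates are processed in a consistent order.
 */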
static bool bch_key_sort_cmp(struct btree_iter_set l,
                             struct btree_iter_set r)
{
        int64_t c = bkey_cmp(l.k, r.k);

        return c ? c > 0 : l.k < r.k;
}

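/*
 * Return true if any pointer in @k references a bucket outside the cache
 * device's valid range, or an extent that runs past the end of its bucket.
 */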
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
        unsigned int i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i)) {
                        struct cache *ca = PTR_CACHE(c, k, i);
                        size_t bucket = PTR_BUCKET_NR(c, k, i);
                        size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

                        if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
                            bucket <  ca->sb.first_bucket ||
                            bucket >= ca->sb.nbuckets)
                                return true;
                }

        return false;
}

/* Common among btree and extent ptrs */

static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
        unsigned int i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i)) {
                        struct cache *ca = PTR_CACHE(c, k, i);
                        size_t bucket = PTR_BUCKET_NR(c, k, i);
                        size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

                        if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
                                return "bad, length too big";
                        if (bucket <  ca->sb.first_bucket)
                                return "bad, short offset";
                        if (bucket >= ca->sb.nbuckets)
                                return "bad, offset past end of device";
                        if (ptr_stale(c, k, i))
                                return "stale";
                }

        if (!bkey_cmp(k, &ZERO_KEY))
                return "bad, null key";
        if (!KEY_PTRS(k))
                return "bad, no pointers";
        if (!KEY_SIZE(k))
                return "zeroed key";
        return "";
}

void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
        unsigned int i = 0;
        char *out = buf, *end = buf + size;

#define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))

        p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

        for (i = 0; i < KEY_PTRS(k); i++) {
                if (i)
                        p(", ");

                if (PTR_DEV(k, i) == PTR_CHECK_DEV)
                        p("check dev");
                else
                        p("%llu:%llu gen %llu", PTR_DEV(k, i),
                          PTR_OFFSET(k, i), PTR_GEN(k, i));
        }

        p("]");

        if (KEY_DIRTY(k))
                p(" dirty");
        if (KEY_CSUM(k))
                p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}
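
/*
 * For illustration, a hypothetical single-pointer dirty extent would render
 * as something like:
 *
 *      5:1024 len 16 -> [0:8192 gen 3] dirty
 *
 * i.e. inode 5, starting at sector 1024, 16 sectors long, with one pointer
 * to device 0 at sector offset 8192 written with generation 3.
 */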

static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
        struct btree *b = container_of(keys, struct btree, keys);
        unsigned int j;
        char buf[80];

        bch_extent_to_text(buf, sizeof(buf), k);
        pr_cont(" %s", buf);

        for (j = 0; j < KEY_PTRS(k); j++) {
                size_t n = PTR_BUCKET_NR(b->c, k, j);

                pr_cont(" bucket %zu", n);
                if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
                        pr_cont(" prio %i",
                                PTR_BUCKET(b->c, k, j)->prio);
        }

        pr_cont(" %s\n", bch_ptr_status(b->c, k));
}

/* Btree ptrs */

bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
        char buf[80];

        if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
                goto bad;

        if (__ptr_invalid(c, k))
                goto bad;

        return false;
bad:
        bch_extent_to_text(buf, sizeof(buf), k);
        cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
        return true;
}

static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
        struct btree *b = container_of(bk, struct btree, keys);

        return __bch_btree_ptr_invalid(b->c, k);
}

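/*
 * Opportunistically (via mutex_trylock) take bucket_lock and verify that
 * every bucket a btree node key points at still has btree priority and is
 * marked as metadata; complain and return true on any inconsistency.
 */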
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
        unsigned int i;
        char buf[80];
        struct bucket *g;

        if (mutex_trylock(&b->c->bucket_lock)) {
                for (i = 0; i < KEY_PTRS(k); i++)
                        if (ptr_available(b->c, k, i)) {
                                g = PTR_BUCKET(b->c, k, i);

                                if (KEY_DIRTY(k) ||
                                    g->prio != BTREE_PRIO ||
                                    (b->c->gc_mark_valid &&
                                     GC_MARK(g) != GC_MARK_METADATA))
                                        goto err;
                        }

                mutex_unlock(&b->c->bucket_lock);
        }

        return false;
err:
        mutex_unlock(&b->c->bucket_lock);
        bch_extent_to_text(buf, sizeof(buf), k);
        btree_bug(b,
                  "inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
                  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
                  g->prio, g->gen, g->last_gc, GC_MARK(g));
        return true;
}

static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
        struct btree *b = container_of(bk, struct btree, keys);
        unsigned int i;

        if (!bkey_cmp(k, &ZERO_KEY) ||
            !KEY_PTRS(k) ||
            bch_ptr_invalid(bk, k))
                return true;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (!ptr_available(b->c, k, i) ||
                    ptr_stale(b->c, k, i))
                        return true;

        if (expensive_debug_checks(b->c) &&
            btree_ptr_bad_expensive(b, k))
                return true;

        return false;
}

static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
                                       struct bkey *insert,
                                       struct btree_iter *iter,
                                       struct bkey *replace_key)
{
        struct btree *b = container_of(bk, struct btree, keys);

        if (!KEY_OFFSET(insert))
                btree_current_write(b)->prio_blocked++;

        return false;
}

const struct btree_keys_ops bch_btree_keys_ops = {
        .sort_cmp       = bch_key_sort_cmp,
        .insert_fixup   = bch_btree_ptr_insert_fixup,
        .key_invalid    = bch_btree_ptr_invalid,
        .key_bad        = bch_btree_ptr_bad,
        .key_to_text    = bch_extent_to_text,
        .key_dump       = bch_bkey_dump,
};

/* Extents */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool bch_extent_sort_cmp(struct btree_iter_set l,
                                struct btree_iter_set r)
{
        int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

        return c ? c > 0 : l.k < r.k;
}

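/*
 * Resolve overlaps between the current heads of the iterator's sets before
 * they are emitted by a sort: newer keys win, so overlapped older keys are
 * trimmed or split around them. For illustration (hypothetical sector
 * ranges): if the winning key covers sectors 0-8 and an overlapped key
 * covers 4-12, the latter is cut down to 8-12; if the older key fully
 * contains the newer one, the older key is split and its front half is
 * returned via @tmp so the caller can emit it.
 */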
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
                                          struct bkey *tmp)
{
        while (iter->used > 1) {
                struct btree_iter_set *top = iter->data, *i = top + 1;

                if (iter->used > 2 &&
                    bch_extent_sort_cmp(i[0], i[1]))
                        i++;

                if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
                        break;

                if (!KEY_SIZE(i->k)) {
                        sort_key_next(iter, i);
                        heap_sift(iter, i - top, bch_extent_sort_cmp);
                        continue;
                }

                if (top->k > i->k) {
                        if (bkey_cmp(top->k, i->k) >= 0)
                                sort_key_next(iter, i);
                        else
                                bch_cut_front(top->k, i->k);

                        heap_sift(iter, i - top, bch_extent_sort_cmp);
                } else {
                        /* can't happen because of comparison func */
                        BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

                        if (bkey_cmp(i->k, top->k) < 0) {
                                bkey_copy(tmp, top->k);

                                bch_cut_back(&START_KEY(i->k), tmp);
                                bch_cut_front(i->k, top->k);
                                heap_sift(iter, 0, bch_extent_sort_cmp);

                                return tmp;
                        } else {
                                bch_cut_back(&START_KEY(i->k), top->k);
                        }
                }
        }

        return NULL;
}

static void bch_subtract_dirty(struct bkey *k,
                               struct cache_set *c,
                               uint64_t offset,
                               int sectors)
{
        if (KEY_DIRTY(k))
                bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
                                             offset, -sectors);
}

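/*
 * Make room for @insert among the existing extents it overlaps: overlapped
 * keys are trimmed or split, and for replace operations they are checked
 * against @replace_key first. Returns true if nothing should be inserted
 * (a cmpxchg-style replace that found nothing to replace).
 *
 * For illustration (hypothetical ranges): inserting sectors 8-16 over an
 * existing key covering 0-24 splits the old key into 0-8 and 16-24, with a
 * new key inserted for the top half to match what btree_sort_fixup() expects.
 */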
static bool bch_extent_insert_fixup(struct btree_keys *b,
                                    struct bkey *insert,
                                    struct btree_iter *iter,
                                    struct bkey *replace_key)
{
        struct cache_set *c = container_of(b, struct btree, keys)->c;

        uint64_t old_offset;
        unsigned int old_size, sectors_found = 0;

        BUG_ON(!KEY_OFFSET(insert));
        BUG_ON(!KEY_SIZE(insert));

        while (1) {
                struct bkey *k = bch_btree_iter_next(iter);

                if (!k)
                        break;

                if (bkey_cmp(&START_KEY(k), insert) >= 0) {
                        if (KEY_SIZE(k))
                                break;
                        else
                                continue;
                }

                if (bkey_cmp(k, &START_KEY(insert)) <= 0)
                        continue;

                old_offset = KEY_START(k);
                old_size = KEY_SIZE(k);

                /*
                 * We might overlap with 0 size extents; we can't skip these
                 * because if they're in the set we're inserting to we have to
                 * adjust them so they don't overlap with the key we're
                 * inserting. But we don't want to check them for replace
                 * operations.
                 */

                if (replace_key && KEY_SIZE(k)) {
                        /*
                         * k might have been split since we inserted/found the
                         * key we're replacing
                         */
                        unsigned int i;
                        uint64_t offset = KEY_START(k) -
                                KEY_START(replace_key);

                        /* But it must be a subset of the replace key */
                        if (KEY_START(k) < KEY_START(replace_key) ||
                            KEY_OFFSET(k) > KEY_OFFSET(replace_key))
                                goto check_failed;

                        /* We didn't find a key that we were supposed to */
                        if (KEY_START(k) > KEY_START(insert) + sectors_found)
                                goto check_failed;

                        if (!bch_bkey_equal_header(k, replace_key))
                                goto check_failed;

                        /* skip past gen */
                        offset <<= 8;

                        BUG_ON(!KEY_PTRS(replace_key));

                        for (i = 0; i < KEY_PTRS(replace_key); i++)
                                if (k->ptr[i] != replace_key->ptr[i] + offset)
                                        goto check_failed;

                        sectors_found = KEY_OFFSET(k) - KEY_START(insert);
                }

                if (bkey_cmp(insert, k) < 0 &&
                    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
                        /*
                         * We overlapped in the middle of an existing key: that
                         * means we have to split the old key. But we have to do
                         * slightly different things depending on whether the
                         * old key has been written out yet.
                         */

                        struct bkey *top;

                        bch_subtract_dirty(k, c, KEY_START(insert),
                                           KEY_SIZE(insert));

                        if (bkey_written(b, k)) {
                                /*
                                 * We insert a new key to cover the top of the
                                 * old key, and the old key is modified in place
                                 * to represent the bottom split.
                                 *
                                 * It's completely arbitrary whether the new key
                                 * is the top or the bottom, but it has to match
                                 * up with what btree_sort_fixup() does - it
                                 * doesn't check for this kind of overlap, it
                                 * depends on us inserting a new key for the top
                                 * here.
                                 */
                                top = bch_bset_search(b, bset_tree_last(b),
                                                      insert);
                                bch_bset_insert(b, top, k);
                        } else {
                                BKEY_PADDED(key) temp;
                                bkey_copy(&temp.key, k);
                                bch_bset_insert(b, k, &temp.key);
                                top = bkey_next(k);
                        }

                        bch_cut_front(insert, top);
                        bch_cut_back(&START_KEY(insert), k);
                        bch_bset_fix_invalidated_key(b, k);
                        goto out;
                }

                if (bkey_cmp(insert, k) < 0) {
                        bch_cut_front(insert, k);
                } else {
                        if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
                                old_offset = KEY_START(insert);

                        if (bkey_written(b, k) &&
                            bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
                                /*
                                 * Completely overwrote, so we don't have to
                                 * invalidate the binary search tree
                                 */
                                bch_cut_front(k, k);
                        } else {
                                __bch_cut_back(&START_KEY(insert), k);
                                bch_bset_fix_invalidated_key(b, k);
                        }
                }

                bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
        }

check_failed:
        if (replace_key) {
                if (!sectors_found) {
                        return true;
                } else if (sectors_found < KEY_SIZE(insert)) {
                        SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
                                       (KEY_SIZE(insert) - sectors_found));
                        SET_KEY_SIZE(insert, sectors_found);
                }
        }
out:
        if (KEY_DIRTY(insert))
                bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
                                             KEY_START(insert),
                                             KEY_SIZE(insert));

        return false;
}

bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
        char buf[80];

        if (!KEY_SIZE(k))
                return true;

        if (KEY_SIZE(k) > KEY_OFFSET(k))
                goto bad;

        if (__ptr_invalid(c, k))
                goto bad;

        return false;
bad:
        bch_extent_to_text(buf, sizeof(buf), k);
        cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
        return true;
}

static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
        struct btree *b = container_of(bk, struct btree, keys);

        return __bch_extent_invalid(b->c, k);
}

static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
                                     unsigned int ptr)
{
        struct bucket *g = PTR_BUCKET(b->c, k, ptr);
        char buf[80];

        if (mutex_trylock(&b->c->bucket_lock)) {
                if (b->c->gc_mark_valid &&
                    (!GC_MARK(g) ||
                     GC_MARK(g) == GC_MARK_METADATA ||
                     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
                        goto err;

                if (g->prio == BTREE_PRIO)
                        goto err;

                mutex_unlock(&b->c->bucket_lock);
        }

        return false;
err:
        mutex_unlock(&b->c->bucket_lock);
        bch_extent_to_text(buf, sizeof(buf), k);
        btree_bug(b,
                  "inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
                  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
                  g->prio, g->gen, g->last_gc, GC_MARK(g));
        return true;
}

static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
        struct btree *b = container_of(bk, struct btree, keys);
        unsigned int i, stale;
        char buf[80];

        if (!KEY_PTRS(k) ||
            bch_extent_invalid(bk, k))
                return true;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (!ptr_available(b->c, k, i))
                        return true;

        for (i = 0; i < KEY_PTRS(k); i++) {
                stale = ptr_stale(b->c, k, i);

                if (stale && KEY_DIRTY(k)) {
                        bch_extent_to_text(buf, sizeof(buf), k);
                        pr_info("stale dirty pointer, stale %u, key: %s\n",
                                stale, buf);
                }

                btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
                             "key too stale: %i, need_gc %u",
                             stale, b->c->need_gc);

                if (stale)
                        return true;

                if (expensive_debug_checks(b->c) &&
                    bch_extent_bad_expensive(b, k, i))
                        return true;
        }

        return false;
}

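/*
 * Checksums live in the word just past the last pointer (ptr[KEY_PTRS(k)]);
 * combine the two by addition, masking off the top bit of the sum so the
 * result stays within 63 bits.
 */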
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
        return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
                ~((uint64_t)1 << 63);
}

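/*
 * Try to merge @r into @l, which requires the two extents to be logically
 * adjacent and physically contiguous within the same bucket. For
 * illustration (hypothetical values): if l covers sectors 0-8 pointing at
 * cache offset 1024 and r covers sectors 8-16 pointing at offset 1032 in
 * the same bucket, the result is a single key covering 0-16 at offset 1024.
 * Returns true only if the keys were fully merged.
 */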
static bool bch_extent_merge(struct btree_keys *bk,
                             struct bkey *l,
                             struct bkey *r)
{
        struct btree *b = container_of(bk, struct btree, keys);
        unsigned int i;

        if (key_merging_disabled(b->c))
                return false;

        for (i = 0; i < KEY_PTRS(l); i++)
                if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
                    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
                        return false;

        /*
         * Keys with no pointers aren't restricted to one bucket and could
         * overflow KEY_SIZE
         */
        if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
                SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
                SET_KEY_SIZE(l, USHRT_MAX);

                bch_cut_front(l, r);
                return false;
        }

        if (KEY_CSUM(l)) {
                if (KEY_CSUM(r))
                        l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
                else
                        SET_KEY_CSUM(l, 0);
        }

        SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
        SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

        return true;
}

const struct btree_keys_ops bch_extent_keys_ops = {
        .sort_cmp       = bch_extent_sort_cmp,
        .sort_fixup     = bch_extent_sort_fixup,
        .insert_fixup   = bch_extent_insert_fixup,
        .key_invalid    = bch_extent_invalid,
        .key_bad        = bch_extent_bad,
        .key_merge      = bch_extent_merge,
        .key_to_text    = bch_extent_to_text,
        .key_dump       = bch_bkey_dump,
        .is_extents     = true,
};