// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u\n", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic\n", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

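			/*
			 * The jset header says the entry is larger than what
			 * has been read in so far; its size was validated
			 * above, so jump back and reread from the same offset
			 * with a length that covers the whole entry.
			 */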
			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca));

			/*
			 * Nodes in 'list' are in linear increasing order of
			 * i->j.seq, the node on head has the smallest (oldest)
			 * journal seq, the node on tail has the biggest
			 * (latest) journal seq.
			 */

			/*
			 * Check from the oldest jset for last_seq. If
			 * i->j.seq < j->last_seq, it means the oldest jset
			 * in the list is expired and useless, remove it from
			 * this list. Otherwise, j is a candidate jset for
			 * further following checks.
			 */
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			/* iterate list in reverse order (from latest jset) */
			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				/*
				 * if j->seq is less than any i->j.last_seq
				 * in the list, j is an expired and useless
				 * jset.
				 */
				if (j->seq < i->j.last_seq)
					goto next_set;

				/*
				 * 'where' points to the first jset in the
				 * list which is older than j.
				 */
				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

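			/*
			 * No jset in the list is older than j, so insert j
			 * at the head to keep the list ordered by seq.
			 */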
			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			/* Add to the location after 'where' points to */
			list_add(&i->list, where);
			ret = 1;

			if (j->seq > ja->seq[bucket_index])
				ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)						\
	({							\
		ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);				\
		if (ret < 0)					\
			return ret;				\
		ret;						\
	})
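
	/*
	 * read_bucket() marks bucket b as checked in 'bitmap', returns any
	 * error straight out of bch_journal_read() itself, and otherwise
	 * evaluates to nonzero iff bucket b contributed journal entries
	 * to 'list'.
	 */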

	struct cache *ca = c->cache;
	int ret = 0;
	struct journal_device *ja = &ca->journal;
	DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
	unsigned int i, l, r, m;
	uint64_t seq;

	bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
	pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);

	/*
	 * Read journal buckets ordered by golden ratio hash to quickly
	 * find a sequence of buckets with valid journal entries
	 */
	for (i = 0; i < ca->sb.njournal_buckets; i++) {
		/*
		 * We must try the index l with value zero first for
		 * correctness: the journal buckets form a circular
		 * buffer which might have wrapped.
		 */
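		/*
		 * 2654435769 is floor(2^32 / golden ratio), the Fibonacci
		 * hashing constant: successive multiples of it are spread
		 * nearly uniformly, so the probe order scatters across the
		 * buckets.  If it revisits a bucket, the test_bit() check
		 * below breaks out to the linear-search fallback.
		 */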
		l = (i * 2654435769U) % ca->sb.njournal_buckets;

		if (test_bit(l, bitmap))
			break;

		if (read_bucket(l))
			goto bsearch;
	}

	/*
	 * If that fails, check all the buckets we haven't checked
	 * already
	 */
	pr_debug("falling back to linear search\n");

	for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
		if (read_bucket(l))
			goto bsearch;

	/* no journal entries on this device? */
	if (l == ca->sb.njournal_buckets)
		goto out;
bsearch:
	BUG_ON(list_empty(list));

	/* Binary search */
	m = l;
	r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
	pr_debug("starting binary search, l %u r %u\n", l, r);

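	/*
	 * Invariant: bucket l is known to contain journal entries and
	 * bucket r is the next bucket already checked.  Probing midpoint m
	 * moves l up if the bucket yielded entries with a newer latest seq,
	 * and moves r down otherwise, converging on the last bucket that
	 * still holds entries.
	 */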
	while (l + 1 < r) {
		seq = list_entry(list->prev, struct journal_replay,
				 list)->j.seq;

		m = (l + r) >> 1;
		read_bucket(m);

		if (seq != list_entry(list->prev, struct journal_replay,
				      list)->j.seq)
			l = m;
		else
			r = m;
	}

	/*
	 * Read buckets in reverse order until we stop finding more
	 * journal entries
	 */
	pr_debug("finishing up: m %u njournal_buckets %u\n",
		 m, ca->sb.njournal_buckets);
	l = m;

	while (1) {
		if (!l--)
			l = ca->sb.njournal_buckets - 1;

		if (l == m)
			break;

		if (test_bit(l, bitmap))
			continue;

		if (!read_bucket(l))
			break;
	}

	seq = 0;

	for (i = 0; i < ca->sb.njournal_buckets; i++)
		if (ja->seq[i] > seq) {
			seq = ja->seq[i];
			/*
			 * When journal_reclaim() goes to allocate for
			 * the first time, it'll use the bucket after
			 * ja->cur_idx
			 */
			ja->cur_idx = i;
			ja->last_idx = ja->discard_idx = (i + 1) %
				ca->sb.njournal_buckets;
		}

out:
	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca = s->cache;

	if (ca->discard)
		return true;

	return false;
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s))
				pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
			else {
				pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
				       n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

void bch_journal_space_reserve(struct journal *j)
{
	j->do_reserve = true;
}

/* Journalling */

static void btree_flush_write(struct cache_set *c)
{
	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
	unsigned int i, nr;
	int ref_nr;
	atomic_t *fifo_front_p, *now_fifo_front_p;
	size_t mask;

	if (c->journal.btree_flushing)
		return;

	spin_lock(&c->journal.flush_write_lock);
	if (c->journal.btree_flushing) {
		spin_unlock(&c->journal.flush_write_lock);
		return;
	}
	c->journal.btree_flushing = true;
	spin_unlock(&c->journal.flush_write_lock);

	/* get the oldest journal entry and check its refcount */
	spin_lock(&c->journal.lock);
	fifo_front_p = &fifo_front(&c->journal.pin);
	ref_nr = atomic_read(fifo_front_p);
	if (ref_nr <= 0) {
		/*
		 * do nothing if no btree node references
		 * the oldest journal entry
		 */
		spin_unlock(&c->journal.lock);
		goto out;
	}
	spin_unlock(&c->journal.lock);

	mask = c->journal.pin.mask;
	nr = 0;
	atomic_long_inc(&c->flush_write);
	memset(btree_nodes, 0, sizeof(btree_nodes));

	mutex_lock(&c->bucket_lock);
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		/*
		 * It is safe to get now_fifo_front_p without holding
		 * c->journal.lock here, because we don't need an
		 * exactly accurate value, just to check whether the
		 * front pointer of c->journal.pin has changed.
		 */
		now_fifo_front_p = &fifo_front(&c->journal.pin);
		/*
		 * If the oldest journal entry is reclaimed and the front
		 * pointer of c->journal.pin changes, it is unnecessary
		 * to scan c->btree_cache anymore, just quit the loop and
		 * flush out what we have already.
		 */
		if (now_fifo_front_p != fifo_front_p)
			break;
		/*
		 * quit this loop if all matching btree nodes are
		 * scanned and recorded in btree_nodes[] already.
		 */
		ref_nr = atomic_read(fifo_front_p);
		if (nr >= ref_nr)
			break;

		if (btree_node_journal_flush(b))
			pr_err("BUG: flush_write bit should not be set here!\n");

		mutex_lock(&b->write_lock);

		if (!btree_node_dirty(b)) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		/*
		 * Only select the btree node which exactly references
		 * the oldest journal entry.
		 *
		 * If the journal entry pointed to by fifo_front_p is
		 * reclaimed in parallel, don't worry:
		 * - the list_for_each_xxx loop will quit when checking
		 *   the next now_fifo_front_p.
		 * - If there are matched nodes recorded in btree_nodes[],
		 *   they are clean now (this is why and how the oldest
		 *   journal entry can be reclaimed). These selected nodes
		 *   will be ignored and skipped in the following for-loop.
		 */
		if (((btree_current_write(b)->journal - fifo_front_p) &
		     mask) != 0) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		set_btree_node_journal_flush(b);

		mutex_unlock(&b->write_lock);

		btree_nodes[nr++] = b;
		/*
		 * To avoid holding c->bucket_lock for too long, scan
		 * for at most BTREE_FLUSH_NR matched btree nodes. If
		 * more btree nodes reference the oldest journal entry,
		 * try to flush them the next time btree_flush_write()
		 * is called.
		 */
		if (nr == BTREE_FLUSH_NR)
			break;
	}
	mutex_unlock(&c->bucket_lock);

	for (i = 0; i < nr; i++) {
		b = btree_nodes[i];
		if (!b) {
			pr_err("BUG: btree_nodes[%d] is NULL\n", i);
			continue;
		}

		/* safe to check without holding b->write_lock */
		if (!btree_node_journal_flush(b)) {
			pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
			continue;
		}

		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: written by others\n", b);
			continue;
		}

		if (!btree_node_dirty(b)) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: dirty bit cleaned by others\n", b);
			continue;
		}

		__bch_btree_node_write(b, NULL);
		clear_bit(BTREE_NODE_journal_flush, &b->flags);
		mutex_unlock(&b->write_lock);
	}

out:
	spin_lock(&c->journal.flush_write_lock);
	c->journal.btree_flushing = false;
	spin_unlock(&c->journal.flush_write_lock);
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
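
/*
 * last_seq() is the sequence number of the oldest journal entry still in
 * the pin fifo: (j)->seq is the newest entry's seq, and fifo_used() counts
 * the pinned entries from oldest to newest inclusive.
 */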

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		fallthrough;

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
	}
}

static unsigned int free_journal_buckets(struct cache_set *c)
{
	struct journal *j = &c->journal;
	struct cache *ca = c->cache;
	struct journal_device *ja = &c->cache->journal;
	unsigned int n;

	/* In case njournal_buckets is not power of 2 */
	if (ja->cur_idx >= ja->discard_idx)
		n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
	else
		n = ja->discard_idx - ja->cur_idx;

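	/*
	 * Hold one bucket back unconditionally, plus one more once
	 * bch_journal_space_reserve() has set do_reserve.
	 */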
	if (n > (1 + j->do_reserve))
		return n - (1 + j->do_reserve);

	return 0;
}

static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca = c->cache;
	uint64_t last_seq;
	struct journal_device *ja = &ca->journal;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

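	/*
	 * Pop pins whose refcount has dropped to zero off the front of the
	 * fifo: those journal entries are no longer referenced by any dirty
	 * btree node and their buckets can be reused.
	 */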
	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	while (ja->last_idx != ja->cur_idx &&
	       ja->seq[ja->last_idx] < last_seq)
		ja->last_idx = (ja->last_idx + 1) %
			ca->sb.njournal_buckets;

	do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	if (!free_journal_buckets(c))
		goto out;

	ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
	k->ptr[0] = MAKE_PTR(0,
			     bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
			     ca->sb.nr_this_dev);
	atomic_long_inc(&c->reclaimed_journal_buckets);

	bkey_init(k);
	SET_KEY_PTRS(k, 1);
	c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;

out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca = c->cache;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
		ca->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
	w->data->magic		= jset_magic(&ca->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio, w->data->keys);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	/* If KEY_PTRS(k) == 0, this jset would be lost without being written */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight	= 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;
	struct cache *ca = c->cache;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(ca)) * ca->sb.block_size;

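		/*
		 * The write fits if the jset, grown by nkeys, stays within
		 * both the space left in the current journal bucket and the
		 * preallocated journal write buffer.
		 */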
		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * ca->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	/* No journaling if CACHE_SET_IO_DISABLE set already */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return NULL;

	if (!CACHE_SYNC(&c->cache->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);
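
	/*
	 * The pin returned keeps this journal entry alive; the caller drops
	 * it once the journalled keys are also safely in the btree.
	 */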

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
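		/*
		 * First key into a clean journal write: arm the delayed
		 * flush so later keys can batch into the same write.
		 */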
		queue_delayed_work(bch_flush_wq, &c->journal.work,
				   msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	spin_lock_init(&j->flush_write_lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
		return -ENOMEM;

	return 0;
}