// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one bucket from a cache set.
 *
 * The allocator thread (bch_allocator_thread()) drives all the processes
 * described above. It's woken from bch_bucket_alloc() and a few other places
 * that need to make sure free buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
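
/*
 * Rough lifecycle of a bucket, as implemented below:
 *
 *   invalidate_buckets() -> ca->free_inc -> bch_prio_write() ->
 *   (optional discard) -> ca->free[RESERVE_*] -> bch_bucket_alloc()
 */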

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

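/*
 * Note: one rescale pass happens per roughly c->nbuckets * bucket_size / 1024
 * sectors of cache IO. The cmpxchg loop below picks a single winner to do the
 * pass; everyone else returns as soon as c->rescale is back above zero.
 */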
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	ca = c->cache;
	for_each_bucket(b, ca)
		if (b->prio &&
		    b->prio != BTREE_PRIO &&
		    !atomic_read(&b->pin)) {
			b->prio--;
			c->min_prio = min(c->min_prio, b->prio);
		}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}
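
/*
 * bucket_gc_gen() (see bcache.h) is how many times the gen has been bumped
 * since garbage collection last visited this bucket; refusing to invalidate
 * past BUCKET_GC_GEN_MAX is what implements the "gens must not wrap around"
 * guarantee from the comment at the top of this file.
 */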

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})
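
/*
 * Worked example (INITIAL_PRIO is 32768 in bcache.h): with
 * ca->set->min_prio == 0, the coldest possible bucket (prio 0) weighs
 * 4096 * GC_SECTORS_USED(b), while a freshly used one (prio == INITIAL_PRIO)
 * weighs 36864 * GC_SECTORS_USED(b) - so recency matters, but a nearly
 * empty bucket can still be reclaimed before a hot, full one.
 */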

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

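	/*
	 * The scan above kept the lowest-bucket_prio() buckets (up to the
	 * heap's capacity) in a max-heap, evicting the current maximum
	 * whenever a cheaper bucket turned up; re-heapify with the min
	 * comparator so heap_pop() below hands them back cheapest first.
	 */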
	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket < ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

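/*
 * allocator_wait(ca, cond) sleeps, with bucket_lock dropped, until cond
 * becomes true (cond is re-evaluated with the lock held). On kthread stop or
 * CACHE_SET_IO_DISABLE it jumps to the out: label, so it can only be used
 * from bch_allocator_thread() below.
 */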
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static int bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}
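
/*
 * After RESERVE_PRIO, the loop above tops up the remaining reserves in enum
 * order; false means every freelist is full, in which case the allocator
 * thread waits for consumers to drain them before pushing more.
 */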

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if (!fifo_pop(&ca->free_inc, bucket))
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			if (bch_prio_write(ca, false) < 0) {
				ca->invalidate_needs_gc = 1;
				wake_up_gc(ca->set);
			}
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

/* Allocation */

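/*
 * Callers must hold bucket_lock (it's dropped and retaken while sleeping).
 * Returns a bucket index, or -1 if @wait is false and no bucket is
 * immediately available, or if the cache set's IO has been disabled.
 */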
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
		return -1;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

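	/*
	 * Paranoia check: the bucket we're handing out must not still be
	 * sitting on any freelist or in the prio bucket array.
	 */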
	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait)
{
	struct cache *ca;
	long b;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;

	lockdep_assert_held(&c->bucket_lock);

	bkey_init(k);

	ca = c->cache;
	b = bch_bucket_alloc(ca, reserve, wait);
	if (b == -1)
		goto err;

	k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
			     bucket_to_sector(c, b),
			     ca->sb.nr_this_dev);

	SET_KEY_PTRS(k, 1);

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache
 * at the same time, you'll get better cache utilization if you try to
 * segregate their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable. If
 * they get mixed into a bucket with dirty sectors from a cached device, that
 * bucket stays marked dirty and won't be reclaimed, even after the cached
 * device's dirty data has been written back to the backing device.
 *
 * And say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
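
/*
 * Note: c->data_buckets is kept in LRU order (bch_alloc_sectors() moves the
 * bucket it used to the tail), so the reverse scan below tries the most
 * recently used buckets first, and the fallback steals the least recently
 * used one.
 */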
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->cache->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, and sets k to point to the
 * newly allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point
 * to the end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, this will not fail (unless the cache set's IO has been
 * disabled).
 */
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->cache->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the open bucket's existing refcount instead of taking new ones.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}