// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

static void update_gc_after_writeback(struct cache_set *c)
{
	if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
	    c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
		return;

	c->gc_after_writeback |= BCH_DO_AUTO_GC;
}
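
/*
 * Note: this only arms the flag. The actual garbage collection is
 * kicked from bch_writeback_thread() below, which checks for
 * (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC) and calls force_wake_up_gc()
 * once all dirty data has been written back.
 */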

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
			atomic_long_read(&c->flash_dev_dirty_sectors);

	/*
	 * Unfortunately there is no control of global dirty data. If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
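
/*
 * Worked example (hypothetical numbers): with writeback_percent = 10,
 * a cache of 2^32 sectors and five equal-size backing volumes, each
 * volume's share is (1 << WRITEBACK_SHARE_SHIFT) / 5, so its target is
 *
 *   (2^32 * 10 / 100) * share >> WRITEBACK_SHARE_SHIFT
 *     ~= 2^32 / 50 sectors
 *
 * i.e. about 2% of the cache, matching the comment above.
 */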

static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated. The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values. These are stored as inverses to
	 * avoid fixed point math and to make configuration easy; e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero. Only increase the integral term if the device
		 * is keeping up. (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}
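
/*
 * Worked example of one controller step (hypothetical values): with
 * target = 1000 sectors, dirty = 1400 sectors and the defaults
 * p_term_inverse = 40, i_term_inverse = 10000:
 *
 *   error               = 1400 - 1000 = 400
 *   proportional_scaled = 400 / 40 = 10 sectors/s
 *
 * and if the integral term has accumulated 200000 error-seconds,
 *
 *   integral_scaled = 200000 / 10000 = 20 sectors/s
 *   new_rate        = clamp(10 + 20) = 30 sectors/s
 *
 * subject to writeback_rate_minimum and the NSEC_PER_SEC upper bound.
 */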

static bool idle_counter_exceeded(struct cache_set *c)
{
	int counter, dev_nr;

	/*
	 * If c->idle_counter overflows (idle for a really long time),
	 * reset it to 0 and don't set the maximum rate this time, for
	 * code simplicity.
	 */
	counter = atomic_inc_return(&c->idle_counter);
	if (counter <= 0) {
		atomic_set(&c->idle_counter, 0);
		return false;
	}

	dev_nr = atomic_read(&c->attached_dev_nr);
	if (dev_nr == 0)
		return false;

	/*
	 * c->idle_counter is increased by the writeback threads of all
	 * attached backing devices; to represent a rough time period,
	 * the counter should be divided by dev_nr. Otherwise the idle
	 * time required could not grow with the number of attached
	 * backing devices.
	 * The following calculation is equivalent to checking
	 * (counter / dev_nr) < (dev_nr * 6)
	 */
	if (counter < (dev_nr * dev_nr * 6))
		return false;

	return true;
}
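
/*
 * Threshold example: with two attached devices (dev_nr = 2) the
 * counter must reach 2 * 2 * 6 = 24; since each device's thread
 * increments the shared counter once per round, that is about
 * 6 * dev_nr = 12 idle rounds per device, growing with the number
 * of devices as the comment below explains.
 */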

/*
 * Idle_counter is increased every time update_writeback_rate() is
 * called. If all backing devices attached to the same cache set have
 * identical dc->writeback_rate_update_seconds values, it is about 6
 * rounds of update_writeback_rate() on each backing device before
 * c->at_max_writeback_rate is set to 1, and then the max writeback rate
 * is set for each dc->writeback_rate.rate.
 * In order to avoid extra locking cost for counting the exact number
 * of dirty cached devices, c->attached_dev_nr is used to calculate the
 * idle threshold. It might be bigger if not all cached devices are in
 * write-back mode, but it still works well with limited extra rounds
 * of update_writeback_rate().
 */
static bool set_at_max_writeback_rate(struct cache_set *c,
				      struct cached_dev *dc)
{
	/* Don't set max writeback rate if it is disabled */
	if (!c->idle_max_writeback_rate_enabled)
		return false;

	/* Don't set max writeback rate if gc is running */
	if (!c->gc_mark_valid)
		return false;

	if (!idle_counter_exceeded(c))
		return false;

	if (atomic_read(&c->at_max_writeback_rate) != 1)
		atomic_set(&c->at_max_writeback_rate, 1);

	atomic_long_set(&dc->writeback_rate.rate, INT_MAX);

	/* keep writeback_rate_target as existing value */
	dc->writeback_rate_proportional = 0;
	dc->writeback_rate_integral_scaled = 0;
	dc->writeback_rate_change = 0;

	/*
	 * Re-check in case new I/O arrived before
	 * set_at_max_writeback_rate() returns.
	 */
	if (!idle_counter_exceeded(c) ||
	    !atomic_read(&c->at_max_writeback_rate))
		return false;

	return true;
}
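
/*
 * The idle state is deliberately checked twice: once before and once
 * after publishing the max rate, so a device that received new I/O
 * between the two checks falls back to the PI-controlled path instead
 * of staying pinned at INT_MAX.
 */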

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);
	struct cache_set *c = dc->disk.c;

	/*
	 * Should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb__after_atomic();

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb__after_atomic();
		return;
	}

	if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
		/*
		 * If the whole cache set is idle, set_at_max_writeback_rate()
		 * will set the writeback rate to a maximum number. It is
		 * then unnecessary to update the writeback rate for an idle
		 * cache set that is already at the maximum rate.
		 */
		if (!set_at_max_writeback_rate(c, dc)) {
			down_read(&dc->writeback_lock);
			__update_writeback_rate(dc);
			update_gc_after_writeback(c);
			up_read(&dc->writeback_lock);
		}
	}


	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
	    !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * Should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb__after_atomic();
}

static unsigned int writeback_delay(struct cached_dev *dc,
				    unsigned int sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}
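
/*
 * Rate-to-delay example (assuming bch_next_delay() implements simple
 * token-bucket pacing, delay ~= sectors / rate): at a rate of 1024
 * sectors/s, writing back 512 sectors yields a delay of roughly half
 * a second before the next batch is issued.
 */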

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}
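
/*
 * Sizing example (hypothetical key): KEY_SIZE() is in 512-byte
 * sectors, so a 16-sector key maps to bi_size = 16 << 9 = 8192 bytes
 * and, with 4 KiB pages (PAGE_SECTORS = 8), needs
 * DIV_ROUND_UP(16, 8) = 2 inline bio vecs.
 */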

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned int i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case: it happened in indeterminate order
			 * relative to when we were added to the wait list.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device. Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		/* I/O request sent to backing device */
		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
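
/*
 * Ordering example: if IOs carrying sequence numbers 0, 1 and 2 are
 * in flight and number 1 reaches write_dirty() first, it parks on
 * writeback_ordering_wait until number 0 advances
 * writeback_sequence_next to 1 and wakes the waiters, keeping
 * backing-device writes in key order.
 */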

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(io->dc->disk.c, &io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned int delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
	       next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command queueing.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(struct_size(io, bio.bi_inline_vecs,
						DIV_ROUND_UP(KEY_SIZE(&w->key),
							     PAGE_SECTORS)),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence	= sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/*
			 * We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		while (!kthread_should_stop() &&
		       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
		       delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}
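
/*
 * Batching example: with MAX_WRITEBACKS_IN_PASS = 5 (see writeback.h),
 * five contiguous dirty keys of 8 sectors each are gathered into one
 * pass (size = 40 sectors) and a single writeback_delay() is charged
 * for the whole batch; a non-contiguous key ends the batch early.
 */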

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned int stripe_offset, sectors_dirty;
	int stripe;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	if (stripe < 0)
		return;

	if (UUID_FLASH_ONLY(&c->uuids[inode]))
		atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned int, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
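
/*
 * Stripe accounting example (hypothetical geometry): with stripe_size
 * = 2048 sectors, marking offset = 3000, nr_sectors = 2000 dirty
 * touches stripe 1 first (stripe_offset = 3000 & 2047 = 952, so
 * s = min(2000, 2048 - 952) = 1096) and the remaining 904 sectors
 * land at the start of stripe 2. A stripe whose dirty count reaches
 * exactly stripe_size is flagged in full_dirty_stripes.
 */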

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf,
					     struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned int start_stripe, next_stripe;
	int stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
	if (stripe < 0)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
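
/*
 * Scan-wrap example: if last_scanned sat at offset X when the keybuf
 * filled up, the next refill resumes at X; once the scan reaches the
 * end key it restarts from offset 0 and stops at the original start
 * position, so refill_dirty() reports a full index sweep only after
 * both halves have been covered.
 */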

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	struct cache_set *c = dc->disk.c;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback. Otherwise, if no dirty data on cache,
		 * or there is dirty data on cache but writeback is disabled,
		 * the writeback thread should sleep here and wait for others
		 * to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop() ||
			    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If the bcache device is detaching via the sysfs
			 * interface, the writeback thread should stop after
			 * there is no dirty data on cache. BCACHE_DEV_DETACHING
			 * flag is set in bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
				up_write(&dc->writeback_lock);
				break;
			}

			/*
			 * When the dirty data ratio is high (e.g. 50%+), there
			 * might be heavy bucket fragmentation after writeback
			 * finishes, which hurts subsequent write performance.
			 * If users really care about write performance they
			 * may set BCH_ENABLE_AUTO_GC via sysfs; then when
			 * BCH_DO_AUTO_GC is set, the garbage collection
			 * thread will be woken up here. After gc is done, the
			 * shrunk btree and the discarded free buckets' SSD
			 * space may help subsequent write requests.
			 */
			if (c->gc_after_writeback ==
			    (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
				c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
				force_wake_up_gc(c);
			}
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned int delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	if (dc->writeback_write_wq) {
		flush_workqueue(dc->writeback_write_wq);
		destroy_workqueue(dc->writeback_write_wq);
	}
	cached_dev_put(dc);
	wait_for_kthread_stop();

	return 0;
}

/* Init */
#define INIT_KEYS_EACH_TIME	500000

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned int	inode;
	size_t		count;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	op->count++;
	if (!(op->count % INIT_KEYS_EACH_TIME))
		cond_resched();

	return MAP_CONTINUE;
}
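
/*
 * Pacing example: on a btree carrying, say, 2 million keys for this
 * inode, the callback above yields the CPU via cond_resched() four
 * times (once every INIT_KEYS_EACH_TIME = 500000 keys) rather than
 * monopolizing it for the whole scan.
 */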

static int bch_root_node_dirty_init(struct cache_set *c,
				    struct bcache_device *d,
				    struct bkey *k)
{
	struct sectors_dirty_init op;
	int ret;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;
	op.count = 0;

	ret = bcache_btree(map_keys_recurse,
			   k,
			   c->root,
			   &op.op,
			   &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn,
			   0);
	if (ret < 0)
		pr_warn("sectors dirty init failed, ret=%d!\n", ret);

	return ret;
}

static int bch_dirty_init_thread(void *arg)
{
	struct dirty_init_thrd_info *info = arg;
	struct bch_dirty_init_state *state = info->state;
	struct cache_set *c = state->c;
	struct btree_iter iter;
	struct bkey *k, *p;
	int cur_idx, prev_idx, skip_nr;

	k = p = NULL;
	cur_idx = prev_idx = 0;

	bch_btree_iter_init(&c->root->keys, &iter, NULL);
	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
	BUG_ON(!k);

	p = k;

	while (k) {
		spin_lock(&state->idx_lock);
		cur_idx = state->key_idx;
		state->key_idx++;
		spin_unlock(&state->idx_lock);

		skip_nr = cur_idx - prev_idx;

		while (skip_nr) {
			k = bch_btree_iter_next_filter(&iter,
						       &c->root->keys,
						       bch_ptr_bad);
			if (k)
				p = k;
			else {
				atomic_set(&state->enough, 1);
				/* Update state->enough earlier */
				smp_mb__after_atomic();
				goto out;
			}
			skip_nr--;
		}

		if (p) {
			if (bch_root_node_dirty_init(c, state->d, p) < 0)
				goto out;
		}

		p = NULL;
		prev_idx = cur_idx;
	}

out:
	/* In order to wake up state->wait in time */
	smp_mb__before_atomic();
	if (atomic_dec_and_test(&state->started))
		wake_up(&state->wait);

	return 0;
}
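
/*
 * Partitioning example: with two workers, worker A claims key_idx 0
 * and worker B claims key_idx 1; when A next claims idx 2, its
 * skip_nr = 2 - 0 = 2 and it advances its private iterator past the
 * key B owns. Each root-level key is thus processed by exactly one
 * thread, with idx_lock the only shared serialization point.
 */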

static int bch_btre_dirty_init_thread_nr(void)
{
	int n = num_online_cpus()/2;

	if (n == 0)
		n = 1;
	else if (n > BCH_DIRTY_INIT_THRD_MAX)
		n = BCH_DIRTY_INIT_THRD_MAX;

	return n;
}
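
/*
 * Sizing example: 8 online CPUs -> 4 scan threads; a single-CPU
 * machine still gets 1 thread; and very large machines are capped at
 * BCH_DIRTY_INIT_THRD_MAX so the root-node scan does not spawn an
 * unbounded number of kthreads.
 */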

void bch_sectors_dirty_init(struct bcache_device *d)
{
	int i;
	struct bkey *k = NULL;
	struct btree_iter iter;
	struct sectors_dirty_init op;
	struct cache_set *c = d->c;
	struct bch_dirty_init_state state;

	/* Just count root keys if no leaf node */
	rw_lock(0, c->root, c->root->level);
	if (c->root->level == 0) {
		bch_btree_op_init(&op.op, -1);
		op.inode = d->id;
		op.count = 0;

		for_each_key_filter(&c->root->keys,
				    k, &iter, bch_ptr_invalid)
			sectors_dirty_init_fn(&op.op, c->root, k);

		rw_unlock(0, c->root);
		return;
	}

	memset(&state, 0, sizeof(struct bch_dirty_init_state));
	state.c = c;
	state.d = d;
	state.total_threads = bch_btre_dirty_init_thread_nr();
	state.key_idx = 0;
	spin_lock_init(&state.idx_lock);
	atomic_set(&state.started, 0);
	atomic_set(&state.enough, 0);
	init_waitqueue_head(&state.wait);

	for (i = 0; i < state.total_threads; i++) {
		/* Fetch latest state.enough earlier */
		smp_mb__before_atomic();
		if (atomic_read(&state.enough))
			break;

		state.infos[i].state = &state;
		state.infos[i].thread =
			kthread_run(bch_dirty_init_thread, &state.infos[i],
				    "bch_dirtcnt[%d]", i);
		if (IS_ERR(state.infos[i].thread)) {
			pr_err("fails to run thread bch_dirty_init[%d]\n", i);
			for (--i; i >= 0; i--)
				kthread_stop(state.infos[i].thread);
			goto out;
		}
		atomic_inc(&state.started);
	}

out:
	/* Must wait for all threads to stop. */
	wait_event(state.wait, atomic_read(&state.started) == 0);
	rw_unlock(0, c->root);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= false;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	atomic_long_set(&dc->writeback_rate.rate, 1024);
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						 WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		destroy_workqueue(dc->writeback_write_wq);
		return PTR_ERR(dc->writeback_thread);
	}
	dc->writeback_running = true;

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}