Lines matching refs:dc: uses of the struct cached_dev pointer dc in the bcache writeback code (drivers/md/bcache/writeback.c in the Linux kernel). Each entry shows the source line number, the matching line, the enclosing function, and the role dc plays on that line (function argument, local variable, or struct member).

30 static uint64_t __calc_target_rate(struct cached_dev *dc)  in __calc_target_rate()  argument
32 struct cache_set *c = dc->disk.c; in __calc_target_rate()
48 div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT, in __calc_target_rate()
52 div_u64(cache_sectors * dc->writeback_percent, 100); in __calc_target_rate()
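The hits above are the core of __calc_target_rate(): the per-device dirty target is the cache set's global target (cache_sectors * writeback_percent / 100), scaled by this device's share of all attached backing storage. A minimal user-space model, assuming the usual WRITEBACK_SHARE_SHIFT of 14; plain division stands in for div64_u64()/div_u64(), and the flash-only-volume adjustment in the real function is omitted:

```c
#include <stdint.h>

#define WRITEBACK_SHARE_SHIFT 14	/* fixed-point shift, as in writeback.h */

static uint64_t calc_target(uint64_t cache_sectors, uint64_t bdev_sectors,
			    uint64_t cached_dev_sectors,
			    unsigned int writeback_percent)
{
	/* This device's fraction of all backing storage, in 2^-14 fixed point. */
	uint64_t bdev_share =
		(bdev_sectors << WRITEBACK_SHARE_SHIFT) / cached_dev_sectors;
	/* Global dirty budget for the whole cache set. */
	uint64_t cache_dirty_target =
		cache_sectors * writeback_percent / 100;

	/* Ensure even a tiny device gets a nonzero share. */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
```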
61 static void __update_writeback_rate(struct cached_dev *dc) in __update_writeback_rate() argument
83 int64_t target = __calc_target_rate(dc); in __update_writeback_rate()
84 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); in __update_writeback_rate()
87 div_s64(error, dc->writeback_rate_p_term_inverse); in __update_writeback_rate()
91 if ((error < 0 && dc->writeback_rate_integral > 0) || in __update_writeback_rate()
93 dc->writeback_rate.next + NSEC_PER_MSEC))) { in __update_writeback_rate()
104 dc->writeback_rate_integral += error * in __update_writeback_rate()
105 dc->writeback_rate_update_seconds; in __update_writeback_rate()
108 integral_scaled = div_s64(dc->writeback_rate_integral, in __update_writeback_rate()
109 dc->writeback_rate_i_term_inverse); in __update_writeback_rate()
112 dc->writeback_rate_minimum, NSEC_PER_SEC); in __update_writeback_rate()
114 dc->writeback_rate_proportional = proportional_scaled; in __update_writeback_rate()
115 dc->writeback_rate_integral_scaled = integral_scaled; in __update_writeback_rate()
116 dc->writeback_rate_change = new_rate - in __update_writeback_rate()
117 atomic_long_read(&dc->writeback_rate.rate); in __update_writeback_rate()
118 atomic_long_set(&dc->writeback_rate.rate, new_rate); in __update_writeback_rate()
119 dc->writeback_rate_target = target; in __update_writeback_rate()
170 struct cached_dev *dc) in set_at_max_writeback_rate() argument
186 atomic_long_set(&dc->writeback_rate.rate, INT_MAX); in set_at_max_writeback_rate()
189 dc->writeback_rate_proportional = 0; in set_at_max_writeback_rate()
190 dc->writeback_rate_integral_scaled = 0; in set_at_max_writeback_rate()
191 dc->writeback_rate_change = 0; in set_at_max_writeback_rate()
206 struct cached_dev *dc = container_of(to_delayed_work(work), in update_writeback_rate() local
209 struct cache_set *c = dc->disk.c; in update_writeback_rate()
215 set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); in update_writeback_rate()
223 if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) || in update_writeback_rate()
225 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); in update_writeback_rate()
231 if (atomic_read(&dc->has_dirty) && dc->writeback_percent) { in update_writeback_rate()
238 if (!set_at_max_writeback_rate(c, dc)) { in update_writeback_rate()
239 down_read(&dc->writeback_lock); in update_writeback_rate()
240 __update_writeback_rate(dc); in update_writeback_rate()
242 up_read(&dc->writeback_lock); in update_writeback_rate()
251 if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) && in update_writeback_rate()
253 schedule_delayed_work(&dc->writeback_rate_update, in update_writeback_rate()
254 dc->writeback_rate_update_seconds * HZ); in update_writeback_rate()
261 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); in update_writeback_rate()
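update_writeback_rate() is the delayed work item that periodically reruns the controller. The two flag bits implement a small protocol: BCACHE_DEV_RATE_DW_RUNNING marks "this work item is executing right now" so teardown can wait it out, and BCACHE_DEV_WB_RUNNING is the switch that decides whether the work rearms itself. A simplified model with C11 atomics; the kernel version uses set_bit()/clear_bit() plus explicit smp_mb() barriers and also checks CACHE_SET_IO_DISABLE:

```c
#include <stdatomic.h>
#include <stdbool.h>

/* The two bits from dc->disk.flags used above. */
static atomic_bool rate_dw_running;	/* BCACHE_DEV_RATE_DW_RUNNING */
static atomic_bool wb_running;		/* BCACHE_DEV_WB_RUNNING */

/* One tick of the delayed work; returns true if it should rearm itself. */
static bool rate_update_tick(void)
{
	atomic_store(&rate_dw_running, true);

	/* Writeback stopped (or cache set disabled): bail out, no rearm. */
	if (!atomic_load(&wb_running)) {
		atomic_store(&rate_dw_running, false);
		return false;
	}

	/* ... take dc->writeback_lock and rerun the controller here ... */

	atomic_store(&rate_dw_running, false);
	return atomic_load(&wb_running);
}
```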
266 static unsigned int writeback_delay(struct cached_dev *dc, in writeback_delay() argument
269 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || in writeback_delay()
270 !dc->writeback_percent) in writeback_delay()
273 return bch_next_delay(&dc->writeback_rate, sectors); in writeback_delay()
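writeback_delay() just asks the rate limiter how long to sleep before the next chunk, and is bypassed entirely while detaching or when writeback_percent is zero. The limiter behind bch_next_delay() is, in essence, a deadline scheduler: each completed unit of work pushes a "next" timestamp forward by 1/rate seconds. A sketch of that idea; the real bch_next_delay() also clamps how far the deadline may drift from the current time, which is omitted here:

```c
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Deadline-style limiter in the spirit of struct bch_ratelimit. */
struct ratelimit {
	uint64_t next_ns;	/* earliest time the next IO may start */
	uint64_t rate;		/* units (sectors) per second */
};

/* Account `done` units of work and return how long to sleep, in ns. */
static uint64_t next_delay_ns(struct ratelimit *rl, uint64_t done,
			      uint64_t now_ns)
{
	rl->next_ns += done * NSEC_PER_SEC / rl->rate;
	return rl->next_ns > now_ns ? rl->next_ns - now_ns : 0;
}
```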
278 struct cached_dev *dc; member
290 if (!io->dc->writeback_percent) in dirty_init()
309 struct cached_dev *dc = io->dc; in write_dirty_finish() local
326 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); in write_dirty_finish()
328 ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key); in write_dirty_finish()
334 ? &dc->disk.c->writeback_keys_failed in write_dirty_finish()
335 : &dc->disk.c->writeback_keys_done); in write_dirty_finish()
338 bch_keybuf_del(&dc->writeback_keys, w); in write_dirty_finish()
339 up(&dc->in_flight); in write_dirty_finish()
351 bch_count_backing_io_errors(io->dc, bio); in dirty_endio()
361 struct cached_dev *dc = io->dc; in write_dirty() local
365 if (atomic_read(&dc->writeback_sequence_next) != io->sequence) { in write_dirty()
367 closure_wait(&dc->writeback_ordering_wait, cl); in write_dirty()
369 if (atomic_read(&dc->writeback_sequence_next) == io->sequence) { in write_dirty()
374 closure_wake_up(&dc->writeback_ordering_wait); in write_dirty()
377 continue_at(cl, write_dirty, io->dc->writeback_write_wq); in write_dirty()
393 bio_set_dev(&io->bio, io->dc->bdev); in write_dirty()
397 closure_bio_submit(io->dc->disk.c, &io->bio, cl); in write_dirty()
400 atomic_set(&dc->writeback_sequence_next, next_sequence); in write_dirty()
401 closure_wake_up(&dc->writeback_ordering_wait); in write_dirty()
403 continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq); in write_dirty()
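write_dirty() forces writes to the backing device to complete in sequence order: an IO whose turn hasn't come parks on dc->writeback_ordering_wait, and note the re-check of writeback_sequence_next after closure_wait(), which closes the race with a wake-up arriving between the first check and enqueueing. A toy model of the ordering, with a busy-wait standing in for the closure park/wake machinery:

```c
#include <stdatomic.h>

/* Global sequence counter, as the atomic writeback_sequence_next above. */
static atomic_uint sequence_next;

static void issue_in_order(unsigned int my_sequence)
{
	/* Kernel: closure_wait() on writeback_ordering_wait, then re-check. */
	while (atomic_load(&sequence_next) != my_sequence)
		;	/* busy-wait stands in for the closure park */

	/* ... build the bio and submit it to dc->bdev here ... */

	atomic_store(&sequence_next, my_sequence + 1u);
	/* Kernel: closure_wake_up(&dc->writeback_ordering_wait). */
}
```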
412 bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), in read_dirty_endio()
423 closure_bio_submit(io->dc->disk.c, &io->bio, cl); in read_dirty_submit()
425 continue_at(cl, write_dirty, io->dc->writeback_write_wq); in read_dirty_submit()
428 static void read_dirty(struct cached_dev *dc) in read_dirty() argument
438 BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list)); in read_dirty()
439 atomic_set(&dc->writeback_sequence_next, sequence); in read_dirty()
447 next = bch_keybuf_next(&dc->writeback_keys); in read_dirty()
450 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && in read_dirty()
456 BUG_ON(ptr_stale(dc->disk.c, &next->key, 0)); in read_dirty()
487 } while ((next = bch_keybuf_next(&dc->writeback_keys))); in read_dirty()
500 io->dc = dc; in read_dirty()
507 PTR_CACHE(dc->disk.c, &w->key, 0)->bdev); in read_dirty()
515 down(&dc->in_flight); in read_dirty()
525 delay = writeback_delay(dc, size); in read_dirty()
528 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && in read_dirty()
531 delay = writeback_delay(dc, 0); in read_dirty()
539 bch_keybuf_del(&dc->writeback_keys, w); in read_dirty()
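read_dirty() is the producer side of the pipeline: it walks dirty keys out of the keybuf, reads each extent from the cache device, and hands it to write_dirty() via the writeback_write_wq workqueue. dc->in_flight, a semaphore initialised to 64, bounds the pipeline depth; it is taken here and released in write_dirty_finish() (the up() in the earlier hit). A rough POSIX model of the loop, with keybuf_next(), submit_read() and next_delay_us() as hypothetical stand-ins:

```c
#include <semaphore.h>
#include <stddef.h>
#include <unistd.h>

/* Hypothetical stand-ins; the real versions are bch_keybuf_next(),
 * read_dirty_submit() and writeback_delay() in the bcache source. */
struct keybuf_key;
static struct keybuf_key *keybuf_next(void) { return NULL; }
static void submit_read(struct keybuf_key *w) { (void)w; }
static unsigned int next_delay_us(void) { return 0; }

static sem_t in_flight;		/* sema_init(&dc->in_flight, 64) */

static void read_dirty_loop(void)
{
	struct keybuf_key *w;

	sem_init(&in_flight, 0, 64);
	while ((w = keybuf_next()) != NULL) {
		sem_wait(&in_flight);	 /* released in write_dirty_finish() */
		submit_read(w);		 /* read, then continue_at(write_dirty) */
		usleep(next_delay_us()); /* pace to the writeback rate */
	}
}
```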
595 struct cached_dev *dc = container_of(buf, in dirty_pred() local
599 BUG_ON(KEY_INODE(k) != dc->disk.id); in dirty_pred()
604 static void refill_full_stripes(struct cached_dev *dc) in refill_full_stripes() argument
606 struct keybuf *buf = &dc->writeback_keys; in refill_full_stripes()
611 stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); in refill_full_stripes()
618 stripe = find_next_bit(dc->disk.full_dirty_stripes, in refill_full_stripes()
619 dc->disk.nr_stripes, stripe); in refill_full_stripes()
621 if (stripe == dc->disk.nr_stripes) in refill_full_stripes()
624 next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes, in refill_full_stripes()
625 dc->disk.nr_stripes, stripe); in refill_full_stripes()
627 buf->last_scanned = KEY(dc->disk.id, in refill_full_stripes()
628 stripe * dc->disk.stripe_size, 0); in refill_full_stripes()
630 bch_refill_keybuf(dc->disk.c, buf, in refill_full_stripes()
631 &KEY(dc->disk.id, in refill_full_stripes()
632 next_stripe * dc->disk.stripe_size, 0), in refill_full_stripes()
643 if (stripe == dc->disk.nr_stripes) { in refill_full_stripes()
653 static bool refill_dirty(struct cached_dev *dc) in refill_dirty() argument
655 struct keybuf *buf = &dc->writeback_keys; in refill_dirty()
656 struct bkey start = KEY(dc->disk.id, 0, 0); in refill_dirty()
657 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0); in refill_dirty()
669 if (dc->partial_stripes_expensive) { in refill_dirty()
670 refill_full_stripes(dc); in refill_dirty()
676 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); in refill_dirty()
686 bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred); in refill_dirty()
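refill_dirty() makes the dirty-key scan resumable: it continues from last_scanned, and if the scan reaches the end of the device it wraps to offset 0 and rescans up to the original starting position. Its return value tells the writeback thread whether the whole keyspace was covered. A sketch of that two-pass logic over a bare offset cursor, with refill() as a hypothetical stand-in for bch_refill_keybuf():

```c
#include <stdbool.h>
#include <stdint.h>

#define DEV_END UINT64_MAX	/* stands in for MAX_KEY_OFFSET */

/* Scans [*cursor, end) for dirty keys and advances *cursor, stopping
 * early if the keybuf fills up.  This stub always completes its range. */
static void refill(uint64_t *cursor, uint64_t end)
{
	*cursor = end;
}

/* Returns true only when the two passes together covered the whole
 * keyspace ("searched_full_index" in bch_writeback_thread()). */
static bool refill_dirty_model(uint64_t *last_scanned)
{
	uint64_t start_pos;

	if (*last_scanned >= DEV_END)
		*last_scanned = 0;	/* wrapped on a previous call */

	start_pos = *last_scanned;
	refill(last_scanned, DEV_END);
	if (*last_scanned < DEV_END)
		return false;		/* keybuf filled before the end */

	/* Wrap: rescan the part we skipped, from 0 up to start_pos. */
	*last_scanned = 0;
	refill(last_scanned, start_pos);
	return *last_scanned >= start_pos;
}
```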
693 struct cached_dev *dc = arg; in bch_writeback_thread() local
694 struct cache_set *c = dc->disk.c; in bch_writeback_thread()
697 bch_ratelimit_reset(&dc->writeback_rate); in bch_writeback_thread()
701 down_write(&dc->writeback_lock); in bch_writeback_thread()
710 if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && in bch_writeback_thread()
711 (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { in bch_writeback_thread()
712 up_write(&dc->writeback_lock); in bch_writeback_thread()
725 searched_full_index = refill_dirty(dc); in bch_writeback_thread()
728 RB_EMPTY_ROOT(&dc->writeback_keys.keys)) { in bch_writeback_thread()
729 atomic_set(&dc->has_dirty, 0); in bch_writeback_thread()
730 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); in bch_writeback_thread()
731 bch_write_bdev_super(dc, NULL); in bch_writeback_thread()
738 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) { in bch_writeback_thread()
739 up_write(&dc->writeback_lock); in bch_writeback_thread()
761 up_write(&dc->writeback_lock); in bch_writeback_thread()
763 read_dirty(dc); in bch_writeback_thread()
766 unsigned int delay = dc->writeback_delay * HZ; in bch_writeback_thread()
771 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) in bch_writeback_thread()
774 bch_ratelimit_reset(&dc->writeback_rate); in bch_writeback_thread()
778 if (dc->writeback_write_wq) { in bch_writeback_thread()
779 flush_workqueue(dc->writeback_write_wq); in bch_writeback_thread()
780 destroy_workqueue(dc->writeback_write_wq); in bch_writeback_thread()
782 cached_dev_put(dc); in bch_writeback_thread()
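bch_writeback_thread() ties it all together: sleep while there is nothing to do, refill the keybuf, write extents back, and, when a complete index sweep turns up no dirty keys, flip the backing device's superblock to BDEV_STATE_CLEAN (which is also where a detach can finish). A stub-based outline of that loop, with dc->writeback_lock, kthread_should_stop() and signal handling omitted:

```c
#include <stdbool.h>

/* Hypothetical stand-ins for the bcache primitives used above. */
static bool detaching;				/* BCACHE_DEV_DETACHING */
static bool has_dirty = true, writeback_running = true;

static bool refill_dirty_keys(void)	{ return true; }  /* refill_dirty() */
static bool keybuf_empty(void)		{ return true; }
static void mark_backing_clean(void)	{ }  /* SET_BDEV_STATE + super write */
static void write_back_keys(void)	{ }  /* read_dirty() */
static void sleep_until_woken(void)	{ }
static void sleep_writeback_delay(void)	{ }

static int writeback_thread(void)
{
	for (;;) {
		bool searched_full_index;

		/* Nothing to do and not detaching: park the thread. */
		if (!detaching && (!has_dirty || !writeback_running)) {
			sleep_until_woken();
			continue;
		}

		searched_full_index = refill_dirty_keys();

		/* A full sweep found no dirty keys: record that the
		 * backing device is clean in its superblock. */
		if (searched_full_index && keybuf_empty()) {
			has_dirty = false;
			mark_backing_clean();
			if (detaching)
				break;	/* all dirty data flushed */
		}

		write_back_keys();

		if (searched_full_index)
			sleep_writeback_delay();
	}
	return 0;
}
```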
970 void bch_cached_dev_writeback_init(struct cached_dev *dc) in bch_cached_dev_writeback_init() argument
972 sema_init(&dc->in_flight, 64); in bch_cached_dev_writeback_init()
973 init_rwsem(&dc->writeback_lock); in bch_cached_dev_writeback_init()
974 bch_keybuf_init(&dc->writeback_keys); in bch_cached_dev_writeback_init()
976 dc->writeback_metadata = true; in bch_cached_dev_writeback_init()
977 dc->writeback_running = false; in bch_cached_dev_writeback_init()
978 dc->writeback_percent = 10; in bch_cached_dev_writeback_init()
979 dc->writeback_delay = 30; in bch_cached_dev_writeback_init()
980 atomic_long_set(&dc->writeback_rate.rate, 1024); in bch_cached_dev_writeback_init()
981 dc->writeback_rate_minimum = 8; in bch_cached_dev_writeback_init()
983 dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT; in bch_cached_dev_writeback_init()
984 dc->writeback_rate_p_term_inverse = 40; in bch_cached_dev_writeback_init()
985 dc->writeback_rate_i_term_inverse = 10000; in bch_cached_dev_writeback_init()
987 WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); in bch_cached_dev_writeback_init()
988 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); in bch_cached_dev_writeback_init()
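For a feel of the defaults set above: with writeback_rate_p_term_inverse = 40, a device 4,000 sectors over target immediately gets 4000 / 40 = 100 sectors/s from the proportional term, while the integral term (inverse 10000) adds only 4000 * 5 / 10000 = 2 sectors/s per update while the error persists, assuming the usual 5-second WRITEBACK_RATE_UPDATE_SECS_DEFAULT. The P term thus supplies the fast response and the I term slowly absorbs steady-state error, with the rate never falling below writeback_rate_minimum = 8 sectors/s.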
991 int bch_cached_dev_writeback_start(struct cached_dev *dc) in bch_cached_dev_writeback_start() argument
993 dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq", in bch_cached_dev_writeback_start()
995 if (!dc->writeback_write_wq) in bch_cached_dev_writeback_start()
998 cached_dev_get(dc); in bch_cached_dev_writeback_start()
999 dc->writeback_thread = kthread_create(bch_writeback_thread, dc, in bch_cached_dev_writeback_start()
1001 if (IS_ERR(dc->writeback_thread)) { in bch_cached_dev_writeback_start()
1002 cached_dev_put(dc); in bch_cached_dev_writeback_start()
1003 destroy_workqueue(dc->writeback_write_wq); in bch_cached_dev_writeback_start()
1004 return PTR_ERR(dc->writeback_thread); in bch_cached_dev_writeback_start()
1006 dc->writeback_running = true; in bch_cached_dev_writeback_start()
1008 WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); in bch_cached_dev_writeback_start()
1009 schedule_delayed_work(&dc->writeback_rate_update, in bch_cached_dev_writeback_start()
1010 dc->writeback_rate_update_seconds * HZ); in bch_cached_dev_writeback_start()
1012 bch_writeback_queue(dc); in bch_cached_dev_writeback_start()
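bch_cached_dev_writeback_start() also shows a textbook acquire-in-order, release-in-reverse error path: the workqueue and the device reference taken before kthread_create() are both dropped if thread creation fails. The shape of it, with hypothetical stubs in place of the kernel primitives:

```c
#include <errno.h>
#include <stddef.h>

/* Hypothetical stubs for the kernel primitives used above. */
struct wq;
struct task;
static struct wq *wq_alloc(void)		{ return NULL; }
static void wq_destroy(struct wq *w)		{ (void)w; }
static void dev_get(void)			{ }
static void dev_put(void)			{ }
static struct task *thread_create(void)	{ return NULL; }

static int writeback_start(void)
{
	struct wq *wq = wq_alloc();	/* alloc_workqueue(...) */

	if (!wq)
		return -ENOMEM;

	dev_get();			/* cached_dev_get(dc) */
	if (!thread_create()) {
		/* Unwind in reverse order of acquisition. */
		dev_put();
		wq_destroy(wq);
		return -ENOMEM;		/* kernel: PTR_ERR(thread) */
	}

	/* ... mark running, arm the rate-update work, queue writeback ... */
	return 0;
}
```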