Lines matching full:wb (identifier search results; the matched lines below appear to come from the kernel's mm/page-writeback.c)

137 	struct bdi_writeback	*wb;  member
145 unsigned long wb_dirty; /* per-wb counterparts */
161 #define GDTC_INIT(__wb) .wb = (__wb), \
167 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
187 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) in wb_memcg_completions() argument
189 return &wb->memcg_completions; in wb_memcg_completions()
192 static void wb_min_max_ratio(struct bdi_writeback *wb, in wb_min_max_ratio() argument
195 unsigned long this_bw = wb->avg_write_bandwidth; in wb_min_max_ratio()
196 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_min_max_ratio()
197 unsigned long long min = wb->bdi->min_ratio; in wb_min_max_ratio()
198 unsigned long long max = wb->bdi->max_ratio; in wb_min_max_ratio()
201 * @wb may already be clean by the time control reaches here and in wb_min_max_ratio()
221 #define GDTC_INIT(__wb) .wb = (__wb), \
241 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) in wb_memcg_completions() argument
246 static void wb_min_max_ratio(struct bdi_writeback *wb, in wb_min_max_ratio() argument
249 *minp = wb->bdi->min_ratio; in wb_min_max_ratio()
250 *maxp = wb->bdi->max_ratio; in wb_min_max_ratio()
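
Note: the wb_min_max_ratio() fragments above suggest that a bdi's min/max dirty ratios are split among its writeback contexts in proportion to each wb's share of the bdi's total write bandwidth. The following is a minimal userspace sketch of that arithmetic, reconstructed from the matched lines; the function name, numbers and the exact clamping are illustrative assumptions, not the kernel code.

#include <stdio.h>

/*
 * Sketch: give a wb a slice of its bdi's min/max dirty ratio in
 * proportion to this wb's share of the bdi's total write bandwidth.
 */
static void wb_min_max_ratio_sketch(unsigned long this_bw, unsigned long tot_bw,
				    unsigned long long min, unsigned long long max,
				    unsigned long *minp, unsigned long *maxp)
{
	/* the wb may already be clean, so tot_bw may not include this_bw */
	if (this_bw < tot_bw) {
		if (min)
			min = min * this_bw / tot_bw;
		if (max < 100)
			max = max * this_bw / tot_bw;
	}
	*minp = min;
	*maxp = max;
}

int main(void)
{
	unsigned long min, max;

	/* hypothetical wb: 40 MB/s out of the bdi's 100 MB/s total */
	wb_min_max_ratio_sketch(40, 100, 5, 60, &min, &max);
	printf("wb min_ratio=%lu%% max_ratio=%lu%%\n", min, max);
	return 0;
}
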
594 * Increment @wb's writeout completion count and the global writeout
597 static inline void __wb_writeout_inc(struct bdi_writeback *wb) in __wb_writeout_inc() argument
601 inc_wb_stat(wb, WB_WRITTEN); in __wb_writeout_inc()
602 wb_domain_writeout_inc(&global_wb_domain, &wb->completions, in __wb_writeout_inc()
603 wb->bdi->max_prop_frac); in __wb_writeout_inc()
605 cgdom = mem_cgroup_wb_domain(wb); in __wb_writeout_inc()
607 wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb), in __wb_writeout_inc()
608 wb->bdi->max_prop_frac); in __wb_writeout_inc()
611 void wb_writeout_inc(struct bdi_writeback *wb) in wb_writeout_inc() argument
616 __wb_writeout_inc(wb); in wb_writeout_inc()
741 * __wb_calc_thresh - @wb's share of dirty throttling threshold
749 * more (rather than completely block them) when the wb dirty pages go high.
755 * The wb's share of dirty limit will be adapting to its throughput and
758 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
779 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio); in __wb_calc_thresh()
788 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh) in wb_calc_thresh() argument
790 struct dirty_throttle_control gdtc = { GDTC_INIT(wb), in wb_calc_thresh()
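
Note: the __wb_calc_thresh() comments above say a wb's share of the dirty limit adapts to its throughput, and the matched lines show the per-bdi min/max ratios being applied on top. A rough userspace sketch of that scaling follows; the fraction numbers are made up (the kernel derives them from the flexible-proportions counters bumped in __wb_writeout_inc()), and the exact order of operations is simplified.

#include <stdio.h>

/*
 * Sketch: a wb's dirty threshold is roughly the global threshold scaled
 * by that wb's fraction of recent writeout completions, then bounded by
 * the bdi's min/max ratios (cf. wb_min_max_ratio()).
 */
static unsigned long wb_calc_thresh_sketch(unsigned long thresh,
					   unsigned long numerator,
					   unsigned long denominator,
					   unsigned long wb_min_ratio,
					   unsigned long wb_max_ratio)
{
	unsigned long long wb_thresh;

	wb_thresh = (unsigned long long)thresh * numerator / denominator;
	wb_thresh += thresh * wb_min_ratio / 100;	/* guaranteed floor */
	if (wb_thresh > thresh * wb_max_ratio / 100)	/* hard ceiling */
		wb_thresh = thresh * wb_max_ratio / 100;
	return wb_thresh;
}

int main(void)
{
	/* 100000-page global limit, wb did 3 of the last 16 completions */
	printf("wb_thresh=%lu pages\n",
	       wb_calc_thresh_sketch(100000, 3, 16, 0, 100));
	return 0;
}
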
831 * We want the dirty pages be balanced around the global/wb setpoints.
867 * (o) wb control line
895 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
899 * - the wb dirty thresh drops quickly due to change of JBOD workload
903 struct bdi_writeback *wb = dtc->wb; in wb_position_ratio() local
904 unsigned long write_bw = wb->avg_write_bandwidth; in wb_position_ratio()
931 * such filesystems balance_dirty_pages always checks wb counters in wb_position_ratio()
932 * against wb limits. Even if global "nr_dirty" is under "freerun". in wb_position_ratio()
943 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is in wb_position_ratio()
944 * about ~6K pages (as the average of background and throttle wb in wb_position_ratio()
949 * because we want to throttle process writing to a strictlimit wb in wb_position_ratio()
953 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_position_ratio()
978 * make decision based on wb counters. But there is an in wb_position_ratio()
981 * wb's) while given strictlimit wb is below limit. in wb_position_ratio()
985 * activity in the system coming from a single strictlimit wb in wb_position_ratio()
990 * (when globally we are at freerun and wb is well below wb in wb_position_ratio()
1001 * the wb is over/under its share of dirty pages, we want to scale in wb_position_ratio()
1006 * wb setpoint in wb_position_ratio()
1014 * The main wb control line is a linear function that subjects to in wb_position_ratio()
1017 * (2) k = - 1 / (8 * write_bw) (in single wb case) in wb_position_ratio()
1020 * For single wb case, the dirty pages are observed to fluctuate in wb_position_ratio()
1041 * scale global setpoint to wb's: in wb_position_ratio()
1047 * Use span=(8*write_bw) in single wb case as indicated by in wb_position_ratio()
1064 * wb reserve area, safeguard against dirty pool underrun and disk idle in wb_position_ratio()
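
Note: the wb_position_ratio() comments above describe the wb control line as a linear function equal to 1.0 at the wb setpoint, with slope k = -1/(8*write_bw) in the single-wb case (span = 8*write_bw), and never dropping below 1/4 so a wb is throttled harder but not blocked outright. A small sketch of that line, using 10 fractional bits loosely mirroring the kernel's fixed-point shift; the clamp placement is an approximation of the real code's behaviour near the x-intercept.

#include <stdio.h>

#define SHIFT	10	/* fixed-point fractional bits */

/* wb control line: 1.0 at wb_setpoint, 0 at wb_setpoint + 8*write_bw, floored at 1/4 */
static long wb_pos_ratio_sketch(unsigned long wb_dirty,
				unsigned long wb_setpoint,
				unsigned long write_bw)
{
	unsigned long span = 8 * write_bw;		/* pages until the line hits 0 */
	unsigned long x_intercept = wb_setpoint + span;
	long pos_ratio;

	if (wb_dirty >= x_intercept)
		return (1 << SHIFT) / 4;		/* floor: 1/4 */
	pos_ratio = (long)(x_intercept - wb_dirty) * (1 << SHIFT) / span;
	if (pos_ratio < (1 << SHIFT) / 4)
		pos_ratio = (1 << SHIFT) / 4;
	return pos_ratio;
}

int main(void)
{
	/* hypothetical wb: setpoint 6000 pages, 1000 pages/s write bandwidth */
	for (unsigned long dirty = 6000; dirty <= 14000; dirty += 2000)
		printf("wb_dirty=%5lu  pos_ratio=%.2f\n", dirty,
		       wb_pos_ratio_sketch(dirty, 6000, 1000) / 1024.0);
	return 0;
}
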
1080 static void wb_update_write_bandwidth(struct bdi_writeback *wb, in wb_update_write_bandwidth() argument
1085 unsigned long avg = wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1086 unsigned long old = wb->write_bandwidth; in wb_update_write_bandwidth()
1099 bw = written - min(written, wb->written_stamp); in wb_update_write_bandwidth()
1106 bw += (u64)wb->write_bandwidth * (period - elapsed); in wb_update_write_bandwidth()
1121 if (wb_has_dirty_io(wb)) { in wb_update_write_bandwidth()
1122 long delta = avg - wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1124 &wb->bdi->tot_write_bandwidth) <= 0); in wb_update_write_bandwidth()
1126 wb->write_bandwidth = bw; in wb_update_write_bandwidth()
1127 wb->avg_write_bandwidth = avg; in wb_update_write_bandwidth()
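
Note: the wb_update_write_bandwidth() fragments above show the write-bandwidth estimator: pages written since the last sample are blended with the previous estimate over a fixed period, then smoothed once more into a long-term average. The sketch below reproduces that shape in userspace; the HZ and PERIOD constants and the smoothing step are simplified assumptions, not the kernel's precise arithmetic.

#include <stdio.h>

#define HZ	1000		/* pretend jiffies tick at 1 kHz */
#define PERIOD	(4 * HZ)	/* blending window, an assumption */

static unsigned long write_bandwidth = 25000;	  /* pages/s, last estimate */
static unsigned long avg_write_bandwidth = 25000; /* smoothed long-term average */

static void update_write_bandwidth_sketch(unsigned long elapsed,
					  unsigned long pages_written)
{
	unsigned long long bw;
	unsigned long avg = avg_write_bandwidth;
	unsigned long old = write_bandwidth;

	/* new contribution: pages_written * HZ, so /PERIOD below yields pages/s */
	bw = (unsigned long long)pages_written * HZ;
	if (elapsed > PERIOD)
		elapsed = PERIOD;
	/* blend with the previous estimate's remaining weight in the window */
	bw += (unsigned long long)write_bandwidth * (PERIOD - elapsed);
	bw /= PERIOD;

	/* one more level of smoothing: move avg an eighth of the way toward old */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;
	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

	write_bandwidth = bw;
	avg_write_bandwidth = avg;
}

int main(void)
{
	/* hypothetical sample: 5000 pages written in the last 200ms */
	update_write_bandwidth_sketch(HZ / 5, 5000);
	printf("write_bw=%lu avg=%lu pages/s\n",
	       write_bandwidth, avg_write_bandwidth);
	return 0;
}
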
1179 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1181 * Normal wb tasks will be curbed at or below it in long term.
1188 struct bdi_writeback *wb = dtc->wb; in wb_update_dirty_ratelimit() local
1193 unsigned long write_bw = wb->avg_write_bandwidth; in wb_update_dirty_ratelimit()
1194 unsigned long dirty_ratelimit = wb->dirty_ratelimit; in wb_update_dirty_ratelimit()
1206 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; in wb_update_dirty_ratelimit()
1217 * if there are N dd tasks, each throttled at task_ratelimit, the wb's in wb_update_dirty_ratelimit()
1256 * wb->dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1290 * For strictlimit case, calculations above were based on wb counters in wb_update_dirty_ratelimit()
1300 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_update_dirty_ratelimit()
1309 x = min3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1314 x = max3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1336 wb->dirty_ratelimit = max(dirty_ratelimit, 1UL); in wb_update_dirty_ratelimit()
1337 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1339 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit); in wb_update_dirty_ratelimit()
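
Note: the wb_update_dirty_ratelimit() fragments above (dirty_rate measured from the dirtied delta, "N tasks each throttled at task_ratelimit", balanced_dirty_ratelimit) point at the core feedback: if tasks were throttled at task_ratelimit and the wb still dirtied pages faster than it wrote them back, scale the throttle rate by write_bw / dirty_rate. The sketch below shows only that formula; the kernel additionally damps how fast dirty_ratelimit may move toward the balanced value (the min3/max3 lines), which is omitted here.

#include <stdio.h>

/* balanced per-task throttle rate, pages/s */
static unsigned long balanced_ratelimit_sketch(unsigned long task_ratelimit,
					       unsigned long write_bw,
					       unsigned long dirty_rate)
{
	/* "| 1" guards against a zero dirty rate */
	return (unsigned long long)task_ratelimit * write_bw / (dirty_rate | 1);
}

int main(void)
{
	/*
	 * Hypothetical: 4 tasks throttled at 10000 pages/s dirty 40000
	 * pages/s in total, but the device only writes back 20000
	 * pages/s, so each task's rate should be halved.
	 */
	printf("balanced ratelimit = %lu pages/s per task\n",
	       balanced_ratelimit_sketch(10000, 20000, 40000));
	return 0;
}
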
1347 struct bdi_writeback *wb = gdtc->wb; in __wb_update_bandwidth() local
1349 unsigned long elapsed = now - wb->bw_time_stamp; in __wb_update_bandwidth()
1353 lockdep_assert_held(&wb->list_lock); in __wb_update_bandwidth()
1361 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); in __wb_update_bandwidth()
1362 written = percpu_counter_read(&wb->stat[WB_WRITTEN]); in __wb_update_bandwidth()
1368 if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time)) in __wb_update_bandwidth()
1384 wb_update_write_bandwidth(wb, elapsed, written); in __wb_update_bandwidth()
1387 wb->dirtied_stamp = dirtied; in __wb_update_bandwidth()
1388 wb->written_stamp = written; in __wb_update_bandwidth()
1389 wb->bw_time_stamp = now; in __wb_update_bandwidth()
1392 void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time) in wb_update_bandwidth() argument
1394 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; in wb_update_bandwidth()
1416 static unsigned long wb_max_pause(struct bdi_writeback *wb, in wb_max_pause() argument
1419 unsigned long bw = wb->avg_write_bandwidth; in wb_max_pause()
1435 static long wb_min_pause(struct bdi_writeback *wb, in wb_min_pause() argument
1441 long hi = ilog2(wb->avg_write_bandwidth); in wb_min_pause()
1442 long lo = ilog2(wb->dirty_ratelimit); in wb_min_pause()
1512 struct bdi_writeback *wb = dtc->wb; in wb_dirty_limits() local
1524 * wb_thresh. Instead the auxiliary wb control line in in wb_dirty_limits()
1543 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_dirty_limits()
1544 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); in wb_dirty_limits()
1546 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_dirty_limits()
1547 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); in wb_dirty_limits()
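
Note: wb_dirty_limits() above reads the same counters two ways, wb_stat_sum() (exact) versus wb_stat() (approximate). Per-CPU counters drift by a bounded amount before being folded, so the exact sum is only worth paying for when the wb's threshold is small enough that the drift matters. The sketch below illustrates that trade-off with a toy counter; the error bound, batch size and threshold factor are illustrative assumptions.

#include <stdio.h>

#define NR_CPUS		4
#define COUNTER_BATCH	32			/* per-CPU drift before folding */

struct pcpu_counter {
	long count;				/* folded, globally visible value */
	long pcpu[NR_CPUS];			/* not-yet-folded per-CPU deltas */
};

static long counter_read(struct pcpu_counter *c)	/* fast, approximate */
{
	return c->count;
}

static long counter_sum(struct pcpu_counter *c)		/* slow, exact */
{
	long sum = c->count;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += c->pcpu[cpu];
	return sum;
}

int main(void)
{
	struct pcpu_counter reclaimable = { .count = 100, .pcpu = { 3, -5, 7, 1 } };
	long error = NR_CPUS * COUNTER_BATCH;
	long wb_thresh = 200;			/* hypothetical small threshold */

	/* exact sum only when the threshold is within the counter error margin */
	long wb_reclaimable = wb_thresh < 2 * error ?
			      counter_sum(&reclaimable) : counter_read(&reclaimable);
	printf("wb_reclaimable=%ld (error bound %ld)\n", wb_reclaimable, error);
	return 0;
}
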
1558 static void balance_dirty_pages(struct bdi_writeback *wb, in balance_dirty_pages() argument
1561 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; in balance_dirty_pages()
1562 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; in balance_dirty_pages()
1576 struct backing_dev_info *bdi = wb->bdi; in balance_dirty_pages()
1609 * If @wb belongs to !root memcg, repeat the same in balance_dirty_pages()
1612 mem_cgroup_wb_stats(wb, &filepages, &headroom, in balance_dirty_pages()
1637 * when the wb limits are ramping up in case of !strictlimit. in balance_dirty_pages()
1639 * In strictlimit case make decision based on the wb counters in balance_dirty_pages()
1640 * and limits. Small writeouts when the wb limits are ramping in balance_dirty_pages()
1664 if (unlikely(!writeback_in_progress(wb))) in balance_dirty_pages()
1665 wb_start_background_writeback(wb); in balance_dirty_pages()
1667 mem_cgroup_flush_foreign(wb); in balance_dirty_pages()
1682 * when below the per-wb freerun ceiling. in balance_dirty_pages()
1696 * pos_ratio. @wb should satisfy constraints from in balance_dirty_pages()
1709 * throttled when below the per-wb in balance_dirty_pages()
1722 if (dirty_exceeded && !wb->dirty_exceeded) in balance_dirty_pages()
1723 wb->dirty_exceeded = 1; in balance_dirty_pages()
1725 if (time_is_before_jiffies(wb->bw_time_stamp + in balance_dirty_pages()
1727 spin_lock(&wb->list_lock); in balance_dirty_pages()
1729 spin_unlock(&wb->list_lock); in balance_dirty_pages()
1733 dirty_ratelimit = wb->dirty_ratelimit; in balance_dirty_pages()
1736 max_pause = wb_max_pause(wb, sdtc->wb_dirty); in balance_dirty_pages()
1737 min_pause = wb_min_pause(wb, max_pause, in balance_dirty_pages()
1758 trace_balance_dirty_pages(wb, in balance_dirty_pages()
1787 trace_balance_dirty_pages(wb, in balance_dirty_pages()
1800 wb->dirty_sleep = now; in balance_dirty_pages()
1816 * pages exceeds dirty_thresh, give the other good wb's a pipe in balance_dirty_pages()
1831 if (!dirty_exceeded && wb->dirty_exceeded) in balance_dirty_pages()
1832 wb->dirty_exceeded = 0; in balance_dirty_pages()
1834 if (writeback_in_progress(wb)) in balance_dirty_pages()
1849 wb_start_background_writeback(wb); in balance_dirty_pages()
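
Note: the balance_dirty_pages() fragments above (dirty_ratelimit, wb_max_pause(), wb_min_pause(), dirty_sleep) are about sizing the throttling sleep: the task has dirtied some pages "on credit", and sleeping pages_dirtied / task_ratelimit seconds brings its long-term dirtying rate down to task_ratelimit, where task_ratelimit is the wb's base rate scaled by pos_ratio. The sketch below shows that calculation; the clamp values and constants are assumptions.

#include <stdio.h>

#define HZ	1000
#define SHIFT	10		/* pos_ratio fixed-point bits */

static long compute_pause_sketch(unsigned long pages_dirtied,
				 unsigned long dirty_ratelimit,	/* pages/s */
				 unsigned long pos_ratio,	/* << SHIFT fixed point */
				 long min_pause, long max_pause)
{
	unsigned long task_ratelimit;
	long pause;

	task_ratelimit = (unsigned long long)dirty_ratelimit * pos_ratio >> SHIFT;
	if (!task_ratelimit)
		task_ratelimit = 1;
	pause = (long)(HZ * pages_dirtied / task_ratelimit);

	if (pause < min_pause)
		return min_pause;
	if (pause > max_pause)
		return max_pause;
	return pause;
}

int main(void)
{
	/* hypothetical: 32 pages dirtied, 8000 pages/s base rate, pos_ratio 0.5 */
	printf("pause = %ld jiffies\n",
	       compute_pause_sketch(32, 8000, 512, 4, 200));
	return 0;
}
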
1887 struct bdi_writeback *wb = NULL; in balance_dirty_pages_ratelimited() local
1895 wb = wb_get_create_current(bdi, GFP_KERNEL); in balance_dirty_pages_ratelimited()
1896 if (!wb) in balance_dirty_pages_ratelimited()
1897 wb = &bdi->wb; in balance_dirty_pages_ratelimited()
1900 if (wb->dirty_exceeded) in balance_dirty_pages_ratelimited()
1932 balance_dirty_pages(wb, current->nr_dirtied); in balance_dirty_pages_ratelimited()
1934 wb_put(wb); in balance_dirty_pages_ratelimited()
1939 * wb_over_bg_thresh - does @wb need to be written back?
1940 * @wb: bdi_writeback of interest
1942 * Determines whether background writeback should keep writing @wb or it's
1947 bool wb_over_bg_thresh(struct bdi_writeback *wb) in wb_over_bg_thresh() argument
1949 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; in wb_over_bg_thresh()
1950 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; in wb_over_bg_thresh()
1968 thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh); in wb_over_bg_thresh()
1970 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
1972 reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
1980 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, in wb_over_bg_thresh()
1988 thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh); in wb_over_bg_thresh()
1990 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
1992 reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
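
Note: wb_over_bg_thresh() above decides whether background writeback should keep writing this wb: either the domain as a whole is over its background threshold, or this wb's own reclaimable pages exceed its proportional share of it (wb_calc_thresh(wb, bg_thresh)). The sketch below shows only that decision; the kernel repeats the same check for the memcg domain, omitted here.

#include <stdbool.h>
#include <stdio.h>

static bool over_bg_thresh_sketch(unsigned long dom_dirty, unsigned long dom_bg_thresh,
				  unsigned long wb_reclaimable, unsigned long wb_bg_thresh)
{
	if (dom_dirty > dom_bg_thresh)		/* whole domain over background limit */
		return true;
	if (wb_reclaimable > wb_bg_thresh)	/* this wb over its own share */
		return true;
	return false;
}

int main(void)
{
	/* hypothetical: domain below its limit, but this wb holds most of the dirt */
	printf("%s\n", over_bg_thresh_sketch(8000, 10000, 3000, 2500) ?
	       "keep writing" : "stop");
	return 0;
}
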
2445 struct bdi_writeback *wb; in account_page_dirtied() local
2448 wb = inode_to_wb(inode); in account_page_dirtied()
2453 inc_wb_stat(wb, WB_RECLAIMABLE); in account_page_dirtied()
2454 inc_wb_stat(wb, WB_DIRTIED); in account_page_dirtied()
2459 mem_cgroup_track_foreign_dirty(page, wb); in account_page_dirtied()
2469 struct bdi_writeback *wb) in account_page_cleaned() argument
2474 dec_wb_stat(wb, WB_RECLAIMABLE); in account_page_cleaned()
2536 struct bdi_writeback *wb; in account_page_redirty() local
2539 wb = unlocked_inode_to_wb_begin(inode, &cookie); in account_page_redirty()
2542 dec_wb_stat(wb, WB_DIRTIED); in account_page_redirty()
2648 struct bdi_writeback *wb; in __cancel_dirty_page() local
2652 wb = unlocked_inode_to_wb_begin(inode, &cookie); in __cancel_dirty_page()
2655 account_page_cleaned(page, mapping, wb); in __cancel_dirty_page()
2688 struct bdi_writeback *wb; in clear_page_dirty_for_io() local
2726 wb = unlocked_inode_to_wb_begin(inode, &cookie); in clear_page_dirty_for_io()
2730 dec_wb_stat(wb, WB_RECLAIMABLE); in clear_page_dirty_for_io()
2760 struct bdi_writeback *wb = inode_to_wb(inode); in test_clear_page_writeback() local
2762 dec_wb_stat(wb, WB_WRITEBACK); in test_clear_page_writeback()
2763 __wb_writeout_inc(wb); in test_clear_page_writeback()
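
Note: the accounting helpers at the end of the listing show the per-wb counter bookkeeping over a page's life: dirtying bumps WB_RECLAIMABLE and WB_DIRTIED, preparing the page for I/O drops WB_RECLAIMABLE, and finishing writeback drops WB_WRITEBACK and counts a writeout completion. The userspace sketch below follows one page through a normal cycle; the WB_WRITEBACK increment is attributed to the I/O-preparation step as an assumption (the matching set_page_writeback() side is not in the listing), and locking, memcg and node counters are omitted.

#include <stdio.h>

static long wb_reclaimable, wb_dirtied, wb_writeback, wb_written;

static void page_dirtied(void)		/* cf. account_page_dirtied() */
{
	wb_reclaimable++;
	wb_dirtied++;
}

static void page_prepared_for_io(void)	/* cf. clear_page_dirty_for_io() */
{
	wb_reclaimable--;
	wb_writeback++;			/* set_page_writeback() side, assumed */
}

static void page_writeback_done(void)	/* cf. test_clear_page_writeback() */
{
	wb_writeback--;
	wb_written++;			/* cf. __wb_writeout_inc() */
}

int main(void)
{
	page_dirtied();
	page_prepared_for_io();
	page_writeback_done();
	printf("RECLAIMABLE=%ld DIRTIED=%ld WRITEBACK=%ld WRITTEN=%ld\n",
	       wb_reclaimable, wb_dirtied, wb_writeback, wb_written);
	return 0;
}
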