Lines Matching full:wb
129 struct bdi_writeback *wb; member
137 unsigned long wb_dirty; /* per-wb counterparts */
153 #define GDTC_INIT(__wb) .wb = (__wb), \
159 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
179 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) in wb_memcg_completions() argument
181 return &wb->memcg_completions; in wb_memcg_completions()
184 static void wb_min_max_ratio(struct bdi_writeback *wb, in wb_min_max_ratio() argument
187 unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth); in wb_min_max_ratio()
188 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_min_max_ratio()
189 unsigned long long min = wb->bdi->min_ratio; in wb_min_max_ratio()
190 unsigned long long max = wb->bdi->max_ratio; in wb_min_max_ratio()
193 * @wb may already be clean by the time control reaches here and in wb_min_max_ratio()
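
The fragments above come from the cgroup-writeback variant of wb_min_max_ratio(), which scales the bdi-wide min/max ratios by this wb's share of the bdi's total write bandwidth, so concurrent wbs split the bdi's allowance. A minimal userspace sketch of that scaling (the function name, percent units and sample values are illustrative; the kernel additionally works in BDI_RATIO_SCALE units):

    #include <stdio.h>

    /* Stand-ins for wb->avg_write_bandwidth, bdi->tot_write_bandwidth
     * and the bdi-wide ratios; plain percent units assumed. */
    static void wb_min_max_ratio_sketch(unsigned long this_bw,
                                        unsigned long tot_bw,
                                        unsigned long bdi_min,
                                        unsigned long bdi_max,
                                        unsigned long *minp,
                                        unsigned long *maxp)
    {
        unsigned long long min = bdi_min, max = bdi_max;

        /*
         * The wb may already be clean, so tot_bw may no longer include
         * its bandwidth; only scale down when the totals are coherent.
         */
        if (this_bw < tot_bw) {
            if (min)
                min = min * this_bw / tot_bw;
            if (max < 100)
                max = max * this_bw / tot_bw;
        }
        *minp = (unsigned long)min;
        *maxp = (unsigned long)max;
    }

    int main(void)
    {
        unsigned long lo, hi;

        /* a wb writing 30MB/s on a bdi writing 120MB/s, min=4%, max=60% */
        wb_min_max_ratio_sketch(30, 120, 4, 60, &lo, &hi);
        printf("scaled min=%lu%% max=%lu%%\n", lo, hi);
        return 0;
    }

The this_bw < tot_bw guard matches the comment at line 193: a wb that has already gone clean may no longer be counted in tot_write_bandwidth.
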
213 #define GDTC_INIT(__wb) .wb = (__wb), \
233 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) in wb_memcg_completions() argument
238 static void wb_min_max_ratio(struct bdi_writeback *wb, in wb_min_max_ratio() argument
241 *minp = wb->bdi->min_ratio; in wb_min_max_ratio()
242 *maxp = wb->bdi->max_ratio; in wb_min_max_ratio()
576 * Increment @wb's writeout completion count and the global writeout
579 static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr) in __wb_writeout_add() argument
583 wb_stat_mod(wb, WB_WRITTEN, nr); in __wb_writeout_add()
584 wb_domain_writeout_add(&global_wb_domain, &wb->completions, in __wb_writeout_add()
585 wb->bdi->max_prop_frac, nr); in __wb_writeout_add()
587 cgdom = mem_cgroup_wb_domain(wb); in __wb_writeout_add()
589 wb_domain_writeout_add(cgdom, wb_memcg_completions(wb), in __wb_writeout_add()
590 wb->bdi->max_prop_frac, nr); in __wb_writeout_add()
593 void wb_writeout_inc(struct bdi_writeback *wb) in wb_writeout_inc() argument
598 __wb_writeout_add(wb, 1); in wb_writeout_inc()
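
__wb_writeout_add() above credits each completed page to two proportion estimators: the wb's slot in the global wb_domain and, under cgroup writeback, its slot in the memcg's domain. The fraction a wb contributes is what later carves up the dirty threshold. A toy model of the bookkeeping, with plain counters standing in for the aging fprop_local_percpu structures:

    #include <stdio.h>

    struct toy_domain { unsigned long events; };
    struct toy_wb     { unsigned long completions; };

    static void toy_writeout_add(struct toy_domain *dom, struct toy_wb *wb,
                                 long nr)
    {
        wb->completions += nr;      /* roughly wb_domain_writeout_add() */
        dom->events     += nr;
    }

    int main(void)
    {
        struct toy_domain dom = { 0 };
        struct toy_wb fast = { 0 }, slow = { 0 };

        toy_writeout_add(&dom, &fast, 300);
        toy_writeout_add(&dom, &slow, 100);
        printf("fast wb's writeout share: %lu%%\n",
               100 * fast.completions / dom.events);    /* 75% */
        return 0;
    }

In the kernel the counters decay per completion period, so the fraction tracks recent writeout rather than the all-time ratio shown here.
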
841 * __wb_calc_thresh - @wb's share of dirty throttling threshold
849 * more (rather than completely blocking them) when the wb dirty pages go high.
855 * The wb's share of the dirty limit adapts to its throughput and
858 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
879 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio); in __wb_calc_thresh()
888 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh) in wb_calc_thresh() argument
890 struct dirty_throttle_control gdtc = { GDTC_INIT(wb), in wb_calc_thresh()
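
__wb_calc_thresh() combines the two mechanisms above: it hands the wb a slice of the domain threshold proportional to its recent writeout fraction, then applies the bandwidth-scaled ratios from wb_min_max_ratio(). A simplified model (numerator/denominator stand in for fprop_fraction_percpu() output; percent units assumed):

    #include <stdio.h>

    static unsigned long wb_calc_thresh_sketch(unsigned long thresh,
                                               unsigned long numerator,
                                               unsigned long denominator,
                                               unsigned long wb_min_ratio,
                                               unsigned long wb_max_ratio)
    {
        unsigned long long wb_thresh;

        /* proportional slice: thresh * (wb completions / domain total) */
        wb_thresh = (unsigned long long)thresh * numerator / denominator;

        /* guarantee the floor, clamp to the ceiling */
        wb_thresh += thresh * wb_min_ratio / 100;
        if (wb_thresh > (unsigned long long)thresh * wb_max_ratio / 100)
            wb_thresh = (unsigned long long)thresh * wb_max_ratio / 100;

        return (unsigned long)wb_thresh;
    }

    int main(void)
    {
        /* 25% of recent writeout, min 1%, max 60%, thresh 100000 pages */
        printf("wb_thresh = %lu pages\n",
               wb_calc_thresh_sketch(100000, 1, 4, 1, 60));
        return 0;
    }
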
931 * We want the dirty pages to be balanced around the global/wb setpoints.
967 * (o) wb control line
995 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
999 * - the wb dirty thresh drops quickly due to change of JBOD workload
1003 struct bdi_writeback *wb = dtc->wb; in wb_position_ratio() local
1004 unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth); in wb_position_ratio()
1031 * such filesystems balance_dirty_pages always checks wb counters in wb_position_ratio()
1032 * against wb limits, even if the global "nr_dirty" is under "freerun". in wb_position_ratio()
1043 * wb_calc_thresh(wb, bg_thresh) is about 4K pages. wb_setpoint is in wb_position_ratio()
1044 * about 6K pages (as the average of background and throttle wb in wb_position_ratio()
1049 * because we want to throttle process writing to a strictlimit wb in wb_position_ratio()
1053 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_position_ratio()
1078 * make decision based on wb counters. But there is an in wb_position_ratio()
1081 * wb's) while the given strictlimit wb is below its limit. in wb_position_ratio()
1085 * activity in the system coming from a single strictlimit wb in wb_position_ratio()
1090 * (when globally we are at freerun and wb is well below wb in wb_position_ratio()
1101 * the wb is over/under its share of dirty pages, we want to scale in wb_position_ratio()
1106 * wb setpoint in wb_position_ratio()
1114 * The main wb control line is a linear function that is subject to in wb_position_ratio()
1117 * (2) k = - 1 / (8 * write_bw) (in single wb case) in wb_position_ratio()
1120 * For single wb case, the dirty pages are observed to fluctuate in wb_position_ratio()
1141 * scale global setpoint to wb's: in wb_position_ratio()
1147 * Use span=(8*write_bw) in single wb case as indicated by in wb_position_ratio()
1164 * wb reserve area, safeguard against dirty pool underrun and disk idle in wb_position_ratio()
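
The control-line comments above reduce to a cubic: pos_ratio is 1.0 at the setpoint, falls to 0 at the limit, and saturates near 2.0 well below the setpoint, which is what makes throttling soft near the setpoint and hard near the limit. A self-contained fixed-point sketch of that polynomial (SHIFT plays the role of RATELIMIT_CALC_SHIFT; the real wb_position_ratio() additionally multiplies in the per-wb control line and the strictlimit clamps):

    #include <stdio.h>
    #include <stdint.h>

    #define SHIFT 10   /* fixed-point scale */

    static int64_t pos_ratio_sketch(unsigned long setpoint,
                                    unsigned long dirty,
                                    unsigned long limit)
    {
        int64_t x = ((int64_t)setpoint - (int64_t)dirty) << SHIFT;
        int64_t pos;

        x /= (int64_t)(limit - setpoint) | 1;    /* guard div-by-zero */
        pos = ((x * x) >> SHIFT) * x >> SHIFT;   /* x^3 in fixed point */
        pos += 1 << SHIFT;                       /* + 1.0 */

        if (pos < 0)                             /* past the limit */
            pos = 0;
        if (pos > (2 << SHIFT))                  /* cap the boost */
            pos = 2 << SHIFT;
        return pos;
    }

    int main(void)
    {
        unsigned long limit = 1000, setpoint = 600, d;

        for (d = 400; d <= 1000; d += 200)
            printf("dirty=%4lu pos_ratio=%.3f\n", d,
                   pos_ratio_sketch(setpoint, d, limit) /
                   (double)(1 << SHIFT));
        return 0;
    }
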
1180 static void wb_update_write_bandwidth(struct bdi_writeback *wb, in wb_update_write_bandwidth() argument
1185 unsigned long avg = wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1186 unsigned long old = wb->write_bandwidth; in wb_update_write_bandwidth()
1199 bw = written - min(written, wb->written_stamp); in wb_update_write_bandwidth()
1206 bw += (u64)wb->write_bandwidth * (period - elapsed); in wb_update_write_bandwidth()
1221 if (wb_has_dirty_io(wb)) { in wb_update_write_bandwidth()
1222 long delta = avg - wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1224 &wb->bdi->tot_write_bandwidth) <= 0); in wb_update_write_bandwidth()
1226 wb->write_bandwidth = bw; in wb_update_write_bandwidth()
1227 WRITE_ONCE(wb->avg_write_bandwidth, avg); in wb_update_write_bandwidth()
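
wb_update_write_bandwidth() above is a damped estimator: the pages written in the last interval are blended into an instantaneous rate over a roughly 3 s window, and the long-term average only chases that rate by 1/8 steps when it keeps moving in one direction, which keeps avg_write_bandwidth stable across bursty intervals. A toy version with the state passed explicitly rather than kept in struct bdi_writeback:

    #include <stdio.h>
    #include <stdint.h>

    #define HZ     100
    #define PERIOD (3 * HZ)   /* ~3s estimation period, as in the kernel */

    static void update_write_bw(unsigned long written_delta,
                                unsigned long elapsed,      /* jiffies */
                                unsigned long *bw,   /* instant, pages/s */
                                unsigned long *avg)  /* smoothed, pages/s */
    {
        uint64_t cur = (uint64_t)written_delta * HZ;

        if (elapsed > PERIOD) {          /* stale sample: take it outright */
            cur /= elapsed;
            *bw = *avg = (unsigned long)cur;
            return;
        }
        cur += (uint64_t)*bw * (PERIOD - elapsed);  /* blend old estimate */
        cur /= PERIOD;

        if (*avg > *bw && *bw >= (unsigned long)cur)
            *avg -= (*avg - *bw) / 8;    /* consistent slowdown */
        if (*avg < *bw && *bw <= (unsigned long)cur)
            *avg += (*bw - *avg) / 8;    /* consistent speedup */
        *bw = (unsigned long)cur;
    }

    int main(void)
    {
        unsigned long bw = 25600, avg = 25600;

        for (int i = 0; i < 10; i++) {   /* device sped up to 51200 pages/s */
            update_write_bw(51200, HZ, &bw, &avg);
            printf("bw=%lu avg=%lu\n", bw, avg);
        }
        return 0;
    }
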
1279 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1281 * Normal wb tasks will be curbed at or below it in long term.
1288 struct bdi_writeback *wb = dtc->wb; in wb_update_dirty_ratelimit() local
1293 unsigned long write_bw = wb->avg_write_bandwidth; in wb_update_dirty_ratelimit()
1294 unsigned long dirty_ratelimit = wb->dirty_ratelimit; in wb_update_dirty_ratelimit()
1306 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; in wb_update_dirty_ratelimit()
1317 * if there are N dd tasks, each throttled at task_ratelimit, the wb's in wb_update_dirty_ratelimit()
1356 * wb->dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1390 * For strictlimit case, calculations above were based on wb counters in wb_update_dirty_ratelimit()
1400 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_update_dirty_ratelimit()
1409 x = min3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1414 x = max3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1436 WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL)); in wb_update_dirty_ratelimit()
1437 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1439 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit); in wb_update_dirty_ratelimit()
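
The core identity behind wb_update_dirty_ratelimit(): if tasks throttled at task_ratelimit collectively dirtied at dirty_rate while the device wrote at write_bw, then scaling the ratelimit by write_bw/dirty_rate would balance producers against the disk. The kernel then steps wb->dirty_ratelimit cautiously toward that target (the min3/max3 fragments at lines 1409-1414); the sketch below applies just the formula, with the same |1 guard against a zero dirty_rate:

    #include <stdio.h>

    static unsigned long balanced_ratelimit(unsigned long task_ratelimit,
                                            unsigned long write_bw,
                                            unsigned long dirty_rate)
    {
        return (unsigned long)((unsigned long long)task_ratelimit *
                               write_bw / (dirty_rate | 1));
    }

    int main(void)
    {
        /* dirtying at 40000 pages/s against a 10000 pages/s device */
        printf("balanced = %lu pages/s\n",
               balanced_ratelimit(20000, 10000, 40000));   /* ~5000 */
        return 0;
    }
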
1446 struct bdi_writeback *wb = gdtc->wb; in __wb_update_bandwidth() local
1452 spin_lock(&wb->list_lock); in __wb_update_bandwidth()
1460 elapsed = max(now - wb->bw_time_stamp, 1UL); in __wb_update_bandwidth()
1461 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); in __wb_update_bandwidth()
1462 written = percpu_counter_read(&wb->stat[WB_WRITTEN]); in __wb_update_bandwidth()
1477 wb_update_write_bandwidth(wb, elapsed, written); in __wb_update_bandwidth()
1479 wb->dirtied_stamp = dirtied; in __wb_update_bandwidth()
1480 wb->written_stamp = written; in __wb_update_bandwidth()
1481 WRITE_ONCE(wb->bw_time_stamp, now); in __wb_update_bandwidth()
1482 spin_unlock(&wb->list_lock); in __wb_update_bandwidth()
1485 void wb_update_bandwidth(struct bdi_writeback *wb) in wb_update_bandwidth() argument
1487 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; in wb_update_bandwidth()
1492 /* Interval after which we consider wb idle and don't estimate bandwidth */
1495 static void wb_bandwidth_estimate_start(struct bdi_writeback *wb) in wb_bandwidth_estimate_start() argument
1498 unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp); in wb_bandwidth_estimate_start()
1501 !atomic_read(&wb->writeback_inodes)) { in wb_bandwidth_estimate_start()
1502 spin_lock(&wb->list_lock); in wb_bandwidth_estimate_start()
1503 wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED); in wb_bandwidth_estimate_start()
1504 wb->written_stamp = wb_stat(wb, WB_WRITTEN); in wb_bandwidth_estimate_start()
1505 WRITE_ONCE(wb->bw_time_stamp, now); in wb_bandwidth_estimate_start()
1506 spin_unlock(&wb->list_lock); in wb_bandwidth_estimate_start()
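
wb_bandwidth_estimate_start() handles the restart-after-idle case flagged by the comment at line 1492: if more than WB_BANDWIDTH_IDLE_JIF has passed with no inodes under writeback, the dirtied/written stamps are re-synced to now so the idle gap is not averaged in as zero throughput. A toy equivalent (struct fields mirror the wb fields by name only):

    #include <stdio.h>

    #define HZ                    100
    #define WB_BANDWIDTH_IDLE_JIF HZ   /* 1s idle cutoff, as in the kernel */

    struct toy_wb {
        unsigned long bw_time_stamp, dirtied_stamp, written_stamp;
        unsigned long stat_dirtied, stat_written;
        int writeback_inodes;
    };

    static void estimate_start(struct toy_wb *wb, unsigned long now)
    {
        unsigned long elapsed = now - wb->bw_time_stamp;

        if (elapsed > WB_BANDWIDTH_IDLE_JIF && !wb->writeback_inodes) {
            wb->dirtied_stamp = wb->stat_dirtied;  /* skip the idle gap */
            wb->written_stamp = wb->stat_written;
            wb->bw_time_stamp = now;
        }
    }

    int main(void)
    {
        struct toy_wb wb = { 0, 0, 0, 5000, 4800, 0 };

        estimate_start(&wb, 3 * HZ);   /* 3s later: stamps re-synced */
        printf("written_stamp=%lu at t=%lu\n",
               wb.written_stamp, wb.bw_time_stamp);
        return 0;
    }
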
1527 static unsigned long wb_max_pause(struct bdi_writeback *wb, in wb_max_pause() argument
1530 unsigned long bw = READ_ONCE(wb->avg_write_bandwidth); in wb_max_pause()
1546 static long wb_min_pause(struct bdi_writeback *wb, in wb_min_pause() argument
1552 long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth)); in wb_min_pause()
1553 long lo = ilog2(READ_ONCE(wb->dirty_ratelimit)); in wb_min_pause()
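
wb_max_pause() and wb_min_pause() bound the sleeps handed out by balance_dirty_pages(): the maximum keeps a task from sleeping past the point where the device would have drained the wb's backlog (and never above 200 ms), while the minimum grows with the gap between device bandwidth and the task ratelimit so slow devices get fewer, longer sleeps. A simplified sketch; the kernel's exact scaling uses roundup_pow_of_two() and the ilog2 values read on lines 1552-1553:

    #include <stdio.h>

    #define HZ        100
    #define MAX_PAUSE (HZ / 5)   /* 200ms cap, as in the kernel */

    /* Toy wb_max_pause(): jiffies the device needs to retire wb_dirty. */
    static long toy_max_pause(unsigned long wb_dirty, unsigned long bw)
    {
        long t = (long)(wb_dirty * HZ / (bw | 1));

        if (t < 1)
            t = 1;
        return t < MAX_PAUSE ? t : MAX_PAUSE;
    }

    /* Toy wb_min_pause(): 10ms floor plus ~1ms per bw/ratelimit doubling. */
    static long toy_min_pause(unsigned long bw, unsigned long ratelimit)
    {
        long gap = 0;

        while (bw > (ratelimit | 1)) {   /* ~log2(bw / ratelimit) */
            gap++;
            bw >>= 1;
        }
        return HZ / 100 + gap * (10 * HZ) / 1024;
    }

    int main(void)
    {
        printf("max=%ld jiffies, min=%ld jiffies\n",
               toy_max_pause(2048, 25600), toy_min_pause(25600, 1600));
        return 0;
    }
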
1623 struct bdi_writeback *wb = dtc->wb; in wb_dirty_limits() local
1635 * wb_thresh. Instead the auxiliary wb control line in in wb_dirty_limits()
1654 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_dirty_limits()
1655 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); in wb_dirty_limits()
1657 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_dirty_limits()
1658 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); in wb_dirty_limits()
1669 static int balance_dirty_pages(struct bdi_writeback *wb, in balance_dirty_pages() argument
1672 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; in balance_dirty_pages()
1673 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; in balance_dirty_pages()
1687 struct backing_dev_info *bdi = wb->bdi; in balance_dirty_pages()
1721 * If @wb belongs to !root memcg, repeat the same in balance_dirty_pages()
1724 mem_cgroup_wb_stats(wb, &filepages, &headroom, in balance_dirty_pages()
1753 !writeback_in_progress(wb)) in balance_dirty_pages()
1754 wb_start_background_writeback(wb); in balance_dirty_pages()
1759 * when the wb limits are ramping up in case of !strictlimit. in balance_dirty_pages()
1761 * In strictlimit case make decision based on the wb counters in balance_dirty_pages()
1762 * and limits. Small writeouts when the wb limits are ramping in balance_dirty_pages()
1787 if (unlikely(!writeback_in_progress(wb))) in balance_dirty_pages()
1788 wb_start_background_writeback(wb); in balance_dirty_pages()
1790 mem_cgroup_flush_foreign(wb); in balance_dirty_pages()
1805 * when below the per-wb freerun ceiling. in balance_dirty_pages()
1819 * pos_ratio. @wb should satisfy constraints from in balance_dirty_pages()
1832 * throttled when below the per-wb in balance_dirty_pages()
1845 if (dirty_exceeded != wb->dirty_exceeded) in balance_dirty_pages()
1846 wb->dirty_exceeded = dirty_exceeded; in balance_dirty_pages()
1848 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + in balance_dirty_pages()
1853 dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit); in balance_dirty_pages()
1856 max_pause = wb_max_pause(wb, sdtc->wb_dirty); in balance_dirty_pages()
1857 min_pause = wb_min_pause(wb, max_pause, in balance_dirty_pages()
1878 trace_balance_dirty_pages(wb, in balance_dirty_pages()
1907 trace_balance_dirty_pages(wb, in balance_dirty_pages()
1940 * pages exceed dirty_thresh, give the other good wb's a pipe in balance_dirty_pages()
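
Inside the balance_dirty_pages() loop, the numbers above meet: the wb's base ratelimit is scaled by pos_ratio to get the task ratelimit, and the task then sleeps long enough for its freshly dirtied pages to average out to that rate, clamped between the min and max pauses. A sketch of just that arithmetic (a negative return models the kernel's "too short to sleep, bank it for next time" path):

    #include <stdio.h>
    #include <stdint.h>

    #define HZ    100
    #define SHIFT 10   /* pos_ratio fixed-point scale, as in the earlier sketch */

    static long compute_pause(unsigned long dirty_ratelimit,   /* pages/s */
                              uint64_t pos_ratio,              /* Q10 */
                              unsigned long pages_dirtied,
                              long min_pause, long max_pause)
    {
        uint64_t task_ratelimit = (dirty_ratelimit * pos_ratio) >> SHIFT;
        long pause;

        if (!task_ratelimit)
            return max_pause;            /* position ratio 0: hard throttle */

        pause = (long)((uint64_t)pages_dirtied * HZ / task_ratelimit);
        if (pause < min_pause)
            return -min_pause;           /* too short: remember, don't sleep */
        return pause > max_pause ? max_pause : pause;
    }

    int main(void)
    {
        /* 320 pages dirtied, base rate 8000 pages/s, pos_ratio = 0.5 */
        printf("pause = %ld jiffies\n",
               compute_pause(8000, 1 << (SHIFT - 1), 320, 1, HZ / 5));
        return 0;
    }
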
1997 struct bdi_writeback *wb = NULL; in balance_dirty_pages_ratelimited_flags() local
2006 wb = wb_get_create_current(bdi, GFP_KERNEL); in balance_dirty_pages_ratelimited_flags()
2007 if (!wb) in balance_dirty_pages_ratelimited_flags()
2008 wb = &bdi->wb; in balance_dirty_pages_ratelimited_flags()
2011 if (wb->dirty_exceeded) in balance_dirty_pages_ratelimited_flags()
2043 ret = balance_dirty_pages(wb, current->nr_dirtied, flags); in balance_dirty_pages_ratelimited_flags()
2045 wb_put(wb); in balance_dirty_pages_ratelimited_flags()
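
balance_dirty_pages_ratelimited_flags() is the cheap entry point: each task counts pages in current->nr_dirtied and only falls into balance_dirty_pages() every nr_dirtied_pause pages, with the interval cut short once the wb has exceeded its limits (the dirty_exceeded test at line 2011). A toy version of that gating, ignoring the per-CPU bdp_ratelimits leak-counting:

    #include <stdio.h>

    struct toy_task { int nr_dirtied; int nr_dirtied_pause; };

    static void dirty_one_page(struct toy_task *t, int wb_dirty_exceeded)
    {
        int ratelimit = t->nr_dirtied_pause;

        if (wb_dirty_exceeded && ratelimit > 32)
            ratelimit = 32;              /* react fast under pressure */

        if (++t->nr_dirtied >= ratelimit) {
            printf("slow path after %d pages\n", t->nr_dirtied);
            t->nr_dirtied = 0;           /* balance_dirty_pages() ran */
        }
    }

    int main(void)
    {
        struct toy_task t = { 0, 256 };

        for (int i = 0; i < 600; i++)
            dirty_one_page(&t, 0);
        return 0;
    }
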
2069 * wb_over_bg_thresh - does @wb need to be written back?
2070 * @wb: bdi_writeback of interest
2072 * Determines whether background writeback should keep writing @wb or it's
2077 bool wb_over_bg_thresh(struct bdi_writeback *wb) in wb_over_bg_thresh() argument
2079 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; in wb_over_bg_thresh()
2080 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; in wb_over_bg_thresh()
2098 thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh); in wb_over_bg_thresh()
2100 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
2102 reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
2110 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, in wb_over_bg_thresh()
2118 thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh); in wb_over_bg_thresh()
2120 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
2122 reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
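
wb_over_bg_thresh() asks the same question in up to two domains: is the global domain over its background threshold, either in total or in this wb's proportional share, and, under cgroup writeback, the same again for the memcg domain (the mdtc fragments above). A toy of the per-domain test:

    #include <stdio.h>
    #include <stdbool.h>

    struct toy_dom {
        unsigned long dirty, bg_thresh;          /* domain-wide */
        unsigned long wb_reclaimable, wb_thresh; /* this wb's share */
    };

    static bool over_bg_thresh(const struct toy_dom *d)
    {
        if (d->dirty > d->bg_thresh)     /* domain-wide overshoot */
            return true;
        return d->wb_reclaimable > d->wb_thresh;  /* wb-share overshoot */
    }

    int main(void)
    {
        /* global domain under its threshold, but this wb over its share */
        struct toy_dom dom = { 900, 1000, 400, 300 };

        printf("keep writing back: %d\n", over_bg_thresh(&dom));
        return 0;
    }
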
2545 struct bdi_writeback *wb; in do_writepages() local
2549 wb = inode_to_wb_wbc(mapping->host, wbc); in do_writepages()
2550 wb_bandwidth_estimate_start(wb); in do_writepages()
2582 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + in do_writepages()
2584 wb_update_bandwidth(wb); in do_writepages()
2614 struct bdi_writeback *wb; in folio_account_dirtied() local
2618 wb = inode_to_wb(inode); in folio_account_dirtied()
2623 wb_stat_mod(wb, WB_RECLAIMABLE, nr); in folio_account_dirtied()
2624 wb_stat_mod(wb, WB_DIRTIED, nr); in folio_account_dirtied()
2629 mem_cgroup_track_foreign_dirty(folio, wb); in folio_account_dirtied()
2638 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) in folio_account_cleaned() argument
2644 wb_stat_mod(wb, WB_RECLAIMABLE, -nr); in folio_account_cleaned()
2737 struct bdi_writeback *wb; in folio_redirty_for_writepage() local
2740 wb = unlocked_inode_to_wb_begin(inode, &cookie); in folio_redirty_for_writepage()
2743 wb_stat_mod(wb, WB_DIRTIED, -nr); in folio_redirty_for_writepage()
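
The -nr on line 2743 is the counterpart of the +nr in folio_account_dirtied(): the bandwidth and ratelimit estimators compare pages dirtied against pages written per interval, so a page the filesystem bounced back unwritten must be un-counted or it would inflate the apparent dirty rate. Toy counters showing the balance:

    #include <stdio.h>

    struct toy_wb { long dirtied, written; };

    static void account_dirtied(struct toy_wb *wb, long nr) { wb->dirtied += nr; }
    static void account_written(struct toy_wb *wb, long nr) { wb->written += nr; }
    static void account_redirty(struct toy_wb *wb, long nr) { wb->dirtied -= nr; }

    int main(void)
    {
        struct toy_wb wb = { 0, 0 };

        account_dirtied(&wb, 100);
        account_redirty(&wb, 20);    /* writepage punted 20 pages back */
        account_written(&wb, 80);
        printf("dirtied=%ld written=%ld (balanced)\n",
               wb.dirtied, wb.written);
        return 0;
    }
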
2828 struct bdi_writeback *wb; in __folio_cancel_dirty() local
2832 wb = unlocked_inode_to_wb_begin(inode, &cookie); in __folio_cancel_dirty()
2835 folio_account_cleaned(folio, wb); in __folio_cancel_dirty()
2868 struct bdi_writeback *wb; in folio_clear_dirty_for_io() local
2906 wb = unlocked_inode_to_wb_begin(inode, &cookie); in folio_clear_dirty_for_io()
2911 wb_stat_mod(wb, WB_RECLAIMABLE, -nr); in folio_clear_dirty_for_io()
2921 static void wb_inode_writeback_start(struct bdi_writeback *wb) in wb_inode_writeback_start() argument
2923 atomic_inc(&wb->writeback_inodes); in wb_inode_writeback_start()
2926 static void wb_inode_writeback_end(struct bdi_writeback *wb) in wb_inode_writeback_end() argument
2929 atomic_dec(&wb->writeback_inodes); in wb_inode_writeback_end()
2937 spin_lock_irqsave(&wb->work_lock, flags); in wb_inode_writeback_end()
2938 if (test_bit(WB_registered, &wb->state)) in wb_inode_writeback_end()
2939 queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL); in wb_inode_writeback_end()
2940 spin_unlock_irqrestore(&wb->work_lock, flags); in wb_inode_writeback_end()
2961 struct bdi_writeback *wb = inode_to_wb(inode); in __folio_end_writeback() local
2963 wb_stat_mod(wb, WB_WRITEBACK, -nr); in __folio_end_writeback()
2964 __wb_writeout_add(wb, nr); in __folio_end_writeback()
2967 wb_inode_writeback_end(wb); in __folio_end_writeback()
3013 struct bdi_writeback *wb = inode_to_wb(inode); in __folio_start_writeback() local
3015 wb_stat_mod(wb, WB_WRITEBACK, nr); in __folio_start_writeback()
3017 wb_inode_writeback_start(wb); in __folio_start_writeback()