Lines Matching full:wb

129 	struct bdi_writeback	*wb;  member
137 unsigned long wb_dirty; /* per-wb counterparts */
153 #define GDTC_INIT(__wb) .wb = (__wb), \
159 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
179 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) in wb_memcg_completions() argument
181 return &wb->memcg_completions; in wb_memcg_completions()
184 static void wb_min_max_ratio(struct bdi_writeback *wb, in wb_min_max_ratio() argument
187 unsigned long this_bw = wb->avg_write_bandwidth; in wb_min_max_ratio()
188 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_min_max_ratio()
189 unsigned long long min = wb->bdi->min_ratio; in wb_min_max_ratio()
190 unsigned long long max = wb->bdi->max_ratio; in wb_min_max_ratio()
193 * @wb may already be clean by the time control reaches here and in wb_min_max_ratio()
213 #define GDTC_INIT(__wb) .wb = (__wb), \
233 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) in wb_memcg_completions() argument
238 static void wb_min_max_ratio(struct bdi_writeback *wb, in wb_min_max_ratio() argument
241 *minp = wb->bdi->min_ratio; in wb_min_max_ratio()
242 *maxp = wb->bdi->max_ratio; in wb_min_max_ratio()
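
In the cgroup-writeback build (the fragment at source line 184), wb_min_max_ratio() scales a writeback structure's min/max dirty ratio by its share of the backing device's total write bandwidth; the plain build (source line 238) just returns the bdi's ratios unchanged. Below is a minimal user-space sketch of that proportional scaling; all names and values are illustrative, not the kernel's.

/*
 * Sketch: when several writeback structures share one backing device,
 * each one's min/max ratio is scaled by its share of the device's
 * total write bandwidth.
 */
#include <stdio.h>

static void sketch_min_max_ratio(unsigned long this_bw,   /* this wb's avg bandwidth  */
                                 unsigned long tot_bw,    /* sum across the whole bdi */
                                 unsigned long long min,  /* bdi min_ratio, percent   */
                                 unsigned long long max,  /* bdi max_ratio, percent   */
                                 unsigned long *minp, unsigned long *maxp)
{
    /* Scale only when this wb accounts for part of the device's bandwidth. */
    if (this_bw < tot_bw) {
        if (min)
            min = min * this_bw / tot_bw;
        if (max < 100)
            max = max * this_bw / tot_bw;
    }
    *minp = (unsigned long)min;
    *maxp = (unsigned long)max;
}

int main(void)
{
    unsigned long min, max;

    /* A wb doing 25 MB/s on a device doing 100 MB/s total. */
    sketch_min_max_ratio(25, 100, 10, 80, &min, &max);
    printf("scaled min=%lu%% max=%lu%%\n", min, max); /* 2% and 20% */
    return 0;
}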
586 * Increment @wb's writeout completion count and the global writeout
589 static inline void __wb_writeout_inc(struct bdi_writeback *wb) in __wb_writeout_inc() argument
593 inc_wb_stat(wb, WB_WRITTEN); in __wb_writeout_inc()
594 wb_domain_writeout_inc(&global_wb_domain, &wb->completions, in __wb_writeout_inc()
595 wb->bdi->max_prop_frac); in __wb_writeout_inc()
597 cgdom = mem_cgroup_wb_domain(wb); in __wb_writeout_inc()
599 wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb), in __wb_writeout_inc()
600 wb->bdi->max_prop_frac); in __wb_writeout_inc()
603 void wb_writeout_inc(struct bdi_writeback *wb) in wb_writeout_inc() argument
608 __wb_writeout_inc(wb); in wb_writeout_inc()
733 * __wb_calc_thresh - @wb's share of dirty throttling threshold
741 * more (rather than completely block them) when the wb dirty pages go high.
747 * The wb's share of dirty limit will be adapting to its throughput and
750 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
771 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio); in __wb_calc_thresh()
780 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh) in wb_calc_thresh() argument
782 struct dirty_throttle_control gdtc = { GDTC_INIT(wb), in wb_calc_thresh()
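
Per its comment block above, __wb_calc_thresh() gives each wb a share of the global dirty threshold that adapts to its recent writeout throughput, bounded by the min/max ratios. A user-space sketch of that proportional split, assuming a precomputed completions fraction (the kernel derives it from a flexible-proportions counter) and already-scaled ratios:

/*
 * Sketch: a wb's slice of the global dirty threshold follows its
 * recent fraction of writeout completions, then gets a guaranteed
 * floor and an enforced ceiling from the scaled min/max ratios.
 */
#include <stdio.h>

static unsigned long sketch_wb_thresh(unsigned long thresh,      /* global thresh, pages */
                                      unsigned long numerator,   /* completions share    */
                                      unsigned long denominator,
                                      unsigned long min_ratio,   /* percent, scaled      */
                                      unsigned long max_ratio)
{
    /* Proportional share of the threshold, adapting to throughput. */
    unsigned long wb_thresh =
        (unsigned long)((unsigned long long)thresh * numerator / denominator);

    wb_thresh += thresh * min_ratio / 100;          /* guaranteed floor  */
    if (wb_thresh > thresh * max_ratio / 100)
        wb_thresh = thresh * max_ratio / 100;       /* enforced ceiling  */
    return wb_thresh;
}

int main(void)
{
    /* A wb that did 1/4 of recent writeout, min 2%, max 20% of 40000 pages:
     * 10000 + 800 exceeds the 20% ceiling, so the result clamps to 8000. */
    printf("wb_thresh = %lu pages\n", sketch_wb_thresh(40000, 1, 4, 2, 20));
    return 0;
}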
823 * We want the dirty pages be balanced around the global/wb setpoints.
859 * (o) wb control line
887 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
891 * - the wb dirty thresh drops quickly due to change of JBOD workload
895 struct bdi_writeback *wb = dtc->wb; in wb_position_ratio() local
896 unsigned long write_bw = wb->avg_write_bandwidth; in wb_position_ratio()
923 * such filesystems balance_dirty_pages always checks wb counters in wb_position_ratio()
924 * against wb limits. Even if global "nr_dirty" is under "freerun". in wb_position_ratio()
935 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is in wb_position_ratio()
936 * about ~6K pages (as the average of background and throttle wb in wb_position_ratio()
941 * because we want to throttle process writing to a strictlimit wb in wb_position_ratio()
945 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_position_ratio()
970 * make decision based on wb counters. But there is an in wb_position_ratio()
973 * wb's) while given strictlimit wb is below limit. in wb_position_ratio()
977 * activity in the system coming from a single strictlimit wb in wb_position_ratio()
982 * (when globally we are at freerun and wb is well below wb in wb_position_ratio()
993 * the wb is over/under its share of dirty pages, we want to scale in wb_position_ratio()
998 * wb setpoint in wb_position_ratio()
1006 * The main wb control line is a linear function that subjects to in wb_position_ratio()
1009 * (2) k = - 1 / (8 * write_bw) (in single wb case) in wb_position_ratio()
1012 * For single wb case, the dirty pages are observed to fluctuate in wb_position_ratio()
1033 * scale global setpoint to wb's: in wb_position_ratio()
1039 * Use span=(8*write_bw) in single wb case as indicated by in wb_position_ratio()
1056 * wb reserve area, safeguard against dirty pool underrun and disk idle in wb_position_ratio()
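
The wb_position_ratio() comments above describe a control line: pos_ratio is 1.0 at the setpoint midway between the freerun and limit marks and falls off as a cubic, reaching 0 at the limit. A floating-point sketch of that global control line follows; the kernel does the same arithmetic in fixed point.

/*
 * Sketch: pos_ratio = 1 + x^3, where x measures how far the dirty page
 * count sits from the setpoint, normalized to the setpoint-to-limit
 * distance.  Throttling strengthens smoothly as dirty pages rise.
 */
#include <stdio.h>

static double sketch_pos_ratio(unsigned long freerun, unsigned long limit,
                               unsigned long dirty)
{
    double setpoint = (freerun + limit) / 2.0;
    /* x > 0 below the setpoint (speed up), x < 0 above it (slow down). */
    double x = (setpoint - dirty) / (limit - setpoint);

    return 1.0 + x * x * x;
}

int main(void)
{
    unsigned long freerun = 10000, limit = 20000;

    /* At the setpoint the ratio is 1.0; at the limit it drops to 0. */
    printf("at setpoint: %.2f\n", sketch_pos_ratio(freerun, limit, 15000));
    printf("near limit:  %.2f\n", sketch_pos_ratio(freerun, limit, 19000));
    printf("at limit:    %.2f\n", sketch_pos_ratio(freerun, limit, 20000));
    return 0;
}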
1072 static void wb_update_write_bandwidth(struct bdi_writeback *wb, in wb_update_write_bandwidth() argument
1077 unsigned long avg = wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1078 unsigned long old = wb->write_bandwidth; in wb_update_write_bandwidth()
1091 bw = written - min(written, wb->written_stamp); in wb_update_write_bandwidth()
1098 bw += (u64)wb->write_bandwidth * (period - elapsed); in wb_update_write_bandwidth()
1113 if (wb_has_dirty_io(wb)) { in wb_update_write_bandwidth()
1114 long delta = avg - wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1116 &wb->bdi->tot_write_bandwidth) <= 0); in wb_update_write_bandwidth()
1118 wb->write_bandwidth = bw; in wb_update_write_bandwidth()
1119 wb->avg_write_bandwidth = avg; in wb_update_write_bandwidth()
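
wb_update_write_bandwidth() maintains a moving average: the write rate observed over the elapsed window is blended with the previous estimate, weighted toward a roughly three-second period (the `bw += wb->write_bandwidth * (period - elapsed)` fragment above is the blend). A sketch with assumed constants:

/*
 * Sketch of the period-weighted bandwidth estimate.  HZ and PERIOD are
 * illustrative; the kernel rounds its period up to a power of two so
 * the final division becomes a shift.
 */
#include <stdio.h>

#define HZ     100UL     /* assumed ticks per second           */
#define PERIOD (4 * HZ)  /* power-of-two round-up of ~3 seconds */

static unsigned long sketch_update_bw(unsigned long old_bw,  /* pages/s          */
                                      unsigned long written, /* pages in window  */
                                      unsigned long elapsed) /* jiffies elapsed  */
{
    unsigned long long bw = (unsigned long long)written * HZ;

    if (elapsed > PERIOD)               /* estimate is stale: start over */
        return (unsigned long)(bw / elapsed);

    /* Weighted blend: elapsed/PERIOD of the new rate, the rest old. */
    bw += (unsigned long long)old_bw * (PERIOD - elapsed);
    return (unsigned long)(bw / PERIOD);
}

int main(void)
{
    /* 1000 pages in half a second (2000 p/s) against an old 1500 p/s. */
    printf("bw = %lu pages/s\n", sketch_update_bw(1500, 1000, HZ / 2));
    return 0;
}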
1171 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1173 * Normal wb tasks will be curbed at or below it in long term.
1180 struct bdi_writeback *wb = dtc->wb; in wb_update_dirty_ratelimit() local
1185 unsigned long write_bw = wb->avg_write_bandwidth; in wb_update_dirty_ratelimit()
1186 unsigned long dirty_ratelimit = wb->dirty_ratelimit; in wb_update_dirty_ratelimit()
1198 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; in wb_update_dirty_ratelimit()
1209 * if there are N dd tasks, each throttled at task_ratelimit, the wb's in wb_update_dirty_ratelimit()
1248 * wb->dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1282 * For strictlimit case, calculations above were based on wb counters in wb_update_dirty_ratelimit()
1292 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_update_dirty_ratelimit()
1301 x = min3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1306 x = max3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1328 wb->dirty_ratelimit = max(dirty_ratelimit, 1UL); in wb_update_dirty_ratelimit()
1329 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1331 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit); in wb_update_dirty_ratelimit()
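
wb_update_dirty_ratelimit() closes the feedback loop the source line 1209 comment hints at: with N tasks each throttled to task_ratelimit, the wb observes dirty_rate of about N * task_ratelimit, so scaling by write_bw / dirty_rate yields a per-task rate at which the aggregate dirtying matches the device. A sketch of that balanced-ratelimit step, with illustrative names:

/*
 * Sketch: balanced_ratelimit = task_ratelimit * write_bw / dirty_rate.
 * If dirtying outpaces the device, the per-task rate shrinks in the
 * same proportion, and vice versa.
 */
#include <stdio.h>

static unsigned long sketch_balanced_ratelimit(unsigned long task_ratelimit, /* pages/s */
                                               unsigned long write_bw,       /* pages/s */
                                               unsigned long dirty_rate)     /* pages/s */
{
    if (!dirty_rate)
        dirty_rate = 1; /* avoid division by zero */
    return (unsigned long)((unsigned long long)task_ratelimit * write_bw / dirty_rate);
}

int main(void)
{
    /* Two dirtiers at 1000 pages/s each, device sustaining 1500 pages/s:
     * each task is rebalanced to 750 pages/s, summing to the device rate. */
    printf("balanced per-task ratelimit = %lu pages/s\n",
           sketch_balanced_ratelimit(1000, 1500, 2000));
    return 0;
}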
1339 struct bdi_writeback *wb = gdtc->wb; in __wb_update_bandwidth() local
1341 unsigned long elapsed = now - wb->bw_time_stamp; in __wb_update_bandwidth()
1345 lockdep_assert_held(&wb->list_lock); in __wb_update_bandwidth()
1353 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); in __wb_update_bandwidth()
1354 written = percpu_counter_read(&wb->stat[WB_WRITTEN]); in __wb_update_bandwidth()
1360 if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time)) in __wb_update_bandwidth()
1376 wb_update_write_bandwidth(wb, elapsed, written); in __wb_update_bandwidth()
1379 wb->dirtied_stamp = dirtied; in __wb_update_bandwidth()
1380 wb->written_stamp = written; in __wb_update_bandwidth()
1381 wb->bw_time_stamp = now; in __wb_update_bandwidth()
1384 void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time) in wb_update_bandwidth() argument
1386 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; in wb_update_bandwidth()
1408 static unsigned long wb_max_pause(struct bdi_writeback *wb, in wb_max_pause() argument
1411 unsigned long bw = wb->avg_write_bandwidth; in wb_max_pause()
1427 static long wb_min_pause(struct bdi_writeback *wb, in wb_min_pause() argument
1433 long hi = ilog2(wb->avg_write_bandwidth); in wb_min_pause()
1434 long lo = ilog2(wb->dirty_ratelimit); in wb_min_pause()
1504 struct bdi_writeback *wb = dtc->wb; in wb_dirty_limits() local
1516 * wb_thresh. Instead the auxiliary wb control line in in wb_dirty_limits()
1535 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_dirty_limits()
1536 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); in wb_dirty_limits()
1538 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_dirty_limits()
1539 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); in wb_dirty_limits()
1550 static void balance_dirty_pages(struct bdi_writeback *wb, in balance_dirty_pages() argument
1553 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; in balance_dirty_pages()
1554 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; in balance_dirty_pages()
1568 struct backing_dev_info *bdi = wb->bdi; in balance_dirty_pages()
1601 * If @wb belongs to !root memcg, repeat the same in balance_dirty_pages()
1604 mem_cgroup_wb_stats(wb, &filepages, &headroom, in balance_dirty_pages()
1626 * when the wb limits are ramping up in case of !strictlimit. in balance_dirty_pages()
1628 * In strictlimit case make decision based on the wb counters in balance_dirty_pages()
1629 * and limits. Small writeouts when the wb limits are ramping in balance_dirty_pages()
1653 if (unlikely(!writeback_in_progress(wb))) in balance_dirty_pages()
1654 wb_start_background_writeback(wb); in balance_dirty_pages()
1656 mem_cgroup_flush_foreign(wb); in balance_dirty_pages()
1671 * when below the per-wb freerun ceiling. in balance_dirty_pages()
1685 * pos_ratio. @wb should satisfy constraints from in balance_dirty_pages()
1698 * throttled when below the per-wb in balance_dirty_pages()
1711 if (dirty_exceeded && !wb->dirty_exceeded) in balance_dirty_pages()
1712 wb->dirty_exceeded = 1; in balance_dirty_pages()
1714 if (time_is_before_jiffies(wb->bw_time_stamp + in balance_dirty_pages()
1716 spin_lock(&wb->list_lock); in balance_dirty_pages()
1718 spin_unlock(&wb->list_lock); in balance_dirty_pages()
1722 dirty_ratelimit = wb->dirty_ratelimit; in balance_dirty_pages()
1725 max_pause = wb_max_pause(wb, sdtc->wb_dirty); in balance_dirty_pages()
1726 min_pause = wb_min_pause(wb, max_pause, in balance_dirty_pages()
1747 trace_balance_dirty_pages(wb, in balance_dirty_pages()
1776 trace_balance_dirty_pages(wb, in balance_dirty_pages()
1789 wb->dirty_sleep = now; in balance_dirty_pages()
1805 * pages exceeds dirty_thresh, give the other good wb's a pipe in balance_dirty_pages()
1820 if (!dirty_exceeded && wb->dirty_exceeded) in balance_dirty_pages()
1821 wb->dirty_exceeded = 0; in balance_dirty_pages()
1823 if (writeback_in_progress(wb)) in balance_dirty_pages()
1838 wb_start_background_writeback(wb); in balance_dirty_pages()
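
Inside balance_dirty_pages(), the base dirty_ratelimit, the control-line pos_ratio, and the number of pages the task just dirtied combine into a sleep: pause for however many jiffies keep the task at its effective ratelimit, clamped by the wb_min_pause()/wb_max_pause() bounds seen earlier. A sketch of that arithmetic, with assumed HZ and bounds:

/*
 * Sketch: pause = HZ * pages_dirtied / (dirty_ratelimit * pos_ratio),
 * clamped to [min_pause, max_pause].  Values are illustrative.
 */
#include <stdio.h>

#define HZ 100UL /* assumed ticks per second */

static long sketch_pause(unsigned long dirty_ratelimit, /* base rate, pages/s  */
                         double pos_ratio,              /* control-line output */
                         unsigned long pages_dirtied,
                         long min_pause, long max_pause)
{
    unsigned long task_ratelimit = (unsigned long)(dirty_ratelimit * pos_ratio);
    long pause;

    if (!task_ratelimit)
        task_ratelimit = 1;
    pause = (long)(HZ * pages_dirtied / task_ratelimit);
    if (pause < min_pause)
        return min_pause;       /* too short a sleep is not worth taking */
    if (pause > max_pause)
        pause = max_pause;      /* cap latency of any single sleep       */
    return pause;
}

int main(void)
{
    /* 32 pages dirtied at an effective 800 pages/s: sleep 4 jiffies. */
    printf("pause = %ld jiffies\n", sketch_pause(1000, 0.8, 32, 1, 20));
    return 0;
}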
1876 struct bdi_writeback *wb = NULL; in balance_dirty_pages_ratelimited() local
1884 wb = wb_get_create_current(bdi, GFP_KERNEL); in balance_dirty_pages_ratelimited()
1885 if (!wb) in balance_dirty_pages_ratelimited()
1886 wb = &bdi->wb; in balance_dirty_pages_ratelimited()
1889 if (wb->dirty_exceeded) in balance_dirty_pages_ratelimited()
1921 balance_dirty_pages(wb, current->nr_dirtied); in balance_dirty_pages_ratelimited()
1923 wb_put(wb); in balance_dirty_pages_ratelimited()
1928 * wb_over_bg_thresh - does @wb need to be written back?
1929 * @wb: bdi_writeback of interest
1931 * Determines whether background writeback should keep writing @wb or it's
1936 bool wb_over_bg_thresh(struct bdi_writeback *wb) in wb_over_bg_thresh() argument
1938 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; in wb_over_bg_thresh()
1939 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; in wb_over_bg_thresh()
1955 if (wb_stat(wb, WB_RECLAIMABLE) > in wb_over_bg_thresh()
1956 wb_calc_thresh(gdtc->wb, gdtc->bg_thresh)) in wb_over_bg_thresh()
1962 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, in wb_over_bg_thresh()
1970 if (wb_stat(wb, WB_RECLAIMABLE) > in wb_over_bg_thresh()
1971 wb_calc_thresh(mdtc->wb, mdtc->bg_thresh)) in wb_over_bg_thresh()
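
wb_over_bg_thresh() answers the question in its comment by testing the global (and, with cgroup writeback, memcg) dirty counts against their background thresholds, and this wb's reclaimable pages against its proportional share of that threshold (wb_calc_thresh() on bg_thresh, as in the fragments above). A boolean sketch of one such check, with illustrative inputs:

/*
 * Sketch: writeback should continue if either the domain as a whole or
 * this wb's own share is over the background threshold.
 */
#include <stdbool.h>
#include <stdio.h>

static bool sketch_over_bg_thresh(unsigned long domain_dirty,
                                  unsigned long domain_bg_thresh,
                                  unsigned long wb_reclaimable,
                                  unsigned long wb_bg_thresh)
{
    if (domain_dirty > domain_bg_thresh)
        return true;                      /* domain-wide pressure       */
    return wb_reclaimable > wb_bg_thresh; /* this wb over its own share */
}

int main(void)
{
    /* Domain under threshold, but this wb over its share: keep writing. */
    printf("%d\n", sketch_over_bg_thresh(5000, 8000, 900, 700));
    return 0;
}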
2422 struct bdi_writeback *wb; in account_page_dirtied() local
2425 wb = inode_to_wb(inode); in account_page_dirtied()
2430 inc_wb_stat(wb, WB_RECLAIMABLE); in account_page_dirtied()
2431 inc_wb_stat(wb, WB_DIRTIED); in account_page_dirtied()
2436 mem_cgroup_track_foreign_dirty(page, wb); in account_page_dirtied()
2446 struct bdi_writeback *wb) in account_page_cleaned() argument
2451 dec_wb_stat(wb, WB_RECLAIMABLE); in account_page_cleaned()
2513 struct bdi_writeback *wb; in account_page_redirty() local
2516 wb = unlocked_inode_to_wb_begin(inode, &cookie); in account_page_redirty()
2519 dec_wb_stat(wb, WB_DIRTIED); in account_page_redirty()
2625 struct bdi_writeback *wb; in __cancel_dirty_page() local
2629 wb = unlocked_inode_to_wb_begin(inode, &cookie); in __cancel_dirty_page()
2632 account_page_cleaned(page, mapping, wb); in __cancel_dirty_page()
2665 struct bdi_writeback *wb; in clear_page_dirty_for_io() local
2703 wb = unlocked_inode_to_wb_begin(inode, &cookie); in clear_page_dirty_for_io()
2707 dec_wb_stat(wb, WB_RECLAIMABLE); in clear_page_dirty_for_io()
2737 struct bdi_writeback *wb = inode_to_wb(inode); in test_clear_page_writeback() local
2739 dec_wb_stat(wb, WB_WRITEBACK); in test_clear_page_writeback()
2740 __wb_writeout_inc(wb); in test_clear_page_writeback()