Lines Matching +full:charge +full:- +full:current +full:- +full:limit +full:- +full:mapping
1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/page-writeback.c
25 #include <linux/backing-dev.h>
54 #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
100 * The interval between `kupdate'-style writebacks
119 /* End of sysctl-exported parameters */
137 unsigned long wb_dirty; /* per-wb counterparts */
147 * reflect changes in current writeout rate.
155 .wb_completions = &(__wb)->completions
161 .wb_completions = &(__wb)->memcg_completions, \
166 return dtc->dom; in mdtc_valid()
171 return dtc->dom; in dtc_dom()
176 return mdtc->gdtc; in mdtc_gdtc()
181 return &wb->memcg_completions; in wb_memcg_completions()
187 unsigned long this_bw = wb->avg_write_bandwidth; in wb_min_max_ratio()
188 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_min_max_ratio()
189 unsigned long long min = wb->bdi->min_ratio; in wb_min_max_ratio()
190 unsigned long long max = wb->bdi->max_ratio; in wb_min_max_ratio()
214 .wb_completions = &(__wb)->completions
241 *minp = wb->bdi->min_ratio; in wb_min_max_ratio()
242 *maxp = wb->bdi->max_ratio; in wb_min_max_ratio()
255 * user-configurable dirty ratio is the effective number of pages that
259 * Because the user is allowed to specify the dirty limit globally as
260 * absolute number of bytes, calculating the per-zone dirty limit can
261 * require translating the configured limit into a percentage of
266 * node_dirtyable_memory - number of dirtyable pages in a node
270 * page cache. This is the base value for the per-node dirty limits.
278 struct zone *zone = pgdat->node_zones + z; in node_dirtyable_memory()
291 nr_pages -= min(nr_pages, pgdat->totalreserve_pages); in node_dirtyable_memory()
314 z = &NODE_DATA(node)->node_zones[i]; in highmem_dirtyable_memory()
320 nr_pages -= min(nr_pages, high_wmark_pages(z)); in highmem_dirtyable_memory()
352 * global_dirtyable_memory - number of globally dirtyable pages
367 x -= min(x, totalreserve_pages); in global_dirtyable_memory()
373 x -= highmem_dirtyable_memory(x); in global_dirtyable_memory()
379 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
382 * Calculate @dtc->thresh and ->bg_thresh considering
384 * must ensure that @dtc->avail is set before calling this function. The
385 * dirty limits will be lifted by 1/4 for real-time tasks.
389 const unsigned long available_memory = dtc->avail; in domain_dirty_limits()
393 /* convert ratios to per-PAGE_SIZE for higher precision */ in domain_dirty_limits()
402 unsigned long global_avail = gdtc->avail; in domain_dirty_limits()
408 * per-PAGE_SIZE, they can be obtained by dividing bytes by in domain_dirty_limits()
432 tsk = current; in domain_dirty_limits()
437 dtc->thresh = thresh; in domain_dirty_limits()
438 dtc->bg_thresh = bg_thresh; in domain_dirty_limits()
446 * global_dirty_limits - background-writeback and dirty-throttling thresholds
465 * node_dirty_limit - maximum number of dirty pages allowed in a node
474 struct task_struct *tsk = current; in node_dirty_limit()
490 * node_dirty_ok - tells whether a node is within its dirty limits
494 * dirty limit, %false if the limit is exceeded.
498 unsigned long limit = node_dirty_limit(pgdat); in node_dirty_ok() local
504 return nr_pages <= limit; in node_dirty_ok()
570 __fprop_inc_percpu_max(&dom->completions, completions, in wb_domain_writeout_inc()
573 if (unlikely(!dom->period_time)) { in wb_domain_writeout_inc()
580 dom->period_time = wp_next_time(jiffies); in wb_domain_writeout_inc()
581 mod_timer(&dom->period_timer, dom->period_time); in wb_domain_writeout_inc()
594 wb_domain_writeout_inc(&global_wb_domain, &wb->completions, in __wb_writeout_inc()
595 wb->bdi->max_prop_frac); in __wb_writeout_inc()
600 wb->bdi->max_prop_frac); in __wb_writeout_inc()
620 int miss_periods = (jiffies - dom->period_time) / in writeout_period()
623 if (fprop_new_period(&dom->completions, miss_periods + 1)) { in writeout_period()
624 dom->period_time = wp_next_time(dom->period_time + in writeout_period()
626 mod_timer(&dom->period_timer, dom->period_time); in writeout_period()
632 dom->period_time = 0; in writeout_period()
640 spin_lock_init(&dom->lock); in wb_domain_init()
642 timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE); in wb_domain_init()
644 dom->dirty_limit_tstamp = jiffies; in wb_domain_init()
646 return fprop_global_init(&dom->completions, gfp); in wb_domain_init()
652 del_timer_sync(&dom->period_timer); in wb_domain_exit()
653 fprop_global_destroy(&dom->completions); in wb_domain_exit()
669 if (min_ratio > bdi->max_ratio) { in bdi_set_min_ratio()
670 ret = -EINVAL; in bdi_set_min_ratio()
672 min_ratio -= bdi->min_ratio; in bdi_set_min_ratio()
675 bdi->min_ratio += min_ratio; in bdi_set_min_ratio()
677 ret = -EINVAL; in bdi_set_min_ratio()
690 return -EINVAL; in bdi_set_max_ratio()
693 if (bdi->min_ratio > max_ratio) { in bdi_set_max_ratio()
694 ret = -EINVAL; in bdi_set_max_ratio()
696 bdi->max_ratio = max_ratio; in bdi_set_max_ratio()
697 bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100; in bdi_set_max_ratio()
714 return max(thresh, dom->dirty_limit); in hard_dirty_limit()
719 * system-wide clean memory excluding the amount being used in the domain.
725 unsigned long clean = filepages - min(filepages, mdtc->dirty); in mdtc_calc_avail()
726 unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); in mdtc_calc_avail()
727 unsigned long other_clean = global_clean - min(global_clean, clean); in mdtc_calc_avail()
729 mdtc->avail = filepages + min(headroom, other_clean); in mdtc_calc_avail()
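
The four lines above cap a memcg's usable headroom by the clean memory that actually exists elsewhere in the system. A stand-alone sketch of the same arithmetic, with hypothetical page counts (min_ul() is a local helper, not a kernel API):

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

int main(void)
{
    /* hypothetical figures, all in pages */
    unsigned long filepages = 4000, headroom = 3000;   /* memcg file pages, room below its limit */
    unsigned long mdtc_dirty = 500;                    /* dirty pages charged to the memcg */
    unsigned long gdtc_avail = 100000, gdtc_dirty = 8000;

    unsigned long clean = filepages - min_ul(filepages, mdtc_dirty);          /* 3500 */
    unsigned long global_clean = gdtc_avail - min_ul(gdtc_avail, gdtc_dirty); /* 92000 */
    unsigned long other_clean = global_clean - min_ul(global_clean, clean);   /* 88500 */

    /* headroom only counts to the extent clean memory exists outside the memcg */
    printf("mdtc->avail = %lu pages\n", filepages + min_ul(headroom, other_clean)); /* 7000 */
    return 0;
}
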
733 * __wb_calc_thresh - @wb's share of dirty throttling threshold
736 * Note that balance_dirty_pages() will only seriously take it as a hard limit
744 * - starving fast devices
745 * - piling up dirty pages (that will take long time to sync) on slow devices
747 * The wb's share of dirty limit will be adapting to its throughput and
748 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
750 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
756 unsigned long thresh = dtc->thresh; in __wb_calc_thresh()
764 fprop_fraction_percpu(&dom->completions, dtc->wb_completions, in __wb_calc_thresh()
767 wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100; in __wb_calc_thresh()
771 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio); in __wb_calc_thresh()
788 *                           setpoint - dirty 3
789 *        f(dirty) := 1.0 + (----------------)
790 *                           limit - setpoint
796 * (3) f(limit) = 0 => the hard limit
803 unsigned long limit) in pos_ratio_polynom() argument
808 x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, in pos_ratio_polynom()
809 (limit - setpoint) | 1); in pos_ratio_polynom()
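
The lines above evaluate the cubic in fixed point. A stand-alone sketch of the same curve, assuming the kernel's RATELIMIT_CALC_SHIFT of 10, with div64_s64() and clamp() replaced by plain C (the "| 1" on the divisor guards against division by zero, as in the original):

#include <stdint.h>
#include <stdio.h>

#define RATELIMIT_CALC_SHIFT 10   /* fixed-point fraction bits, as in the kernel */

/* f(dirty) = 1.0 + ((setpoint - dirty) / (limit - setpoint))^3, in fixed point.
 * Note: >> on a negative value is an arithmetic shift on mainstream compilers,
 * which is what the kernel code assumes as well. */
static long long pos_ratio_polynom(unsigned long setpoint, unsigned long dirty,
                                   unsigned long limit)
{
    long long pos_ratio;
    long long x;

    x = (((int64_t)setpoint - (int64_t)dirty) * (1LL << RATELIMIT_CALC_SHIFT)) /
        (long long)((limit - setpoint) | 1);
    pos_ratio = x;
    pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;   /* x^2 */
    pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;   /* x^3 */
    pos_ratio += 1LL << RATELIMIT_CALC_SHIFT;            /* + 1.0 */

    /* clamp to [0, 2.0]: f(freerun) = 2.0, f(setpoint) = 1.0, f(limit) = 0 */
    if (pos_ratio < 0)
        pos_ratio = 0;
    if (pos_ratio > 2LL << RATELIMIT_CALC_SHIFT)
        pos_ratio = 2LL << RATELIMIT_CALC_SHIFT;
    return pos_ratio;
}

int main(void)
{
    /* hypothetical numbers: setpoint at 1000 pages, hard limit at 2000 */
    for (unsigned long dirty = 0; dirty <= 2000; dirty += 250)
        printf("dirty=%4lu  pos_ratio=%.3f\n", dirty,
               pos_ratio_polynom(1000, dirty, 2000) /
               (double)(1 << RATELIMIT_CALC_SHIFT));
    return 0;
}
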
856 *   0 +------------.------------------.----------------------*------------->
857 *           freerun^          setpoint^                 limit^   dirty pages
884 * 0 +----------------------.-------------------------------.------------->
889 * - start writing to a slow SD card and a fast disk at the same time. The SD
891 * - the wb dirty thresh drops quickly due to change of JBOD workload
895 struct bdi_writeback *wb = dtc->wb; in wb_position_ratio()
896 unsigned long write_bw = wb->avg_write_bandwidth; in wb_position_ratio()
897 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); in wb_position_ratio()
898 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); in wb_position_ratio() local
899 unsigned long wb_thresh = dtc->wb_thresh; in wb_position_ratio()
904 long long pos_ratio; /* for scaling up/down the rate limit */ in wb_position_ratio()
907 dtc->pos_ratio = 0; in wb_position_ratio()
909 if (unlikely(dtc->dirty >= limit)) in wb_position_ratio()
917 setpoint = (freerun + limit) / 2; in wb_position_ratio()
918 pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit); in wb_position_ratio()
925 * This is especially important for fuse which sets bdi->max_ratio to in wb_position_ratio()
932 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global in wb_position_ratio()
945 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_position_ratio()
948 if (dtc->wb_dirty < 8) { in wb_position_ratio()
949 dtc->pos_ratio = min_t(long long, pos_ratio * 2, in wb_position_ratio()
954 if (dtc->wb_dirty >= wb_thresh) in wb_position_ratio()
958 dtc->wb_bg_thresh); in wb_position_ratio()
963 wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty, in wb_position_ratio()
973 * wb's) while given strictlimit wb is below limit. in wb_position_ratio()
976 * but it would look too non-natural for the case of all in wb_position_ratio()
978 * with bdi->max_ratio == 100%. in wb_position_ratio()
987 dtc->pos_ratio = min(pos_ratio, wb_pos_ratio); in wb_position_ratio()
1000 * f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint) in wb_position_ratio()
1002 *                    x_intercept - wb_dirty in wb_position_ratio()
1003 *                 := -------------------------- in wb_position_ratio()
1004 *                    x_intercept - wb_setpoint in wb_position_ratio()
1009 * (2) k = - 1 / (8 * write_bw) (in single wb case) in wb_position_ratio()
1014 * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2] in wb_position_ratio()
1022 if (unlikely(wb_thresh > dtc->thresh)) in wb_position_ratio()
1023 wb_thresh = dtc->thresh; in wb_position_ratio()
1031 wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8); in wb_position_ratio()
1036 x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1); in wb_position_ratio()
1040 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case. in wb_position_ratio()
1042 *        wb_thresh                    thresh - wb_thresh in wb_position_ratio()
1043 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh in wb_position_ratio()
1044 *          thresh                          thresh in wb_position_ratio()
1046 span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16; in wb_position_ratio()
1049 if (dtc->wb_dirty < x_intercept - span / 4) { in wb_position_ratio()
1050 pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty), in wb_position_ratio()
1051 (x_intercept - wb_setpoint) | 1); in wb_position_ratio()
1061 if (dtc->wb_dirty < x_intercept) { in wb_position_ratio()
1062 if (dtc->wb_dirty > x_intercept / 8) in wb_position_ratio()
1064 dtc->wb_dirty); in wb_position_ratio()
1069 dtc->pos_ratio = pos_ratio; in wb_position_ratio()
1077 unsigned long avg = wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1078 unsigned long old = wb->write_bandwidth; in wb_update_write_bandwidth()
1084 *                   bw * elapsed + write_bandwidth * (period - elapsed) in wb_update_write_bandwidth()
1085 * write_bandwidth = --------------------------------------------------- in wb_update_write_bandwidth()
1086 *                                          period in wb_update_write_bandwidth()
1091 bw = written - min(written, wb->written_stamp); in wb_update_write_bandwidth()
1098 bw += (u64)wb->write_bandwidth * (period - elapsed); in wb_update_write_bandwidth()
1105 avg -= (avg - old) >> 3; in wb_update_write_bandwidth()
1108 avg += (old - avg) >> 3; in wb_update_write_bandwidth()
1114 long delta = avg - wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1116 &wb->bdi->tot_write_bandwidth) <= 0); in wb_update_write_bandwidth()
1118 wb->write_bandwidth = bw; in wb_update_write_bandwidth()
1119 wb->avg_write_bandwidth = avg; in wb_update_write_bandwidth()
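
wb_update_write_bandwidth() smooths the measured throughput twice: first a weighted blend over a fixed ~3-second period, then a slower average that moves by 1/8 of the gap per round to filter spikes. A user-space sketch of the same shape (HZ, PERIOD and the starting rates are hypothetical; the kernel also folds the delta into a per-bdi total, omitted here):

#include <stdio.h>

#define HZ            100   /* hypothetical jiffies per second */
#define PERIOD        512   /* roundup_pow_of_two(3 * HZ) */
#define PERIOD_SHIFT  9     /* ilog2(PERIOD) */

static unsigned long write_bandwidth     = 100 * 1024;  /* pages/s, initial guess */
static unsigned long avg_write_bandwidth = 100 * 1024;

/* Fold one measurement interval into the running estimates. */
static void update_write_bandwidth(unsigned long elapsed /* jiffies */,
                                   unsigned long pages_written)
{
    unsigned long old = write_bandwidth;
    unsigned long long bw = (unsigned long long)pages_written * HZ;

    if (elapsed > PERIOD) {
        /* interval longer than the window: take the raw measurement */
        write_bandwidth = bw / elapsed;
    } else {
        /* (bw * elapsed + write_bandwidth * (PERIOD - elapsed)) / PERIOD;
         * pages_written * HZ already equals "rate * elapsed" */
        bw += (unsigned long long)write_bandwidth * (PERIOD - elapsed);
        write_bandwidth = bw >> PERIOD_SHIFT;
    }

    /* second level: nudge avg 1/8 of the way toward the previous estimate,
     * but only while the raw estimate keeps moving away from avg */
    if (avg_write_bandwidth > old && old >= write_bandwidth)
        avg_write_bandwidth -= (avg_write_bandwidth - old) >> 3;
    if (avg_write_bandwidth < old && old <= write_bandwidth)
        avg_write_bandwidth += (old - avg_write_bandwidth) >> 3;
}

int main(void)
{
    /* hypothetical device: 2000 pages every 20 jiffies, i.e. ~10000 pages/s */
    for (int i = 0; i < 50; i++)
        update_write_bandwidth(20, 2000);
    printf("write_bandwidth=%lu avg=%lu pages/s\n",
           write_bandwidth, avg_write_bandwidth);
    return 0;
}
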
1125 unsigned long thresh = dtc->thresh; in update_dirty_limit()
1126 unsigned long limit = dom->dirty_limit; in update_dirty_limit() local
1131 if (limit < thresh) { in update_dirty_limit()
1132 limit = thresh; in update_dirty_limit()
1139 * dom->dirty_limit which is guaranteed to lie above the dirty pages. in update_dirty_limit()
1141 thresh = max(thresh, dtc->dirty); in update_dirty_limit()
1142 if (limit > thresh) { in update_dirty_limit()
1143 limit -= (limit - thresh) >> 5; in update_dirty_limit()
1148 dom->dirty_limit = limit; in update_dirty_limit()
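
update_dirty_limit() lets dom->dirty_limit chase the threshold asymmetrically: it jumps up to a raised thresh at once, but when thresh falls it decays by only 1/32 of the gap per update and never drops below the pages currently dirty. A stand-alone sketch with hypothetical numbers:

#include <stdio.h>

static unsigned long dirty_limit;   /* dom->dirty_limit analogue */

static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
{
    unsigned long limit = dirty_limit;

    if (limit < thresh) {
        limit = thresh;              /* raising the limit is safe: do it at once */
    } else {
        /* lower it slowly, and keep it above whatever is already dirty */
        thresh = thresh > dirty ? thresh : dirty;
        if (limit > thresh)
            limit -= (limit - thresh) >> 5;
    }
    dirty_limit = limit;
}

int main(void)
{
    update_dirty_limit(10000, 2000);         /* thresh rose: limit jumps to 10000 */
    printf("limit=%lu\n", dirty_limit);
    for (int i = 0; i < 3; i++) {
        update_dirty_limit(4000, 2000);      /* thresh fell: ~3% of the gap per step */
        printf("limit=%lu\n", dirty_limit);  /* 9813, 9632, 9456 */
    }
    return 0;
}
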
1159 if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) in domain_update_bandwidth()
1162 spin_lock(&dom->lock); in domain_update_bandwidth()
1163 if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) { in domain_update_bandwidth()
1165 dom->dirty_limit_tstamp = now; in domain_update_bandwidth()
1167 spin_unlock(&dom->lock); in domain_update_bandwidth()
1171 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1180 struct bdi_writeback *wb = dtc->wb; in wb_update_dirty_ratelimit()
1181 unsigned long dirty = dtc->dirty; in wb_update_dirty_ratelimit()
1182 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); in wb_update_dirty_ratelimit()
1183 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); in wb_update_dirty_ratelimit() local
1184 unsigned long setpoint = (freerun + limit) / 2; in wb_update_dirty_ratelimit()
1185 unsigned long write_bw = wb->avg_write_bandwidth; in wb_update_dirty_ratelimit()
1186 unsigned long dirty_ratelimit = wb->dirty_ratelimit; in wb_update_dirty_ratelimit()
1196 * when dirty pages are truncated by userspace or re-dirtied by FS. in wb_update_dirty_ratelimit()
1198 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; in wb_update_dirty_ratelimit()
1204 dtc->pos_ratio >> RATELIMIT_CALC_SHIFT; in wb_update_dirty_ratelimit()
1211 * formula will yield the balanced rate limit (write_bw / N). in wb_update_dirty_ratelimit()
1248 * wb->dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1252 * limit the step size. in wb_update_dirty_ratelimit()
1256 * task_ratelimit - dirty_ratelimit in wb_update_dirty_ratelimit()
1257 * = (pos_ratio - 1) * dirty_ratelimit in wb_update_dirty_ratelimit()
1266 * - dirty_ratelimit > balanced_dirty_ratelimit in wb_update_dirty_ratelimit()
1267 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) in wb_update_dirty_ratelimit()
1273 * |task_ratelimit - dirty_ratelimit| is used to limit the step size in wb_update_dirty_ratelimit()
1292 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_update_dirty_ratelimit()
1293 dirty = dtc->wb_dirty; in wb_update_dirty_ratelimit()
1294 if (dtc->wb_dirty < 8) in wb_update_dirty_ratelimit()
1295 setpoint = dtc->wb_dirty + 1; in wb_update_dirty_ratelimit()
1297 setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2; in wb_update_dirty_ratelimit()
1301 x = min3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1304 step = x - dirty_ratelimit; in wb_update_dirty_ratelimit()
1306 x = max3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1309 step = dirty_ratelimit - x; in wb_update_dirty_ratelimit()
1326 dirty_ratelimit -= step; in wb_update_dirty_ratelimit()
1328 wb->dirty_ratelimit = max(dirty_ratelimit, 1UL); in wb_update_dirty_ratelimit()
1329 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
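
The net effect of the code above: wb->dirty_ratelimit only moves when the freshly estimated balanced rate and the position-scaled task rate both indicate it is off in the same direction, and then it covers only part of the distance. A much-simplified user-space sketch (hypothetical numbers; the kernel additionally damps small steps and handles the strictlimit case, both omitted here):

#include <stdio.h>

static unsigned long dirty_ratelimit = 1000;  /* pages/s, base throttle rate */
static unsigned long balanced_prev   = 800;   /* previous balanced estimate */

static unsigned long min3u(unsigned long a, unsigned long b, unsigned long c)
{ unsigned long m = a < b ? a : b; return m < c ? m : c; }
static unsigned long max3u(unsigned long a, unsigned long b, unsigned long c)
{ unsigned long m = a > b ? a : b; return m > c ? m : c; }

static void update_ratelimit(unsigned long dirty, unsigned long setpoint,
                             unsigned long task_ratelimit,
                             unsigned long balanced)
{
    unsigned long step = 0, x;

    if (dirty < setpoint) {          /* below setpoint: the rate may only rise */
        x = min3u(balanced_prev, balanced, task_ratelimit);
        if (dirty_ratelimit < x)
            step = x - dirty_ratelimit;
    } else {                         /* above setpoint: the rate may only fall */
        x = max3u(balanced_prev, balanced, task_ratelimit);
        if (dirty_ratelimit > x)
            step = dirty_ratelimit - x;
    }
    step = (step + 7) / 8;           /* don't chase the fluctuating target fully */
    if (dirty < setpoint)
        dirty_ratelimit += step;
    else
        dirty_ratelimit -= step;
    if (!dirty_ratelimit)
        dirty_ratelimit = 1;         /* like max(dirty_ratelimit, 1UL) */
    balanced_prev = balanced;
}

int main(void)
{
    /* dirty above setpoint, both estimates below the current rate: step down */
    update_ratelimit(1500, 1000, 800, 700);
    printf("dirty_ratelimit=%lu\n", dirty_ratelimit);   /* steps from 1000 to 975 */
    return 0;
}
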
1339 struct bdi_writeback *wb = gdtc->wb; in __wb_update_bandwidth()
1341 unsigned long elapsed = now - wb->bw_time_stamp; in __wb_update_bandwidth()
1345 lockdep_assert_held(&wb->list_lock); in __wb_update_bandwidth()
1348 * rate-limit, only update once every 200ms. in __wb_update_bandwidth()
1353 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); in __wb_update_bandwidth()
1354 written = percpu_counter_read(&wb->stat[WB_WRITTEN]); in __wb_update_bandwidth()
1357 * Skip quiet periods when disk bandwidth is under-utilized. in __wb_update_bandwidth()
1360 if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time)) in __wb_update_bandwidth()
1379 wb->dirtied_stamp = dirtied; in __wb_update_bandwidth()
1380 wb->written_stamp = written; in __wb_update_bandwidth()
1381 wb->bw_time_stamp = now; in __wb_update_bandwidth()
1396 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
1403 return 1UL << (ilog2(thresh - dirty) >> 1); in dirty_poll_interval()
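
dirty_poll_interval() converts the gap to the dirty threshold into a re-check interval that grows roughly as the square root of the gap, so tasks poll the expensive global counters rarely when far from the limit and often when close. A stand-alone sketch (ilog2_ul() is a local stand-in for the kernel's ilog2()):

#include <stdio.h>

static unsigned int ilog2_ul(unsigned long v)
{
    unsigned int r = 0;
    while (v >>= 1)
        r++;
    return r;
}

/* re-check after roughly sqrt(thresh - dirty) pages have been dirtied */
static unsigned long dirty_poll_interval(unsigned long dirty,
                                         unsigned long thresh)
{
    if (thresh > dirty)
        return 1UL << (ilog2_ul(thresh - dirty) >> 1);
    return 1;
}

int main(void)
{
    /* hypothetical gaps of 100, 10000 and 1000000 pages to the threshold */
    printf("%lu %lu %lu\n",
           dirty_poll_interval(900, 1000),      /* 8   (sqrt(100) = 10)   */
           dirty_poll_interval(0, 10000),       /* 64  (sqrt(1e4) = 100)  */
           dirty_poll_interval(0, 1000000));    /* 512 (sqrt(1e6) = 1000) */
    return 0;
}
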
1411 unsigned long bw = wb->avg_write_bandwidth; in wb_max_pause()
1415 * Limit pause time for small memory systems. If sleeping for too long in wb_max_pause()
1433 long hi = ilog2(wb->avg_write_bandwidth); in wb_min_pause()
1434 long lo = ilog2(wb->dirty_ratelimit); in wb_min_pause()
1439 /* target for 10ms pause on 1-dd case */ in wb_min_pause()
1449 t += (hi - lo) * (10 * HZ) / 1024; in wb_min_pause()
1465 * 2) limit the target pause time to max_pause/2, so that the normal in wb_min_pause()
1474 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}. in wb_min_pause()
1504 struct bdi_writeback *wb = dtc->wb; in wb_dirty_limits()
1510 * - in JBOD setup, wb_thresh can fluctuate a lot in wb_dirty_limits()
1511 * - in a system with HDD and USB key, the USB key may somehow in wb_dirty_limits()
1520 dtc->wb_thresh = __wb_calc_thresh(dtc); in wb_dirty_limits()
1521 dtc->wb_bg_thresh = dtc->thresh ? in wb_dirty_limits()
1522 div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0; in wb_dirty_limits()
1530 * reported dirty, even though there are thresh-m pages in wb_dirty_limits()
1534 if (dtc->wb_thresh < 2 * wb_stat_error()) { in wb_dirty_limits()
1536 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); in wb_dirty_limits()
1539 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); in wb_dirty_limits()
1568 struct backing_dev_info *bdi = wb->bdi; in balance_dirty_pages()
1569 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; in balance_dirty_pages()
1580 gdtc->avail = global_dirtyable_memory(); in balance_dirty_pages()
1581 gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK); in balance_dirty_pages()
1588 dirty = gdtc->wb_dirty; in balance_dirty_pages()
1589 thresh = gdtc->wb_thresh; in balance_dirty_pages()
1590 bg_thresh = gdtc->wb_bg_thresh; in balance_dirty_pages()
1592 dirty = gdtc->dirty; in balance_dirty_pages()
1593 thresh = gdtc->thresh; in balance_dirty_pages()
1594 bg_thresh = gdtc->bg_thresh; in balance_dirty_pages()
1605 &mdtc->dirty, &writeback); in balance_dirty_pages()
1606 mdtc->dirty += writeback; in balance_dirty_pages()
1613 m_dirty = mdtc->wb_dirty; in balance_dirty_pages()
1614 m_thresh = mdtc->wb_thresh; in balance_dirty_pages()
1615 m_bg_thresh = mdtc->wb_bg_thresh; in balance_dirty_pages()
1617 m_dirty = mdtc->dirty; in balance_dirty_pages()
1618 m_thresh = mdtc->thresh; in balance_dirty_pages()
1619 m_bg_thresh = mdtc->bg_thresh; in balance_dirty_pages()
1625 * catch-up. This avoids (excessively) small writeouts in balance_dirty_pages()
1630 * up are the price we consciously pay for strictlimit-ing. in balance_dirty_pages()
1645 current->dirty_paused_when = now; in balance_dirty_pages()
1646 current->nr_dirtied = 0; in balance_dirty_pages()
1649 current->nr_dirtied_pause = min(intv, m_intv); in balance_dirty_pages()
1665 if ((current->flags & PF_LOCAL_THROTTLE) && in balance_dirty_pages()
1666 gdtc->wb_dirty < in balance_dirty_pages()
1667 dirty_freerun_ceiling(gdtc->wb_thresh, in balance_dirty_pages()
1668 gdtc->wb_bg_thresh)) in balance_dirty_pages()
1671 * when below the per-wb freerun ceiling. in balance_dirty_pages()
1676 dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) && in balance_dirty_pages()
1677 ((gdtc->dirty > gdtc->thresh) || strictlimit); in balance_dirty_pages()
1692 if ((current->flags & PF_LOCAL_THROTTLE) && in balance_dirty_pages()
1693 mdtc->wb_dirty < in balance_dirty_pages()
1694 dirty_freerun_ceiling(mdtc->wb_thresh, in balance_dirty_pages()
1695 mdtc->wb_bg_thresh)) in balance_dirty_pages()
1698 * throttled when below the per-wb in balance_dirty_pages()
1703 dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) && in balance_dirty_pages()
1704 ((mdtc->dirty > mdtc->thresh) || strictlimit); in balance_dirty_pages()
1707 if (mdtc->pos_ratio < gdtc->pos_ratio) in balance_dirty_pages()
1711 if (dirty_exceeded && !wb->dirty_exceeded) in balance_dirty_pages()
1712 wb->dirty_exceeded = 1; in balance_dirty_pages()
1714 if (time_is_before_jiffies(wb->bw_time_stamp + in balance_dirty_pages()
1716 spin_lock(&wb->list_lock); in balance_dirty_pages()
1718 spin_unlock(&wb->list_lock); in balance_dirty_pages()
1722 dirty_ratelimit = wb->dirty_ratelimit; in balance_dirty_pages()
1723 task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >> in balance_dirty_pages()
1725 max_pause = wb_max_pause(wb, sdtc->wb_dirty); in balance_dirty_pages()
1737 if (current->dirty_paused_when) in balance_dirty_pages()
1738 pause -= now - current->dirty_paused_when; in balance_dirty_pages()
1741 * for up to 800ms from time to time on 1-HDD; so does xfs, in balance_dirty_pages()
1748 sdtc->thresh, in balance_dirty_pages()
1749 sdtc->bg_thresh, in balance_dirty_pages()
1750 sdtc->dirty, in balance_dirty_pages()
1751 sdtc->wb_thresh, in balance_dirty_pages()
1752 sdtc->wb_dirty, in balance_dirty_pages()
1759 if (pause < -HZ) { in balance_dirty_pages()
1760 current->dirty_paused_when = now; in balance_dirty_pages()
1761 current->nr_dirtied = 0; in balance_dirty_pages()
1763 current->dirty_paused_when += period; in balance_dirty_pages()
1764 current->nr_dirtied = 0; in balance_dirty_pages()
1765 } else if (current->nr_dirtied_pause <= pages_dirtied) in balance_dirty_pages()
1766 current->nr_dirtied_pause += pages_dirtied; in balance_dirty_pages()
1771 now += min(pause - max_pause, max_pause); in balance_dirty_pages()
1777 sdtc->thresh, in balance_dirty_pages()
1778 sdtc->bg_thresh, in balance_dirty_pages()
1779 sdtc->dirty, in balance_dirty_pages()
1780 sdtc->wb_thresh, in balance_dirty_pages()
1781 sdtc->wb_dirty, in balance_dirty_pages()
1789 wb->dirty_sleep = now; in balance_dirty_pages()
1792 current->dirty_paused_when = now + pause; in balance_dirty_pages()
1793 current->nr_dirtied = 0; in balance_dirty_pages()
1794 current->nr_dirtied_pause = nr_dirtied_pause; in balance_dirty_pages()
1808 * In theory 1 page is enough to keep the consumer-producer in balance_dirty_pages()
1813 if (sdtc->wb_dirty <= wb_stat_error()) in balance_dirty_pages()
1816 if (fatal_signal_pending(current)) in balance_dirty_pages()
1820 if (!dirty_exceeded && wb->dirty_exceeded) in balance_dirty_pages()
1821 wb->dirty_exceeded = 0; in balance_dirty_pages()
1837 if (nr_reclaimable > gdtc->bg_thresh) in balance_dirty_pages()
1846 * dirty tsk->nr_dirtied_pause pages;
1850 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1852 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1860 * balance_dirty_pages_ratelimited - balance dirty memory state
1861 * @mapping: address_space which was dirtied
1869 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1870 * from overshooting the limit by (ratelimit_pages) each.
1872 void balance_dirty_pages_ratelimited(struct address_space *mapping) in balance_dirty_pages_ratelimited() argument
1874 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited()
1880 if (!(bdi->capabilities & BDI_CAP_WRITEBACK)) in balance_dirty_pages_ratelimited()
1886 wb = &bdi->wb; in balance_dirty_pages_ratelimited()
1888 ratelimit = current->nr_dirtied_pause; in balance_dirty_pages_ratelimited()
1889 if (wb->dirty_exceeded) in balance_dirty_pages_ratelimited()
1890 ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); in balance_dirty_pages_ratelimited()
1897 * time, hence all honoured too large initial task->nr_dirtied_pause. in balance_dirty_pages_ratelimited()
1900 if (unlikely(current->nr_dirtied >= ratelimit)) in balance_dirty_pages_ratelimited()
1908 * short-lived tasks (eg. gcc invocations in a kernel build) escaping in balance_dirty_pages_ratelimited()
1909 * the dirty throttling and livelock other long-run dirtiers. in balance_dirty_pages_ratelimited()
1912 if (*p > 0 && current->nr_dirtied < ratelimit) { in balance_dirty_pages_ratelimited()
1914 nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); in balance_dirty_pages_ratelimited()
1915 *p -= nr_pages_dirtied; in balance_dirty_pages_ratelimited()
1916 current->nr_dirtied += nr_pages_dirtied; in balance_dirty_pages_ratelimited()
1920 if (unlikely(current->nr_dirtied >= ratelimit)) in balance_dirty_pages_ratelimited()
1921 balance_dirty_pages(wb, current->nr_dirtied); in balance_dirty_pages_ratelimited()
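
The fast path above is a couple of per-task counter checks; only every nr_dirtied_pause pages does a task enter balance_dirty_pages(). Dirties "leaked" by short-lived tasks are parked in a per-CPU counter and charged to whichever task dirties pages next. A user-space sketch of that pattern (all names are stand-ins, and the per-CPU counters are reduced to globals):

#include <stdio.h>

static int nr_dirtied;                 /* current->nr_dirtied analogue */
static int nr_dirtied_pause = 32;      /* current->nr_dirtied_pause analogue */
static int dirty_throttle_leaks = 10;  /* pretend an exited task leaked 10 dirties */

static void balance_dirty_pages(int pages)
{
    printf("throttling after %d pages\n", pages);  /* stand-in for the real work */
}

/* Called on every page dirtied; the expensive part runs only occasionally. */
static void balance_dirty_pages_ratelimited(void)
{
    int ratelimit = nr_dirtied_pause;

    /* pick up leaked dirties so short-lived dirtiers stay accounted for */
    if (dirty_throttle_leaks > 0 && nr_dirtied < ratelimit) {
        int take = dirty_throttle_leaks < ratelimit - nr_dirtied ?
                   dirty_throttle_leaks : ratelimit - nr_dirtied;
        dirty_throttle_leaks -= take;
        nr_dirtied += take;
    }
    if (nr_dirtied >= ratelimit) {
        balance_dirty_pages(nr_dirtied);
        nr_dirtied = 0;
    }
}

int main(void)
{
    for (int i = 0; i < 100; i++) {
        nr_dirtied++;                  /* account_page_dirtied() analogue */
        balance_dirty_pages_ratelimited();
    }
    return 0;
}
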
1928 * wb_over_bg_thresh - does @wb need to be written back?
1948 gdtc->avail = global_dirtyable_memory(); in wb_over_bg_thresh()
1949 gdtc->dirty = global_node_page_state(NR_FILE_DIRTY); in wb_over_bg_thresh()
1952 if (gdtc->dirty > gdtc->bg_thresh) in wb_over_bg_thresh()
1956 wb_calc_thresh(gdtc->wb, gdtc->bg_thresh)) in wb_over_bg_thresh()
1962 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, in wb_over_bg_thresh()
1967 if (mdtc->dirty > mdtc->bg_thresh) in wb_over_bg_thresh()
1971 wb_calc_thresh(mdtc->wb, mdtc->bg_thresh)) in wb_over_bg_thresh()
1991 * and a different non-zero value will wakeup the writeback threads. in dirty_writeback_centisecs_handler()
2015 * then push it back - the user is still using the disk.
2019 mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); in laptop_io_completion()
2034 del_timer(&bdi->laptop_mode_wb_timer); in laptop_sync_completion()
2041 * If ratelimit_pages is too high then we can get into dirty-data overload
2058 dom->dirty_limit = dirty_thresh; in writeback_set_ratelimit()
2078 * is now applied to total non-HIGHPAGE memory, and as such we can't
2081 * non-HIGHMEM memory.
2097 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2098 * @mapping: address space structure to write
2110 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback() argument
2113 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
2133 …* write_cache_pages - walk the list of dirty pages of the given address space and write all of the…
2134 * @mapping: address space structure to write
2135 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2140 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2141 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2143 * the call was made get new I/O started against them. If wbc->sync_mode is
2149 * writing them. For data-integrity sync we have to be careful so that we do
2157 * lock/page writeback access order inversion - we should only ever lock
2158 * multiple pages in ascending page->index order, and looping back to the start
2163 int write_cache_pages(struct address_space *mapping, in write_cache_pages() argument
2179 if (wbc->range_cyclic) { in write_cache_pages()
2180 index = mapping->writeback_index; /* prev offset */ in write_cache_pages()
2181 end = -1; in write_cache_pages()
2183 index = wbc->range_start >> PAGE_SHIFT; in write_cache_pages()
2184 end = wbc->range_end >> PAGE_SHIFT; in write_cache_pages()
2185 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in write_cache_pages()
2188 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) { in write_cache_pages()
2189 tag_pages_for_writeback(mapping, index, end); in write_cache_pages()
2198 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in write_cache_pages()
2206 done_index = page->index; in write_cache_pages()
2218 if (unlikely(page->mapping != mapping)) { in write_cache_pages()
2230 if (wbc->sync_mode != WB_SYNC_NONE) in write_cache_pages()
2240 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); in write_cache_pages()
2258 } else if (wbc->sync_mode != WB_SYNC_ALL) { in write_cache_pages()
2260 done_index = page->index + 1; in write_cache_pages()
2274 if (--wbc->nr_to_write <= 0 && in write_cache_pages()
2275 wbc->sync_mode == WB_SYNC_NONE) { in write_cache_pages()
2289 if (wbc->range_cyclic && !done) in write_cache_pages()
2291 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) in write_cache_pages()
2292 mapping->writeback_index = done_index; in write_cache_pages()
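
A typical caller hands write_cache_pages() its own per-page callback plus private data. A hypothetical filesystem's ->writepages could delegate the tagged, range-cyclic walk like this (the myfs_* names are invented for illustration; this is kernel-context code, not a stand-alone program):

#include <linux/writeback.h>
#include <linux/pagemap.h>

/* Per-page callback: forward each locked dirty page to our own writer. */
static int myfs_writepage_cb(struct page *page, struct writeback_control *wbc,
                             void *data)
{
    struct inode *inode = data;

    return myfs_write_one_page(inode, page, wbc);   /* assumed filesystem helper */
}

/* ->writepages: write_cache_pages() does the lookup, locking and retry logic. */
static int myfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
    return write_cache_pages(mapping, wbc, myfs_writepage_cb, mapping->host);
}
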
2300 * function and set the mapping flags on error
2305 struct address_space *mapping = data; in __writepage() local
2306 int ret = mapping->a_ops->writepage(page, wbc); in __writepage()
2307 mapping_set_error(mapping, ret); in __writepage()
2312 …* generic_writepages - walk the list of dirty pages of the given address space and writepage() all…
2313 * @mapping: address space structure to write
2314 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2321 int generic_writepages(struct address_space *mapping, in generic_writepages() argument
2328 if (!mapping->a_ops->writepage) in generic_writepages()
2332 ret = write_cache_pages(mapping, wbc, __writepage, mapping); in generic_writepages()
2339 int do_writepages(struct address_space *mapping, struct writeback_control *wbc) in do_writepages() argument
2343 if (wbc->nr_to_write <= 0) in do_writepages()
2346 if (mapping->a_ops->writepages) in do_writepages()
2347 ret = mapping->a_ops->writepages(mapping, wbc); in do_writepages()
2349 ret = generic_writepages(mapping, wbc); in do_writepages()
2350 if ((ret != -ENOMEM) || (wbc->sync_mode != WB_SYNC_ALL)) in do_writepages()
2359 * write_one_page - write out a single page and wait on I/O
2364 * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
2371 struct address_space *mapping = page->mapping; in write_one_page() local
2384 ret = mapping->a_ops->writepage(page, &wbc); in write_one_page()
2393 ret = filemap_check_errors(mapping); in write_one_page()
2415 void account_page_dirtied(struct page *page, struct address_space *mapping) in account_page_dirtied() argument
2417 struct inode *inode = mapping->host; in account_page_dirtied()
2419 trace_writeback_dirty_page(page, mapping); in account_page_dirtied()
2421 if (mapping_can_writeback(mapping)) { in account_page_dirtied()
2433 current->nr_dirtied++; in account_page_dirtied()
2445 void account_page_cleaned(struct page *page, struct address_space *mapping, in account_page_cleaned() argument
2448 if (mapping_can_writeback(mapping)) { in account_page_cleaned()
2461 * page dirty in that case, but not all the buffers. This is a "bottom-up"
2462 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2472 struct address_space *mapping = page_mapping(page); in __set_page_dirty_nobuffers() local
2475 if (!mapping) { in __set_page_dirty_nobuffers()
2480 xa_lock_irqsave(&mapping->i_pages, flags); in __set_page_dirty_nobuffers()
2481 BUG_ON(page_mapping(page) != mapping); in __set_page_dirty_nobuffers()
2483 account_page_dirtied(page, mapping); in __set_page_dirty_nobuffers()
2484 __xa_set_mark(&mapping->i_pages, page_index(page), in __set_page_dirty_nobuffers()
2486 xa_unlock_irqrestore(&mapping->i_pages, flags); in __set_page_dirty_nobuffers()
2489 if (mapping->host) { in __set_page_dirty_nobuffers()
2491 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in __set_page_dirty_nobuffers()
2501 * Call this whenever redirtying a page, to de-account the dirty counters
2502 * (NR_DIRTIED, WB_DIRTIED, tsk->nr_dirtied), so that they match the written
2509 struct address_space *mapping = page->mapping; in account_page_redirty() local
2511 if (mapping && mapping_can_writeback(mapping)) { in account_page_redirty()
2512 struct inode *inode = mapping->host; in account_page_redirty()
2517 current->nr_dirtied--; in account_page_redirty()
2534 wbc->pages_skipped++; in redirty_page_for_writepage()
2544 * For pages with a mapping this should be done under the page lock
2549 * If the mapping doesn't provide a set_page_dirty a_op, then
2554 struct address_space *mapping = page_mapping(page); in set_page_dirty() local
2557 if (likely(mapping)) { in set_page_dirty()
2558 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; in set_page_dirty()
2587 * page->mapping->host, and if the page is unlocked. This is because another
2588 * CPU could truncate the page off the mapping and then free the mapping.
2590 * Usually, the page _is_ locked, or the caller is a user-space process which
2621 struct address_space *mapping = page_mapping(page); in __cancel_dirty_page() local
2623 if (mapping_can_writeback(mapping)) { in __cancel_dirty_page()
2624 struct inode *inode = mapping->host; in __cancel_dirty_page()
2632 account_page_cleaned(page, mapping, wb); in __cancel_dirty_page()
2647 * tagged as dirty in the xarray so that a concurrent write-for-sync
2648 * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage
2658 struct address_space *mapping = page_mapping(page); in clear_page_dirty_for_io() local
2663 if (mapping && mapping_can_writeback(mapping)) { in clear_page_dirty_for_io()
2664 struct inode *inode = mapping->host; in clear_page_dirty_for_io()
2673 * (b) we tell the low-level filesystem to in clear_page_dirty_for_io()
2684 * has no effect on the actual dirty bit - since in clear_page_dirty_for_io()
2719 struct address_space *mapping = page_mapping(page); in test_clear_page_writeback() local
2726 if (mapping && mapping_use_writeback_tags(mapping)) { in test_clear_page_writeback()
2727 struct inode *inode = mapping->host; in test_clear_page_writeback()
2731 xa_lock_irqsave(&mapping->i_pages, flags); in test_clear_page_writeback()
2734 __xa_clear_mark(&mapping->i_pages, page_index(page), in test_clear_page_writeback()
2736 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) { in test_clear_page_writeback()
2744 if (mapping->host && !mapping_tagged(mapping, in test_clear_page_writeback()
2746 sb_clear_inode_writeback(mapping->host); in test_clear_page_writeback()
2748 xa_unlock_irqrestore(&mapping->i_pages, flags); in test_clear_page_writeback()
2763 struct address_space *mapping = page_mapping(page); in __test_set_page_writeback() local
2767 if (mapping && mapping_use_writeback_tags(mapping)) { in __test_set_page_writeback()
2768 XA_STATE(xas, &mapping->i_pages, page_index(page)); in __test_set_page_writeback()
2769 struct inode *inode = mapping->host; in __test_set_page_writeback()
2779 on_wblist = mapping_tagged(mapping, in __test_set_page_writeback()
2783 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) in __test_set_page_writeback()
2791 if (mapping->host && !on_wblist) in __test_set_page_writeback()
2792 sb_mark_inode_writeback(mapping->host); in __test_set_page_writeback()
2832 * wait_for_stable_page() - wait for writeback to finish, if necessary.
2842 if (page->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES) in wait_for_stable_page()