• Home
  • Raw
  • Download

Lines matching the full-text search "max-memory-bandwidth" (the query is tokenized as: max, memory, bandwidth). Note: only matching lines are shown below — the listing is NOT the complete source file.

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
8 #include <linux/backing-dev.h>
11 #include <linux/page-flags.h>
/*
 * Writeback pacing constants. Both evaluate to HZ/5 jiffies (~200 ms),
 * clamped to at least 1 jiffy via max() in case HZ < 5.
 * NOTE(review): the leading "21"/"22" are listing line numbers baked in
 * by the code browser, not part of the source.
 */
21 #define HMDFS_MAX_PAUSE max((HZ / 5), 1)
22 #define HMDFS_BANDWIDTH_INTERVAL max((HZ / 5), 1)
37 try_to_writeback_inodes_sb(hwb->sbi->sb, WB_REASON_FS_FREE_SPACE); in hmdfs_writeback_inodes_sb_handler()
/*
 * hmdfs_writeback_inode_handler() (incomplete excerpt — original lines
 * 50, 53, 55-56 and 58 are filtered out of this listing).
 * Visible logic: drain hwb->inode_list_head under inode_list_lock —
 * pop the first queued hmdfs inode_info, unlink it with list_del_init(),
 * drop the lock while the inode is (presumably) written back, then
 * re-take the lock before testing the list again.
 * NOTE(review): what happens to `inode` between lines 54 and 57 is not
 * visible here — confirm against the full source before relying on it.
 */
47 spin_lock(&hwb->inode_list_lock); in hmdfs_writeback_inode_handler()
48 while (likely(!list_empty(&hwb->inode_list_head))) { in hmdfs_writeback_inode_handler()
49 info = list_first_entry(&hwb->inode_list_head, in hmdfs_writeback_inode_handler()
51 list_del_init(&info->wb_list); in hmdfs_writeback_inode_handler()
52 spin_unlock(&hwb->inode_list_lock); in hmdfs_writeback_inode_handler()
54 inode = &info->vfs_inode; in hmdfs_writeback_inode_handler()
57 spin_lock(&hwb->inode_list_lock); in hmdfs_writeback_inode_handler()
59 spin_unlock(&hwb->inode_list_lock); in hmdfs_writeback_inode_handler()
/*
 * hmdfs_writeback_inodes_sb_delayed() (incomplete excerpt; lines 66-68
 * filtered out — `timeout` is computed in the missing lines).
 * Visible logic: (re)schedule dirty_sb_writeback_work on its dedicated
 * workqueue, but only if the caller asked for an immediate kick
 * (!timeout) or the work is not already pending/running (work_busy()).
 */
65 struct hmdfs_sb_info *sbi = sb->s_fs_info; in hmdfs_writeback_inodes_sb_delayed()
69 if (!timeout || !work_busy(&sbi->h_wb->dirty_sb_writeback_work.work)) in hmdfs_writeback_inodes_sb_delayed()
70 mod_delayed_work(sbi->h_wb->dirty_sb_writeback_wq, in hmdfs_writeback_inodes_sb_delayed()
71 &sbi->h_wb->dirty_sb_writeback_work, timeout); in hmdfs_writeback_inodes_sb_delayed()
/*
 * hmdfs_writeback_inode() (incomplete excerpt; lines 83-84, 87 and 91
 * filtered out).
 * Visible logic: under inode_list_lock, queue this inode's info on the
 * tail of hwb->inode_list_head — but only if its wb_list is currently
 * empty, i.e. it is not already queued — then kick the inode-writeback
 * worker with zero delay.
 * NOTE(review): `info` is presumably derived from the inode in the
 * missing line 83/84 — confirm against the full source.
 */
81 struct hmdfs_sb_info *sbi = sb->s_fs_info; in hmdfs_writeback_inode()
82 struct hmdfs_writeback *hwb = sbi->h_wb; in hmdfs_writeback_inode()
85 spin_lock(&hwb->inode_list_lock); in hmdfs_writeback_inode()
86 if (list_empty(&info->wb_list)) { in hmdfs_writeback_inode()
88 list_add_tail(&info->wb_list, &hwb->inode_list_head); in hmdfs_writeback_inode()
89 queue_delayed_work(hwb->dirty_inode_writeback_wq, in hmdfs_writeback_inode()
90 &hwb->dirty_inode_writeback_work, 0); in hmdfs_writeback_inode()
92 spin_unlock(&hwb->inode_list_lock); in hmdfs_writeback_inode()
106 while (pagevec_lookup_tag(&pvec, inode->i_mapping, &index, tag)) { in hmdfs_idirty_pages()
/*
 * hmdfs_calculate_dirty_thresh() (excerpt; line 138 filtered out).
 * Converts the four configured byte limits into page-count thresholds
 * (DIV_ROUND_UP so any partial page still counts), then caches three
 * ratios via hmdfs_thresh_ratio():
 *   fs_bg_ratio   = fs background : fs hard threshold
 *   file_bg_ratio = file background : file hard threshold
 *   fs_file_ratio = file hard : fs hard threshold
 * These ratios let the bandwidth-based code scale one threshold and
 * derive the others proportionally (see hmdfs_update_dirty_limit()).
 */
132 hwb->dirty_fs_thresh = DIV_ROUND_UP(hwb->dirty_fs_bytes, PAGE_SIZE); in hmdfs_calculate_dirty_thresh()
133 hwb->dirty_file_thresh = DIV_ROUND_UP(hwb->dirty_file_bytes, PAGE_SIZE); in hmdfs_calculate_dirty_thresh()
134 hwb->dirty_fs_bg_thresh = in hmdfs_calculate_dirty_thresh()
135 DIV_ROUND_UP(hwb->dirty_fs_bg_bytes, PAGE_SIZE); in hmdfs_calculate_dirty_thresh()
136 hwb->dirty_file_bg_thresh = in hmdfs_calculate_dirty_thresh()
137 DIV_ROUND_UP(hwb->dirty_file_bg_bytes, PAGE_SIZE); in hmdfs_calculate_dirty_thresh()
139 hwb->fs_bg_ratio = hmdfs_thresh_ratio(hwb->dirty_fs_bg_thresh, in hmdfs_calculate_dirty_thresh()
140 hwb->dirty_fs_thresh); in hmdfs_calculate_dirty_thresh()
141 hwb->file_bg_ratio = hmdfs_thresh_ratio(hwb->dirty_file_bg_thresh, in hmdfs_calculate_dirty_thresh()
142 hwb->dirty_file_thresh); in hmdfs_calculate_dirty_thresh()
143 hwb->fs_file_ratio = hmdfs_thresh_ratio(hwb->dirty_file_thresh, in hmdfs_calculate_dirty_thresh()
144 hwb->dirty_fs_thresh); in hmdfs_calculate_dirty_thresh()
/*
 * hmdfs_init_dirty_limit() (excerpt; lines 150, 155, 157-159, 161-162,
 * 167, 172-176 filtered out).
 * Seeds the per-throttle-context (hdtc) thresholds from the statically
 * configured values. If dynamic (bandwidth-based) thresholds are
 * disabled, that is all (the early return at the missing line 157 is
 * implied by the `if (!hwb->dirty_auto_threshold)` guard). Otherwise
 * the previously bandwidth-adjusted thresholds (bw_fs_thresh /
 * bw_file_thresh) are applied when they are LOWER than the static ones,
 * with the matching background threshold re-derived from the cached
 * bg ratio so the fg:bg proportion is preserved.
 * Finally thresh_time_stamp is back-dated past one full
 * HMDFS_BANDWIDTH_INTERVAL so the first balance pass re-evaluates the
 * limits immediately.
 */
149 struct hmdfs_writeback *hwb = hdtc->hwb; in hmdfs_init_dirty_limit()
151 hdtc->fs_thresh = hdtc->hwb->dirty_fs_thresh; in hmdfs_init_dirty_limit()
152 hdtc->file_thresh = hdtc->hwb->dirty_file_thresh; in hmdfs_init_dirty_limit()
153 hdtc->fs_bg_thresh = hdtc->hwb->dirty_fs_bg_thresh; in hmdfs_init_dirty_limit()
154 hdtc->file_bg_thresh = hdtc->hwb->dirty_file_bg_thresh; in hmdfs_init_dirty_limit()
156 if (!hwb->dirty_auto_threshold) in hmdfs_init_dirty_limit()
160 * Init thresh according the previous bandwidth adjusted thresh, in hmdfs_init_dirty_limit()
163 if (hwb->bw_fs_thresh < hdtc->fs_thresh) { in hmdfs_init_dirty_limit()
164 hdtc->fs_thresh = hwb->bw_fs_thresh; in hmdfs_init_dirty_limit()
165 hdtc->fs_bg_thresh = hmdfs_ratio_thresh(hwb->fs_bg_ratio, in hmdfs_init_dirty_limit()
166 hdtc->fs_thresh); in hmdfs_init_dirty_limit()
168 if (hwb->bw_file_thresh < hdtc->file_thresh) { in hmdfs_init_dirty_limit()
169 hdtc->file_thresh = hwb->bw_file_thresh; in hmdfs_init_dirty_limit()
170 hdtc->file_bg_thresh = hmdfs_ratio_thresh(hwb->file_bg_ratio, in hmdfs_init_dirty_limit()
171 hdtc->file_thresh); in hmdfs_init_dirty_limit()
177 hdtc->thresh_time_stamp = jiffies - HMDFS_BANDWIDTH_INTERVAL - 1; in hmdfs_init_dirty_limit()
/*
 * hmdfs_update_dirty_limit() (excerpt; lines 186-187, 189-190, 194,
 * 199-200, 202-203, 205, 211, 220 and 224 filtered out — in particular
 * the line computing `thresh` from `bw` and `time_limit` is missing).
 * Visible logic:
 *  - bail out when dynamic thresholds are disabled;
 *  - sample the bdi's avg_write_bandwidth and, under
 *    write_bandwidth_lock, record running max/min and the latest value;
 *  - floor bw at bw_thresh_lowerlimit so a stalled link cannot drive
 *    the dirty limit toward zero (per the comment at line 201);
 *  - if the bandwidth-derived `thresh` would exceed the static fs
 *    threshold, fall back to all four static thresholds; otherwise take
 *    fs_thresh = thresh and derive fs_bg/file/file_bg proportionally
 *    from the ratios cached by hmdfs_calculate_dirty_thresh();
 *  - persist the adjusted fs/file thresholds (consumed by
 *    hmdfs_init_dirty_limit()) and stamp the update time.
 * NOTE(review): exactly how `thresh` scales with bw * time_limit is in
 * the filtered-out lines — confirm against the full source.
 */
182 struct hmdfs_writeback *hwb = hdtc->hwb; in hmdfs_update_dirty_limit()
183 struct bdi_writeback *wb = hwb->wb; in hmdfs_update_dirty_limit()
184 unsigned int time_limit = hwb->writeback_timelimit; in hmdfs_update_dirty_limit()
185 unsigned long bw = wb->avg_write_bandwidth; in hmdfs_update_dirty_limit()
188 if (!hwb->dirty_auto_threshold) in hmdfs_update_dirty_limit()
191 spin_lock(&hwb->write_bandwidth_lock); in hmdfs_update_dirty_limit()
192 if (bw > hwb->max_write_bandwidth) in hmdfs_update_dirty_limit()
193 hwb->max_write_bandwidth = bw; in hmdfs_update_dirty_limit()
195 if (bw < hwb->min_write_bandwidth) in hmdfs_update_dirty_limit()
196 hwb->min_write_bandwidth = bw; in hmdfs_update_dirty_limit()
197 hwb->avg_write_bandwidth = bw; in hmdfs_update_dirty_limit()
198 spin_unlock(&hwb->write_bandwidth_lock); in hmdfs_update_dirty_limit()
201 * If the bandwidth is lower than the lower limit, it may propably in hmdfs_update_dirty_limit()
204 bw = max(bw, hwb->bw_thresh_lowerlimit); in hmdfs_update_dirty_limit()
206 if (thresh >= hwb->dirty_fs_thresh) { in hmdfs_update_dirty_limit()
207 hdtc->fs_thresh = hwb->dirty_fs_thresh; in hmdfs_update_dirty_limit()
208 hdtc->file_thresh = hwb->dirty_file_thresh; in hmdfs_update_dirty_limit()
209 hdtc->fs_bg_thresh = hwb->dirty_fs_bg_thresh; in hmdfs_update_dirty_limit()
210 hdtc->file_bg_thresh = hwb->dirty_file_bg_thresh; in hmdfs_update_dirty_limit()
212 /* Adjust thresh according to current bandwidth */ in hmdfs_update_dirty_limit()
213 hdtc->fs_thresh = thresh; in hmdfs_update_dirty_limit()
214 hdtc->fs_bg_thresh = hmdfs_ratio_thresh(hwb->fs_bg_ratio, in hmdfs_update_dirty_limit()
215 hdtc->fs_thresh); in hmdfs_update_dirty_limit()
216 hdtc->file_thresh = hmdfs_ratio_thresh(hwb->fs_file_ratio, in hmdfs_update_dirty_limit()
217 hdtc->fs_thresh); in hmdfs_update_dirty_limit()
218 hdtc->file_bg_thresh = hmdfs_ratio_thresh(hwb->file_bg_ratio, in hmdfs_update_dirty_limit()
219 hdtc->file_thresh); in hmdfs_update_dirty_limit()
221 /* Save bandwidth adjusted thresh */ in hmdfs_update_dirty_limit()
222 hwb->bw_fs_thresh = hdtc->fs_thresh; in hmdfs_update_dirty_limit()
223 hwb->bw_file_thresh = hdtc->file_thresh; in hmdfs_update_dirty_limit()
225 hdtc->thresh_time_stamp = jiffies; in hmdfs_update_dirty_limit()
/*
 * hmdfs_update_ratelimit() (excerpt; the divisor on the original line
 * 236 is filtered out). Derives the per-CPU dirtying ratelimit from the
 * file background threshold and clamps it to a minimum so throttling
 * checks still run on tiny thresholds.
 */
235 hwb->ratelimit_pages = hdtc.file_bg_thresh / in hmdfs_update_ratelimit()
237 if (hwb->ratelimit_pages < HMDFS_MIN_RATELIMIT_PAGES) in hmdfs_update_ratelimit()
238 hwb->ratelimit_pages = HMDFS_MIN_RATELIMIT_PAGES; in hmdfs_update_ratelimit()
/*
 * hmdfs_wb_pause() (excerpt — almost all of the body is filtered out).
 * Visible: samples the bdi's average write bandwidth; the comment at
 * line 249 indicates the computed pause is capped (HMDFS_MAX_PAUSE,
 * presumably) to avoid long sleeps on small-memory systems.
 */
245 unsigned long bw = wb->avg_write_bandwidth; in hmdfs_wb_pause()
249 * Limit pause time for small memory systems. If sleeping for too long in hmdfs_wb_pause()
/*
 * hmdfs_dirty_freerun_ceiling() (excerpt — the selector between the two
 * returns, original line 267, is filtered out). Returns the "freerun"
 * ceiling: the midpoint between the hard and background thresholds,
 * for either the per-fs or the per-file pair.
 */
266 return (hdtc->fs_thresh + hdtc->fs_bg_thresh) / 2; in hmdfs_dirty_freerun_ceiling()
268 return (hdtc->file_thresh + hdtc->file_bg_thresh) / 2; in hmdfs_dirty_freerun_ceiling()
276 return 1UL << (ilog2(thresh - dirty) >> 1); in hmdfs_dirty_intv()
/*
 * hmdfs_balance_dirty_pages() (heavily filtered excerpt — the core
 * throttling loop, dirty counting and pause logic are mostly missing).
 * Visible logic:
 *  - resolve mapping -> inode -> sb -> sbi -> hwb and the bdi wb;
 *  - when periodic writeback is enabled (dirty_writeback_interval != 0),
 *    schedule delayed per-sb writeback (interval * 10, i.e. the value is
 *    stored in centiseconds — TODO confirm units against full source);
 *  - per-fs and per-file "overbalance" writeback paths (bodies missing);
 *  - set current->nr_dirtied_pause/nr_dirtied to pace this task;
 *  - per the comment block at lines 346-349, bandwidth-adjusted
 *    thresholds cannot be refreshed here because the kernel's bandwidth
 *    update helper is not exported, so updates can lag;
 *  - maintain hwb->dirty_exceeded as a latched flag: set when per-file
 *    or per-fs reclaimable pages exceed the throttle limit, cleared
 *    once back under it.
 */
282 struct inode *inode = mapping->host; in hmdfs_balance_dirty_pages()
283 struct super_block *sb = inode->i_sb; in hmdfs_balance_dirty_pages()
284 struct hmdfs_sb_info *sbi = sb->s_fs_info; in hmdfs_balance_dirty_pages()
285 struct hmdfs_writeback *hwb = sbi->h_wb; in hmdfs_balance_dirty_pages()
286 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; in hmdfs_balance_dirty_pages()
293 if (hwb->dirty_writeback_interval != 0) in hmdfs_balance_dirty_pages()
295 sb, hwb->dirty_writeback_interval * 10); in hmdfs_balance_dirty_pages()
303 /* Per-filesystem overbalance writeback */ in hmdfs_balance_dirty_pages()
313 /* Per-file overbalance writeback */ in hmdfs_balance_dirty_pages()
331 current->nr_dirtied_pause = diff; in hmdfs_balance_dirty_pages()
332 current->nr_dirtied = 0; in hmdfs_balance_dirty_pages()
346 * thresh according to current bandwidth. Update bandwidth in hmdfs_balance_dirty_pages()
348 * not exported, so we cannot update bandwidth here, so the in hmdfs_balance_dirty_pages()
349 * bandwidth' update will be delayed if writing a lot to a in hmdfs_balance_dirty_pages()
352 if (hwb->dirty_auto_threshold && in hmdfs_balance_dirty_pages()
365 current->nr_dirtied = 0; in hmdfs_balance_dirty_pages()
369 * Per-file or per-fs reclaimable pages exceed throttle limit, in hmdfs_balance_dirty_pages()
373 if (dirty_exceeded && !hwb->dirty_exceeded) in hmdfs_balance_dirty_pages()
374 hwb->dirty_exceeded = true; in hmdfs_balance_dirty_pages()
389 if (!dirty_exceeded && hwb->dirty_exceeded) in hmdfs_balance_dirty_pages()
390 hwb->dirty_exceeded = false; in hmdfs_balance_dirty_pages()
/*
 * hmdfs_balance_dirty_pages_ratelimited() (excerpt; several lines
 * filtered out, including the call into the full balance path).
 * Mirrors the core kernel's balance_dirty_pages_ratelimited():
 *  - no-op when dirty_writeback_control is off;
 *  - kick delayed per-sb writeback when periodic writeback is enabled;
 *  - pick this task's pause budget (nr_dirtied_pause), tightened when
 *    hwb->dirty_exceeded is latched;
 *  - maintain a per-CPU dirtied-page counter (bdp_ratelimits) so even
 *    many tasks each dirtying few pages eventually trigger a balance
 *    (per the comment about the initial task->nr_dirtied_pause);
 *  - when current->nr_dirtied crosses the ratelimit, run the full
 *    hmdfs_balance_dirty_pages() path (call itself is filtered out —
 *    confirm against full source).
 */
402 struct hmdfs_sb_info *sbi = mapping->host->i_sb->s_fs_info; in hmdfs_balance_dirty_pages_ratelimited()
403 struct hmdfs_writeback *hwb = sbi->h_wb; in hmdfs_balance_dirty_pages_ratelimited()
407 if (!hwb->dirty_writeback_control) in hmdfs_balance_dirty_pages_ratelimited()
411 if (hwb->dirty_writeback_interval != 0) in hmdfs_balance_dirty_pages_ratelimited()
413 mapping->host->i_sb, in hmdfs_balance_dirty_pages_ratelimited()
414 hwb->dirty_writeback_interval * 10); in hmdfs_balance_dirty_pages_ratelimited()
416 ratelimit = current->nr_dirtied_pause; in hmdfs_balance_dirty_pages_ratelimited()
417 if (hwb->dirty_exceeded) in hmdfs_balance_dirty_pages_ratelimited()
425 * initial task->nr_dirtied_pause. in hmdfs_balance_dirty_pages_ratelimited()
428 bdp_ratelimits = this_cpu_ptr(hwb->bdp_ratelimits); in hmdfs_balance_dirty_pages_ratelimited()
432 if (unlikely(current->nr_dirtied >= ratelimit)) { in hmdfs_balance_dirty_pages_ratelimited()
434 } else if (unlikely(*bdp_ratelimits >= hwb->ratelimit_pages)) { in hmdfs_balance_dirty_pages_ratelimited()
440 if (unlikely(current->nr_dirtied >= ratelimit)) in hmdfs_balance_dirty_pages_ratelimited()
/*
 * hmdfs_destroy_writeback() (excerpt; lines 447-448 filtered out).
 * Teardown, in the safe order: guard against a never-initialized h_wb,
 * flush both delayed works so nothing re-queues, destroy the two
 * dedicated workqueues, free the per-CPU ratelimit counters, free the
 * struct and NULL the pointer to prevent use-after-free.
 */
446 if (!sbi->h_wb) in hmdfs_destroy_writeback()
449 flush_delayed_work(&sbi->h_wb->dirty_sb_writeback_work); in hmdfs_destroy_writeback()
450 flush_delayed_work(&sbi->h_wb->dirty_inode_writeback_work); in hmdfs_destroy_writeback()
451 destroy_workqueue(sbi->h_wb->dirty_sb_writeback_wq); in hmdfs_destroy_writeback()
452 destroy_workqueue(sbi->h_wb->dirty_inode_writeback_wq); in hmdfs_destroy_writeback()
453 free_percpu(sbi->h_wb->bdp_ratelimits); in hmdfs_destroy_writeback()
454 kfree(sbi->h_wb); in hmdfs_destroy_writeback()
455 sbi->h_wb = NULL; in hmdfs_destroy_writeback()
/*
 * hmdfs_init_writeback() (excerpt; the hwb allocation around lines
 * 463-467, the thresh calculation at line 476, the goto targets/labels
 * at 492-493, 497-499, 503-505, 507, 509, 511-512, 514 and the final
 * return are filtered out of this listing).
 * Visible logic: populate the freshly allocated hmdfs_writeback with
 * defaults (control on, default interval, the four byte limits, the
 * bandwidth-adjusted thresholds seeded from the static ones, latched
 * dirty_exceeded off, default ratelimit, auto-threshold on, bandwidth
 * stats min=ULONG_MAX/max=0 so the first sample sets both), init both
 * spinlocks and the pending-inode list, allocate the per-CPU dirtied
 * counter, create two single-threaded workqueues named with sbi->seq
 * for uniqueness, wire up the two delayed works, publish sbi->h_wb.
 * Error paths unwind in reverse order (workqueue, then percpu) —
 * the goto labels themselves are in the filtered-out lines.
 * ret defaults to -ENOMEM, covering every allocation failure.
 */
462 int ret = -ENOMEM; in hmdfs_init_writeback()
468 hwb->sbi = sbi; in hmdfs_init_writeback()
469 hwb->wb = &sbi->sb->s_bdi->wb; in hmdfs_init_writeback()
470 hwb->dirty_writeback_control = true; in hmdfs_init_writeback()
471 hwb->dirty_writeback_interval = HM_DEFAULT_WRITEBACK_INTERVAL; in hmdfs_init_writeback()
472 hwb->dirty_file_bg_bytes = HMDFS_FILE_BG_WB_BYTES; in hmdfs_init_writeback()
473 hwb->dirty_fs_bg_bytes = HMDFS_FS_BG_WB_BYTES; in hmdfs_init_writeback()
474 hwb->dirty_file_bytes = HMDFS_FILE_WB_BYTES; in hmdfs_init_writeback()
475 hwb->dirty_fs_bytes = HMDFS_FS_WB_BYTES; in hmdfs_init_writeback()
477 hwb->bw_file_thresh = hwb->dirty_file_thresh; in hmdfs_init_writeback()
478 hwb->bw_fs_thresh = hwb->dirty_fs_thresh; in hmdfs_init_writeback()
479 spin_lock_init(&hwb->inode_list_lock); in hmdfs_init_writeback()
480 INIT_LIST_HEAD(&hwb->inode_list_head); in hmdfs_init_writeback()
481 hwb->dirty_exceeded = false; in hmdfs_init_writeback()
482 hwb->ratelimit_pages = HMDFS_DEF_RATELIMIT_PAGES; in hmdfs_init_writeback()
483 hwb->dirty_auto_threshold = true; in hmdfs_init_writeback()
484 hwb->writeback_timelimit = HMDFS_DEF_WB_TIMELIMIT; in hmdfs_init_writeback()
485 hwb->bw_thresh_lowerlimit = HMDFS_BW_THRESH_DEF_LIMIT; in hmdfs_init_writeback()
486 spin_lock_init(&hwb->write_bandwidth_lock); in hmdfs_init_writeback()
487 hwb->avg_write_bandwidth = 0; in hmdfs_init_writeback()
488 hwb->max_write_bandwidth = 0; in hmdfs_init_writeback()
489 hwb->min_write_bandwidth = ULONG_MAX; in hmdfs_init_writeback()
490 hwb->bdp_ratelimits = alloc_percpu(int); in hmdfs_init_writeback()
491 if (!hwb->bdp_ratelimits) in hmdfs_init_writeback()
494 snprintf(name, sizeof(name), "dfs_ino_wb%u", sbi->seq); in hmdfs_init_writeback()
495 hwb->dirty_inode_writeback_wq = create_singlethread_workqueue(name); in hmdfs_init_writeback()
496 if (!hwb->dirty_inode_writeback_wq) { in hmdfs_init_writeback()
500 snprintf(name, sizeof(name), "dfs_sb_wb%u", sbi->seq); in hmdfs_init_writeback()
501 hwb->dirty_sb_writeback_wq = create_singlethread_workqueue(name); in hmdfs_init_writeback()
502 if (!hwb->dirty_sb_writeback_wq) { in hmdfs_init_writeback()
506 INIT_DELAYED_WORK(&hwb->dirty_sb_writeback_work, in hmdfs_init_writeback()
508 INIT_DELAYED_WORK(&hwb->dirty_inode_writeback_work, in hmdfs_init_writeback()
510 sbi->h_wb = hwb; in hmdfs_init_writeback()
513 destroy_workqueue(hwb->dirty_inode_writeback_wq); in hmdfs_init_writeback()
515 free_percpu(hwb->bdp_ratelimits); in hmdfs_init_writeback()