Lines matching refs: work (the numbers at left are source line numbers in the kernel's fs/fs-writeback.c)

178 struct wb_writeback_work *work) in finish_writeback_work() argument
180 struct wb_completion *done = work->done; in finish_writeback_work()
182 if (work->auto_free) in finish_writeback_work()
183 kfree(work); in finish_writeback_work()
189 struct wb_writeback_work *work) in wb_queue_work() argument
191 trace_writeback_queue(wb, work); in wb_queue_work()
193 if (work->done) in wb_queue_work()
194 atomic_inc(&work->done->cnt); in wb_queue_work()
199 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
202 finish_writeback_work(wb, work); in wb_queue_work()
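
Taken together, lines 178-202 show the two halves of the work-item lifecycle: wb_queue_work() pins the optional shared completion (193-194) before publishing the item on wb->work_list (199), and finish_writeback_work() later drops that pin, freeing heap-allocated items first (182-183). Note the ordering at 180: work->done is read before kfree(), because auto_free items die right there. Below is a minimal userspace sketch of that ownership rule with simplified stand-in types; the kernel versions additionally hold wb->work_lock, fall back to finish_writeback_work() when the wb is unregistered (202), and wake the completion's waiters.

    #include <stdatomic.h>
    #include <stdlib.h>

    /* Simplified stand-ins, not the kernel definitions. */
    struct wb_completion { atomic_int cnt; };
    struct wb_writeback_work {
        struct wb_completion *done;      /* optional, shared by many items */
        int auto_free;                   /* heap item: free after running  */
        struct wb_writeback_work *next;  /* kernel: struct list_head list  */
    };
    struct bdi_writeback { struct wb_writeback_work *work_list; };

    /* Mirrors wb_queue_work() (189-199): pin the completion first, then
     * publish.  (Singly linked push here; the kernel appends with
     * list_add_tail() under wb->work_lock.) */
    static void wb_queue_work(struct bdi_writeback *wb,
                              struct wb_writeback_work *work)
    {
        if (work->done)
            atomic_fetch_add(&work->done->cnt, 1);  /* atomic_inc(), 194 */
        work->next = wb->work_list;
        wb->work_list = work;                       /* list_add_tail(), 199 */
    }

    /* Mirrors finish_writeback_work() (178-183): save work->done before
     * freeing, since auto_free items are gone after kfree(). */
    static void finish_writeback_work(struct wb_writeback_work *work)
    {
        struct wb_completion *done = work->done;    /* line 180 */

        if (work->auto_free)
            free(work);                             /* kfree(), 183 */
        if (done && atomic_fetch_sub(&done->cnt, 1) == 1) {
            /* last reference: kernel wakes the completion's waiters */
        }
    }
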
333 struct work_struct work; member
346 static void inode_switch_wbs_work_fn(struct work_struct *work) in inode_switch_wbs_work_fn() argument
349 container_of(work, struct inode_switch_wbs_context, work); in inode_switch_wbs_work_fn()
477 INIT_WORK(&isw->work, inode_switch_wbs_work_fn); in inode_switch_wbs_rcu_fn()
478 queue_work(isw_wq, &isw->work); in inode_switch_wbs_rcu_fn()
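
Line 333 embeds a struct work_struct inside inode_switch_wbs_context, and 346-478 show the standard workqueue idiom around it: INIT_WORK() binds the handler (477), queue_work() schedules it on isw_wq (478), and the handler climbs from the embedded member back to its context with container_of() (349). A self-contained sketch of the idiom; the payload field is invented for illustration, and the "queueing" here is just a direct call.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { void (*func)(struct work_struct *); };

    struct inode_switch_wbs_context {
        int nr_inodes;               /* invented payload */
        struct work_struct work;     /* embedded member, as at line 333 */
    };

    /* Same shape as inode_switch_wbs_work_fn() (346-349): the handler
     * receives only the embedded member and steps back out. */
    static void inode_switch_wbs_work_fn(struct work_struct *work)
    {
        struct inode_switch_wbs_context *isw =
            container_of(work, struct inode_switch_wbs_context, work);

        printf("switching %d inode(s) to a new wb\n", isw->nr_inodes);
    }

    int main(void)
    {
        struct inode_switch_wbs_context isw = { .nr_inodes = 3 };

        isw.work.func = inode_switch_wbs_work_fn;  /* INIT_WORK(), 477 */
        isw.work.func(&isw.work);                  /* queue_work(), 478 */
        return 0;
    }
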
857 struct wb_writeback_work *work; in bdi_split_work_to_wbs() local
875 work = kmalloc(sizeof(*work), GFP_ATOMIC); in bdi_split_work_to_wbs()
876 if (work) { in bdi_split_work_to_wbs()
877 *work = *base_work; in bdi_split_work_to_wbs()
878 work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
879 work->auto_free = 1; in bdi_split_work_to_wbs()
880 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
885 work = &fallback_work; in bdi_split_work_to_wbs()
886 *work = *base_work; in bdi_split_work_to_wbs()
887 work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
888 work->auto_free = 0; in bdi_split_work_to_wbs()
889 work->done = &fallback_work_done; in bdi_split_work_to_wbs()
891 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
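
Lines 857-891 are the allocation dance inside bdi_split_work_to_wbs(): each target wb normally gets its own heap clone of base_work (GFP_ATOMIC, auto_free = 1, freed by the flusher at 183), but if the non-sleeping allocation fails, the loop falls back to a single on-stack copy with auto_free = 0 and a private completion, which the caller must wait on before the stack slot can be reused. A hedged sketch of one loop iteration; the queue and wait calls are stubs standing in for wb_queue_work() and the kernel's completion wait.

    #include <stdlib.h>

    struct wb_completion { int cnt; };
    struct wb_writeback_work {
        long nr_pages;
        int auto_free;
        struct wb_completion *done;
    };

    static void wb_queue_work(struct wb_writeback_work *work) { (void)work; }
    static void wait_for_done(struct wb_completion *done) { (void)done; }

    /* Shape of the loop body at 875-891. */
    static void split_one(const struct wb_writeback_work *base_work,
                          long nr_pages)
    {
        struct wb_writeback_work fallback_work;
        struct wb_completion fallback_work_done = { 0 };
        struct wb_writeback_work *work;

        work = malloc(sizeof(*work));       /* kmalloc(GFP_ATOMIC), 875 */
        if (work) {
            *work = *base_work;             /* clone the template, 877 */
            work->nr_pages = nr_pages;
            work->auto_free = 1;            /* flusher frees it, 879 */
            wb_queue_work(work);
            return;
        }

        /* Allocation failed: reuse the one on-stack copy (885-891). */
        work = &fallback_work;
        *work = *base_work;
        work->nr_pages = nr_pages;
        work->auto_free = 0;                /* must NOT be freed, 888 */
        work->done = &fallback_work_done;   /* so we can wait, 889 */
        wb_queue_work(work);
        wait_for_done(&fallback_work_done); /* before the stack is reused */
    }
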
990 struct wb_writeback_work *work; in wb_start_writeback() local
999 work = kzalloc(sizeof(*work), GFP_ATOMIC); in wb_start_writeback()
1000 if (!work) { in wb_start_writeback()
1006 work->sync_mode = WB_SYNC_NONE; in wb_start_writeback()
1007 work->nr_pages = nr_pages; in wb_start_writeback()
1008 work->range_cyclic = range_cyclic; in wb_start_writeback()
1009 work->reason = reason; in wb_start_writeback()
1010 work->auto_free = 1; in wb_start_writeback()
1012 wb_queue_work(wb, work); in wb_start_writeback()
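
wb_start_writeback() (990-1012) is the fire-and-forget variant of the same pattern: a zeroed heap item with no completion, auto_free = 1 so the flusher releases it after running it, and WB_SYNC_NONE because this path is best effort rather than data integrity. The elided branch at 1000 handles allocation failure; the sketch below simply returns there.

    #include <stdlib.h>

    enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL };

    struct wb_writeback_work {
        long nr_pages;
        enum writeback_sync_modes sync_mode;
        int range_cyclic;
        int reason;                        /* enum wb_reason in the kernel */
        int auto_free;
    };

    static void wb_queue_work(struct wb_writeback_work *work) { (void)work; }

    /* Shape of wb_start_writeback() (990-1012). */
    static void wb_start_writeback(long nr_pages, int range_cyclic, int reason)
    {
        struct wb_writeback_work *work;

        work = calloc(1, sizeof(*work));   /* kzalloc(GFP_ATOMIC), 999 */
        if (!work)
            return;                        /* kernel handles this at 1000 */

        work->sync_mode    = WB_SYNC_NONE; /* best effort, 1006 */
        work->nr_pages     = nr_pages;
        work->range_cyclic = range_cyclic;
        work->reason       = reason;
        work->auto_free    = 1;            /* freed by the flusher, 1010 */
        wb_queue_work(work);               /* line 1012 */
    }
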
1178 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, in queue_io() argument
1187 if (!work->for_sync) in queue_io()
1193 trace_writeback_queue_io(wb, work, dirtied_before, moved); in queue_io()
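
queue_io() (1178-1193) refills the per-wb I/O list: inodes whose dirty timestamp has passed the dirtied_before cutoff migrate onto b_io for writeout, and the !work->for_sync test at 1187 loosens the cutoff for timestamp-only (lazytime) dirty inodes, which a data-integrity sync must instead take in full. A rough sketch of that expiry move, with invented minimal list and inode types:

    /* Invented minimal types; the kernel uses struct inode entries on
     * the wb->b_dirty / b_dirty_time / b_io list_heads. */
    struct inode_stub { unsigned long dirtied_when; struct inode_stub *next; };
    struct wb_lists { struct inode_stub *b_dirty, *b_dirty_time, *b_io; };

    #define LAZYTIME_EXPIRE 12000UL   /* illustrative interval, in "jiffies" */

    /* Move inodes dirtied at or before 'cutoff' from *src to *dst;
     * returns how many moved (move_expired_inodes() in the kernel). */
    static int move_expired(struct inode_stub **src, struct inode_stub **dst,
                            unsigned long cutoff)
    {
        int moved = 0;

        while (*src) {
            if ((*src)->dirtied_when <= cutoff) {
                struct inode_stub *in = *src;

                *src = in->next;
                in->next = *dst;
                *dst = in;
                moved++;
            } else {
                src = &(*src)->next;
            }
        }
        return moved;
    }

    /* Shape of queue_io() (1178-1193). */
    static void queue_io(struct wb_lists *wb, int for_sync,
                         unsigned long dirtied_before, unsigned long now)
    {
        unsigned long time_expire = dirtied_before;
        int moved;

        moved = move_expired(&wb->b_dirty, &wb->b_io, dirtied_before);
        if (!for_sync)                      /* line 1187 */
            time_expire = now - LAZYTIME_EXPIRE;
        moved += move_expired(&wb->b_dirty_time, &wb->b_io, time_expire);
        (void)moved;                        /* traced at line 1193 */
    }
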
1475 struct wb_writeback_work *work) in writeback_chunk_size() argument
1492 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) in writeback_chunk_size()
1497 pages = min(pages, work->nr_pages); in writeback_chunk_size()
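
writeback_chunk_size() (1475-1497) decides how many pages one inode may write before the loop moves on. The two visible branches carry the policy: WB_SYNC_ALL and tagged_writepages passes (1492) get an effectively unbounded chunk so a sync is never livelocked rotating among inodes, while background passes get a chunk derived from measured write bandwidth, clamped to the work's remaining budget (1497). A simplified sketch; the floor value and bandwidth heuristic here are illustrative, not the kernel's exact formula.

    #include <limits.h>

    enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL };
    #define MIN_WRITEBACK_PAGES 1024       /* illustrative floor only */

    struct wb_writeback_work {
        long nr_pages;
        enum writeback_sync_modes sync_mode;
        unsigned tagged_writepages;
    };

    /* Shape of writeback_chunk_size() (1475-1497). */
    static long writeback_chunk_size(long avg_write_bandwidth,
                                     const struct wb_writeback_work *work)
    {
        long pages;

        if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
            return LONG_MAX;               /* no round-robin for sync, 1492 */

        pages = avg_write_bandwidth / 2;   /* about 0.5 s of device I/O */
        if (pages < MIN_WRITEBACK_PAGES)
            pages = MIN_WRITEBACK_PAGES;
        return pages < work->nr_pages ? pages : work->nr_pages;  /* 1497 */
    }
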
1516 struct wb_writeback_work *work) in writeback_sb_inodes() argument
1519 .sync_mode = work->sync_mode, in writeback_sb_inodes()
1520 .tagged_writepages = work->tagged_writepages, in writeback_sb_inodes()
1521 .for_kupdate = work->for_kupdate, in writeback_sb_inodes()
1522 .for_background = work->for_background, in writeback_sb_inodes()
1523 .for_sync = work->for_sync, in writeback_sb_inodes()
1524 .range_cyclic = work->range_cyclic, in writeback_sb_inodes()
1537 if (work->sb) { in writeback_sb_inodes()
1598 write_chunk = writeback_chunk_size(wb, work); in writeback_sb_inodes()
1609 work->nr_pages -= write_chunk - wbc.nr_to_write; in writeback_sb_inodes()
1649 if (work->nr_pages <= 0) in writeback_sb_inodes()
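
writeback_sb_inodes() (1516-1649) translates the work item into a per-inode writeback_control (1519-1524), asks writeback_chunk_size() for a quota (1598), and settles accounts at 1609: wbc.nr_to_write comes back holding whatever was left unused, so the work is charged only for pages actually consumed, until the budget check at 1649 ends the pass. A small sketch of just that accounting, with a fake per-inode writer:

    struct writeback_control { long nr_to_write; };
    struct wb_writeback_work { long nr_pages; };

    /* Fake inode writer: consumes up to wbc->nr_to_write pages and
     * decrements it by the amount written, the kernel's contract. */
    static void write_one_inode(struct writeback_control *wbc, long dirty)
    {
        long wrote = dirty < wbc->nr_to_write ? dirty : wbc->nr_to_write;

        wbc->nr_to_write -= wrote;
    }

    /* Accounting shape of lines 1598-1649. */
    static void writeback_sb_inodes(struct wb_writeback_work *work,
                                    const long *dirty_pages, int n_inodes)
    {
        for (int i = 0; i < n_inodes; i++) {
            long write_chunk = 16;    /* writeback_chunk_size(), 1598 */
            struct writeback_control wbc = { .nr_to_write = write_chunk };

            write_one_inode(&wbc, dirty_pages[i]);
            /* line 1609: charge only what was actually consumed */
            work->nr_pages -= write_chunk - wbc.nr_to_write;
            if (work->nr_pages <= 0)  /* line 1649: budget exhausted */
                break;
        }
    }
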
1657 struct wb_writeback_work *work) in __writeback_inodes_wb() argument
1675 wrote += writeback_sb_inodes(sb, wb, work); in __writeback_inodes_wb()
1682 if (work->nr_pages <= 0) in __writeback_inodes_wb()
1693 struct wb_writeback_work work = { in writeback_inodes_wb() local
1704 queue_io(wb, &work, jiffies); in writeback_inodes_wb()
1705 __writeback_inodes_wb(wb, &work); in writeback_inodes_wb()
1709 return nr_pages - work.nr_pages; in writeback_inodes_wb()
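
writeback_inodes_wb() (1693-1709) is the synchronous, on-stack flavor: the work item never escapes this frame, so it needs neither auto_free nor a completion, and because every layer below decrements work.nr_pages in place, "requested minus remaining" at 1709 yields the number of pages written. A compact sketch; the inner call is a stub that pretends to flush a fixed number of pages.

    struct wb_writeback_work { long nr_pages; };

    /* Stub for __writeback_inodes_wb(): pretend we flushed 'did' pages
     * by decrementing the budget, as the real layers below do. */
    static void __writeback_inodes_wb(struct wb_writeback_work *work, long did)
    {
        work->nr_pages -= did < work->nr_pages ? did : work->nr_pages;
    }

    /* Shape of writeback_inodes_wb() (1693-1709). */
    static long writeback_inodes_wb(long nr_pages)
    {
        struct wb_writeback_work work = { .nr_pages = nr_pages };

        __writeback_inodes_wb(&work, 100);  /* queue_io + writeout, 1704-1705 */
        return nr_pages - work.nr_pages;    /* pages written, 1709 */
    }
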
1728 struct wb_writeback_work *work) in wb_writeback() argument
1731 long nr_pages = work->nr_pages; in wb_writeback()
1743 if (work->nr_pages <= 0) in wb_writeback()
1752 if ((work->for_background || work->for_kupdate) && in wb_writeback()
1760 if (work->for_background && !wb_over_bg_thresh(wb)) in wb_writeback()
1769 if (work->for_kupdate) { in wb_writeback()
1772 } else if (work->for_background) in wb_writeback()
1775 trace_writeback_start(wb, work); in wb_writeback()
1777 queue_io(wb, work, dirtied_before); in wb_writeback()
1778 if (work->sb) in wb_writeback()
1779 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
1781 progress = __writeback_inodes_wb(wb, work); in wb_writeback()
1782 trace_writeback_written(wb, work); in wb_writeback()
1807 trace_writeback_wait(wb, work); in wb_writeback()
1819 return nr_pages - work->nr_pages; in wb_writeback()
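
wb_writeback() (1728-1819) is the engine every work item runs through, and the visible lines are its stop conditions and dispatch: quit when the budget is spent (1743), when explicit queued work should preempt a background or kupdate pass (1752), or when background writeback has pushed dirty state back under threshold (1760); otherwise refill b_io via queue_io() (1777), write one sb or all of them (1778-1781), and if no progress was made wait for an inode in flight (1807). A condensed skeleton of that loop, with the wb and work state flattened into plain fields:

    /* Flattened state; in the kernel these live on the wb and the work. */
    struct wb_state {
        long budget;            /* work->nr_pages                  */
        int for_background;     /* work->for_background            */
        int for_kupdate;        /* work->for_kupdate               */
        int queued_work;        /* !list_empty(&wb->work_list)     */
        int over_bg_thresh;     /* wb_over_bg_thresh(wb)           */
    };

    static long write_some(struct wb_state *s)   /* stub writeback pass */
    {
        long chunk = s->budget < 8 ? s->budget : 8;

        s->budget -= chunk;
        return chunk;
    }

    /* Skeleton of wb_writeback() (1731-1819). */
    static long wb_writeback(struct wb_state *s)
    {
        long nr_pages = s->budget;               /* line 1731 */

        for (;;) {
            if (s->budget <= 0)                  /* line 1743 */
                break;
            if ((s->for_background || s->for_kupdate) && s->queued_work)
                break;                           /* line 1752 */
            if (s->for_background && !s->over_bg_thresh)
                break;                           /* line 1760 */
            if (!write_some(s)) {                /* queue_io + 1778-1781 */
                /* kernel: wait for an inode in flight, retry (1807) */
                break;
            }
        }
        return nr_pages - s->budget;             /* pages written, 1819 */
    }
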
1827 struct wb_writeback_work *work = NULL; in get_next_work_item() local
1831 work = list_entry(wb->work_list.next, in get_next_work_item()
1833 list_del_init(&work->list); in get_next_work_item()
1836 return work; in get_next_work_item()
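
get_next_work_item() (1827-1836) pops the head of wb->work_list. The detail worth noting is list_del_init() at 1833 rather than a plain delete: it leaves the removed node self-linked, so later list operations on the same node stay well defined. A sketch over a minimal circular intrusive list shaped like the kernel's list.h (list_entry() at 1831 is the same macro as container_of):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_head { struct list_head *prev, *next; };

    static void list_del_init(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        e->next = e->prev = e;        /* self-linked, hence the "init" */
    }

    struct wb_writeback_work { struct list_head list; };

    /* Shape of get_next_work_item() (1827-1836); the kernel holds
     * wb->work_lock around this. */
    static struct wb_writeback_work *get_next_work_item(struct list_head *wl)
    {
        struct wb_writeback_work *work = NULL;            /* line 1827 */

        if (wl->next != wl) {         /* list non-empty */
            work = container_of(wl->next,
                                struct wb_writeback_work, list);  /* 1831 */
            list_del_init(&work->list);                           /* 1833 */
        }
        return work;                                              /* 1836 */
    }
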
1854 struct wb_writeback_work work = { in wb_check_background_flush() local
1862 return wb_writeback(wb, &work); in wb_check_background_flush()
1888 struct wb_writeback_work work = { in wb_check_old_data_flush() local
1896 return wb_writeback(wb, &work); in wb_check_old_data_flush()
1907 struct wb_writeback_work *work; in wb_do_writeback() local
1911 while ((work = get_next_work_item(wb)) != NULL) { in wb_do_writeback()
1912 trace_writeback_exec(wb, work); in wb_do_writeback()
1913 wrote += wb_writeback(wb, work); in wb_do_writeback()
1914 finish_writeback_work(wb, work); in wb_do_writeback()
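
wb_do_writeback() (1907-1914) is where the pieces meet: pop each queued item (1911), run it through wb_writeback() (1913), then hand it to finish_writeback_work() (1914) so completions are signalled and auto_free items are released. The background and old-data checks just above (1854-1896) run after the queue drains, each building its own on-stack work. A sketch of the drain loop, with the queue pop and the writeback pass stubbed:

    #include <stdlib.h>

    struct wb_writeback_work { long nr_pages; int auto_free; };

    /* Stubs: an empty queue and a trivial writeback pass. */
    static struct wb_writeback_work *get_next_work_item(void) { return NULL; }
    static long wb_writeback(struct wb_writeback_work *w) { return w->nr_pages; }

    static void finish_writeback_work(struct wb_writeback_work *work)
    {
        if (work->auto_free)
            free(work);     /* see lines 182-183 */
    }

    /* Drain loop of wb_do_writeback() (1907-1914). */
    static long wb_do_writeback(void)
    {
        struct wb_writeback_work *work;
        long wrote = 0;

        while ((work = get_next_work_item()) != NULL) {   /* line 1911 */
            wrote += wb_writeback(work);                  /* line 1913 */
            finish_writeback_work(work);                  /* line 1914 */
        }
        /* kernel: then the kupdate and background checks (1854-1896)
         * run with on-stack work items. */
        return wrote;
    }
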
1931 void wb_workfn(struct work_struct *work) in wb_workfn() argument
1933 struct bdi_writeback *wb = container_of(to_delayed_work(work), in wb_workfn()
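
wb_workfn() (1931-1933) is the flusher thread's entry point, and line 1933 packs two hops: the callback receives a struct work_struct that is embedded in a struct delayed_work, which is in turn embedded in the bdi_writeback as its dwork member, so to_delayed_work() and container_of() are chained to climb back out. A sketch of the nesting:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };
    struct delayed_work { struct work_struct work; /* + a timer */ };

    /* to_delayed_work() is container_of over the inner member. */
    static struct delayed_work *to_delayed_work(struct work_struct *w)
    {
        return container_of(w, struct delayed_work, work);
    }

    struct bdi_writeback { long nr_dirty; struct delayed_work dwork; };

    /* Shape of wb_workfn() (1931-1933): two hops from the raw pointer
     * back to the owning bdi_writeback. */
    static void wb_workfn(struct work_struct *work)
    {
        struct bdi_writeback *wb =
            container_of(to_delayed_work(work), struct bdi_writeback, dwork);

        (void)wb;   /* kernel: run wb_do_writeback(wb), maybe re-arm dwork */
    }
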
2259 struct wb_writeback_work work = { in __writeback_inodes_sb_nr() local
2273 bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy); in __writeback_inodes_sb_nr()
2355 struct wb_writeback_work work = { in sync_inodes_sb() local
2377 bdi_split_work_to_wbs(bdi, &work, false); in sync_inodes_sb()
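
The final hits, __writeback_inodes_sb_nr() (2259-2273) and sync_inodes_sb() (2355-2377), are the caller side of the split seen at 857: an on-stack base work, safe precisely because bdi_split_work_to_wbs() clones it per target wb, paired with a completion the caller waits on before its frame unwinds. A hedged sketch of that caller shape; the split and wait calls are stubs, and sync_inodes_sb() is the WB_SYNC_ALL case.

    enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL };

    struct wb_completion { int cnt; };
    struct wb_writeback_work {
        long nr_pages;
        enum writeback_sync_modes sync_mode;
        struct wb_completion *done;
    };

    /* Stubs standing in for the split and the completion wait. */
    static void bdi_split_work_to_wbs(struct wb_writeback_work *base_work)
    {
        (void)base_work;
    }
    static void wb_wait_for_completion(struct wb_completion *done)
    {
        (void)done;
    }

    /* Caller shape shared by __writeback_inodes_sb_nr() (2259-2273) and
     * sync_inodes_sb() (2355-2377): stack work, split, wait. */
    static void sync_like(long nr_pages, enum writeback_sync_modes mode)
    {
        struct wb_completion done = { 0 };
        struct wb_writeback_work work = {
            .nr_pages  = nr_pages,
            .sync_mode = mode,            /* WB_SYNC_ALL in sync_inodes_sb() */
            .done      = &done,
        };

        bdi_split_work_to_wbs(&work);     /* clones 'work' per wb, 2273/2377 */
        wb_wait_for_completion(&done);    /* safe: the clones outlive us */
    }
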