Lines Matching refs:inode

70 static inline struct inode *wb_inode(struct list_head *head)  in wb_inode()
72 return list_entry(head, struct inode, i_io_list); in wb_inode()
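These matches appear to come from fs/fs-writeback.c in the Linux kernel. wb_inode() at lines 70-72 recovers the inode that embeds a given list_head: list_entry() is container_of() underneath, subtracting the member's offset from the member's address. A minimal userspace sketch of the same idiom, using toy types rather than the kernel's headers:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace stand-ins for the kernel's list_head and container_of(). */
    struct list_head { struct list_head *prev, *next; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    /* Toy inode embedding an i_io_list, like the real struct inode. */
    struct toy_inode {
        unsigned long i_state;
        struct list_head i_io_list;
    };

    static struct toy_inode *wb_inode(struct list_head *head)
    {
        return list_entry(head, struct toy_inode, i_io_list);
    }

    int main(void)
    {
        struct toy_inode ino = { .i_state = 42 };
        /* Given only the embedded list_head, get the inode back. */
        printf("%lu\n", wb_inode(&ino.i_io_list)->i_state); /* 42 */
        return 0;
    }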
118 static bool inode_io_list_move_locked(struct inode *inode, in inode_io_list_move_locked() argument
123 assert_spin_locked(&inode->i_lock); in inode_io_list_move_locked()
125 list_move(&inode->i_io_list, head); in inode_io_list_move_locked()
234 / sizeof(struct inode *))
239 void __inode_attach_wb(struct inode *inode, struct page *page) in __inode_attach_wb() argument
241 struct backing_dev_info *bdi = inode_to_bdi(inode); in __inode_attach_wb()
244 if (inode_cgwb_enabled(inode)) { in __inode_attach_wb()
265 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) in __inode_attach_wb()
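The cmpxchg(&inode->i_wb, NULL, wb) at line 265 publishes the writeback structure first-writer-wins: of two racing tasks, exactly one installs its wb and the loser discards its candidate (the real code drops the loser's reference with wb_put()). A hedged userspace analogue using C11 atomics; names here are illustrative, not kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    struct wb { int id; };

    /* i_wb starts NULL; the first successful CAS publishes a wb. */
    static _Atomic(struct wb *) i_wb;

    /* Returns whichever wb ended up attached, ours or a racer's. */
    static struct wb *attach_wb(struct wb *candidate)
    {
        struct wb *expected = NULL;

        if (!atomic_compare_exchange_strong(&i_wb, &expected, candidate))
            return expected;   /* lost the race; the kernel would wb_put() here */
        return candidate;      /* won: candidate is now attached */
    }

    int main(void)
    {
        struct wb a = { 1 }, b = { 2 };
        printf("%d\n", attach_wb(&a)->id);  /* 1: first attach wins */
        printf("%d\n", attach_wb(&b)->id);  /* 1: second attach is a no-op */
        return 0;
    }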
278 static void inode_cgwb_move_to_attached(struct inode *inode, in inode_cgwb_move_to_attached() argument
282 assert_spin_locked(&inode->i_lock); in inode_cgwb_move_to_attached()
284 inode->i_state &= ~I_SYNC_QUEUED; in inode_cgwb_move_to_attached()
286 list_move(&inode->i_io_list, &wb->b_attached); in inode_cgwb_move_to_attached()
288 list_del_init(&inode->i_io_list); in inode_cgwb_move_to_attached()
301 locked_inode_to_wb_and_lock_list(struct inode *inode) in locked_inode_to_wb_and_lock_list() argument
302 __releases(&inode->i_lock) in locked_inode_to_wb_and_lock_list()
306 struct bdi_writeback *wb = inode_to_wb(inode); in locked_inode_to_wb_and_lock_list()
315 spin_unlock(&inode->i_lock); in locked_inode_to_wb_and_lock_list()
319 if (likely(wb == inode->i_wb)) { in locked_inode_to_wb_and_lock_list()
327 spin_lock(&inode->i_lock); in locked_inode_to_wb_and_lock_list()
338 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode) in inode_to_wb_and_lock_list() argument
341 spin_lock(&inode->i_lock); in inode_to_wb_and_lock_list()
342 return locked_inode_to_wb_and_lock_list(inode); in inode_to_wb_and_lock_list()
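locked_inode_to_wb_and_lock_list() (lines 301-327) is a lock-ordering dance: wb->list_lock is taken before inode->i_lock in this file, so a path that already holds i_lock must drop it, take the list lock, and then recheck wb == inode->i_wb (line 319) in case the inode was switched to another wb in the unlocked window. A pthread sketch of the drop-lock-revalidate loop; toy types, and the real code additionally pins wb with wb_get()/wb_put() so it cannot vanish while unlocked:

    #include <pthread.h>

    struct wb { pthread_mutex_t list_lock; };
    struct ino {
        pthread_mutex_t i_lock;
        struct wb *i_wb;       /* may be switched by a racing task */
    };

    /* Enter with ino->i_lock held; return with wb->list_lock held instead. */
    static struct wb *lock_list_for(struct ino *ino)
    {
        for (;;) {
            struct wb *wb = ino->i_wb;

            pthread_mutex_unlock(&ino->i_lock);  /* respect the lock order */
            pthread_mutex_lock(&wb->list_lock);

            if (wb == ino->i_wb)                 /* no switch raced us */
                return wb;

            /* i_wb changed while we were unlocked: retry from i_lock. */
            pthread_mutex_unlock(&wb->list_lock);
            pthread_mutex_lock(&ino->i_lock);
        }
    }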
357 struct inode *inodes[];
370 static bool inode_do_switch_wbs(struct inode *inode, in inode_do_switch_wbs() argument
374 struct address_space *mapping = inode->i_mapping; in inode_do_switch_wbs()
379 spin_lock(&inode->i_lock); in inode_do_switch_wbs()
386 if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE))) in inode_do_switch_wbs()
389 trace_inode_switch_wbs(inode, old_wb, new_wb); in inode_do_switch_wbs()
425 if (!list_empty(&inode->i_io_list)) { in inode_do_switch_wbs()
426 inode->i_wb = new_wb; in inode_do_switch_wbs()
428 if (inode->i_state & I_DIRTY_ALL) { in inode_do_switch_wbs()
429 struct inode *pos; in inode_do_switch_wbs()
432 if (time_after_eq(inode->dirtied_when, in inode_do_switch_wbs()
435 inode_io_list_move_locked(inode, new_wb, in inode_do_switch_wbs()
438 inode_cgwb_move_to_attached(inode, new_wb); in inode_do_switch_wbs()
441 inode->i_wb = new_wb; in inode_do_switch_wbs()
445 inode->i_wb_frn_winner = 0; in inode_do_switch_wbs()
446 inode->i_wb_frn_avg_time = 0; in inode_do_switch_wbs()
447 inode->i_wb_frn_history = 0; in inode_do_switch_wbs()
454 smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH); in inode_do_switch_wbs()
457 spin_unlock(&inode->i_lock); in inode_do_switch_wbs()
470 struct inode **inodep; in inode_switch_wbs_work_fn()
519 static bool inode_prepare_wbs_switch(struct inode *inode, in inode_prepare_wbs_switch() argument
530 if (IS_DAX(inode)) in inode_prepare_wbs_switch()
534 spin_lock(&inode->i_lock); in inode_prepare_wbs_switch()
535 if (!(inode->i_sb->s_flags & SB_ACTIVE) || in inode_prepare_wbs_switch()
536 inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) || in inode_prepare_wbs_switch()
537 inode_to_wb(inode) == new_wb) { in inode_prepare_wbs_switch()
538 spin_unlock(&inode->i_lock); in inode_prepare_wbs_switch()
541 inode->i_state |= I_WB_SWITCH; in inode_prepare_wbs_switch()
542 __iget(inode); in inode_prepare_wbs_switch()
543 spin_unlock(&inode->i_lock); in inode_prepare_wbs_switch()
556 static void inode_switch_wbs(struct inode *inode, int new_wb_id) in inode_switch_wbs() argument
558 struct backing_dev_info *bdi = inode_to_bdi(inode); in inode_switch_wbs()
563 if (inode->i_state & I_WB_SWITCH) in inode_switch_wbs()
570 isw = kzalloc(sizeof(*isw) + 2 * sizeof(struct inode *), GFP_ATOMIC); in inode_switch_wbs()
590 if (!inode_prepare_wbs_switch(inode, isw->new_wb)) in inode_switch_wbs()
593 isw->inodes[0] = inode; in inode_switch_wbs()
615 struct inode *inode; in isw_prepare_wbs_switch() local
617 list_for_each_entry(inode, list, i_io_list) { in isw_prepare_wbs_switch()
618 if (!inode_prepare_wbs_switch(inode, isw->new_wb)) in isw_prepare_wbs_switch()
621 isw->inodes[*nr] = inode; in isw_prepare_wbs_switch()
646 sizeof(struct inode *), GFP_KERNEL); in cleanup_offline_cgwb()
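The switch context ends in a C99 flexible array member (struct inode *inodes[] at line 357), so the allocations at lines 570 and 646 size a single block as header plus N trailing pointers. The same idiom in plain C, with a hypothetical struct mirroring the kzalloc arithmetic:

    #include <stdlib.h>

    struct ino;                       /* opaque: we only store pointers */

    struct switch_ctx {
        int nr;
        struct ino *inodes[];         /* flexible array member */
    };

    /* One allocation covers the header and n pointer slots, like
     * kzalloc(sizeof(*isw) + n * sizeof(struct inode *), ...). */
    static struct switch_ctx *ctx_alloc(int n)
    {
        struct switch_ctx *c = calloc(1, sizeof(*c) + n * sizeof(c->inodes[0]));
        if (c)
            c->nr = n;
        return c;
    }

    int main(void)
    {
        struct switch_ctx *c = ctx_alloc(2);  /* like the 2-slot case at 570 */
        free(c);
        return 0;
    }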
707 struct inode *inode) in wbc_attach_and_unlock_inode() argument
709 if (!inode_cgwb_enabled(inode)) { in wbc_attach_and_unlock_inode()
710 spin_unlock(&inode->i_lock); in wbc_attach_and_unlock_inode()
714 wbc->wb = inode_to_wb(inode); in wbc_attach_and_unlock_inode()
715 wbc->inode = inode; in wbc_attach_and_unlock_inode()
718 wbc->wb_lcand_id = inode->i_wb_frn_winner; in wbc_attach_and_unlock_inode()
725 spin_unlock(&inode->i_lock); in wbc_attach_and_unlock_inode()
735 inode_switch_wbs(inode, wbc->wb_id); in wbc_attach_and_unlock_inode()
779 struct inode *inode = wbc->inode; in wbc_detach_inode() local
787 history = inode->i_wb_frn_history; in wbc_detach_inode()
788 avg_time = inode->i_wb_frn_avg_time; in wbc_detach_inode()
836 trace_inode_foreign_history(inode, wbc, history); in wbc_detach_inode()
846 inode_switch_wbs(inode, max_id); in wbc_detach_inode()
853 inode->i_wb_frn_winner = max_id; in wbc_detach_inode()
854 inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX); in wbc_detach_inode()
855 inode->i_wb_frn_history = history; in wbc_detach_inode()
928 int inode_congested(struct inode *inode, int cong_bits) in inode_congested() argument
934 if (inode && inode_to_wb_is_valid(inode)) { in inode_congested()
939 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); in inode_congested()
941 unlocked_inode_to_wb_end(inode, &lock_cookie); in inode_congested()
945 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); in inode_congested()
1182 static void inode_cgwb_move_to_attached(struct inode *inode, in inode_cgwb_move_to_attached() argument
1186 assert_spin_locked(&inode->i_lock); in inode_cgwb_move_to_attached()
1188 inode->i_state &= ~I_SYNC_QUEUED; in inode_cgwb_move_to_attached()
1189 list_del_init(&inode->i_io_list); in inode_cgwb_move_to_attached()
1194 locked_inode_to_wb_and_lock_list(struct inode *inode) in locked_inode_to_wb_and_lock_list() argument
1195 __releases(&inode->i_lock) in locked_inode_to_wb_and_lock_list()
1198 struct bdi_writeback *wb = inode_to_wb(inode); in locked_inode_to_wb_and_lock_list()
1200 spin_unlock(&inode->i_lock); in locked_inode_to_wb_and_lock_list()
1205 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode) in inode_to_wb_and_lock_list() argument
1208 struct bdi_writeback *wb = inode_to_wb(inode); in inode_to_wb_and_lock_list()
1287 void inode_io_list_del(struct inode *inode) in inode_io_list_del() argument
1291 wb = inode_to_wb_and_lock_list(inode); in inode_io_list_del()
1292 spin_lock(&inode->i_lock); in inode_io_list_del()
1294 inode->i_state &= ~I_SYNC_QUEUED; in inode_io_list_del()
1295 list_del_init(&inode->i_io_list); in inode_io_list_del()
1298 spin_unlock(&inode->i_lock); in inode_io_list_del()
1306 void sb_mark_inode_writeback(struct inode *inode) in sb_mark_inode_writeback() argument
1308 struct super_block *sb = inode->i_sb; in sb_mark_inode_writeback()
1311 if (list_empty(&inode->i_wb_list)) { in sb_mark_inode_writeback()
1313 if (list_empty(&inode->i_wb_list)) { in sb_mark_inode_writeback()
1314 list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb); in sb_mark_inode_writeback()
1315 trace_sb_mark_inode_writeback(inode); in sb_mark_inode_writeback()
1324 void sb_clear_inode_writeback(struct inode *inode) in sb_clear_inode_writeback() argument
1326 struct super_block *sb = inode->i_sb; in sb_clear_inode_writeback()
1329 if (!list_empty(&inode->i_wb_list)) { in sb_clear_inode_writeback()
1331 if (!list_empty(&inode->i_wb_list)) { in sb_clear_inode_writeback()
1332 list_del_init(&inode->i_wb_list); in sb_clear_inode_writeback()
1333 trace_sb_clear_inode_writeback(inode); in sb_clear_inode_writeback()
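sb_mark_inode_writeback() and sb_clear_inode_writeback() each test list_empty(&inode->i_wb_list) twice (lines 1311/1313 and 1329/1331): once without the lock as a cheap fast path, and again under the lock before touching the list, because the unlocked check can race. A userspace sketch of the double-checked list update; a node counts as "empty" when it points at itself, and the kernel guards the real list with s_inode_wblist_lock:

    #include <pthread.h>
    #include <stdbool.h>

    struct node { struct node *prev, *next; };

    static bool node_empty(const struct node *n) { return n->next == n; }

    static pthread_mutex_t wblist_lock = PTHREAD_MUTEX_INITIALIZER;

    static void mark_writeback(struct node *list, struct node *n)
    {
        if (!node_empty(n))           /* unlocked fast path: already queued */
            return;
        pthread_mutex_lock(&wblist_lock);
        if (node_empty(n)) {          /* recheck under the lock */
            n->prev = list->prev;     /* list_add_tail */
            n->next = list;
            list->prev->next = n;
            list->prev = n;
        }
        pthread_mutex_unlock(&wblist_lock);
    }

    int main(void)
    {
        struct node list = { &list, &list }, n = { &n, &n };
        mark_writeback(&list, &n);
        return list.next == &n ? 0 : 1;
    }

The unlocked read is tolerable only because the locked recheck repeats it before any mutation.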
1348 static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) in redirty_tail_locked() argument
1350 assert_spin_locked(&inode->i_lock); in redirty_tail_locked()
1353 struct inode *tail; in redirty_tail_locked()
1356 if (time_before(inode->dirtied_when, tail->dirtied_when)) in redirty_tail_locked()
1357 inode->dirtied_when = jiffies; in redirty_tail_locked()
1359 inode_io_list_move_locked(inode, wb, &wb->b_dirty); in redirty_tail_locked()
1360 inode->i_state &= ~I_SYNC_QUEUED; in redirty_tail_locked()
1363 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) in redirty_tail() argument
1365 spin_lock(&inode->i_lock); in redirty_tail()
1366 redirty_tail_locked(inode, wb); in redirty_tail()
1367 spin_unlock(&inode->i_lock); in redirty_tail()
1373 static void requeue_io(struct inode *inode, struct bdi_writeback *wb) in requeue_io() argument
1375 inode_io_list_move_locked(inode, wb, &wb->b_more_io); in requeue_io()
1378 static void inode_sync_complete(struct inode *inode) in inode_sync_complete() argument
1380 inode->i_state &= ~I_SYNC; in inode_sync_complete()
1382 inode_add_lru(inode); in inode_sync_complete()
1385 wake_up_bit(&inode->i_state, __I_SYNC); in inode_sync_complete()
1388 static bool inode_dirtied_after(struct inode *inode, unsigned long t) in inode_dirtied_after() argument
1390 bool ret = time_after(inode->dirtied_when, t); in inode_dirtied_after()
1398 ret = ret && time_before_eq(inode->dirtied_when, jiffies); in inode_dirtied_after()
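inode_dirtied_after() (lines 1388-1398) relies on time_after()/time_before_eq(), which compare jiffies safely across counter wraparound by doing unsigned subtraction and testing the sign; the extra time_before_eq(dirtied_when, jiffies) clamp rejects timestamps that would otherwise look like the far future. A small demonstration of the wraparound-safe comparison, using an illustrative macro built on the same trick:

    #include <stdio.h>

    /* a is "after" b iff (long)((b) - (a)) < 0: unsigned subtraction,
     * signed test, which stays correct when the counter wraps. */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long near_wrap = ~0UL - 5;  /* just before wraparound */
        unsigned long wrapped   = 5;         /* just after wraparound */
        printf("%d\n", time_after(wrapped, near_wrap)); /* 1: later despite smaller */
        printf("%d\n", time_after(near_wrap, wrapped)); /* 0 */
        return 0;
    }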
1416 struct inode *inode; in move_expired_inodes() local
1421 inode = wb_inode(delaying_queue->prev); in move_expired_inodes()
1422 if (inode_dirtied_after(inode, dirtied_before)) in move_expired_inodes()
1424 spin_lock(&inode->i_lock); in move_expired_inodes()
1425 list_move(&inode->i_io_list, &tmp); in move_expired_inodes()
1427 inode->i_state |= I_SYNC_QUEUED; in move_expired_inodes()
1428 spin_unlock(&inode->i_lock); in move_expired_inodes()
1429 if (sb_is_blkdev_sb(inode->i_sb)) in move_expired_inodes()
1431 if (sb && sb != inode->i_sb) in move_expired_inodes()
1433 sb = inode->i_sb; in move_expired_inodes()
1451 inode = wb_inode(pos); in move_expired_inodes()
1452 if (inode->i_sb == sb) in move_expired_inodes()
1453 list_move(&inode->i_io_list, dispatch_queue); in move_expired_inodes()
1489 static int write_inode(struct inode *inode, struct writeback_control *wbc) in write_inode() argument
1493 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) { in write_inode()
1494 trace_writeback_write_inode_start(inode, wbc); in write_inode()
1495 ret = inode->i_sb->s_op->write_inode(inode, wbc); in write_inode()
1496 trace_writeback_write_inode(inode, wbc); in write_inode()
1506 static void __inode_wait_for_writeback(struct inode *inode) in __inode_wait_for_writeback() argument
1507 __releases(inode->i_lock) in __inode_wait_for_writeback()
1508 __acquires(inode->i_lock) in __inode_wait_for_writeback()
1510 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); in __inode_wait_for_writeback()
1513 wqh = bit_waitqueue(&inode->i_state, __I_SYNC); in __inode_wait_for_writeback()
1514 while (inode->i_state & I_SYNC) { in __inode_wait_for_writeback()
1515 spin_unlock(&inode->i_lock); in __inode_wait_for_writeback()
1518 spin_lock(&inode->i_lock); in __inode_wait_for_writeback()
1525 void inode_wait_for_writeback(struct inode *inode) in inode_wait_for_writeback() argument
1527 spin_lock(&inode->i_lock); in inode_wait_for_writeback()
1528 __inode_wait_for_writeback(inode); in inode_wait_for_writeback()
1529 spin_unlock(&inode->i_lock); in inode_wait_for_writeback()
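__inode_wait_for_writeback() (lines 1506-1518) sleeps on the __I_SYNC bit of i_state: drop i_lock, block until the flusher clears I_SYNC and calls wake_up_bit() (line 1385 in inode_sync_complete()), retake the lock, and loop because the bit may be set again. A condition-variable analogue of that wait-bit handshake; a userspace sketch, not the kernel's wait_on_bit machinery:

    #include <pthread.h>

    #define I_SYNC 0x1UL

    struct ino {
        pthread_mutex_t i_lock;
        pthread_cond_t  sync_wq;   /* stands in for the i_state bit waitqueue */
        unsigned long   i_state;
    };

    /* Called with i_lock held; returns with it held and I_SYNC clear. */
    static void inode_wait_for_writeback(struct ino *ino)
    {
        while (ino->i_state & I_SYNC)
            pthread_cond_wait(&ino->sync_wq, &ino->i_lock); /* drops, then retakes i_lock */
    }

    /* Flusher side, with i_lock held: end writeback and wake waiters,
     * mirroring inode_sync_complete(). */
    static void inode_sync_complete(struct ino *ino)
    {
        ino->i_state &= ~I_SYNC;
        pthread_cond_broadcast(&ino->sync_wq);
    }

    int main(void)
    {
        struct ino ino = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, I_SYNC };
        pthread_mutex_lock(&ino.i_lock);
        inode_sync_complete(&ino);        /* flusher finished first, here */
        inode_wait_for_writeback(&ino);   /* returns at once: bit is clear */
        pthread_mutex_unlock(&ino.i_lock);
        return 0;
    }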
1537 static void inode_sleep_on_writeback(struct inode *inode) in inode_sleep_on_writeback() argument
1538 __releases(inode->i_lock) in inode_sleep_on_writeback()
1541 wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC); in inode_sleep_on_writeback()
1545 sleep = inode->i_state & I_SYNC; in inode_sleep_on_writeback()
1546 spin_unlock(&inode->i_lock); in inode_sleep_on_writeback()
1560 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, in requeue_inode() argument
1563 if (inode->i_state & I_FREEING) in requeue_inode()
1571 if ((inode->i_state & I_DIRTY) && in requeue_inode()
1573 inode->dirtied_when = jiffies; in requeue_inode()
1582 if (inode->i_state & I_DIRTY_ALL) in requeue_inode()
1583 redirty_tail_locked(inode, wb); in requeue_inode()
1585 inode_cgwb_move_to_attached(inode, wb); in requeue_inode()
1589 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) { in requeue_inode()
1596 requeue_io(inode, wb); in requeue_inode()
1605 redirty_tail_locked(inode, wb); in requeue_inode()
1607 } else if (inode->i_state & I_DIRTY) { in requeue_inode()
1613 redirty_tail_locked(inode, wb); in requeue_inode()
1614 } else if (inode->i_state & I_DIRTY_TIME) { in requeue_inode()
1615 inode->dirtied_when = jiffies; in requeue_inode()
1616 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); in requeue_inode()
1617 inode->i_state &= ~I_SYNC_QUEUED; in requeue_inode()
1620 inode_cgwb_move_to_attached(inode, wb); in requeue_inode()
1636 __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) in __writeback_single_inode() argument
1638 struct address_space *mapping = inode->i_mapping; in __writeback_single_inode()
1643 WARN_ON(!(inode->i_state & I_SYNC)); in __writeback_single_inode()
1645 trace_writeback_single_inode_start(inode, wbc, nr_to_write); in __writeback_single_inode()
1667 if ((inode->i_state & I_DIRTY_TIME) && in __writeback_single_inode()
1669 time_after(jiffies, inode->dirtied_time_when + in __writeback_single_inode()
1671 trace_writeback_lazytime(inode); in __writeback_single_inode()
1672 mark_inode_dirty_sync(inode); in __writeback_single_inode()
1681 spin_lock(&inode->i_lock); in __writeback_single_inode()
1682 dirty = inode->i_state & I_DIRTY; in __writeback_single_inode()
1683 inode->i_state &= ~dirty; in __writeback_single_inode()
1699 inode->i_state |= I_DIRTY_PAGES; in __writeback_single_inode()
1701 spin_unlock(&inode->i_lock); in __writeback_single_inode()
1705 int err = write_inode(inode, wbc); in __writeback_single_inode()
1709 trace_writeback_single_inode(inode, wbc, nr_to_write); in __writeback_single_inode()
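__writeback_single_inode() snapshots the dirty bits and clears them under i_lock (lines 1681-1683), then calls write_inode() with the lock dropped (line 1705); a task that re-dirties the inode meanwhile simply sets the bits again for a later pass. The grab-and-clear idiom in miniature, with toy flags and types:

    #include <pthread.h>

    #define I_DIRTY_SYNC     0x1UL
    #define I_DIRTY_DATASYNC 0x2UL
    #define I_DIRTY          (I_DIRTY_SYNC | I_DIRTY_DATASYNC)

    struct ino {
        pthread_mutex_t i_lock;
        unsigned long   i_state;
    };

    /* Snapshot-and-clear under the lock, write out without it; concurrent
     * dirtiers only set bits that the next writeback pass will pick up. */
    static void writeback_one(struct ino *ino, void (*write_inode)(struct ino *))
    {
        unsigned long dirty;

        pthread_mutex_lock(&ino->i_lock);
        dirty = ino->i_state & I_DIRTY;
        ino->i_state &= ~dirty;
        pthread_mutex_unlock(&ino->i_lock);

        if (dirty)
            write_inode(ino);
    }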
1722 static int writeback_single_inode(struct inode *inode, in writeback_single_inode() argument
1728 spin_lock(&inode->i_lock); in writeback_single_inode()
1729 if (!atomic_read(&inode->i_count)) in writeback_single_inode()
1730 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); in writeback_single_inode()
1732 WARN_ON(inode->i_state & I_WILL_FREE); in writeback_single_inode()
1734 if (inode->i_state & I_SYNC) { in writeback_single_inode()
1743 __inode_wait_for_writeback(inode); in writeback_single_inode()
1745 WARN_ON(inode->i_state & I_SYNC); in writeback_single_inode()
1753 if (!(inode->i_state & I_DIRTY_ALL) && in writeback_single_inode()
1755 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) in writeback_single_inode()
1757 inode->i_state |= I_SYNC; in writeback_single_inode()
1758 wbc_attach_and_unlock_inode(wbc, inode); in writeback_single_inode()
1760 ret = __writeback_single_inode(inode, wbc); in writeback_single_inode()
1764 wb = inode_to_wb_and_lock_list(inode); in writeback_single_inode()
1765 spin_lock(&inode->i_lock); in writeback_single_inode()
1770 if (!(inode->i_state & I_FREEING)) { in writeback_single_inode()
1776 if (!(inode->i_state & I_DIRTY_ALL)) in writeback_single_inode()
1777 inode_cgwb_move_to_attached(inode, wb); in writeback_single_inode()
1778 else if (!(inode->i_state & I_SYNC_QUEUED)) { in writeback_single_inode()
1779 if ((inode->i_state & I_DIRTY)) in writeback_single_inode()
1780 redirty_tail_locked(inode, wb); in writeback_single_inode()
1781 else if (inode->i_state & I_DIRTY_TIME) { in writeback_single_inode()
1782 inode->dirtied_when = jiffies; in writeback_single_inode()
1783 inode_io_list_move_locked(inode, in writeback_single_inode()
1791 inode_sync_complete(inode); in writeback_single_inode()
1793 spin_unlock(&inode->i_lock); in writeback_single_inode()
1856 struct inode *inode = wb_inode(wb->b_io.prev); in writeback_sb_inodes() local
1860 if (inode->i_sb != sb) { in writeback_sb_inodes()
1867 redirty_tail(inode, wb); in writeback_sb_inodes()
1884 spin_lock(&inode->i_lock); in writeback_sb_inodes()
1885 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { in writeback_sb_inodes()
1886 redirty_tail_locked(inode, wb); in writeback_sb_inodes()
1887 spin_unlock(&inode->i_lock); in writeback_sb_inodes()
1890 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { in writeback_sb_inodes()
1900 requeue_io(inode, wb); in writeback_sb_inodes()
1901 spin_unlock(&inode->i_lock); in writeback_sb_inodes()
1902 trace_writeback_sb_inodes_requeue(inode); in writeback_sb_inodes()
1912 if (inode->i_state & I_SYNC) { in writeback_sb_inodes()
1914 inode_sleep_on_writeback(inode); in writeback_sb_inodes()
1919 inode->i_state |= I_SYNC; in writeback_sb_inodes()
1920 wbc_attach_and_unlock_inode(&wbc, inode); in writeback_sb_inodes()
1930 __writeback_single_inode(inode, &wbc); in writeback_sb_inodes()
1955 tmp_wb = inode_to_wb_and_lock_list(inode); in writeback_sb_inodes()
1956 spin_lock(&inode->i_lock); in writeback_sb_inodes()
1957 if (!(inode->i_state & I_DIRTY_ALL)) in writeback_sb_inodes()
1959 requeue_inode(inode, tmp_wb, &wbc); in writeback_sb_inodes()
1960 inode_sync_complete(inode); in writeback_sb_inodes()
1961 spin_unlock(&inode->i_lock); in writeback_sb_inodes()
1989 struct inode *inode = wb_inode(wb->b_io.prev); in __writeback_inodes_wb() local
1990 struct super_block *sb = inode->i_sb; in __writeback_inodes_wb()
1998 redirty_tail(inode, wb); in __writeback_inodes_wb()
2058 struct inode *inode; in wb_writeback() local
2130 inode = wb_inode(wb->b_more_io.prev); in wb_writeback()
2131 spin_lock(&inode->i_lock); in wb_writeback()
2134 inode_sleep_on_writeback(inode); in wb_writeback()
2432 void __mark_inode_dirty(struct inode *inode, int flags) in __mark_inode_dirty() argument
2434 struct super_block *sb = inode->i_sb; in __mark_inode_dirty()
2438 trace_writeback_mark_inode_dirty(inode, flags); in __mark_inode_dirty()
2446 if (inode->i_state & I_DIRTY_TIME) { in __mark_inode_dirty()
2447 spin_lock(&inode->i_lock); in __mark_inode_dirty()
2448 if (inode->i_state & I_DIRTY_TIME) { in __mark_inode_dirty()
2449 inode->i_state &= ~I_DIRTY_TIME; in __mark_inode_dirty()
2452 spin_unlock(&inode->i_lock); in __mark_inode_dirty()
2462 trace_writeback_dirty_inode_start(inode, flags); in __mark_inode_dirty()
2464 sb->s_op->dirty_inode(inode, in __mark_inode_dirty()
2466 trace_writeback_dirty_inode(inode, flags); in __mark_inode_dirty()
2486 if ((inode->i_state & flags) == flags) in __mark_inode_dirty()
2489 spin_lock(&inode->i_lock); in __mark_inode_dirty()
2490 if ((inode->i_state & flags) != flags) { in __mark_inode_dirty()
2491 const int was_dirty = inode->i_state & I_DIRTY; in __mark_inode_dirty()
2493 inode_attach_wb(inode, NULL); in __mark_inode_dirty()
2495 inode->i_state |= flags; in __mark_inode_dirty()
2504 wb = locked_inode_to_wb_and_lock_list(inode); in __mark_inode_dirty()
2505 spin_lock(&inode->i_lock); in __mark_inode_dirty()
2514 if (inode->i_state & I_SYNC_QUEUED) in __mark_inode_dirty()
2521 if (!S_ISBLK(inode->i_mode)) { in __mark_inode_dirty()
2522 if (inode_unhashed(inode)) in __mark_inode_dirty()
2525 if (inode->i_state & I_FREEING) in __mark_inode_dirty()
2536 inode->dirtied_when = jiffies; in __mark_inode_dirty()
2538 inode->dirtied_time_when = jiffies; in __mark_inode_dirty()
2540 if (inode->i_state & I_DIRTY) in __mark_inode_dirty()
2545 wakeup_bdi = inode_io_list_move_locked(inode, wb, in __mark_inode_dirty()
2549 spin_unlock(&inode->i_lock); in __mark_inode_dirty()
2550 trace_writeback_dirty_inode_enqueue(inode); in __mark_inode_dirty()
2567 spin_unlock(&inode->i_lock); in __mark_inode_dirty()
2613 struct inode *inode = list_first_entry(&sync_list, struct inode, in wait_sb_inodes() local
2615 struct address_space *mapping = inode->i_mapping; in wait_sb_inodes()
2623 list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb); in wait_sb_inodes()
2635 spin_lock(&inode->i_lock); in wait_sb_inodes()
2636 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { in wait_sb_inodes()
2637 spin_unlock(&inode->i_lock); in wait_sb_inodes()
2642 __iget(inode); in wait_sb_inodes()
2643 spin_unlock(&inode->i_lock); in wait_sb_inodes()
2655 iput(inode); in wait_sb_inodes()
2787 int write_inode_now(struct inode *inode, int sync) in write_inode_now() argument
2796 if (!mapping_can_writeback(inode->i_mapping)) in write_inode_now()
2800 return writeback_single_inode(inode, &wbc); in write_inode_now()
2813 int sync_inode_metadata(struct inode *inode, int wait) in sync_inode_metadata() argument
2820 return writeback_single_inode(inode, &wbc); in sync_inode_metadata()