Lines Matching refs:inode

94 void __mark_inode_dirty(struct inode *inode, int flags)  in __mark_inode_dirty()  argument
96 struct super_block *sb = inode->i_sb; in __mark_inode_dirty()
104 sb->s_op->dirty_inode(inode); in __mark_inode_dirty()
114 if ((inode->i_state & flags) == flags) in __mark_inode_dirty()
121 if (!list_empty(&inode->i_dentry)) { in __mark_inode_dirty()
122 dentry = list_entry(inode->i_dentry.next, in __mark_inode_dirty()
128 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) in __mark_inode_dirty()
131 current->comm, task_pid_nr(current), inode->i_ino, in __mark_inode_dirty()
132 name, inode->i_sb->s_id); in __mark_inode_dirty()
136 if ((inode->i_state & flags) != flags) { in __mark_inode_dirty()
137 const int was_dirty = inode->i_state & I_DIRTY; in __mark_inode_dirty()
139 inode->i_state |= flags; in __mark_inode_dirty()
146 if (inode->i_state & I_SYNC) in __mark_inode_dirty()
153 if (!S_ISBLK(inode->i_mode)) { in __mark_inode_dirty()
154 if (hlist_unhashed(&inode->i_hash)) in __mark_inode_dirty()
157 if (inode->i_state & (I_FREEING|I_CLEAR)) in __mark_inode_dirty()
165 inode->dirtied_when = jiffies; in __mark_inode_dirty()
166 list_move(&inode->i_list, &sb->s_dirty); in __mark_inode_dirty()
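
The matches above all fall inside __mark_inode_dirty(), which records new dirty state on an inode and queues it for writeback. A minimal reconstruction stitched from those lines follows; the dirty_inode guard, the block_dump debug printk (the i_dentry/i_ino lines above), the inode_lock locking and the smp_mb() are filled in from the 2.6-era code and should be read as assumptions, not the verbatim function. Later sketches in this listing assume the same kernel-internal headers and context.

#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/spinlock.h>

/* Sketch only: reconstructed from the matched lines, not the full function. */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;

        /* Let the filesystem update its own metadata first (assumed guard). */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode);
        }

        smp_mb();

        /* Fast path: nothing new to record. */
        if ((inode->i_state & flags) == flags)
                return;

        /* (block_dump debug printk elided: see the i_dentry/i_ino matches.) */

        spin_lock(&inode_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /* An inode under writeback is requeued by the writer itself. */
                if (inode->i_state & I_SYNC)
                        goto out;

                /* Only hashed, live inodes (or blockdevs) are queued. */
                if (!S_ISBLK(inode->i_mode)) {
                        if (hlist_unhashed(&inode->i_hash))
                                goto out;
                }
                if (inode->i_state & (I_FREEING | I_CLEAR))
                        goto out;

                /* Newly dirtied: timestamp it and queue it for writeback. */
                if (!was_dirty) {
                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_list, &sb->s_dirty);
                }
        }
out:
        spin_unlock(&inode_lock);
}
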
175 static int write_inode(struct inode *inode, int sync) in write_inode() argument
177 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) in write_inode()
178 return inode->i_sb->s_op->write_inode(inode, sync); in write_inode()
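
The two write_inode() matches are essentially the whole helper: delegate to the superblock's ->write_inode method when one exists and the inode is not bad. A sketch, assuming the elided fallback simply returns 0:

/* Sketch: the matched lines plus an assumed "nothing to do" fallback. */
static int write_inode(struct inode *inode, int sync)
{
        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                return inode->i_sb->s_op->write_inode(inode, sync);
        return 0;
}
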
191 static void redirty_tail(struct inode *inode) in redirty_tail() argument
193 struct super_block *sb = inode->i_sb; in redirty_tail()
196 struct inode *tail_inode; in redirty_tail()
198 tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list); in redirty_tail()
199 if (!time_after_eq(inode->dirtied_when, in redirty_tail()
201 inode->dirtied_when = jiffies; in redirty_tail()
203 list_move(&inode->i_list, &sb->s_dirty); in redirty_tail()
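
redirty_tail() puts an inode back on s_dirty. The matches show the interesting detail: if the inode's dirtied_when stamp is older than that of the inode already at s_dirty.next, the stamp is refreshed to jiffies so the queue stays ordered by dirty time. A sketch filling in the unmatched list_empty() guard and the second half of the comparison:

/* Sketch: matched lines plus the assumed list_empty() guard. */
static void redirty_tail(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (!list_empty(&sb->s_dirty)) {
                struct inode *tail_inode;

                /* s_dirty.next holds the most recently dirtied inode. */
                tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
                if (!time_after_eq(inode->dirtied_when,
                                   tail_inode->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_list, &sb->s_dirty);
}
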
209 static void requeue_io(struct inode *inode) in requeue_io() argument
211 list_move(&inode->i_list, &inode->i_sb->s_more_io); in requeue_io()
214 static void inode_sync_complete(struct inode *inode) in inode_sync_complete() argument
220 wake_up_bit(&inode->i_state, __I_SYNC); in inode_sync_complete()
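
Two one-line helpers follow: requeue_io() parks an inode on the superblock's s_more_io list so it is revisited on a later writeback pass, and inode_sync_complete() wakes anyone sleeping on the __I_SYNC bit once writeback of the inode finishes. Sketches built around the single matched line of each; the memory barrier before the wakeup is assumed from the 2.6-era code:

/* Sketch: park the inode for a later writeback pass. */
static void requeue_io(struct inode *inode)
{
        list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

/* Sketch: wake __I_SYNC waiters once writeback of this inode completes. */
static void inode_sync_complete(struct inode *inode)
{
        smp_mb();       /* assumed: publish state changes before the wakeup */
        wake_up_bit(&inode->i_state, __I_SYNC);
}
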
231 struct inode *inode = list_entry(delaying_queue->prev, in move_expired_inodes() local
232 struct inode, i_list); in move_expired_inodes()
234 time_after(inode->dirtied_when, *older_than_this)) in move_expired_inodes()
236 list_move(&inode->i_list, dispatch_queue); in move_expired_inodes()
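
move_expired_inodes() drains inodes from a delaying queue onto a dispatch queue, oldest first, stopping at the first inode dirtied after the *older_than_this cutoff. A sketch around the matched lines, with the while loop and the NULL check on older_than_this assumed:

/* Sketch: move inodes dirtied before *older_than_this to the dispatch queue. */
static void move_expired_inodes(struct list_head *delaying_queue,
                                struct list_head *dispatch_queue,
                                unsigned long *older_than_this)
{
        while (!list_empty(delaying_queue)) {
                struct inode *inode = list_entry(delaying_queue->prev,
                                                 struct inode, i_list);

                /* Queue is time-ordered: stop at the first "young" inode. */
                if (older_than_this &&
                    time_after(inode->dirtied_when, *older_than_this))
                        break;
                list_move(&inode->i_list, dispatch_queue);
        }
}
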
269 __sync_single_inode(struct inode *inode, struct writeback_control *wbc) in __sync_single_inode() argument
272 struct address_space *mapping = inode->i_mapping; in __sync_single_inode()
276 BUG_ON(inode->i_state & I_SYNC); in __sync_single_inode()
277 WARN_ON(inode->i_state & I_NEW); in __sync_single_inode()
280 dirty = inode->i_state & I_DIRTY; in __sync_single_inode()
281 inode->i_state |= I_SYNC; in __sync_single_inode()
282 inode->i_state &= ~I_DIRTY; in __sync_single_inode()
290 int err = write_inode(inode, wait); in __sync_single_inode()
302 WARN_ON(inode->i_state & I_NEW); in __sync_single_inode()
303 inode->i_state &= ~I_SYNC; in __sync_single_inode()
304 if (!(inode->i_state & I_FREEING)) { in __sync_single_inode()
305 if (!(inode->i_state & I_DIRTY) && in __sync_single_inode()
327 inode->i_state |= I_DIRTY_PAGES; in __sync_single_inode()
332 requeue_io(inode); in __sync_single_inode()
337 redirty_tail(inode); in __sync_single_inode()
347 inode->i_state |= I_DIRTY_PAGES; in __sync_single_inode()
348 redirty_tail(inode); in __sync_single_inode()
350 } else if (inode->i_state & I_DIRTY) { in __sync_single_inode()
355 redirty_tail(inode); in __sync_single_inode()
356 } else if (atomic_read(&inode->i_count)) { in __sync_single_inode()
360 list_move(&inode->i_list, &inode_in_use); in __sync_single_inode()
365 list_move(&inode->i_list, &inode_unused); in __sync_single_inode()
368 inode_sync_complete(inode); in __sync_single_inode()
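
__sync_single_inode() is the core per-inode writeback step. The matches show its shape: snapshot and clear the dirty bits while setting I_SYNC, write the dirty pages (calling write_inode() only if metadata was dirty), then re-take the lock and decide where the inode goes next: requeued or redirtied if pages or metadata are still dirty, onto inode_in_use if it still has references, onto inode_unused otherwise, and finally wake __I_SYNC waiters. A condensed sketch; the do_writepages()/filemap_fdatawait() calls, the locking and the for_kupdate requeue policy are assumed from the surrounding 2.6-era code:

/* Condensed sketch: write one inode's pages and metadata, then requeue it. */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct address_space *mapping = inode->i_mapping;
        int wait = wbc->sync_mode == WB_SYNC_ALL;
        unsigned dirty;
        int ret;

        BUG_ON(inode->i_state & I_SYNC);
        WARN_ON(inode->i_state & I_NEW);

        /* Claim the inode for writeback; snapshot and clear its dirty bits. */
        dirty = inode->i_state & I_DIRTY;
        inode->i_state |= I_SYNC;
        inode->i_state &= ~I_DIRTY;

        spin_unlock(&inode_lock);

        ret = do_writepages(mapping, wbc);

        /* Write the inode itself only if its metadata was dirty. */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wait);
                if (ret == 0)
                        ret = err;
        }

        if (wait) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        spin_lock(&inode_lock);
        WARN_ON(inode->i_state & I_NEW);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & I_FREEING)) {
                if (!(inode->i_state & I_DIRTY) &&
                    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /* Some pages were left dirty: note that and requeue. */
                        inode->i_state |= I_DIRTY_PAGES;
                        if (wbc->for_kupdate && wbc->nr_to_write <= 0)
                                requeue_io(inode);      /* slice used up */
                        else
                                redirty_tail(inode);    /* retry later */
                } else if (inode->i_state & I_DIRTY) {
                        /* Redirtied while we were writing it out. */
                        redirty_tail(inode);
                } else if (atomic_read(&inode->i_count)) {
                        /* Clean and still referenced. */
                        list_move(&inode->i_list, &inode_in_use);
                } else {
                        /* Clean and unreferenced. */
                        list_move(&inode->i_list, &inode_unused);
                }
        }
        inode_sync_complete(inode);
        return ret;
}
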
378 __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) in __writeback_single_inode() argument
382 if (!atomic_read(&inode->i_count)) in __writeback_single_inode()
383 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); in __writeback_single_inode()
385 WARN_ON(inode->i_state & I_WILL_FREE); in __writeback_single_inode()
387 if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) { in __writeback_single_inode()
395 requeue_io(inode); in __writeback_single_inode()
402 if (inode->i_state & I_SYNC) { in __writeback_single_inode()
403 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); in __writeback_single_inode()
405 wqh = bit_waitqueue(&inode->i_state, __I_SYNC); in __writeback_single_inode()
411 } while (inode->i_state & I_SYNC); in __writeback_single_inode()
413 return __sync_single_inode(inode, wbc); in __writeback_single_inode()
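
__writeback_single_inode() serialises against writeback already in flight before calling __sync_single_inode(). The matches show both outcomes: for non-data-integrity writeback a busy (I_SYNC) inode is simply requeued to s_more_io, while WB_SYNC_ALL callers sleep on the __I_SYNC bit until the current writer finishes. A sketch; the wait-queue plumbing between the matched lines is assumed:

/* Sketch: wait for (or skip) an in-flight writeback, then sync the inode. */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        wait_queue_head_t *wqh;

        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE | I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
                /* Not data integrity: leave the busy inode for a later pass. */
                requeue_io(inode);
                return 0;
        }

        /* Data-integrity sync: wait for the in-flight writeback to finish. */
        if (inode->i_state & I_SYNC) {
                DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);

                wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
                do {
                        spin_unlock(&inode_lock);
                        __wait_on_bit(wqh, &wq, inode_wait,
                                      TASK_UNINTERRUPTIBLE);
                        spin_lock(&inode_lock);
                } while (inode->i_state & I_SYNC);
        }
        return __sync_single_inode(inode, wbc);
}
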
452 struct inode *inode = list_entry(sb->s_io.prev, in generic_sync_sb_inodes() local
453 struct inode, i_list); in generic_sync_sb_inodes()
454 struct address_space *mapping = inode->i_mapping; in generic_sync_sb_inodes()
459 redirty_tail(inode); in generic_sync_sb_inodes()
475 if (inode->i_state & I_NEW) { in generic_sync_sb_inodes()
476 requeue_io(inode); in generic_sync_sb_inodes()
484 requeue_io(inode); in generic_sync_sb_inodes()
491 requeue_io(inode); in generic_sync_sb_inodes()
496 if (time_after(inode->dirtied_when, start)) in generic_sync_sb_inodes()
503 BUG_ON(inode->i_state & I_FREEING); in generic_sync_sb_inodes()
504 __iget(inode); in generic_sync_sb_inodes()
506 __writeback_single_inode(inode, wbc); in generic_sync_sb_inodes()
514 redirty_tail(inode); in generic_sync_sb_inodes()
517 iput(inode); in generic_sync_sb_inodes()
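
The first group of generic_sync_sb_inodes() matches comes from its scan of the superblock's s_io queue: skip or requeue inodes that cannot be written right now (no writeback-capable backing device, still I_NEW, congested queue), stop at inodes dirtied after the pass started, otherwise pin the inode with __iget(), write it with __writeback_single_inode(), redirty it if no pages could be written, and drop the reference with iput() outside inode_lock. A condensed sketch of that loop only; the bdi-matching, pdflush exclusion and blockdev-superblock special cases are simplified, and `start` and the queue setup are taken as given:

        /* Condensed sketch of the s_io scan inside generic_sync_sb_inodes(). */
        while (!list_empty(&sb->s_io)) {
                struct inode *inode = list_entry(sb->s_io.prev,
                                                 struct inode, i_list);
                struct address_space *mapping = inode->i_mapping;
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                long pages_skipped;

                if (!bdi_cap_writeback_dirty(bdi)) {
                        /* Memory-backed device: nothing to write back. */
                        redirty_tail(inode);
                        continue;       /* real code may skip the whole sb */
                }
                if (inode->i_state & I_NEW) {
                        requeue_io(inode);
                        continue;
                }
                if (wbc->nonblocking && bdi_write_congested(bdi)) {
                        /* Congested queue: revisit on a later pass. */
                        wbc->encountered_congestion = 1;
                        requeue_io(inode);
                        continue;
                }
                /* Stop at inodes dirtied after this sync pass started. */
                if (time_after(inode->dirtied_when, start))
                        break;

                BUG_ON(inode->i_state & I_FREEING);
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                __writeback_single_inode(inode, wbc);
                if (wbc->pages_skipped != pages_skipped) {
                        /* No progress (e.g. locked buffers): retry later. */
                        redirty_tail(inode);
                }
                spin_unlock(&inode_lock);
                iput(inode);
                cond_resched();
                spin_lock(&inode_lock);
                if (wbc->nr_to_write <= 0)
                        break;
        }
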
529 struct inode *inode, *old_inode = NULL; in generic_sync_sb_inodes() local
538 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { in generic_sync_sb_inodes()
541 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) in generic_sync_sb_inodes()
543 mapping = inode->i_mapping; in generic_sync_sb_inodes()
546 __iget(inode); in generic_sync_sb_inodes()
557 old_inode = inode; in generic_sync_sb_inodes()
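
The second group of matches is the WB_SYNC_ALL tail of generic_sync_sb_inodes(): walk every inode on sb->s_inodes, not just the dirty lists, and wait for writeback on its mapping. The __iget()/old_inode dance holds a reference across the inode_lock drop and releases the previous inode only after the lock is dropped, because the final iput() may sleep. A sketch; the nrpages check and the filemap_fdatawait() call between the matched lines are assumed:

        /* Sketch of the data-integrity (WB_SYNC_ALL) walk over sb->s_inodes. */
        if (wbc->sync_mode == WB_SYNC_ALL) {
                struct inode *inode, *old_inode = NULL;

                list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                        struct address_space *mapping;

                        if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
                                continue;
                        mapping = inode->i_mapping;
                        if (mapping->nrpages == 0)
                                continue;
                        __iget(inode);
                        spin_unlock(&inode_lock);
                        /* iput() may sleep: drop the previous ref unlocked. */
                        iput(old_inode);
                        old_inode = inode;

                        filemap_fdatawait(mapping);
                        cond_resched();

                        spin_lock(&inode_lock);
                }
                spin_unlock(&inode_lock);
                iput(old_inode);
        }
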
721 int write_inode_now(struct inode *inode, int sync) in write_inode_now() argument
731 if (!mapping_cap_writeback_dirty(inode->i_mapping)) in write_inode_now()
736 ret = __writeback_single_inode(inode, &wbc); in write_inode_now()
739 inode_sync_wait(inode); in write_inode_now()
755 int sync_inode(struct inode *inode, struct writeback_control *wbc) in sync_inode() argument
760 ret = __writeback_single_inode(inode, wbc); in sync_inode()
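
write_inode_now() and sync_inode() are thin exported wrappers around __writeback_single_inode(). Sketches, with the writeback_control defaults, inode_lock locking and might_sleep() assumed from the 2.6-era code:

/* Sketch: synchronously write one inode, optionally waiting for completion. */
int write_inode_now(struct inode *inode, int sync)
{
        int ret;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
        };

        /* Backing stores that cannot write back dirty pages become a no-op. */
        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        spin_lock(&inode_lock);
        ret = __writeback_single_inode(inode, &wbc);
        spin_unlock(&inode_lock);
        if (sync)
                inode_sync_wait(inode);
        return ret;
}

/* Sketch: write an inode under a caller-supplied writeback_control. */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        spin_lock(&inode_lock);
        ret = __writeback_single_inode(inode, wbc);
        spin_unlock(&inode_lock);
        return ret;
}
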
783 int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what) in generic_osync_inode() argument
803 if ((inode->i_state & I_DIRTY) && in generic_osync_inode()
804 ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC))) in generic_osync_inode()
809 err2 = write_inode_now(inode, 1); in generic_osync_inode()
814 inode_sync_wait(inode); in generic_osync_inode()
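
generic_osync_inode() implements O_SYNC-style writeout for a mapping: flush data and buffers according to the `what` mask, then, as the matched lines show, write the inode itself via write_inode_now() if it is dirty and the caller asked for OSYNC_INODE (or the inode carries I_DIRTY_DATASYNC); otherwise just wait for any in-flight inode writeback. A condensed sketch; the OSYNC_DATA/OSYNC_METADATA stages before the matched lines are assumed:

/* Condensed sketch: flush data/buffers, then the inode itself if needed. */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
        int need_write_inode_now = 0;
        int err = 0, err2;

        if (what & OSYNC_DATA)
                err = filemap_fdatawrite(mapping);
        if (what & (OSYNC_METADATA | OSYNC_DATA)) {
                err2 = sync_mapping_buffers(mapping);
                if (!err)
                        err = err2;
        }
        if (what & OSYNC_DATA) {
                err2 = filemap_fdatawait(mapping);
                if (!err)
                        err = err2;
        }

        /* The matched lines: does the inode itself still need writing? */
        spin_lock(&inode_lock);
        if ((inode->i_state & I_DIRTY) &&
            ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
                need_write_inode_now = 1;
        spin_unlock(&inode_lock);

        if (need_write_inode_now) {
                err2 = write_inode_now(inode, 1);
                if (!err)
                        err = err2;
        } else
                inode_sync_wait(inode);

        return err;
}
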