1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2016-2018 Christoph Hellwig.
34 * Fast and loose check if this write could update the on-disk inode size.
38 return ioend->io_offset + ioend->io_size > in xfs_ioend_is_append()
39 XFS_I(ioend->io_inode)->i_d.di_size; in xfs_ioend_is_append()
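Stitched together, the two matched lines form a small predicate. A reconstruction follows (the static inline boilerplate is an assumption); the check is "fast and loose" because i_d.di_size is sampled without the inode lock, and __xfs_setfilesize() below re-checks the size under XFS_ILOCK_EXCL before logging anything.

    static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
    {
            /* Racy by design: di_size is read unlocked as a pre-filter. */
            return ioend->io_offset + ioend->io_size >
                            XFS_I(ioend->io_inode)->i_d.di_size;
    }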
43 * Update on-disk file size now that data has been written to disk.
47 struct xfs_inode *ip, in __xfs_setfilesize() argument
54 xfs_ilock(ip, XFS_ILOCK_EXCL); in __xfs_setfilesize()
55 isize = xfs_new_eof(ip, offset + size); in __xfs_setfilesize()
57 xfs_iunlock(ip, XFS_ILOCK_EXCL); in __xfs_setfilesize()
62 trace_xfs_setfilesize(ip, offset, size); in __xfs_setfilesize()
64 ip->i_d.di_size = isize; in __xfs_setfilesize()
65 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); in __xfs_setfilesize()
66 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); in __xfs_setfilesize()
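The match skips the no-op branch between the EOF computation and the trace point. A hedged reconstruction, assuming xfs_new_eof() returns 0 when offset + size does not extend the on-disk size:

    /* Sketch: nothing to log if the write does not grow the file, so
     * the transaction handed in by the caller is cancelled instead of
     * committed. */
    if (!isize) {
            xfs_iunlock(ip, XFS_ILOCK_EXCL);
            xfs_trans_cancel(tp);
            return 0;
    }

On the logging path the helper presumably finishes by committing the transaction with xfs_trans_commit(tp).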
73 struct xfs_inode *ip, in xfs_setfilesize() argument
77 struct xfs_mount *mp = ip->i_mount; in xfs_setfilesize()
81 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp); in xfs_setfilesize()
85 return __xfs_setfilesize(ip, tp, offset, size); in xfs_setfilesize()
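Only the allocation and the tail call are matched; the connecting error check is the standard xfs_trans_alloc() pattern, sketched here:

    /* Sketch: allocate a pre-reserved fsync-timestamp transaction and
     * hand it to __xfs_setfilesize(), which commits or cancels it. */
    error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
    if (error)
            return error;   /* nothing to clean up on allocation failure */
    return __xfs_setfilesize(ip, tp, offset, size);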
93 struct xfs_inode *ip = XFS_I(ioend->io_inode); in xfs_setfilesize_ioend() local
94 struct xfs_trans *tp = ioend->io_private; in xfs_setfilesize_ioend()
102 __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS); in xfs_setfilesize_ioend()
110 return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size); in xfs_setfilesize_ioend()
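The matched lines omit the I/O-error leg between the freeze-protection fixup and the tail call; a hedged sketch of what presumably sits there:

    /* Sketch: a failed write must not move the on-disk size, so the
     * pending size-update transaction is cancelled (releasing its log
     * reservation) and the error is passed on. */
    if (error) {
            xfs_trans_cancel(tp);
            return error;
    }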
120 struct xfs_inode *ip = XFS_I(ioend->io_inode); in xfs_end_ioend() local
121 struct xfs_mount *mp = ip->i_mount; in xfs_end_ioend()
122 xfs_off_t offset = ioend->io_offset; in xfs_end_ioend()
123 size_t size = ioend->io_size; in xfs_end_ioend()
130 * task-wide nofs context for the following operations. in xfs_end_ioend()
135 * Just clean up the in-memory structures if the fs has been shut down. in xfs_end_ioend()
138 error = -EIO; in xfs_end_ioend()
143 * Clean up all COW blocks and underlying data fork delalloc blocks on in xfs_end_ioend()
145 * mapped to blocks in the COW fork and the associated pages are no in xfs_end_ioend()
146 * longer dirty. If we don't remove delalloc blocks here, they become in xfs_end_ioend()
149 error = blk_status_to_errno(ioend->io_bio->bi_status); in xfs_end_ioend()
151 if (ioend->io_flags & IOMAP_F_SHARED) { in xfs_end_ioend()
152 xfs_reflink_cancel_cow_range(ip, offset, size, true); in xfs_end_ioend()
153 xfs_bmap_punch_delalloc_range(ip, in xfs_end_ioend()
161 * Success: commit the COW or unwritten blocks if needed. in xfs_end_ioend()
163 if (ioend->io_flags & IOMAP_F_SHARED) in xfs_end_ioend()
164 error = xfs_reflink_end_cow(ip, offset, size); in xfs_end_ioend()
165 else if (ioend->io_type == IOMAP_UNWRITTEN) in xfs_end_ioend()
166 error = xfs_iomap_write_unwritten(ip, offset, size, false); in xfs_end_ioend()
169 error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); in xfs_end_ioend()
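The error-path punch call above is truncated mid-argument list; a hedged reconstruction of the byte-to-block conversion it presumably performs (XFS_B_TO_FSBT truncates and XFS_B_TO_FSB rounds up, so the punched range covers the whole failed write):

    /* Sketch: on I/O error, tear down the delalloc blocks backing the
     * failed COW write so they do not become stale now that their
     * pages are no longer dirty. */
    xfs_bmap_punch_delalloc_range(ip,
                    XFS_B_TO_FSBT(mp, offset),  /* first block, rounded down */
                    XFS_B_TO_FSB(mp, size));    /* block count, rounded up */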
186 if (!ioend->io_private) { in xfs_ioend_merge_private()
187 ioend->io_private = next->io_private; in xfs_ioend_merge_private()
188 next->io_private = NULL; in xfs_ioend_merge_private()
190 xfs_setfilesize_ioend(next, -ECANCELED); in xfs_ioend_merge_private()
199 struct xfs_inode *ip = in xfs_end_io() local
205 spin_lock_irqsave(&ip->i_ioend_lock, flags); in xfs_end_io()
206 list_replace_init(&ip->i_ioend_list, &tmp); in xfs_end_io()
207 spin_unlock_irqrestore(&ip->i_ioend_lock, flags); in xfs_end_io()
212 list_del_init(&ioend->io_list); in xfs_end_io()
221 ioend->io_type == IOMAP_UNWRITTEN || in xfs_ioend_needs_workqueue()
222 (ioend->io_flags & IOMAP_F_SHARED); in xfs_ioend_needs_workqueue()
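The dangling "||" on the first matched line implies a preceding operand; a hedged reconstruction of the full predicate, with the inferred first case:

    /* All three cases need a transaction at completion time, which
     * cannot run in bio completion (softirq) context, hence the
     * handoff to a workqueue. The xfs_ioend_is_append() operand is
     * inferred, not part of the match. */
    static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
    {
            return xfs_ioend_is_append(ioend) ||
                    ioend->io_type == IOMAP_UNWRITTEN ||
                    (ioend->io_flags & IOMAP_F_SHARED);
    }

xfs_prepare_ioend() further down presumably consults this predicate when it points ioend->io_bio->bi_end_io at xfs_end_bio.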
229 struct iomap_ioend *ioend = bio->bi_private; in xfs_end_bio()
230 struct xfs_inode *ip = XFS_I(ioend->io_inode); in xfs_end_bio() local
233 spin_lock_irqsave(&ip->i_ioend_lock, flags); in xfs_end_bio()
234 if (list_empty(&ip->i_ioend_list)) in xfs_end_bio()
235 WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue, in xfs_end_bio()
236 &ip->i_ioend_work)); in xfs_end_bio()
237 list_add_tail(&ioend->io_list, &ip->i_ioend_list); in xfs_end_bio()
238 spin_unlock_irqrestore(&ip->i_ioend_lock, flags); in xfs_end_bio()
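Why queue_work() fires only when the list transitions from empty: xfs_end_io() above splices the whole i_ioend_list under the same lock before processing it, so a non-empty list means a work item is already queued and will pick this ioend up too. A commented restatement of the invariant:

    /* Under i_ioend_lock, an empty i_ioend_list implies no xfs_end_io()
     * work can still be pending for this inode (the worker empties the
     * list before releasing the lock), so queue_work() must succeed;
     * the WARN_ON_ONCE() in the fragment above checks exactly that. */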
248 struct xfs_inode *ip, in xfs_imap_valid() argument
251 if (offset < wpc->iomap.offset || in xfs_imap_valid()
252 offset >= wpc->iomap.offset + wpc->iomap.length) in xfs_imap_valid()
259 if (wpc->iomap.flags & IOMAP_F_SHARED) in xfs_imap_valid()
267 * overlapping blocks. in xfs_imap_valid()
269 if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) in xfs_imap_valid()
271 if (xfs_inode_has_cow_data(ip) && in xfs_imap_valid()
272 XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) in xfs_imap_valid()
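The cached writeback mapping is validated against per-fork sequence counters. A minimal sketch of the scheme with hypothetical names (cached_map, cached_map_current): writers bump a fork's if_seq under the inode lock whenever its extent list changes, and readers compare the value they sampled at lookup time.

    struct cached_map {
            struct iomap    iomap;  /* cached block mapping */
            unsigned int    seq;    /* fork if_seq sampled at lookup */
    };

    static bool cached_map_current(const struct cached_map *cm,
                                   const unsigned int *fork_seq)
    {
            /* A mismatch means the extent list changed since lookup,
             * so the cached mapping must be discarded and refetched. */
            return cm->seq == READ_ONCE(*fork_seq);
    }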
279 * extent that maps offset_fsb in wpc->iomap.
288 struct xfs_inode *ip, in xfs_convert_blocks() argument
296 seq = &XFS_WPC(wpc)->cow_seq; in xfs_convert_blocks()
298 seq = &XFS_WPC(wpc)->data_seq; in xfs_convert_blocks()
302 * and put the result into wpc->iomap. Allocate in a loop because it in xfs_convert_blocks()
303 * may take several attempts to allocate real blocks for a contiguous in xfs_convert_blocks()
307 error = xfs_bmapi_convert_delalloc(ip, whichfork, offset, in xfs_convert_blocks()
308 &wpc->iomap, seq); in xfs_convert_blocks()
311 } while (wpc->iomap.offset + wpc->iomap.length <= offset); in xfs_convert_blocks()
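An illustrative walk-through with made-up numbers: if blocks [0, 16) of the file are delalloc and writeback targets block 8, the first xfs_bmapi_convert_delalloc() call might only obtain real blocks for [0, 4). wpc->iomap then still ends at or below the target offset, so the while condition holds and the conversion is retried; a second call that maps [4, 12) covers block 8 and the loop exits.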
322 struct xfs_inode *ip = XFS_I(inode); in xfs_map_blocks() local
323 struct xfs_mount *mp = ip->i_mount; in xfs_map_blocks()
335 return -EIO; in xfs_map_blocks()
338 * COW fork blocks can overlap data fork blocks even if the blocks in xfs_map_blocks()
352 if (xfs_imap_valid(wpc, ip, offset)) in xfs_map_blocks()
364 xfs_ilock(ip, XFS_ILOCK_SHARED); in xfs_map_blocks()
365 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || in xfs_map_blocks()
366 (ip->i_df.if_flags & XFS_IFEXTENTS)); in xfs_map_blocks()
372 if (xfs_inode_has_cow_data(ip) && in xfs_map_blocks()
373 xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap)) in xfs_map_blocks()
376 XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq); in xfs_map_blocks()
377 xfs_iunlock(ip, XFS_ILOCK_SHARED); in xfs_map_blocks()
385 * ->cow_seq. If the data mapping is still valid, we're done. in xfs_map_blocks()
387 if (xfs_imap_valid(wpc, ip, offset)) { in xfs_map_blocks()
388 xfs_iunlock(ip, XFS_ILOCK_SHARED); in xfs_map_blocks()
397 if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) in xfs_map_blocks()
399 XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq); in xfs_map_blocks()
400 xfs_iunlock(ip, XFS_ILOCK_SHARED); in xfs_map_blocks()
404 imap.br_blockcount = imap.br_startoff - offset_fsb; in xfs_map_blocks()
413 * subsequent blocks in the mapping; however, the requirement to treat in xfs_map_blocks()
418 imap.br_blockcount = cow_fsb - imap.br_startoff; in xfs_map_blocks()
425 xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0); in xfs_map_blocks()
426 trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap); in xfs_map_blocks()
429 error = xfs_convert_blocks(wpc, ip, whichfork, offset); in xfs_map_blocks()
438 if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++) in xfs_map_blocks()
440 ASSERT(error != -EAGAIN); in xfs_map_blocks()
447 * boundary again to force a re-lookup. in xfs_map_blocks()
452 if (cow_offset < wpc->iomap.offset + wpc->iomap.length) in xfs_map_blocks()
453 wpc->iomap.length = cow_offset - wpc->iomap.offset; in xfs_map_blocks()
456 ASSERT(wpc->iomap.offset <= offset); in xfs_map_blocks()
457 ASSERT(wpc->iomap.offset + wpc->iomap.length > offset); in xfs_map_blocks()
458 trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap); in xfs_map_blocks()
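A worked example for the COW-boundary trim a few lines up, with illustrative numbers: if the data fork lookup mapped bytes [0, 1M) but a COW extent starts at cow_offset = 512K, the map's length is clipped to 512K - 0 = 512K. Writeback past that boundary then fails xfs_imap_valid() and repeats the lookup, which now finds the COW fork extent first, preserving the COW-over-data precedence described earlier in xfs_map_blocks().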
472 * task-wide nofs context for the following operations. in xfs_prepare_ioend()
477 if (!status && (ioend->io_flags & IOMAP_F_SHARED)) { in xfs_prepare_ioend()
478 status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode), in xfs_prepare_ioend()
479 ioend->io_offset, ioend->io_size); in xfs_prepare_ioend()
485 ioend->io_bio->bi_end_io = xfs_end_bio; in xfs_prepare_ioend()
490 * If the page has delalloc blocks on it, we need to punch them out before we
495 * they are delalloc, we can do this without needing a transaction. Indeed - if
505 struct inode *inode = page->mapping->host; in xfs_discard_page()
506 struct xfs_inode *ip = XFS_I(inode); in xfs_discard_page() local
507 struct xfs_mount *mp = ip->i_mount; in xfs_discard_page()
518 page, ip->i_ino, fileoff); in xfs_discard_page()
520 error = xfs_bmap_punch_delalloc_range(ip, start_fsb, in xfs_discard_page()
521 i_blocks_per_page(inode, page) - pageoff_fsb); in xfs_discard_page()
525 iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff); in xfs_discard_page()
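Most of the block arithmetic feeding the punch call is not part of the match; a hedged reconstruction, assuming the standard XFS_B_TO_FSBT() byte-to-block helper (variable names are taken from the fragment, types are assumptions):

    unsigned int    pageoff = offset_in_page(fileoff);        /* byte offset in page */
    xfs_fileoff_t   start_fsb = XFS_B_TO_FSBT(mp, fileoff);   /* first block to punch */
    unsigned int    pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff); /* blocks before pageoff */

The punched length is then the page's block count minus the blocks preceding pageoff, matching the i_blocks_per_page(inode, page) - pageoff_fsb expression above.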
541 if (WARN_ON_ONCE(current->journal_info)) { in xfs_vm_writepage()
561 if (WARN_ON_ONCE(current->journal_info)) in xfs_vm_writepages()
564 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); in xfs_vm_writepages()
573 struct xfs_inode *ip = XFS_I(mapping->host); in xfs_dax_writepages() local
575 xfs_iflags_clear(ip, XFS_ITRUNCATED); in xfs_dax_writepages()
577 xfs_inode_buftarg(ip)->bt_daxdev, wbc); in xfs_dax_writepages()
585 struct xfs_inode *ip = XFS_I(mapping->host); in xfs_vm_bmap() local
587 trace_xfs_vm_bmap(ip); in xfs_vm_bmap()
590 * The swap code (ab-)uses ->bmap to get a block mapping and then in xfs_vm_bmap()
598 if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip)) in xfs_vm_bmap()
624 sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev; in xfs_iomap_swapfile_activate()
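The match only shows the backing device assignment; for context, a hedged sketch of how an iomap-based ->swap_activate typically finishes, assuming XFS's read-only xfs_read_iomap_ops table (span is the sector_t * argument of the ->swap_activate method):

    /* Sketch: the generic helper walks the file's extents through the
     * given iomap ops and builds the swap map, rejecting holes and
     * anything that would require COW at swap-out time. */
    return iomap_swapfile_activate(sis, swap_file, span, &xfs_read_iomap_ops);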