1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_mount.h"
25 #include "xfs_da_format.h"
26 #include "xfs_da_btree.h"
27 #include "xfs_inode.h"
28 #include "xfs_trans.h"
29 #include "xfs_inode_item.h"
30 #include "xfs_bmap.h"
31 #include "xfs_bmap_util.h"
32 #include "xfs_error.h"
33 #include "xfs_dir2.h"
34 #include "xfs_dir2_priv.h"
35 #include "xfs_ioctl.h"
36 #include "xfs_trace.h"
37 #include "xfs_log.h"
38 #include "xfs_icache.h"
39 #include "xfs_pnfs.h"
40 
41 #include <linux/dcache.h>
42 #include <linux/falloc.h>
43 #include <linux/pagevec.h>
44 #include <linux/backing-dev.h>
45 
46 static const struct vm_operations_struct xfs_file_vm_ops;
47 
48 /*
49  * Locking primitives for read and write IO paths to ensure we consistently use
50  * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
51  */
52 static inline void
53 xfs_rw_ilock(
54 	struct xfs_inode	*ip,
55 	int			type)
56 {
57 	if (type & XFS_IOLOCK_EXCL)
58 		mutex_lock(&VFS_I(ip)->i_mutex);
59 	xfs_ilock(ip, type);
60 }
61 
62 static inline void
63 xfs_rw_iunlock(
64 	struct xfs_inode	*ip,
65 	int			type)
66 {
67 	xfs_iunlock(ip, type);
68 	if (type & XFS_IOLOCK_EXCL)
69 		mutex_unlock(&VFS_I(ip)->i_mutex);
70 }
71 
72 static inline void
73 xfs_rw_ilock_demote(
74 	struct xfs_inode	*ip,
75 	int			type)
76 {
77 	xfs_ilock_demote(ip, type);
78 	if (type & XFS_IOLOCK_EXCL)
79 		mutex_unlock(&VFS_I(ip)->i_mutex);
80 }
81 
82 /*
83  * xfs_iozero clears the specified range supplied via the page cache (except in
84  * the DAX case). Writes through the page cache will allocate blocks over holes,
85  * though the callers usually map the holes first and avoid them. If a block is
86  * not completely zeroed, then it will be read from disk before being partially
87  * zeroed.
88  *
89  * In the DAX case, we can just directly write to the underlying pages. This
90  * will not allocate blocks, but will avoid holes and unwritten extents and so
91  * not do unnecessary work.
92  */
93 int
94 xfs_iozero(
95 	struct xfs_inode	*ip,	/* inode			*/
96 	loff_t			pos,	/* offset in file		*/
97 	size_t			count)	/* size of data to zero		*/
98 {
99 	struct page		*page;
100 	struct address_space	*mapping;
101 	int			status = 0;
102 
103 
104 	mapping = VFS_I(ip)->i_mapping;
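	/*
	 * Zero the range one page-sized chunk at a time.  For DAX we write
	 * zeroes straight through to the backing store; otherwise we go via
	 * pagecache_write_begin()/pagecache_write_end() so that partially
	 * zeroed blocks are read in first.
	 */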
105 	do {
106 		unsigned offset, bytes;
107 		void *fsdata;
108 
109 		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
110 		bytes = PAGE_CACHE_SIZE - offset;
111 		if (bytes > count)
112 			bytes = count;
113 
114 		if (IS_DAX(VFS_I(ip))) {
115 			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
116 						     xfs_get_blocks_direct);
117 			if (status)
118 				break;
119 		} else {
120 			status = pagecache_write_begin(NULL, mapping, pos, bytes,
121 						AOP_FLAG_UNINTERRUPTIBLE,
122 						&page, &fsdata);
123 			if (status)
124 				break;
125 
126 			zero_user(page, offset, bytes);
127 
128 			status = pagecache_write_end(NULL, mapping, pos, bytes,
129 						bytes, page, fsdata);
130 			WARN_ON(status <= 0); /* can't return less than zero! */
131 			status = 0;
132 		}
133 		pos += bytes;
134 		count -= bytes;
135 	} while (count);
136 
137 	return status;
138 }
139 
140 int
141 xfs_update_prealloc_flags(
142 	struct xfs_inode	*ip,
143 	enum xfs_prealloc_flags	flags)
144 {
145 	struct xfs_trans	*tp;
146 	int			error;
147 
148 	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
149 	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
150 	if (error) {
151 		xfs_trans_cancel(tp);
152 		return error;
153 	}
154 
155 	xfs_ilock(ip, XFS_ILOCK_EXCL);
156 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
157 
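	/*
	 * Unless the caller asked for an "invisible" preallocation, behave
	 * like a normal write: clear setuid (and setgid for group-executable
	 * files) and update the timestamps.
	 */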
158 	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
159 		ip->i_d.di_mode &= ~S_ISUID;
160 		if (ip->i_d.di_mode & S_IXGRP)
161 			ip->i_d.di_mode &= ~S_ISGID;
162 		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
163 	}
164 
165 	if (flags & XFS_PREALLOC_SET)
166 		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
167 	if (flags & XFS_PREALLOC_CLEAR)
168 		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
169 
170 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
171 	if (flags & XFS_PREALLOC_SYNC)
172 		xfs_trans_set_sync(tp);
173 	return xfs_trans_commit(tp);
174 }
175 
176 /*
177  * Fsync operations on directories are much simpler than on regular files,
178  * as there is no file data to flush, and thus also no need for explicit
179  * cache flush operations, and there are no non-transaction metadata updates
180  * on directories either.
181  */
182 STATIC int
183 xfs_dir_fsync(
184 	struct file		*file,
185 	loff_t			start,
186 	loff_t			end,
187 	int			datasync)
188 {
189 	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
190 	struct xfs_mount	*mp = ip->i_mount;
191 	xfs_lsn_t		lsn = 0;
192 
193 	trace_xfs_dir_fsync(ip);
194 
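	/*
	 * If the inode is pinned, dirty directory metadata is still only in
	 * the in-core log; remember the last LSN that touched it so we can
	 * force the log up to that point below.
	 */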
195 	xfs_ilock(ip, XFS_ILOCK_SHARED);
196 	if (xfs_ipincount(ip))
197 		lsn = ip->i_itemp->ili_last_lsn;
198 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
199 
200 	if (!lsn)
201 		return 0;
202 	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
203 }
204 
205 STATIC int
206 xfs_file_fsync(
207 	struct file		*file,
208 	loff_t			start,
209 	loff_t			end,
210 	int			datasync)
211 {
212 	struct inode		*inode = file->f_mapping->host;
213 	struct xfs_inode	*ip = XFS_I(inode);
214 	struct xfs_mount	*mp = ip->i_mount;
215 	int			error = 0;
216 	int			log_flushed = 0;
217 	xfs_lsn_t		lsn = 0;
218 
219 	trace_xfs_file_fsync(ip);
220 
221 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
222 	if (error)
223 		return error;
224 
225 	if (XFS_FORCED_SHUTDOWN(mp))
226 		return -EIO;
227 
228 	xfs_iflags_clear(ip, XFS_ITRUNCATED);
229 
230 	if (mp->m_flags & XFS_MOUNT_BARRIER) {
231 		/*
232 		 * If we have an RT and/or log subvolume we need to make sure
233 		 * to flush the write cache of the device used for file data
234 		 * first.  This is to ensure newly written file data makes
235 		 * it to disk before logging the new inode size in case of
236 		 * an extending write.
237 		 */
238 		if (XFS_IS_REALTIME_INODE(ip))
239 			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
240 		else if (mp->m_logdev_targp != mp->m_ddev_targp)
241 			xfs_blkdev_issue_flush(mp->m_ddev_targp);
242 	}
243 
244 	/*
245 	 * All metadata updates are logged, which means that we just have to
246 	 * flush the log up to the latest LSN that touched the inode. If we have
247 	 * concurrent fsync/fdatasync() calls, we need them to all block on the
248 	 * log force before we clear the ili_fsync_fields field. This ensures
249 	 * that we don't get a racing sync operation that does not wait for the
250 	 * metadata to hit the journal before returning. If we race with
251 	 * clearing the ili_fsync_fields, then all that will happen is the log
252 	 * force will do nothing as the lsn will already be on disk. We can't
253 	 * race with setting ili_fsync_fields because that is done under
254 	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
255 	 * until after the ili_fsync_fields is cleared.
256 	 */
257 	xfs_ilock(ip, XFS_ILOCK_SHARED);
258 	if (xfs_ipincount(ip)) {
259 		if (!datasync ||
260 		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
261 			lsn = ip->i_itemp->ili_last_lsn;
262 	}
263 
264 	if (lsn) {
265 		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
266 		ip->i_itemp->ili_fsync_fields = 0;
267 	}
268 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
269 
270 	/*
271 	 * If we only have a single device, and the log force above was
272 	 * a no-op, we might have to flush the data device cache here.
273 	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
274 	 * an already allocated file and thus do not have any metadata to
275 	 * commit.
276 	 */
277 	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
278 	    mp->m_logdev_targp == mp->m_ddev_targp &&
279 	    !XFS_IS_REALTIME_INODE(ip) &&
280 	    !log_flushed)
281 		xfs_blkdev_issue_flush(mp->m_ddev_targp);
282 
283 	return error;
284 }
285 
286 STATIC ssize_t
287 xfs_file_read_iter(
288 	struct kiocb		*iocb,
289 	struct iov_iter		*to)
290 {
291 	struct file		*file = iocb->ki_filp;
292 	struct inode		*inode = file->f_mapping->host;
293 	struct xfs_inode	*ip = XFS_I(inode);
294 	struct xfs_mount	*mp = ip->i_mount;
295 	size_t			size = iov_iter_count(to);
296 	ssize_t			ret = 0;
297 	int			ioflags = 0;
298 	xfs_fsize_t		n;
299 	loff_t			pos = iocb->ki_pos;
300 
301 	XFS_STATS_INC(mp, xs_read_calls);
302 
303 	if (unlikely(iocb->ki_flags & IOCB_DIRECT))
304 		ioflags |= XFS_IO_ISDIRECT;
305 	if (file->f_mode & FMODE_NOCMTIME)
306 		ioflags |= XFS_IO_INVIS;
307 
308 	if ((ioflags & XFS_IO_ISDIRECT) && !IS_DAX(inode)) {
309 		xfs_buftarg_t	*target =
310 			XFS_IS_REALTIME_INODE(ip) ?
311 				mp->m_rtdev_targp : mp->m_ddev_targp;
312 		/* DIO must be aligned to device logical sector size */
313 		if ((pos | size) & target->bt_logical_sectormask) {
314 			if (pos == i_size_read(inode))
315 				return 0;
316 			return -EINVAL;
317 		}
318 	}
319 
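	/* Clamp the read so it does not extend past the maximum file size. */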
320 	n = mp->m_super->s_maxbytes - pos;
321 	if (n <= 0 || size == 0)
322 		return 0;
323 
324 	if (n < size)
325 		size = n;
326 
327 	if (XFS_FORCED_SHUTDOWN(mp))
328 		return -EIO;
329 
330 	/*
331 	 * Locking is a bit tricky here. If we take an exclusive lock for direct
332 	 * IO, we effectively serialise all new concurrent read IO to this file
333 	 * and block it behind IO that is currently in progress because IO in
334 	 * progress holds the IO lock shared. We only need to hold the lock
335 	 * exclusive to blow away the page cache, so only take the lock
336 	 * exclusively if the page cache needs invalidation. This allows the
337 	 * normal direct IO case of no page cache pages to proceed concurrently
338 	 * without serialisation.
339 	 */
340 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
341 	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
342 		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
343 		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
344 
345 		/*
346 		 * The generic dio code only flushes the range of the particular
347 		 * I/O. Because we take an exclusive lock here, this whole
348 		 * sequence is considerably more expensive for us. This has a
349 		 * noticeable performance impact for any file with cached pages,
350 		 * even when outside of the range of the particular I/O.
351 		 *
352 		 * Hence, amortize the cost of the lock against a full file
353 		 * flush and reduce the chances of repeated iolock cycles going
354 		 * forward.
355 		 */
356 		if (inode->i_mapping->nrpages) {
357 			ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
358 			if (ret) {
359 				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
360 				return ret;
361 			}
362 
363 			/*
364 			 * Invalidate whole pages. This can return an error if
365 			 * we fail to invalidate a page, but this should never
366 			 * happen on XFS. Warn if it does fail.
367 			 */
368 			ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
369 			WARN_ON_ONCE(ret);
370 			ret = 0;
371 		}
372 		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
373 	}
374 
375 	trace_xfs_file_read(ip, size, pos, ioflags);
376 
377 	ret = generic_file_read_iter(iocb, to);
378 	if (ret > 0)
379 		XFS_STATS_ADD(mp, xs_read_bytes, ret);
380 
381 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
382 	return ret;
383 }
384 
385 STATIC ssize_t
386 xfs_file_splice_read(
387 	struct file		*infilp,
388 	loff_t			*ppos,
389 	struct pipe_inode_info	*pipe,
390 	size_t			count,
391 	unsigned int		flags)
392 {
393 	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
394 	int			ioflags = 0;
395 	ssize_t			ret;
396 
397 	XFS_STATS_INC(ip->i_mount, xs_read_calls);
398 
399 	if (infilp->f_mode & FMODE_NOCMTIME)
400 		ioflags |= XFS_IO_INVIS;
401 
402 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
403 		return -EIO;
404 
405 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
406 
407 	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
408 
409 	/* for dax, we need to avoid the page cache */
410 	if (IS_DAX(VFS_I(ip)))
411 		ret = default_file_splice_read(infilp, ppos, pipe, count, flags);
412 	else
413 		ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
414 	if (ret > 0)
415 		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
416 
417 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
418 	return ret;
419 }
420 
421 /*
422  * This routine is called to handle zeroing any space in the last block of the
423  * file that is beyond the EOF.  We do this since the size is being increased
424  * without writing anything to that block and we don't want to read the
425  * garbage on the disk.
426  */
427 STATIC int				/* error (positive) */
428 xfs_zero_last_block(
429 	struct xfs_inode	*ip,
430 	xfs_fsize_t		offset,
431 	xfs_fsize_t		isize,
432 	bool			*did_zeroing)
433 {
434 	struct xfs_mount	*mp = ip->i_mount;
435 	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
436 	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
437 	int			zero_len;
438 	int			nimaps = 1;
439 	int			error = 0;
440 	struct xfs_bmbt_irec	imap;
441 
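	/*
	 * last_fsb is the block containing the current EOF and zero_offset is
	 * how far into that block the EOF sits; map that single block to see
	 * whether there is anything there to zero at all.
	 */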
442 	xfs_ilock(ip, XFS_ILOCK_EXCL);
443 	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
444 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
445 	if (error)
446 		return error;
447 
448 	ASSERT(nimaps > 0);
449 
450 	/*
451 	 * If the block underlying isize is just a hole, then there
452 	 * is nothing to zero.
453 	 */
454 	if (imap.br_startblock == HOLESTARTBLOCK)
455 		return 0;
456 
457 	zero_len = mp->m_sb.sb_blocksize - zero_offset;
458 	if (isize + zero_len > offset)
459 		zero_len = offset - isize;
460 	*did_zeroing = true;
461 	return xfs_iozero(ip, isize, zero_len);
462 }
463 
464 /*
465  * Zero any on disk space between the current EOF and the new, larger EOF.
466  *
467  * This handles the normal case of zeroing the remainder of the last block in
468  * the file and the unusual case of zeroing blocks out beyond the size of the
469  * file.  This second case only happens with fixed size extents and when the
470  * system crashes before the inode size was updated but after blocks were
471  * allocated.
472  *
473  * Expects the iolock to be held exclusive, and will take the ilock internally.
474  */
475 int					/* error (positive) */
476 xfs_zero_eof(
477 	struct xfs_inode	*ip,
478 	xfs_off_t		offset,		/* starting I/O offset */
479 	xfs_fsize_t		isize,		/* current inode size */
480 	bool			*did_zeroing)
481 {
482 	struct xfs_mount	*mp = ip->i_mount;
483 	xfs_fileoff_t		start_zero_fsb;
484 	xfs_fileoff_t		end_zero_fsb;
485 	xfs_fileoff_t		zero_count_fsb;
486 	xfs_fileoff_t		last_fsb;
487 	xfs_fileoff_t		zero_off;
488 	xfs_fsize_t		zero_len;
489 	int			nimaps;
490 	int			error = 0;
491 	struct xfs_bmbt_irec	imap;
492 
493 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
494 	ASSERT(offset > isize);
495 
496 	trace_xfs_zero_eof(ip, isize, offset - isize);
497 
498 	/*
499 	 * First handle zeroing the block on which isize resides.
500 	 *
501 	 * We only zero a part of that block so it is handled specially.
502 	 */
503 	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
504 		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
505 		if (error)
506 			return error;
507 	}
508 
509 	/*
510 	 * Calculate the range between the new size and the old where blocks
511 	 * needing to be zeroed may exist.
512 	 *
513 	 * To get the block where the last byte in the file currently resides,
514 	 * we need to subtract one from the size and truncate back to a block
515 	 * boundary.  We subtract 1 in case the size is exactly on a block
516 	 * boundary.
517 	 */
518 	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
519 	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
520 	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
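	/*
	 * Worked example, assuming 4k blocks, isize = 6k and offset = 20k:
	 * last_fsb = 1, start_zero_fsb = 2 and end_zero_fsb = 4, so blocks
	 * 2-4 are the candidates for zeroing in the loop below.
	 */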
521 	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
522 	if (last_fsb == end_zero_fsb) {
523 		/*
524 		 * The size was only incremented on its last block.
525 		 * We took care of that above, so just return.
526 		 */
527 		return 0;
528 	}
529 
530 	ASSERT(start_zero_fsb <= end_zero_fsb);
531 	while (start_zero_fsb <= end_zero_fsb) {
532 		nimaps = 1;
533 		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
534 
535 		xfs_ilock(ip, XFS_ILOCK_EXCL);
536 		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
537 					  &imap, &nimaps, 0);
538 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
539 		if (error)
540 			return error;
541 
542 		ASSERT(nimaps > 0);
543 
544 		if (imap.br_state == XFS_EXT_UNWRITTEN ||
545 		    imap.br_startblock == HOLESTARTBLOCK) {
546 			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
547 			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
548 			continue;
549 		}
550 
551 		/*
552 		 * There are blocks we need to zero.
553 		 */
554 		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
555 		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
556 
557 		if ((zero_off + zero_len) > offset)
558 			zero_len = offset - zero_off;
559 
560 		error = xfs_iozero(ip, zero_off, zero_len);
561 		if (error)
562 			return error;
563 
564 		*did_zeroing = true;
565 		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
566 		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
567 	}
568 
569 	return 0;
570 }
571 
572 /*
573  * Common pre-write limit and setup checks.
574  *
575  * Called with the iolock held either shared or exclusive according to
576  * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
577  * if called for a direct write beyond i_size.
578  */
579 STATIC ssize_t
580 xfs_file_aio_write_checks(
581 	struct kiocb		*iocb,
582 	struct iov_iter		*from,
583 	int			*iolock)
584 {
585 	struct file		*file = iocb->ki_filp;
586 	struct inode		*inode = file->f_mapping->host;
587 	struct xfs_inode	*ip = XFS_I(inode);
588 	ssize_t			error = 0;
589 	size_t			count = iov_iter_count(from);
590 	bool			drained_dio = false;
591 
592 restart:
593 	error = generic_write_checks(iocb, from);
594 	if (error <= 0)
595 		return error;
596 
597 	error = xfs_break_layouts(inode, iolock, true);
598 	if (error)
599 		return error;
600 
601 	/* For changing security info in file_remove_privs() we need i_mutex */
602 	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
603 		xfs_rw_iunlock(ip, *iolock);
604 		*iolock = XFS_IOLOCK_EXCL;
605 		xfs_rw_ilock(ip, *iolock);
606 		goto restart;
607 	}
608 	/*
609 	 * If the offset is beyond the size of the file, we need to zero any
610 	 * blocks that fall between the existing EOF and the start of this
611 	 * write.  If zeroing is needed and we are currently holding the
612 	 * iolock shared, we need to update it to exclusive, which implies
613 	 * having to redo all the checks made before.
614 	 *
615 	 * We need to serialise against EOF updates that occur in IO
616 	 * completions here. We want to make sure that nobody is changing the
617 	 * size while we do this check until we have placed an IO barrier (i.e.
618 	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
619 	 * The spinlock effectively forms a memory barrier once we have the
620 	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
621 	 * and hence be able to correctly determine if we need to run zeroing.
622 	 */
623 	spin_lock(&ip->i_flags_lock);
624 	if (iocb->ki_pos > i_size_read(inode)) {
625 		bool	zero = false;
626 
627 		spin_unlock(&ip->i_flags_lock);
628 		if (!drained_dio) {
629 			if (*iolock == XFS_IOLOCK_SHARED) {
630 				xfs_rw_iunlock(ip, *iolock);
631 				*iolock = XFS_IOLOCK_EXCL;
632 				xfs_rw_ilock(ip, *iolock);
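				/*
				 * generic_write_checks() may have trimmed the
				 * iov count; restore it before restarting the
				 * checks under the exclusive lock.
				 */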
633 				iov_iter_reexpand(from, count);
634 			}
635 			/*
636 			 * We now have an IO submission barrier in place, but
637 			 * AIO can do EOF updates during IO completion and hence
638 			 * we now need to wait for all of them to drain. Non-AIO
639 			 * DIO will have drained before we are given the
640 			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
641 			 * no-op.
642 			 */
643 			inode_dio_wait(inode);
644 			drained_dio = true;
645 			goto restart;
646 		}
647 		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
648 		if (error)
649 			return error;
650 	} else
651 		spin_unlock(&ip->i_flags_lock);
652 
653 	/*
654 	 * Updating the timestamps will grab the ilock again from
655 	 * xfs_fs_dirty_inode, so we have to call it after dropping the
656 	 * lock above.  Eventually we should look into a way to avoid
657 	 * the pointless lock roundtrip.
658 	 */
659 	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
660 		error = file_update_time(file);
661 		if (error)
662 			return error;
663 	}
664 
665 	/*
666 	 * If we're writing the file then make sure to clear the setuid and
667 	 * setgid bits if the process is not being run by root.  This keeps
668 	 * people from modifying setuid and setgid binaries.
669 	 */
670 	if (!IS_NOSEC(inode))
671 		return file_remove_privs(file);
672 	return 0;
673 }
674 
675 /*
676  * xfs_file_dio_aio_write - handle direct IO writes
677  *
678  * Lock the inode appropriately to prepare for and issue a direct IO write.
679  * By separating it from the buffered write path we remove all the tricky to
680  * follow locking changes and looping.
681  *
682  * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
683  * until we're sure the bytes at the new EOF have been zeroed and/or the cached
684  * pages are flushed out.
685  *
686  * In most cases the direct IO writes will be done holding IOLOCK_SHARED
687  * allowing them to be done in parallel with reads and other direct IO writes.
688  * However, if the IO is not aligned to filesystem blocks, the direct IO layer
689  * needs to do sub-block zeroing and that requires serialisation against other
690  * direct IOs to the same block. In this case we need to serialise the
691  * submission of the unaligned IOs so that we don't get racing block zeroing in
692  * the dio layer.  To avoid the problem with aio, we also need to wait for
693  * outstanding IOs to complete so that unwritten extent conversion is completed
694  * before we try to map the overlapping block. This is currently implemented by
695  * hitting it with a big hammer (i.e. inode_dio_wait()).
696  *
697  * Returns with locks held indicated by @iolock and errors indicated by
698  * negative return values.
699  */
700 STATIC ssize_t
701 xfs_file_dio_aio_write(
702 	struct kiocb		*iocb,
703 	struct iov_iter		*from)
704 {
705 	struct file		*file = iocb->ki_filp;
706 	struct address_space	*mapping = file->f_mapping;
707 	struct inode		*inode = mapping->host;
708 	struct xfs_inode	*ip = XFS_I(inode);
709 	struct xfs_mount	*mp = ip->i_mount;
710 	ssize_t			ret = 0;
711 	int			unaligned_io = 0;
712 	int			iolock;
713 	size_t			count = iov_iter_count(from);
714 	loff_t			pos = iocb->ki_pos;
715 	loff_t			end;
716 	struct iov_iter		data;
717 	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
718 					mp->m_rtdev_targp : mp->m_ddev_targp;
719 
720 	/* DIO must be aligned to device logical sector size */
721 	if (!IS_DAX(inode) && ((pos | count) & target->bt_logical_sectormask))
722 		return -EINVAL;
723 
724 	/* "unaligned" here means not aligned to a filesystem block */
725 	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
726 		unaligned_io = 1;
727 
728 	/*
729 	 * We don't need to take an exclusive lock unless the page cache needs
730 	 * to be invalidated or unaligned IO is being executed. We don't need to
731 	 * consider the EOF extension case here because
732 	 * xfs_file_aio_write_checks() will relock the inode as necessary for
733 	 * EOF zeroing cases and fill out the new inode size as appropriate.
734 	 */
735 	if (unaligned_io || mapping->nrpages)
736 		iolock = XFS_IOLOCK_EXCL;
737 	else
738 		iolock = XFS_IOLOCK_SHARED;
739 	xfs_rw_ilock(ip, iolock);
740 
741 	/*
742 	 * Recheck if there are cached pages that need invalidate after we got
743 	 * the iolock to protect against other threads adding new pages while
744 	 * we were waiting for the iolock.
745 	 */
746 	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
747 		xfs_rw_iunlock(ip, iolock);
748 		iolock = XFS_IOLOCK_EXCL;
749 		xfs_rw_ilock(ip, iolock);
750 	}
751 
752 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
753 	if (ret)
754 		goto out;
755 	count = iov_iter_count(from);
756 	pos = iocb->ki_pos;
757 	end = pos + count - 1;
758 
759 	/*
760 	 * See xfs_file_read_iter() for why we do a full-file flush here.
761 	 */
762 	if (mapping->nrpages) {
763 		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
764 		if (ret)
765 			goto out;
766 		/*
767 		 * Invalidate whole pages. This can return an error if we fail
768 		 * to invalidate a page, but this should never happen on XFS.
769 		 * Warn if it does fail.
770 		 */
771 		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
772 		WARN_ON_ONCE(ret);
773 		ret = 0;
774 	}
775 
776 	/*
777 	 * If we are doing unaligned IO, wait for all other IO to drain,
778 	 * otherwise demote the lock if we had to flush cached pages.
779 	 */
780 	if (unaligned_io)
781 		inode_dio_wait(inode);
782 	else if (iolock == XFS_IOLOCK_EXCL) {
783 		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
784 		iolock = XFS_IOLOCK_SHARED;
785 	}
786 
787 	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
788 
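	/*
	 * Hand ->direct_IO a copy of the iterator; the caller's iterator is
	 * only advanced below by the number of bytes actually written.
	 */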
789 	data = *from;
790 	ret = mapping->a_ops->direct_IO(iocb, &data, pos);
791 
792 	/* see generic_file_direct_write() for why this is necessary */
793 	if (mapping->nrpages) {
794 		invalidate_inode_pages2_range(mapping,
795 					      pos >> PAGE_CACHE_SHIFT,
796 					      end >> PAGE_CACHE_SHIFT);
797 	}
798 
799 	if (ret > 0) {
800 		pos += ret;
801 		iov_iter_advance(from, ret);
802 		iocb->ki_pos = pos;
803 	}
804 out:
805 	xfs_rw_iunlock(ip, iolock);
806 
807 	/*
808 	 * No fallback to buffered IO on errors for XFS. DAX can result in
809 	 * partial writes, but direct IO will either complete fully or fail.
810 	 */
811 	ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
812 	return ret;
813 }
814 
815 STATIC ssize_t
816 xfs_file_buffered_aio_write(
817 	struct kiocb		*iocb,
818 	struct iov_iter		*from)
819 {
820 	struct file		*file = iocb->ki_filp;
821 	struct address_space	*mapping = file->f_mapping;
822 	struct inode		*inode = mapping->host;
823 	struct xfs_inode	*ip = XFS_I(inode);
824 	ssize_t			ret;
825 	int			enospc = 0;
826 	int			iolock = XFS_IOLOCK_EXCL;
827 
828 	xfs_rw_ilock(ip, iolock);
829 
830 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
831 	if (ret)
832 		goto out;
833 
834 	/* We can write back this queue in page reclaim */
835 	current->backing_dev_info = inode_to_bdi(inode);
836 
837 write_retry:
838 	trace_xfs_file_buffered_write(ip, iov_iter_count(from),
839 				      iocb->ki_pos, 0);
840 	ret = generic_perform_write(file, from, iocb->ki_pos);
841 	if (likely(ret >= 0))
842 		iocb->ki_pos += ret;
843 
844 	/*
845 	 * If we hit a space limit, try to free up some lingering preallocated
846 	 * space before returning an error. In the case of ENOSPC, first try to
847 	 * write back all dirty inodes to free up some of the excess reserved
848 	 * metadata space. This reduces the chances that the eofblocks scan
849 	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
850 	 * also behaves as a filter to prevent too many eofblocks scans from
851 	 * running at the same time.
852 	 */
853 	if (ret == -EDQUOT && !enospc) {
854 		enospc = xfs_inode_free_quota_eofblocks(ip);
855 		if (enospc)
856 			goto write_retry;
857 	} else if (ret == -ENOSPC && !enospc) {
858 		struct xfs_eofblocks eofb = {0};
859 
860 		enospc = 1;
861 		xfs_flush_inodes(ip->i_mount);
862 		eofb.eof_scan_owner = ip->i_ino; /* for locking */
863 		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
864 		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
865 		goto write_retry;
866 	}
867 
868 	current->backing_dev_info = NULL;
869 out:
870 	xfs_rw_iunlock(ip, iolock);
871 	return ret;
872 }
873 
874 STATIC ssize_t
875 xfs_file_write_iter(
876 	struct kiocb		*iocb,
877 	struct iov_iter		*from)
878 {
879 	struct file		*file = iocb->ki_filp;
880 	struct address_space	*mapping = file->f_mapping;
881 	struct inode		*inode = mapping->host;
882 	struct xfs_inode	*ip = XFS_I(inode);
883 	ssize_t			ret;
884 	size_t			ocount = iov_iter_count(from);
885 
886 	XFS_STATS_INC(ip->i_mount, xs_write_calls);
887 
888 	if (ocount == 0)
889 		return 0;
890 
891 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
892 		return -EIO;
893 
894 	if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
895 		ret = xfs_file_dio_aio_write(iocb, from);
896 	else
897 		ret = xfs_file_buffered_aio_write(iocb, from);
898 
899 	if (ret > 0) {
900 		ssize_t err;
901 
902 		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
903 
904 		/* Handle various SYNC-type writes */
905 		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
906 		if (err < 0)
907 			ret = err;
908 	}
909 	return ret;
910 }
911 
912 #define	XFS_FALLOC_FL_SUPPORTED						\
913 		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
914 		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
915 		 FALLOC_FL_INSERT_RANGE)
916 
917 STATIC long
918 xfs_file_fallocate(
919 	struct file		*file,
920 	int			mode,
921 	loff_t			offset,
922 	loff_t			len)
923 {
924 	struct inode		*inode = file_inode(file);
925 	struct xfs_inode	*ip = XFS_I(inode);
926 	long			error;
927 	enum xfs_prealloc_flags	flags = 0;
928 	uint			iolock = XFS_IOLOCK_EXCL;
929 	loff_t			new_size = 0;
930 	bool			do_file_insert = false;
931 
932 	if (!S_ISREG(inode->i_mode))
933 		return -EINVAL;
934 	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
935 		return -EOPNOTSUPP;
936 
937 	xfs_ilock(ip, iolock);
938 	error = xfs_break_layouts(inode, &iolock, false);
939 	if (error)
940 		goto out_unlock;
941 
942 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
943 	iolock |= XFS_MMAPLOCK_EXCL;
944 
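	/*
	 * With the IOLOCK and MMAPLOCK both held exclusively, neither new IO
	 * nor page faults can race with the extent manipulations below.
	 */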
945 	if (mode & FALLOC_FL_PUNCH_HOLE) {
946 		error = xfs_free_file_space(ip, offset, len);
947 		if (error)
948 			goto out_unlock;
949 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
950 		unsigned int blksize_mask = i_blocksize(inode) - 1;
951 
952 		if (offset & blksize_mask || len & blksize_mask) {
953 			error = -EINVAL;
954 			goto out_unlock;
955 		}
956 
957 		/*
958 		 * There is no need to overlap collapse range with EOF,
959 		 * in which case it is effectively a truncate operation
960 		 */
961 		if (offset + len >= i_size_read(inode)) {
962 			error = -EINVAL;
963 			goto out_unlock;
964 		}
965 
966 		new_size = i_size_read(inode) - len;
967 
968 		error = xfs_collapse_file_space(ip, offset, len);
969 		if (error)
970 			goto out_unlock;
971 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
972 		unsigned int	blksize_mask = i_blocksize(inode) - 1;
973 		loff_t		isize = i_size_read(inode);
974 
975 		if (offset & blksize_mask || len & blksize_mask) {
976 			error = -EINVAL;
977 			goto out_unlock;
978 		}
979 
980 		/*
981 		 * New inode size must not exceed ->s_maxbytes, accounting for
982 		 * possible signed overflow.
983 		 */
984 		if (inode->i_sb->s_maxbytes - isize < len) {
985 			error = -EFBIG;
986 			goto out_unlock;
987 		}
988 		new_size = isize + len;
989 
990 		/* Offset should be less than i_size */
991 		if (offset >= isize) {
992 			error = -EINVAL;
993 			goto out_unlock;
994 		}
995 		do_file_insert = true;
996 	} else {
997 		flags |= XFS_PREALLOC_SET;
998 
999 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1000 		    offset + len > i_size_read(inode)) {
1001 			new_size = offset + len;
1002 			error = inode_newsize_ok(inode, new_size);
1003 			if (error)
1004 				goto out_unlock;
1005 		}
1006 
1007 		if (mode & FALLOC_FL_ZERO_RANGE)
1008 			error = xfs_zero_file_space(ip, offset, len);
1009 		else
1010 			error = xfs_alloc_file_space(ip, offset, len,
1011 						     XFS_BMAPI_PREALLOC);
1012 		if (error)
1013 			goto out_unlock;
1014 	}
1015 
1016 	if (file->f_flags & O_DSYNC)
1017 		flags |= XFS_PREALLOC_SYNC;
1018 
1019 	error = xfs_update_prealloc_flags(ip, flags);
1020 	if (error)
1021 		goto out_unlock;
1022 
1023 	/* Change file size if needed */
1024 	if (new_size) {
1025 		struct iattr iattr;
1026 
1027 		iattr.ia_valid = ATTR_SIZE;
1028 		iattr.ia_size = new_size;
1029 		error = xfs_setattr_size(ip, &iattr);
1030 		if (error)
1031 			goto out_unlock;
1032 	}
1033 
1034 	/*
1035 	 * Perform hole insertion now that the file size has been
1036 	 * updated so that if we crash during the operation we don't
1037 	 * leave shifted extents past EOF and hence lose access to
1038 	 * the data that is contained within them.
1039 	 */
1040 	if (do_file_insert)
1041 		error = xfs_insert_file_space(ip, offset, len);
1042 
1043 out_unlock:
1044 	xfs_iunlock(ip, iolock);
1045 	return error;
1046 }
1047 
1048 
1049 STATIC int
1050 xfs_file_open(
1051 	struct inode	*inode,
1052 	struct file	*file)
1053 {
1054 	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
1055 		return -EFBIG;
1056 	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
1057 		return -EIO;
1058 	return 0;
1059 }
1060 
1061 STATIC int
1062 xfs_dir_open(
1063 	struct inode	*inode,
1064 	struct file	*file)
1065 {
1066 	struct xfs_inode *ip = XFS_I(inode);
1067 	int		mode;
1068 	int		error;
1069 
1070 	error = xfs_file_open(inode, file);
1071 	if (error)
1072 		return error;
1073 
1074 	/*
1075 	 * If there are any blocks, read-ahead block 0 as we're almost
1076 	 * certain to have the next operation be a read there.
1077 	 */
1078 	mode = xfs_ilock_data_map_shared(ip);
1079 	if (ip->i_d.di_nextents > 0)
1080 		xfs_dir3_data_readahead(ip, 0, -1);
1081 	xfs_iunlock(ip, mode);
1082 	return 0;
1083 }
1084 
1085 STATIC int
1086 xfs_file_release(
1087 	struct inode	*inode,
1088 	struct file	*filp)
1089 {
1090 	return xfs_release(XFS_I(inode));
1091 }
1092 
1093 STATIC int
1094 xfs_file_readdir(
1095 	struct file	*file,
1096 	struct dir_context *ctx)
1097 {
1098 	struct inode	*inode = file_inode(file);
1099 	xfs_inode_t	*ip = XFS_I(inode);
1100 	size_t		bufsize;
1101 
1102 	/*
1103 	 * The Linux API doesn't pass the total size of the buffer we
1104 	 * read into down to the filesystem.  With the filldir concept
1105 	 * it's not needed for correct information, but the XFS dir2 leaf
1106 	 * code wants an estimate of the buffer size to calculate its
1107 	 * readahead window and size the buffers used for mapping to
1108 	 * physical blocks.
1109 	 *
1110 	 * Try to give it an estimate that's good enough, maybe at some
1111 	 * point we can change the ->readdir prototype to include the
1112 	 * buffer size.  For now we use the current glibc buffer size.
1113 	 */
1114 	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
1115 
1116 	return xfs_readdir(ip, ctx, bufsize);
1117 }
1118 
1119 /*
1120  * This type is designed to indicate the type of offset we would like
1121  * to search from page cache for xfs_seek_hole_data().
1122  */
1123 enum {
1124 	HOLE_OFF = 0,
1125 	DATA_OFF,
1126 };
1127 
1128 /*
1129  * Lookup the desired type of offset from the given page.
1130  *
1131  * On success, return true and the offset argument will point to the
1132  * start of the region that was found.  Otherwise this function will
1133  * return false and keep the offset argument unchanged.
1134  */
1135 STATIC bool
1136 xfs_lookup_buffer_offset(
1137 	struct page		*page,
1138 	loff_t			*offset,
1139 	unsigned int		type)
1140 {
1141 	loff_t			lastoff = page_offset(page);
1142 	bool			found = false;
1143 	struct buffer_head	*bh, *head;
1144 
1145 	bh = head = page_buffers(page);
1146 	do {
1147 		/*
1148 		 * Unwritten extents that have data in the page
1149 		 * cache covering them can be identified by the
1150 		 * BH_Unwritten state flag.  Pages with multiple
1151 		 * buffers might have a mix of holes, data and
1152 		 * unwritten extents - any buffer with valid
1153 		 * data in it should have BH_Uptodate flag set
1154 		 * on it.
1155 		 */
1156 		if (buffer_unwritten(bh) ||
1157 		    buffer_uptodate(bh)) {
1158 			if (type == DATA_OFF)
1159 				found = true;
1160 		} else {
1161 			if (type == HOLE_OFF)
1162 				found = true;
1163 		}
1164 
1165 		if (found) {
1166 			*offset = lastoff;
1167 			break;
1168 		}
1169 		lastoff += bh->b_size;
1170 	} while ((bh = bh->b_this_page) != head);
1171 
1172 	return found;
1173 }
1174 
1175 /*
1176  * This routine is called to find out and return a data or hole offset
1177  * from the page cache for unwritten extents according to the desired
1178  * type for xfs_seek_hole_data().
1179  *
1180  * The argument offset is used to tell where we start to search from the
1181  * page cache.  Map is used to figure out the end points of the range to
1182  * lookup pages.
1183  *
1184  * Return true if the desired type of offset was found, and the argument
1185  * offset is filled with that address.  Otherwise, return false and keep
1186  * offset unchanged.
1187  */
1188 STATIC bool
1189 xfs_find_get_desired_pgoff(
1190 	struct inode		*inode,
1191 	struct xfs_bmbt_irec	*map,
1192 	unsigned int		type,
1193 	loff_t			*offset)
1194 {
1195 	struct xfs_inode	*ip = XFS_I(inode);
1196 	struct xfs_mount	*mp = ip->i_mount;
1197 	struct pagevec		pvec;
1198 	pgoff_t			index;
1199 	pgoff_t			end;
1200 	loff_t			endoff;
1201 	loff_t			startoff = *offset;
1202 	loff_t			lastoff = startoff;
1203 	bool			found = false;
1204 
1205 	pagevec_init(&pvec, 0);
1206 
1207 	index = startoff >> PAGE_CACHE_SHIFT;
1208 	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
1209 	end = endoff >> PAGE_CACHE_SHIFT;
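	/*
	 * Walk the page cache backing this extent one pagevec at a time,
	 * inspecting each page's buffers for the desired state.
	 */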
1210 	do {
1211 		int		want;
1212 		unsigned	nr_pages;
1213 		unsigned int	i;
1214 
1215 		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
1216 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
1217 					  want);
1218 		/*
1219 		 * No page is mapped into the given range.  If we are searching
1220 		 * for holes and this is the first pass through the loop, the
1221 		 * given offset landed in a hole, so return it.
1222 		 *
1223 		 * If we have already stepped through some block buffers looking
1224 		 * for holes but they all contained data, then the last offset
1225 		 * has been updated to point at the end of the last mapped page.
1226 		 * If that offset has not reached the end of the search range,
1227 		 * there must be a hole between them.
1228 		 */
1229 		if (nr_pages == 0) {
1230 			/* Data search found nothing */
1231 			if (type == DATA_OFF)
1232 				break;
1233 
1234 			ASSERT(type == HOLE_OFF);
1235 			if (lastoff == startoff || lastoff < endoff) {
1236 				found = true;
1237 				*offset = lastoff;
1238 			}
1239 			break;
1240 		}
1241 
1242 		for (i = 0; i < nr_pages; i++) {
1243 			struct page	*page = pvec.pages[i];
1244 			loff_t		b_offset;
1245 
1246 			/*
1247 			 * At this point, the page may be truncated or
1248 			 * invalidated (changing page->mapping to NULL),
1249 			 * or even swizzled back from swapper_space to tmpfs
1250 			 * file mapping. However, page->index will not change
1251 			 * because we have a reference on the page.
1252 			 *
1253 			 * If current page offset is beyond where we've ended,
1254 			 * we've found a hole.
1255 			 */
1256 			if (type == HOLE_OFF && lastoff < endoff &&
1257 			    lastoff < page_offset(pvec.pages[i])) {
1258 				found = true;
1259 				*offset = lastoff;
1260 				goto out;
1261 			}
1262 			/* Searching done if the page index is out of range. */
1263 			if (page->index > end)
1264 				goto out;
1265 
1266 			lock_page(page);
1267 			/*
1268 			 * Page truncated or invalidated(page->mapping == NULL).
1269 			 * We can freely skip it and proceed to check the next
1270 			 * page.
1271 			 */
1272 			if (unlikely(page->mapping != inode->i_mapping)) {
1273 				unlock_page(page);
1274 				continue;
1275 			}
1276 
1277 			if (!page_has_buffers(page)) {
1278 				unlock_page(page);
1279 				continue;
1280 			}
1281 
1282 			found = xfs_lookup_buffer_offset(page, &b_offset, type);
1283 			if (found) {
1284 				/*
1285 				 * The found offset may be less than the start
1286 				 * point to search if this is the first time to
1287 				 * come here.
1288 				 */
1289 				*offset = max_t(loff_t, startoff, b_offset);
1290 				unlock_page(page);
1291 				goto out;
1292 			}
1293 
1294 			/*
1295 			 * We were either searching for data and found nothing,
1296 			 * or searching for a hole and found a data buffer.  In
1297 			 * either case the next page probably contains what we
1298 			 * want, so update the last offset to point past this page.
1299 			 */
1300 			lastoff = page_offset(page) + PAGE_SIZE;
1301 			unlock_page(page);
1302 		}
1303 
1304 		/*
1305 		 * Fewer pages were returned than we asked for, so the search
1306 		 * is done.  A data search has found nothing; a hole search has
1307 		 * found a hole behind the last offset.
1308 		 */
1309 		if (nr_pages < want) {
1310 			if (type == HOLE_OFF) {
1311 				*offset = lastoff;
1312 				found = true;
1313 			}
1314 			break;
1315 		}
1316 
1317 		index = pvec.pages[i - 1]->index + 1;
1318 		pagevec_release(&pvec);
1319 	} while (index <= end);
1320 
1321 out:
1322 	pagevec_release(&pvec);
1323 	return found;
1324 }
1325 
1326 STATIC loff_t
1327 xfs_seek_hole_data(
1328 	struct file		*file,
1329 	loff_t			start,
1330 	int			whence)
1331 {
1332 	struct inode		*inode = file->f_mapping->host;
1333 	struct xfs_inode	*ip = XFS_I(inode);
1334 	struct xfs_mount	*mp = ip->i_mount;
1335 	loff_t			uninitialized_var(offset);
1336 	xfs_fsize_t		isize;
1337 	xfs_fileoff_t		fsbno;
1338 	xfs_filblks_t		end;
1339 	uint			lock;
1340 	int			error;
1341 
1342 	if (XFS_FORCED_SHUTDOWN(mp))
1343 		return -EIO;
1344 
1345 	lock = xfs_ilock_data_map_shared(ip);
1346 
1347 	isize = i_size_read(inode);
1348 	if (start >= isize) {
1349 		error = -ENXIO;
1350 		goto out_unlock;
1351 	}
1352 
1353 	/*
1354 	 * Try to read extents from the first block indicated
1355 	 * by fsbno to the end block of the file.
1356 	 */
1357 	fsbno = XFS_B_TO_FSBT(mp, start);
1358 	end = XFS_B_TO_FSB(mp, isize);
1359 
1360 	for (;;) {
1361 		struct xfs_bmbt_irec	map[2];
1362 		int			nmap = 2;
1363 		unsigned int		i;
1364 
1365 		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
1366 				       XFS_BMAPI_ENTIRE);
1367 		if (error)
1368 			goto out_unlock;
1369 
1370 		/* No extents at given offset, must be beyond EOF */
1371 		if (nmap == 0) {
1372 			error = -ENXIO;
1373 			goto out_unlock;
1374 		}
1375 
1376 		for (i = 0; i < nmap; i++) {
1377 			offset = max_t(loff_t, start,
1378 				       XFS_FSB_TO_B(mp, map[i].br_startoff));
1379 
1380 			/* Landed in the hole we wanted? */
1381 			if (whence == SEEK_HOLE &&
1382 			    map[i].br_startblock == HOLESTARTBLOCK)
1383 				goto out;
1384 
1385 			/* Landed in the data extent we wanted? */
1386 			if (whence == SEEK_DATA &&
1387 			    (map[i].br_startblock == DELAYSTARTBLOCK ||
1388 			     (map[i].br_state == XFS_EXT_NORM &&
1389 			      !isnullstartblock(map[i].br_startblock))))
1390 				goto out;
1391 
1392 			/*
1393 			 * Landed in an unwritten extent, try to search
1394 			 * for hole or data from page cache.
1395 			 */
1396 			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
1397 				if (xfs_find_get_desired_pgoff(inode, &map[i],
1398 				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
1399 							&offset))
1400 					goto out;
1401 			}
1402 		}
1403 
1404 		/*
1405 		 * We only received one extent out of the two requested. This
1406 		 * means we've hit EOF and didn't find what we are looking for.
1407 		 */
1408 		if (nmap == 1) {
1409 			/*
1410 			 * If we were looking for a hole, set offset to
1411 			 * the end of the file (i.e., there is an implicit
1412 			 * hole at the end of any file).
1413 			 */
1414 			if (whence == SEEK_HOLE) {
1415 				offset = isize;
1416 				break;
1417 			}
1418 			/*
1419 			 * If we were looking for data, it's nowhere to be found
1420 			 */
1421 			ASSERT(whence == SEEK_DATA);
1422 			error = -ENXIO;
1423 			goto out_unlock;
1424 		}
1425 
1426 		ASSERT(i > 1);
1427 
1428 		/*
1429 		 * Nothing was found, proceed to the next round of search
1430 		 * if the next reading offset is not at or beyond EOF.
1431 		 */
1432 		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
1433 		start = XFS_FSB_TO_B(mp, fsbno);
1434 		if (start >= isize) {
1435 			if (whence == SEEK_HOLE) {
1436 				offset = isize;
1437 				break;
1438 			}
1439 			ASSERT(whence == SEEK_DATA);
1440 			error = -ENXIO;
1441 			goto out_unlock;
1442 		}
1443 	}
1444 
1445 out:
1446 	/*
1447 	 * If at this point we have found the hole we wanted, the returned
1448 	 * offset may be bigger than the file size as it may be aligned to
1449 	 * page boundary for unwritten extents.  We need to deal with this
1450 	 * situation in particular.
1451 	 */
1452 	if (whence == SEEK_HOLE)
1453 		offset = min_t(loff_t, offset, isize);
1454 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1455 
1456 out_unlock:
1457 	xfs_iunlock(ip, lock);
1458 
1459 	if (error)
1460 		return error;
1461 	return offset;
1462 }
1463 
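/*
 * ->llseek for XFS files.  SEEK_SET/SEEK_CUR/SEEK_END go through the generic
 * helper; SEEK_HOLE/SEEK_DATA (e.g. lseek(fd, 0, SEEK_DATA) from userspace)
 * are answered by the extent walk in xfs_seek_hole_data() above.
 */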
1464 STATIC loff_t
1465 xfs_file_llseek(
1466 	struct file	*file,
1467 	loff_t		offset,
1468 	int		whence)
1469 {
1470 	switch (whence) {
1471 	case SEEK_END:
1472 	case SEEK_CUR:
1473 	case SEEK_SET:
1474 		return generic_file_llseek(file, offset, whence);
1475 	case SEEK_HOLE:
1476 	case SEEK_DATA:
1477 		return xfs_seek_hole_data(file, offset, whence);
1478 	default:
1479 		return -EINVAL;
1480 	}
1481 }
1482 
1483 /*
1484  * Locking for serialisation of IO during page faults. This results in a lock
1485  * ordering of:
1486  *
1487  * mmap_sem (MM)
1488  *   sb_start_pagefault(vfs, freeze)
1489  *     i_mmaplock (XFS - truncate serialisation)
1490  *       page_lock (MM)
1491  *         i_lock (XFS - extent map serialisation)
1492  */
1493 
1494 /*
1495  * mmap()d file has taken write protection fault and is being made writable. We
1496  * can set the page state up correctly for a writable page, which means we can
1497  * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
1498  * mapping.
1499  */
1500 STATIC int
1501 xfs_filemap_page_mkwrite(
1502 	struct vm_area_struct	*vma,
1503 	struct vm_fault		*vmf)
1504 {
1505 	struct inode		*inode = file_inode(vma->vm_file);
1506 	int			ret;
1507 
1508 	trace_xfs_filemap_page_mkwrite(XFS_I(inode));
1509 
1510 	sb_start_pagefault(inode->i_sb);
1511 	file_update_time(vma->vm_file);
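	/* The MMAPLOCK serialises this fault against truncate and hole punch. */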
1512 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1513 
1514 	if (IS_DAX(inode)) {
1515 		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
1516 	} else {
1517 		ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
1518 		ret = block_page_mkwrite_return(ret);
1519 	}
1520 
1521 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1522 	sb_end_pagefault(inode->i_sb);
1523 
1524 	return ret;
1525 }
1526 
1527 STATIC int
1528 xfs_filemap_fault(
1529 	struct vm_area_struct	*vma,
1530 	struct vm_fault		*vmf)
1531 {
1532 	struct inode		*inode = file_inode(vma->vm_file);
1533 	int			ret;
1534 
1535 	trace_xfs_filemap_fault(XFS_I(inode));
1536 
1537 	/* DAX can shortcut the normal fault path on write faults! */
1538 	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1539 		return xfs_filemap_page_mkwrite(vma, vmf);
1540 
1541 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1542 	if (IS_DAX(inode)) {
1543 		/*
1544 		 * we do not want to trigger unwritten extent conversion on read
1545 		 * faults - that is unnecessary overhead and would also require
1546 		 * changes to xfs_get_blocks_direct() to map unwritten extent
1547 		 * ioend for conversion on read-only mappings.
1548 		 */
1549 		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault, NULL);
1550 	} else
1551 		ret = filemap_fault(vma, vmf);
1552 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1553 
1554 	return ret;
1555 }
1556 
1557 /*
1558  * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
1559  * both read and write faults. Hence we need to handle both cases. There is no
1560  * ->pmd_mkwrite callout for huge pages, so we have a single function to
1561  * handle both cases here. @flags carries the information on the type of fault
1562  * occurring.
1563  */
1564 STATIC int
1565 xfs_filemap_pmd_fault(
1566 	struct vm_area_struct	*vma,
1567 	unsigned long		addr,
1568 	pmd_t			*pmd,
1569 	unsigned int		flags)
1570 {
1571 	struct inode		*inode = file_inode(vma->vm_file);
1572 	struct xfs_inode	*ip = XFS_I(inode);
1573 	int			ret;
1574 
1575 	if (!IS_DAX(inode))
1576 		return VM_FAULT_FALLBACK;
1577 
1578 	trace_xfs_filemap_pmd_fault(ip);
1579 
1580 	if (flags & FAULT_FLAG_WRITE) {
1581 		sb_start_pagefault(inode->i_sb);
1582 		file_update_time(vma->vm_file);
1583 	}
1584 
1585 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1586 	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
1587 			      NULL);
1588 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1589 
1590 	if (flags & FAULT_FLAG_WRITE)
1591 		sb_end_pagefault(inode->i_sb);
1592 
1593 	return ret;
1594 }
1595 
1596 /*
1597  * pfn_mkwrite was originally intended to ensure we capture time stamp
1598  * updates on write faults. In reality, it's needed to serialise against
1599  * truncate similar to page_mkwrite. Hence we open-code dax_pfn_mkwrite()
1600  * here and cycle the XFS_MMAPLOCK_SHARED to ensure we serialise the fault
1601  * barrier in place.
1602  */
1603 static int
1604 xfs_filemap_pfn_mkwrite(
1605 	struct vm_area_struct	*vma,
1606 	struct vm_fault		*vmf)
1607 {
1608 
1609 	struct inode		*inode = file_inode(vma->vm_file);
1610 	struct xfs_inode	*ip = XFS_I(inode);
1611 	int			ret = VM_FAULT_NOPAGE;
1612 	loff_t			size;
1613 
1614 	trace_xfs_filemap_pfn_mkwrite(ip);
1615 
1616 	sb_start_pagefault(inode->i_sb);
1617 	file_update_time(vma->vm_file);
1618 
1619 	/* check if the faulting page hasn't raced with truncate */
1620 	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1621 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1622 	if (vmf->pgoff >= size)
1623 		ret = VM_FAULT_SIGBUS;
1624 	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1625 	sb_end_pagefault(inode->i_sb);
1626 	return ret;
1627 
1628 }
1629 
1630 static const struct vm_operations_struct xfs_file_vm_ops = {
1631 	.fault		= xfs_filemap_fault,
1632 	.pmd_fault	= xfs_filemap_pmd_fault,
1633 	.map_pages	= filemap_map_pages,
1634 	.page_mkwrite	= xfs_filemap_page_mkwrite,
1635 	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1636 };
1637 
1638 STATIC int
1639 xfs_file_mmap(
1640 	struct file	*filp,
1641 	struct vm_area_struct *vma)
1642 {
1643 	file_accessed(filp);
1644 	vma->vm_ops = &xfs_file_vm_ops;
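	/*
	 * DAX mappings fault in pfns rather than struct pages, so mark the
	 * VMA mixed-map and eligible for huge (pmd) faults.
	 */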
1645 	if (IS_DAX(file_inode(filp)))
1646 		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
1647 	return 0;
1648 }
1649 
1650 const struct file_operations xfs_file_operations = {
1651 	.llseek		= xfs_file_llseek,
1652 	.read_iter	= xfs_file_read_iter,
1653 	.write_iter	= xfs_file_write_iter,
1654 	.splice_read	= xfs_file_splice_read,
1655 	.splice_write	= iter_file_splice_write,
1656 	.unlocked_ioctl	= xfs_file_ioctl,
1657 #ifdef CONFIG_COMPAT
1658 	.compat_ioctl	= xfs_file_compat_ioctl,
1659 #endif
1660 	.mmap		= xfs_file_mmap,
1661 	.open		= xfs_file_open,
1662 	.release	= xfs_file_release,
1663 	.fsync		= xfs_file_fsync,
1664 	.fallocate	= xfs_file_fallocate,
1665 };
1666 
1667 const struct file_operations xfs_dir_file_operations = {
1668 	.open		= xfs_dir_open,
1669 	.read		= generic_read_dir,
1670 	.iterate	= xfs_file_readdir,
1671 	.llseek		= generic_file_llseek,
1672 	.unlocked_ioctl	= xfs_file_ioctl,
1673 #ifdef CONFIG_COMPAT
1674 	.compat_ioctl	= xfs_file_compat_ioctl,
1675 #endif
1676 	.fsync		= xfs_dir_fsync,
1677 };
1678