/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/writeback.h>

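/*
 * Tracing helpers: when XFS_RW_TRACE is defined, the read/write entry
 * points below record their arguments in the per-inode ktrace buffer.
 * ktrace slots hold void pointers, so 64-bit quantities (file sizes,
 * offsets) are split into two 32-bit halves before being logged.
 */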
#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int			tag,
	xfs_inode_t		*ip,
	void			*data,
	size_t			segs,
	loff_t			offset,
	int			ioflags)
{
	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif

/*
 *	xfs_iozero
 *
 *	xfs_iozero zeroes the specified range of the file and marks all
 *	affected blocks as valid and modified.  If an affected block is
 *	not allocated, it will be allocated.  If an affected block is
 *	not completely overwritten, and is not valid before the
 *	operation, it will be read from disk before being partially
 *	zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

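		/*
		 * Grab the page (reading it in first if the range only
		 * partially overwrites it), zero the target bytes, then
		 * mark the page dirty via pagecache_write_end().
		 */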
		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

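/*
 * Common read path for the XFS file operations.  Validates the iovec,
 * enforces sector alignment for direct I/O, sends the DMAPI read event
 * if one is armed, and hands the actual work to generic_file_aio_read()
 * under the shared iolock.
 */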
ssize_t			/* bytes read, or (-) error */
xfs_read(
	xfs_inode_t		*ip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	xfs_mount_t		*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	xfs_fsize_t		n;
	unsigned long		seg;


	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
		int iolock = XFS_IOLOCK_SHARED;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
					dmflags, &iolock);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

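	/*
	 * For direct I/O, flush and invalidate any cached pages over the
	 * range first so the read sees current data on disk rather than
	 * stale pagecache contents.
	 */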
	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (inode->i_mapping->nrpages)
			ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
						    -1, FI_REMAPF_LOCKED);
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, ip,
				(void *)iovp, segs, *offset, ioflags);

	iocb->ki_pos = *offset;
	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

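/*
 * Splice data from the file into a pipe.  Takes the shared iolock,
 * sends the DMAPI read event if one is armed, and defers the actual
 * work to generic_file_splice_read().
 */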
ssize_t
xfs_splice_read(
	xfs_inode_t		*ip,
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	int			flags,
	int			ioflags)
{
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_SHARED;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
					FILP_DELAY_FLAG(infilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

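/*
 * Splice data from a pipe into the file.  Mirrors the size handling in
 * xfs_write(): a prospective EOF is published in i_new_size for the
 * duration of the write, and the in-core and on-disk sizes are
 * reconciled afterwards.
 */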
ssize_t
xfs_splice_write(
	xfs_inode_t		*ip,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags)
{
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;
	struct inode		*inode = outfilp->f_mapping->host;
	xfs_fsize_t		isize, new_size;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_EXCL;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
					FILP_DELAY_FLAG(outfilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}

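	/*
	 * Publish the prospective EOF in i_new_size so other parts of
	 * XFS that inspect the inode size know an extending write is in
	 * flight; it is cleared again once the sizes are reconciled below.
	 */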
	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes and
 * unwritten extents within the range are left untouched; they already
 * read back as zeroes.
 */

int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * Holes and unwritten extents already read back
			 * as zeroes, so there is nothing to do here;
			 * skip ahead to the next extent.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

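/*
 * Common write path for the XFS file operations.  Handles the lock
 * ordering between i_mutex, the iolock and the ilock, DMAPI write
 * events, zeroing between the old and new EOF on extending writes, and
 * falling back from direct to buffered I/O when a direct write only
 * partially completes.
 */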
ssize_t				/* bytes written, or (-) error */
xfs_write(
	struct xfs_inode	*xip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	int			iolock;
	int			eventsent = 0;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_i_mutex;

	XFS_STATS_INC(xs_write_calls);

	error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
	if (error)
		return error;

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	mp = xip->i_mount;

	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

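/*
 * Direct I/O can run under the shared iolock and does not need i_mutex;
 * buffered writes serialise on i_mutex and take the iolock exclusive.
 * We may come back here (via "goto relock") if a direct write falls
 * back to buffered I/O.
 */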
relock:
	if (ioflags & IO_ISDIRECT) {
		iolock = XFS_IOLOCK_SHARED;
		need_i_mutex = 0;
	} else {
		iolock = XFS_IOLOCK_EXCL;
		need_i_mutex = 1;
		mutex_lock(&inode->i_mutex);
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
				      pos, count, dmflags, &iolock);
		if (error) {
			goto out_unlock_internal;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && pos != xip->i_size)
			goto start;
	}

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(xip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			return XFS_ERROR(-EINVAL);
		}

		if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			iolock = XFS_IOLOCK_EXCL;
			need_i_mutex = 1;
			mutex_lock(&inode->i_mutex);
			xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
			goto start;
		}
	}

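	/*
	 * Publish the prospective EOF in i_new_size so other parts of
	 * XFS that inspect the inode size know an extending write is in
	 * progress; it is cleared again at out_unlock_internal.
	 */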
	new_size = pos + count;
	if (new_size > xip->i_size)
		xip->i_new_size = new_size;

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */

	if (pos > xip->i_size) {
		error = xfs_zero_eof(xip, pos, xip->i_size);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL);
			goto out_unlock_internal;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */

	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -file_remove_suid(file);
		if (unlikely(error)) {
			goto out_unlock_internal;
		}
	}

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (mapping->nrpages) {
			WARN_ON(need_i_mutex == 0);
			xfs_inval_cached_trace(xip, pos, -1,
					(pos & PAGE_CACHE_MASK), -1);
			error = xfs_flushinval_pages(xip,
					(pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (error)
				goto out_unlock_internal;
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			need_i_mutex = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
		*offset = isize;

	if (*offset > xip->i_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_size)
			xip->i_size = *offset;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	if (ret == -ENOSPC &&
	    DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
		xfs_iunlock(xip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
				DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(xip, iolock);
		if (error)
			goto out_unlock_internal;
		goto start;
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		int error2;

		xfs_iunlock(xip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error2 = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = error2;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(xip, iolock);
		error2 = xfs_write_sync_logforce(mp, xip);
		if (!error)
			error = error2;
	}

 out_unlock_internal:
	if (xip->i_new_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		xip->i_new_size = 0;
		/*
		 * If this was a direct or synchronous I/O that failed (such
		 * as ENOSPC) then part of the I/O may have been written to
		 * disk before the error occurred.  In this case the on-disk
		 * file size may have been adjusted beyond the in-memory file
		 * size and now needs to be truncated back.
		 */
		if (xip->i_d.di_size > xip->i_size)
			xip->i_d.di_size = xip->i_size;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(xip, iolock);
 out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
	return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers get this
 * attached as their b_bdstrat callback function.  This is so that we
 * can catch a buffer after prematurely unpinning it to forcibly shut
 * down the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in
 * case we are shutting down the filesystem.  Typically user data goes
 * through this path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	xfs_bioerror_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}