1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #include <linux/spinlock.h>
8 #include <linux/completion.h>
9 #include <linux/buffer_head.h>
10 #include <linux/blkdev.h>
11 #include <linux/gfs2_ondisk.h>
12 #include <linux/crc32.h>
13 #include <linux/iomap.h>
14 #include <linux/ktime.h>
15 
16 #include "gfs2.h"
17 #include "incore.h"
18 #include "bmap.h"
19 #include "glock.h"
20 #include "inode.h"
21 #include "meta_io.h"
22 #include "quota.h"
23 #include "rgrp.h"
24 #include "log.h"
25 #include "super.h"
26 #include "trans.h"
27 #include "dir.h"
28 #include "util.h"
29 #include "aops.h"
30 #include "trace_gfs2.h"
31 
32 /* This doesn't need to be that large as max 64 bit pointers in a 4k
33  * block is 512, so __u16 is fine for that. It saves stack space to
34  * keep it small.
35  */
36 struct metapath {
37 	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 	__u16 mp_list[GFS2_MAX_META_HEIGHT];
39 	int mp_fheight; /* find_metapath height */
40 	int mp_aheight; /* actual height (lookup height) */
41 };
42 
43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
44 
45 /**
46  * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
47  * @ip: the inode
48  * @dibh: the dinode buffer
49  * @block: the block number that was allocated
50  * @page: The (optional) page. This is looked up if @page is NULL
51  *
52  * Returns: errno
53  */
54 
55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56 			       u64 block, struct page *page)
57 {
58 	struct inode *inode = &ip->i_inode;
59 	struct buffer_head *bh;
60 	int release = 0;
61 
62 	if (!page || page->index) {
63 		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
64 		if (!page)
65 			return -ENOMEM;
66 		release = 1;
67 	}
68 
69 	if (!PageUptodate(page)) {
70 		void *kaddr = kmap(page);
71 		u64 dsize = i_size_read(inode);
72 
73 		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
74 		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
75 		kunmap(page);
76 
77 		SetPageUptodate(page);
78 	}
79 
80 	if (!page_has_buffers(page))
81 		create_empty_buffers(page, BIT(inode->i_blkbits),
82 				     BIT(BH_Uptodate));
83 
84 	bh = page_buffers(page);
85 
86 	if (!buffer_mapped(bh))
87 		map_bh(bh, inode->i_sb, block);
88 
89 	set_buffer_uptodate(bh);
90 	if (gfs2_is_jdata(ip))
91 		gfs2_trans_add_data(ip->i_gl, bh);
92 	else {
93 		mark_buffer_dirty(bh);
94 		gfs2_ordered_add_inode(ip);
95 	}
96 
97 	if (release) {
98 		unlock_page(page);
99 		put_page(page);
100 	}
101 
102 	return 0;
103 }
104 
105 /**
106  * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
107  * @ip: The GFS2 inode to unstuff
108  * @page: The (optional) page. This is looked up if @page is NULL
109  *
110  * This routine unstuffs a dinode and returns it to a "normal" state such
111  * that the height can be grown in the traditional way.
112  *
113  * Returns: errno
114  */
115 
116 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
117 {
118 	struct buffer_head *bh, *dibh;
119 	struct gfs2_dinode *di;
120 	u64 block = 0;
121 	int isdir = gfs2_is_dir(ip);
122 	int error;
123 
124 	down_write(&ip->i_rw_mutex);
125 
126 	error = gfs2_meta_inode_buffer(ip, &dibh);
127 	if (error)
128 		goto out;
129 
130 	if (i_size_read(&ip->i_inode)) {
131 		/* Get a free block, fill it with the stuffed data,
132 		   and write it out to disk */
133 
134 		unsigned int n = 1;
135 		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
136 		if (error)
137 			goto out_brelse;
138 		if (isdir) {
139 			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
140 			error = gfs2_dir_get_new_buffer(ip, block, &bh);
141 			if (error)
142 				goto out_brelse;
143 			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
144 					      dibh, sizeof(struct gfs2_dinode));
145 			brelse(bh);
146 		} else {
147 			error = gfs2_unstuffer_page(ip, dibh, block, page);
148 			if (error)
149 				goto out_brelse;
150 		}
151 	}
152 
153 	/*  Set up the pointer to the new block  */
154 
155 	gfs2_trans_add_meta(ip->i_gl, dibh);
156 	di = (struct gfs2_dinode *)dibh->b_data;
157 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
158 
159 	if (i_size_read(&ip->i_inode)) {
160 		*(__be64 *)(di + 1) = cpu_to_be64(block);
161 		gfs2_add_inode_blocks(&ip->i_inode, 1);
162 		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
163 	}
164 
165 	ip->i_height = 1;
166 	di->di_height = cpu_to_be16(1);
167 
168 out_brelse:
169 	brelse(dibh);
170 out:
171 	up_write(&ip->i_rw_mutex);
172 	return error;
173 }
174 
175 
176 /**
177  * find_metapath - Find path through the metadata tree
178  * @sdp: The superblock
179  * @block: The disk block to look up
180  * @mp: The metapath to return the result in
181  * @height: The pre-calculated height of the metadata tree
182  *
183  *   This routine returns a struct metapath structure that defines a path
184  *   through the metadata of inode "ip" to get to block "block".
185  *
186  *   Example:
187  *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
188  *   filesystem with a blocksize of 4096.
189  *
190  *   find_metapath() would return a struct metapath structure set to:
191  *   mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
192  *
193  *   That means that in order to get to the block containing the byte at
194  *   offset 101342453, we would load the indirect block pointed to by pointer
195  *   0 in the dinode.  We would then load the indirect block pointed to by
196  *   pointer 48 in that indirect block.  We would then load the data block
197  *   pointed to by pointer 165 in that indirect block.
198  *
199  *             ----------------------------------------
200  *             | Dinode |                             |
201  *             |        |                            4|
202  *             |        |0 1 2 3 4 5                 9|
203  *             |        |                            6|
204  *             ----------------------------------------
205  *                       |
206  *                       |
207  *                       V
208  *             ----------------------------------------
209  *             | Indirect Block                       |
210  *             |                                     5|
211  *             |            4 4 4 4 4 5 5            1|
212  *             |0           5 6 7 8 9 0 1            2|
213  *             ----------------------------------------
214  *                                |
215  *                                |
216  *                                V
217  *             ----------------------------------------
218  *             | Indirect Block                       |
219  *             |                         1 1 1 1 1   5|
220  *             |                         6 6 6 6 6   1|
221  *             |0                        3 4 5 6 7   2|
222  *             ----------------------------------------
223  *                                           |
224  *                                           |
225  *                                           V
226  *             ----------------------------------------
227  *             | Data block containing offset         |
228  *             |            101342453                 |
229  *             |                                      |
230  *             |                                      |
231  *             ----------------------------------------
232  *
233  */
234 
235 static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
236 			  struct metapath *mp, unsigned int height)
237 {
238 	unsigned int i;
239 
240 	mp->mp_fheight = height;
241 	for (i = height; i--;)
242 		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
243 }
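/*
 * A rough worked version of the division above, using the simplified
 * figure of 512 pointers per 4k indirect block from the comment on
 * struct metapath (a real filesystem has slightly fewer because of the
 * block header):
 *
 *   byte offset 101342453 >> 12        = logical block 24741
 *   24741 % 512 = 165  ->  mp_list[2] = 165,  24741 / 512 = 48
 *      48 % 512 =  48  ->  mp_list[1] =  48,     48 / 512 =  0
 *       0 % 512 =   0  ->  mp_list[0] =   0
 */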
244 
245 static inline unsigned int metapath_branch_start(const struct metapath *mp)
246 {
247 	if (mp->mp_list[0] == 0)
248 		return 2;
249 	return 1;
250 }
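/*
 * One way to read the check above: when the tree grows in height, the old
 * top of the tree is pushed down to become pointer 0 of the new top block.
 * If the path to the block being mapped also starts at pointer 0, the new
 * chain of indirect blocks only diverges from the existing tree at height
 * 2; otherwise a fresh branch is needed from height 1 downwards.
 */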
251 
252 /**
253  * metaptr1 - Return the first possible metadata pointer in a metapath buffer
254  * @height: The metadata height (0 = dinode)
255  * @mp: The metapath
256  */
257 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
258 {
259 	struct buffer_head *bh = mp->mp_bh[height];
260 	if (height == 0)
261 		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
262 	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
263 }
264 
265 /**
266  * metapointer - Return pointer to start of metadata in a buffer
267  * @height: The metadata height (0 = dinode)
268  * @mp: The metapath
269  *
270  * Return a pointer to the block number of the next height of the metadata
271  * tree given a buffer containing the pointer to the current height of the
272  * metadata tree.
273  */
274 
275 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
276 {
277 	__be64 *p = metaptr1(height, mp);
278 	return p + mp->mp_list[height];
279 }
280 
281 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
282 {
283 	const struct buffer_head *bh = mp->mp_bh[height];
284 	return (const __be64 *)(bh->b_data + bh->b_size);
285 }
286 
287 static void clone_metapath(struct metapath *clone, struct metapath *mp)
288 {
289 	unsigned int hgt;
290 
291 	*clone = *mp;
292 	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
293 		get_bh(clone->mp_bh[hgt]);
294 }
295 
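/*
 * Issue read-ahead for the metadata blocks referenced by the pointers in
 * [start, end).  Buffers that are already up to date, or whose lock cannot
 * be taken without blocking, are simply released again.
 */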
296 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
297 {
298 	const __be64 *t;
299 
300 	for (t = start; t < end; t++) {
301 		struct buffer_head *rabh;
302 
303 		if (!*t)
304 			continue;
305 
306 		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
307 		if (trylock_buffer(rabh)) {
308 			if (!buffer_uptodate(rabh)) {
309 				rabh->b_end_io = end_buffer_read_sync;
310 				submit_bh(REQ_OP_READ,
311 					  REQ_RAHEAD | REQ_META | REQ_PRIO,
312 					  rabh);
313 				continue;
314 			}
315 			unlock_buffer(rabh);
316 		}
317 		brelse(rabh);
318 	}
319 }
320 
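/*
 * Read the indirect blocks for heights x .. h-1 of the metapath into
 * mp->mp_bh[], stopping at the first unallocated (zero) pointer, and
 * record the height actually reached in mp->mp_aheight.
 */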
321 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
322 			     unsigned int x, unsigned int h)
323 {
324 	for (; x < h; x++) {
325 		__be64 *ptr = metapointer(x, mp);
326 		u64 dblock = be64_to_cpu(*ptr);
327 		int ret;
328 
329 		if (!dblock)
330 			break;
331 		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
332 		if (ret)
333 			return ret;
334 	}
335 	mp->mp_aheight = x + 1;
336 	return 0;
337 }
338 
339 /**
340  * lookup_metapath - Walk the metadata tree to a specific point
341  * @ip: The inode
342  * @mp: The metapath
343  *
344  * Assumes that the inode's buffer has already been looked up and
345  * hooked onto mp->mp_bh[0] and that the metapath has been initialised
346  * by find_metapath().
347  *
348  * If this function encounters part of the tree which has not been
349  * allocated, it returns the current height of the tree at the point
350  * at which it found the unallocated block. Blocks which are found are
351  * added to the mp->mp_bh[] list.
352  *
353  * Returns: error
354  */
355 
356 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
357 {
358 	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
359 }
360 
361 /**
362  * fillup_metapath - fill up buffers for the metadata path to a specific height
363  * @ip: The inode
364  * @mp: The metapath
365  * @h: The height to which it should be mapped
366  *
367  * Similar to lookup_metapath, but does lookups for a range of heights
368  *
369  * Returns: error or the number of buffers filled
370  */
371 
372 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
373 {
374 	unsigned int x = 0;
375 	int ret;
376 
377 	if (h) {
378 		/* find the first buffer we need to look up. */
379 		for (x = h - 1; x > 0; x--) {
380 			if (mp->mp_bh[x])
381 				break;
382 		}
383 	}
384 	ret = __fillup_metapath(ip, mp, x, h);
385 	if (ret)
386 		return ret;
387 	return mp->mp_aheight - x - 1;
388 }
389 
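/*
 * Convert a metapath back into the logical block number it addresses by
 * treating mp_list[] as digits in base sd_inptrs.  With the simplified
 * 512-pointers-per-block figure used in the find_metapath() comment,
 * mp_list = {0, 48, 165} maps back to (0 * 512 + 48) * 512 + 165 =
 * logical block 24741.
 */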
390 static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
391 {
392 	sector_t factor = 1, block = 0;
393 	int hgt;
394 
395 	for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
396 		if (hgt < mp->mp_aheight)
397 			block += mp->mp_list[hgt] * factor;
398 		factor *= sdp->sd_inptrs;
399 	}
400 	return block;
401 }
402 
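/*
 * Drop the buffer references held in mp->mp_bh[], starting at index 0 and
 * stopping at the first empty slot.
 */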
403 static void release_metapath(struct metapath *mp)
404 {
405 	int i;
406 
407 	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
408 		if (mp->mp_bh[i] == NULL)
409 			break;
410 		brelse(mp->mp_bh[i]);
411 		mp->mp_bh[i] = NULL;
412 	}
413 }
414 
415 /**
416  * gfs2_extent_length - Returns length of an extent of blocks
417  * @bh: The metadata block
418  * @ptr: Current position in @bh
419  * @limit: Max extent length to return
420  * @eob: Set to 1 if we hit "end of block"
421  *
422  * Returns: The length of the extent (minimum of one block)
423  */
424 
425 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
426 {
427 	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
428 	const __be64 *first = ptr;
429 	u64 d = be64_to_cpu(*ptr);
430 
431 	*eob = 0;
432 	do {
433 		ptr++;
434 		if (ptr >= end)
435 			break;
436 		d++;
437 	} while(be64_to_cpu(*ptr) == d);
438 	if (ptr >= end)
439 		*eob = 1;
440 	return ptr - first;
441 }
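/*
 * Example: if *ptr is physical block 1000 and the following pointers in
 * the buffer are 1001, 1002 and then 1500, the returned extent length is
 * 3 and *eob stays 0, because the scan stopped before the end of @bh.
 */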
442 
443 enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
444 
445 /*
446  * gfs2_metadata_walker - walk an indirect block
447  * @mp: Metapath to indirect block
448  * @ptrs: Number of pointers to look at
449  *
450  * When returning WALK_FOLLOW, the walker must update @mp to point at the right
451  * indirect block to follow.
452  */
453 typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
454 						   unsigned int ptrs);
455 
456 /*
457  * gfs2_walk_metadata - walk a tree of indirect blocks
458  * @inode: The inode
459  * @mp: Starting point of walk
460  * @max_len: Maximum number of blocks to walk
461  * @walker: Called during the walk
462  *
463  * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
464  * past the end of metadata, and a negative error code otherwise.
465  */
466 
467 static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
468 		u64 max_len, gfs2_metadata_walker walker)
469 {
470 	struct gfs2_inode *ip = GFS2_I(inode);
471 	struct gfs2_sbd *sdp = GFS2_SB(inode);
472 	u64 factor = 1;
473 	unsigned int hgt;
474 	int ret;
475 
476 	/*
477 	 * The walk starts in the lowest allocated indirect block, which may be
478 	 * before the position indicated by @mp.  Adjust @max_len accordingly
479 	 * to avoid a short walk.
480 	 */
481 	for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
482 		max_len += mp->mp_list[hgt] * factor;
483 		mp->mp_list[hgt] = 0;
484 		factor *= sdp->sd_inptrs;
485 	}
486 
487 	for (;;) {
488 		u16 start = mp->mp_list[hgt];
489 		enum walker_status status;
490 		unsigned int ptrs;
491 		u64 len;
492 
493 		/* Walk indirect block. */
494 		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
495 		len = ptrs * factor;
496 		if (len > max_len)
497 			ptrs = DIV_ROUND_UP_ULL(max_len, factor);
498 		status = walker(mp, ptrs);
499 		switch (status) {
500 		case WALK_STOP:
501 			return 1;
502 		case WALK_FOLLOW:
503 			BUG_ON(mp->mp_aheight == mp->mp_fheight);
504 			ptrs = mp->mp_list[hgt] - start;
505 			len = ptrs * factor;
506 			break;
507 		case WALK_CONTINUE:
508 			break;
509 		}
510 		if (len >= max_len)
511 			break;
512 		max_len -= len;
513 		if (status == WALK_FOLLOW)
514 			goto fill_up_metapath;
515 
516 lower_metapath:
517 		/* Decrease height of metapath. */
518 		brelse(mp->mp_bh[hgt]);
519 		mp->mp_bh[hgt] = NULL;
520 		mp->mp_list[hgt] = 0;
521 		if (!hgt)
522 			break;
523 		hgt--;
524 		factor *= sdp->sd_inptrs;
525 
526 		/* Advance in metadata tree. */
527 		(mp->mp_list[hgt])++;
528 		if (hgt) {
529 			if (mp->mp_list[hgt] >= sdp->sd_inptrs)
530 				goto lower_metapath;
531 		} else {
532 			if (mp->mp_list[hgt] >= sdp->sd_diptrs)
533 				break;
534 		}
535 
536 fill_up_metapath:
537 		/* Increase height of metapath. */
538 		ret = fillup_metapath(ip, mp, ip->i_height - 1);
539 		if (ret < 0)
540 			return ret;
541 		hgt += ret;
542 		for (; ret; ret--)
543 			do_div(factor, sdp->sd_inptrs);
544 		mp->mp_aheight = hgt + 1;
545 	}
546 	return 0;
547 }
548 
549 static enum walker_status gfs2_hole_walker(struct metapath *mp,
550 					   unsigned int ptrs)
551 {
552 	const __be64 *start, *ptr, *end;
553 	unsigned int hgt;
554 
555 	hgt = mp->mp_aheight - 1;
556 	start = metapointer(hgt, mp);
557 	end = start + ptrs;
558 
559 	for (ptr = start; ptr < end; ptr++) {
560 		if (*ptr) {
561 			mp->mp_list[hgt] += ptr - start;
562 			if (mp->mp_aheight == mp->mp_fheight)
563 				return WALK_STOP;
564 			return WALK_FOLLOW;
565 		}
566 	}
567 	return WALK_CONTINUE;
568 }
569 
570 /**
571  * gfs2_hole_size - figure out the size of a hole
572  * @inode: The inode
573  * @lblock: The logical starting block number
574  * @len: How far to look (in blocks)
575  * @mp: The metapath at lblock
576  * @iomap: The iomap to store the hole size in
577  *
578  * This function modifies @mp.
579  *
580  * Returns: errno on error
581  */
582 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
583 			  struct metapath *mp, struct iomap *iomap)
584 {
585 	struct metapath clone;
586 	u64 hole_size;
587 	int ret;
588 
589 	clone_metapath(&clone, mp);
590 	ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
591 	if (ret < 0)
592 		goto out;
593 
594 	if (ret == 1)
595 		hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
596 	else
597 		hole_size = len;
598 	iomap->length = hole_size << inode->i_blkbits;
599 	ret = 0;
600 
601 out:
602 	release_metapath(&clone);
603 	return ret;
604 }
605 
606 static inline __be64 *gfs2_indirect_init(struct metapath *mp,
607 					 struct gfs2_glock *gl, unsigned int i,
608 					 unsigned offset, u64 bn)
609 {
610 	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
611 		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
612 				 sizeof(struct gfs2_dinode)));
613 	BUG_ON(i < 1);
614 	BUG_ON(mp->mp_bh[i] != NULL);
615 	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
616 	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
617 	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
618 	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
619 	ptr += offset;
620 	*ptr = cpu_to_be64(bn);
621 	return ptr;
622 }
623 
624 enum alloc_state {
625 	ALLOC_DATA = 0,
626 	ALLOC_GROW_DEPTH = 1,
627 	ALLOC_GROW_HEIGHT = 2,
628 	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
629 };
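/*
 * The state machine in gfs2_iomap_alloc() only ever moves towards
 * ALLOC_DATA: a single call may pass through
 * ALLOC_GROW_HEIGHT -> ALLOC_GROW_DEPTH -> ALLOC_DATA, but never back.
 */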
630 
631 /**
632  * gfs2_iomap_alloc - Build a metadata tree of the requested height
633  * @inode: The GFS2 inode
634  * @iomap: The iomap structure
635  * @mp: The metapath, with proper height information calculated
636  *
637  * In this routine we may have to alloc:
638  *   i) Indirect blocks to grow the metadata tree height
639  *  ii) Indirect blocks to fill in lower part of the metadata tree
640  * iii) Data blocks
641  *
642  * This function is called after gfs2_iomap_get, which works out the
643  * total number of blocks which we need via gfs2_alloc_size.
644  *
645  * We then do the actual allocation asking for an extent at a time (if
646  * enough contiguous free blocks are available, there will only be one
647  * allocation request per call) and uses the state machine to initialise
648  * the blocks in order.
649  *
650  * Right now, this function will allocate at most one indirect block
651  * worth of data -- with a default block size of 4K, that's slightly
652  * less than 2M.  If this limitation is ever removed to allow huge
653  * allocations, we would probably still want to limit the iomap size we
654  * return to avoid stalling other tasks during huge writes; the next
655  * iomap iteration would then find the blocks already allocated.
656  *
657  * Returns: errno on error
658  */
659 
660 static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
661 			    struct metapath *mp)
662 {
663 	struct gfs2_inode *ip = GFS2_I(inode);
664 	struct gfs2_sbd *sdp = GFS2_SB(inode);
665 	struct buffer_head *dibh = mp->mp_bh[0];
666 	u64 bn;
667 	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
668 	size_t dblks = iomap->length >> inode->i_blkbits;
669 	const unsigned end_of_metadata = mp->mp_fheight - 1;
670 	int ret;
671 	enum alloc_state state;
672 	__be64 *ptr;
673 	__be64 zero_bn = 0;
674 
675 	BUG_ON(mp->mp_aheight < 1);
676 	BUG_ON(dibh == NULL);
677 	BUG_ON(dblks < 1);
678 
679 	gfs2_trans_add_meta(ip->i_gl, dibh);
680 
681 	down_write(&ip->i_rw_mutex);
682 
683 	if (mp->mp_fheight == mp->mp_aheight) {
684 		/* Bottom indirect block exists */
685 		state = ALLOC_DATA;
686 	} else {
687 		/* Need to allocate indirect blocks */
688 		if (mp->mp_fheight == ip->i_height) {
689 			/* Writing into existing tree, extend tree down */
690 			iblks = mp->mp_fheight - mp->mp_aheight;
691 			state = ALLOC_GROW_DEPTH;
692 		} else {
693 			/* Building up tree height */
694 			state = ALLOC_GROW_HEIGHT;
695 			iblks = mp->mp_fheight - ip->i_height;
696 			branch_start = metapath_branch_start(mp);
697 			iblks += (mp->mp_fheight - branch_start);
698 		}
699 	}
700 
701 	/* start of the second part of the function (state machine) */
702 
703 	blks = dblks + iblks;
704 	i = mp->mp_aheight;
705 	do {
706 		n = blks - alloced;
707 		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
708 		if (ret)
709 			goto out;
710 		alloced += n;
711 		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
712 			gfs2_trans_remove_revoke(sdp, bn, n);
713 		switch (state) {
714 		/* Growing height of tree */
715 		case ALLOC_GROW_HEIGHT:
716 			if (i == 1) {
717 				ptr = (__be64 *)(dibh->b_data +
718 						 sizeof(struct gfs2_dinode));
719 				zero_bn = *ptr;
720 			}
721 			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
722 			     i++, n--)
723 				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
724 			if (i - 1 == mp->mp_fheight - ip->i_height) {
725 				i--;
726 				gfs2_buffer_copy_tail(mp->mp_bh[i],
727 						sizeof(struct gfs2_meta_header),
728 						dibh, sizeof(struct gfs2_dinode));
729 				gfs2_buffer_clear_tail(dibh,
730 						sizeof(struct gfs2_dinode) +
731 						sizeof(__be64));
732 				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
733 					sizeof(struct gfs2_meta_header));
734 				*ptr = zero_bn;
735 				state = ALLOC_GROW_DEPTH;
736 				for(i = branch_start; i < mp->mp_fheight; i++) {
737 					if (mp->mp_bh[i] == NULL)
738 						break;
739 					brelse(mp->mp_bh[i]);
740 					mp->mp_bh[i] = NULL;
741 				}
742 				i = branch_start;
743 			}
744 			if (n == 0)
745 				break;
746 		/* fall through - To branching from existing tree */
747 		case ALLOC_GROW_DEPTH:
748 			if (i > 1 && i < mp->mp_fheight)
749 				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
750 			for (; i < mp->mp_fheight && n > 0; i++, n--)
751 				gfs2_indirect_init(mp, ip->i_gl, i,
752 						   mp->mp_list[i-1], bn++);
753 			if (i == mp->mp_fheight)
754 				state = ALLOC_DATA;
755 			if (n == 0)
756 				break;
757 		/* fall through - To tree complete, adding data blocks */
758 		case ALLOC_DATA:
759 			BUG_ON(n > dblks);
760 			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
761 			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
762 			dblks = n;
763 			ptr = metapointer(end_of_metadata, mp);
764 			iomap->addr = bn << inode->i_blkbits;
765 			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
766 			while (n-- > 0)
767 				*ptr++ = cpu_to_be64(bn++);
768 			break;
769 		}
770 	} while (iomap->addr == IOMAP_NULL_ADDR);
771 
772 	iomap->type = IOMAP_MAPPED;
773 	iomap->length = (u64)dblks << inode->i_blkbits;
774 	ip->i_height = mp->mp_fheight;
775 	gfs2_add_inode_blocks(&ip->i_inode, alloced);
776 	gfs2_dinode_out(ip, dibh->b_data);
777 out:
778 	up_write(&ip->i_rw_mutex);
779 	return ret;
780 }
781 
782 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
783 
784 /**
785  * gfs2_alloc_size - Compute the maximum allocation size
786  * @inode: The inode
787  * @mp: The metapath
788  * @size: Requested size in blocks
789  *
790  * Compute the maximum size of the next allocation at @mp.
791  *
792  * Returns: size in blocks
793  */
794 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
795 {
796 	struct gfs2_inode *ip = GFS2_I(inode);
797 	struct gfs2_sbd *sdp = GFS2_SB(inode);
798 	const __be64 *first, *ptr, *end;
799 
800 	/*
801 	 * For writes to stuffed files, this function is called twice via
802 	 * gfs2_iomap_get, before and after unstuffing. The size we return the
803 	 * first time needs to be large enough to get the reservation and
804 	 * allocation sizes right.  The size we return the second time must
805 	 * be exact or else gfs2_iomap_alloc won't do the right thing.
806 	 */
807 
808 	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
809 		unsigned int maxsize = mp->mp_fheight > 1 ?
810 			sdp->sd_inptrs : sdp->sd_diptrs;
811 		maxsize -= mp->mp_list[mp->mp_fheight - 1];
812 		if (size > maxsize)
813 			size = maxsize;
814 		return size;
815 	}
816 
817 	first = metapointer(ip->i_height - 1, mp);
818 	end = metaend(ip->i_height - 1, mp);
819 	if (end - first > size)
820 		end = first + size;
821 	for (ptr = first; ptr < end; ptr++) {
822 		if (*ptr)
823 			break;
824 	}
825 	return ptr - first;
826 }
827 
828 /**
829  * gfs2_iomap_get - Map blocks from an inode to disk blocks
830  * @inode: The inode
831  * @pos: Starting position in bytes
832  * @length: Length to map, in bytes
833  * @flags: iomap flags
834  * @iomap: The iomap structure
835  * @mp: The metapath
836  *
837  * Returns: errno
838  */
839 static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
840 			  unsigned flags, struct iomap *iomap,
841 			  struct metapath *mp)
842 {
843 	struct gfs2_inode *ip = GFS2_I(inode);
844 	struct gfs2_sbd *sdp = GFS2_SB(inode);
845 	loff_t size = i_size_read(inode);
846 	__be64 *ptr;
847 	sector_t lblock;
848 	sector_t lblock_stop;
849 	int ret;
850 	int eob;
851 	u64 len;
852 	struct buffer_head *dibh = NULL, *bh;
853 	u8 height;
854 
855 	if (!length)
856 		return -EINVAL;
857 
858 	down_read(&ip->i_rw_mutex);
859 
860 	ret = gfs2_meta_inode_buffer(ip, &dibh);
861 	if (ret)
862 		goto unlock;
863 	mp->mp_bh[0] = dibh;
864 
865 	if (gfs2_is_stuffed(ip)) {
866 		if (flags & IOMAP_WRITE) {
867 			loff_t max_size = gfs2_max_stuffed_size(ip);
868 
869 			if (pos + length > max_size)
870 				goto unstuff;
871 			iomap->length = max_size;
872 		} else {
873 			if (pos >= size) {
874 				if (flags & IOMAP_REPORT) {
875 					ret = -ENOENT;
876 					goto unlock;
877 				} else {
878 					iomap->offset = pos;
879 					iomap->length = length;
880 					goto hole_found;
881 				}
882 			}
883 			iomap->length = size;
884 		}
885 		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
886 			      sizeof(struct gfs2_dinode);
887 		iomap->type = IOMAP_INLINE;
888 		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
889 		goto out;
890 	}
891 
892 unstuff:
893 	lblock = pos >> inode->i_blkbits;
894 	iomap->offset = lblock << inode->i_blkbits;
895 	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
896 	len = lblock_stop - lblock + 1;
897 	iomap->length = len << inode->i_blkbits;
898 
899 	height = ip->i_height;
900 	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
901 		height++;
902 	find_metapath(sdp, lblock, mp, height);
903 	if (height > ip->i_height || gfs2_is_stuffed(ip))
904 		goto do_alloc;
905 
906 	ret = lookup_metapath(ip, mp);
907 	if (ret)
908 		goto unlock;
909 
910 	if (mp->mp_aheight != ip->i_height)
911 		goto do_alloc;
912 
913 	ptr = metapointer(ip->i_height - 1, mp);
914 	if (*ptr == 0)
915 		goto do_alloc;
916 
917 	bh = mp->mp_bh[ip->i_height - 1];
918 	len = gfs2_extent_length(bh, ptr, len, &eob);
919 
920 	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
921 	iomap->length = len << inode->i_blkbits;
922 	iomap->type = IOMAP_MAPPED;
923 	iomap->flags |= IOMAP_F_MERGED;
924 	if (eob)
925 		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
926 
927 out:
928 	iomap->bdev = inode->i_sb->s_bdev;
929 unlock:
930 	up_read(&ip->i_rw_mutex);
931 	return ret;
932 
933 do_alloc:
934 	if (flags & IOMAP_REPORT) {
935 		if (pos >= size)
936 			ret = -ENOENT;
937 		else if (height == ip->i_height)
938 			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
939 		else
940 			iomap->length = size - iomap->offset;
941 	} else if (flags & IOMAP_WRITE) {
942 		u64 alloc_size;
943 
944 		if (flags & IOMAP_DIRECT)
945 			goto out;  /* (see gfs2_file_direct_write) */
946 
947 		len = gfs2_alloc_size(inode, mp, len);
948 		alloc_size = len << inode->i_blkbits;
949 		if (alloc_size < iomap->length)
950 			iomap->length = alloc_size;
951 	} else {
952 		if (pos < size && height == ip->i_height)
953 			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
954 	}
955 hole_found:
956 	iomap->addr = IOMAP_NULL_ADDR;
957 	iomap->type = IOMAP_HOLE;
958 	goto out;
959 }
960 
961 /**
962  * gfs2_lblk_to_dblk - convert logical block to disk block
963  * @inode: the inode of the file we're mapping
964  * @lblock: the block relative to the start of the file
965  * @dblock: the returned dblock, if no error
966  *
967  * This function maps a single block from a file logical block (relative to
968  * the start of the file) to a file system absolute block using iomap.
969  *
970  * Returns: the absolute file system block, or an error
971  */
972 int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
973 {
974 	struct iomap iomap = { };
975 	struct metapath mp = { .mp_aheight = 1, };
976 	loff_t pos = (loff_t)lblock << inode->i_blkbits;
977 	int ret;
978 
979 	ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
980 	release_metapath(&mp);
981 	if (ret == 0)
982 		*dblock = iomap.addr >> inode->i_blkbits;
983 
984 	return ret;
985 }
986 
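/*
 * gfs2_write_lock() and gfs2_write_unlock() bracket buffered writes with an
 * exclusive glock on the inode.  Writes to the rindex inode also take the
 * statfs inode's glock, since growing the filesystem updates the statfs
 * file as well (see adjust_fs_space() called from gfs2_iomap_end()).
 */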
987 static int gfs2_write_lock(struct inode *inode)
988 {
989 	struct gfs2_inode *ip = GFS2_I(inode);
990 	struct gfs2_sbd *sdp = GFS2_SB(inode);
991 	int error;
992 
993 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
994 	error = gfs2_glock_nq(&ip->i_gh);
995 	if (error)
996 		goto out_uninit;
997 	if (&ip->i_inode == sdp->sd_rindex) {
998 		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
999 
1000 		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
1001 					   GL_NOCACHE, &m_ip->i_gh);
1002 		if (error)
1003 			goto out_unlock;
1004 	}
1005 	return 0;
1006 
1007 out_unlock:
1008 	gfs2_glock_dq(&ip->i_gh);
1009 out_uninit:
1010 	gfs2_holder_uninit(&ip->i_gh);
1011 	return error;
1012 }
1013 
1014 static void gfs2_write_unlock(struct inode *inode)
1015 {
1016 	struct gfs2_inode *ip = GFS2_I(inode);
1017 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1018 
1019 	if (&ip->i_inode == sdp->sd_rindex) {
1020 		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1021 
1022 		gfs2_glock_dq_uninit(&m_ip->i_gh);
1023 	}
1024 	gfs2_glock_dq_uninit(&ip->i_gh);
1025 }
1026 
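/*
 * These two hooks are called by the iomap code around each page of a
 * buffered write: page_prepare opens a transaction sized for the dinode
 * plus the blocks covered by the write to that page, and page_done adds
 * journaled data buffers, marks the inode dirty if the transaction touched
 * new buffers, and ends the transaction.
 */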
1027 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
1028 				   unsigned len, struct iomap *iomap)
1029 {
1030 	unsigned int blockmask = i_blocksize(inode) - 1;
1031 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1032 	unsigned int blocks;
1033 
1034 	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
1035 	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
1036 }
1037 
1038 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1039 				 unsigned copied, struct page *page,
1040 				 struct iomap *iomap)
1041 {
1042 	struct gfs2_trans *tr = current->journal_info;
1043 	struct gfs2_inode *ip = GFS2_I(inode);
1044 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1045 
1046 	if (page && !gfs2_is_stuffed(ip))
1047 		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1048 
1049 	if (tr->tr_num_buf_new)
1050 		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1051 
1052 	gfs2_trans_end(sdp);
1053 }
1054 
1055 static const struct iomap_page_ops gfs2_iomap_page_ops = {
1056 	.page_prepare = gfs2_iomap_page_prepare,
1057 	.page_done = gfs2_iomap_page_done,
1058 };
1059 
1060 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1061 				  loff_t length, unsigned flags,
1062 				  struct iomap *iomap,
1063 				  struct metapath *mp)
1064 {
1065 	struct gfs2_inode *ip = GFS2_I(inode);
1066 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1067 	bool unstuff;
1068 	int ret;
1069 
1070 	unstuff = gfs2_is_stuffed(ip) &&
1071 		  pos + length > gfs2_max_stuffed_size(ip);
1072 
1073 	if (unstuff || iomap->type == IOMAP_HOLE) {
1074 		unsigned int data_blocks, ind_blocks;
1075 		struct gfs2_alloc_parms ap = {};
1076 		unsigned int rblocks;
1077 		struct gfs2_trans *tr;
1078 
1079 		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1080 				       &ind_blocks);
1081 		ap.target = data_blocks + ind_blocks;
1082 		ret = gfs2_quota_lock_check(ip, &ap);
1083 		if (ret)
1084 			return ret;
1085 
1086 		ret = gfs2_inplace_reserve(ip, &ap);
1087 		if (ret)
1088 			goto out_qunlock;
1089 
1090 		rblocks = RES_DINODE + ind_blocks;
1091 		if (gfs2_is_jdata(ip))
1092 			rblocks += data_blocks;
1093 		if (ind_blocks || data_blocks)
1094 			rblocks += RES_STATFS + RES_QUOTA;
1095 		if (inode == sdp->sd_rindex)
1096 			rblocks += 2 * RES_STATFS;
1097 		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1098 
1099 		ret = gfs2_trans_begin(sdp, rblocks,
1100 				       iomap->length >> inode->i_blkbits);
1101 		if (ret)
1102 			goto out_trans_fail;
1103 
1104 		if (unstuff) {
1105 			ret = gfs2_unstuff_dinode(ip, NULL);
1106 			if (ret)
1107 				goto out_trans_end;
1108 			release_metapath(mp);
1109 			ret = gfs2_iomap_get(inode, iomap->offset,
1110 					     iomap->length, flags, iomap, mp);
1111 			if (ret)
1112 				goto out_trans_end;
1113 		}
1114 
1115 		if (iomap->type == IOMAP_HOLE) {
1116 			ret = gfs2_iomap_alloc(inode, iomap, mp);
1117 			if (ret) {
1118 				gfs2_trans_end(sdp);
1119 				gfs2_inplace_release(ip);
1120 				punch_hole(ip, iomap->offset, iomap->length);
1121 				goto out_qunlock;
1122 			}
1123 		}
1124 
1125 		tr = current->journal_info;
1126 		if (tr->tr_num_buf_new)
1127 			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1128 
1129 		gfs2_trans_end(sdp);
1130 	}
1131 
1132 	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1133 		iomap->page_ops = &gfs2_iomap_page_ops;
1134 	return 0;
1135 
1136 out_trans_end:
1137 	gfs2_trans_end(sdp);
1138 out_trans_fail:
1139 	gfs2_inplace_release(ip);
1140 out_qunlock:
1141 	gfs2_quota_unlock(ip);
1142 	return ret;
1143 }
1144 
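/*
 * Only buffered writes take the inode glock in gfs2_iomap_begin(); for
 * direct I/O the glock handling lives in the callers (see the reference to
 * gfs2_file_direct_write in gfs2_iomap_get() above).
 */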
1145 static inline bool gfs2_iomap_need_write_lock(unsigned flags)
1146 {
1147 	return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
1148 }
1149 
1150 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1151 			    unsigned flags, struct iomap *iomap)
1152 {
1153 	struct gfs2_inode *ip = GFS2_I(inode);
1154 	struct metapath mp = { .mp_aheight = 1, };
1155 	int ret;
1156 
1157 	iomap->flags |= IOMAP_F_BUFFER_HEAD;
1158 
1159 	trace_gfs2_iomap_start(ip, pos, length, flags);
1160 	if (gfs2_iomap_need_write_lock(flags)) {
1161 		ret = gfs2_write_lock(inode);
1162 		if (ret)
1163 			goto out;
1164 	}
1165 
1166 	ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1167 	if (ret)
1168 		goto out_unlock;
1169 
1170 	switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1171 	case IOMAP_WRITE:
1172 		if (flags & IOMAP_DIRECT) {
1173 			/*
1174 			 * Silently fall back to buffered I/O for stuffed files
1175 			 * or if we've got a hole (see gfs2_file_direct_write).
1176 			 */
1177 			if (iomap->type != IOMAP_MAPPED)
1178 				ret = -ENOTBLK;
1179 			goto out_unlock;
1180 		}
1181 		break;
1182 	case IOMAP_ZERO:
1183 		if (iomap->type == IOMAP_HOLE)
1184 			goto out_unlock;
1185 		break;
1186 	default:
1187 		goto out_unlock;
1188 	}
1189 
1190 	ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1191 
1192 out_unlock:
1193 	if (ret && gfs2_iomap_need_write_lock(flags))
1194 		gfs2_write_unlock(inode);
1195 	release_metapath(&mp);
1196 out:
1197 	trace_gfs2_iomap_end(ip, iomap, ret);
1198 	return ret;
1199 }
1200 
1201 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1202 			  ssize_t written, unsigned flags, struct iomap *iomap)
1203 {
1204 	struct gfs2_inode *ip = GFS2_I(inode);
1205 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1206 
1207 	switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1208 	case IOMAP_WRITE:
1209 		if (flags & IOMAP_DIRECT)
1210 			return 0;
1211 		break;
1212 	case IOMAP_ZERO:
1213 		 if (iomap->type == IOMAP_HOLE)
1214 			 return 0;
1215 		 break;
1216 	default:
1217 		 return 0;
1218 	}
1219 
1220 	if (!gfs2_is_stuffed(ip))
1221 		gfs2_ordered_add_inode(ip);
1222 
1223 	if (inode == sdp->sd_rindex)
1224 		adjust_fs_space(inode);
1225 
1226 	gfs2_inplace_release(ip);
1227 
1228 	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1229 		gfs2_quota_unlock(ip);
1230 
1231 	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1232 		/* Deallocate blocks that were just allocated. */
1233 		loff_t hstart = round_up(pos + written, i_blocksize(inode));
1234 		loff_t hend = iomap->offset + iomap->length;
1235 
1236 		if (hstart < hend) {
1237 			truncate_pagecache_range(inode, hstart, hend - 1);
1238 			punch_hole(ip, hstart, hend - hstart);
1239 		}
1240 	}
1241 
1242 	if (unlikely(!written))
1243 		goto out_unlock;
1244 
1245 	if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1246 		mark_inode_dirty(inode);
1247 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1248 
1249 out_unlock:
1250 	if (gfs2_iomap_need_write_lock(flags))
1251 		gfs2_write_unlock(inode);
1252 	return 0;
1253 }
1254 
1255 const struct iomap_ops gfs2_iomap_ops = {
1256 	.iomap_begin = gfs2_iomap_begin,
1257 	.iomap_end = gfs2_iomap_end,
1258 };
1259 
1260 /**
1261  * gfs2_block_map - Map one or more blocks of an inode to a disk block
1262  * @inode: The inode
1263  * @lblock: The logical block number
1264  * @bh_map: The bh to be mapped
1265  * @create: True if it's ok to alloc blocks to satisfy the request
1266  *
1267  * The size of the requested mapping is defined in bh_map->b_size.
1268  *
1269  * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1270  * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
1271  * bh_map->b_size to indicate the size of the mapping when @lblock and
1272  * successive blocks are mapped, up to the requested size.
1273  *
1274  * Sets buffer_boundary() if a read of metadata will be required
1275  * before the next block can be mapped. Sets buffer_new() if new
1276  * blocks were allocated.
1277  *
1278  * Returns: errno
1279  */
1280 
1281 int gfs2_block_map(struct inode *inode, sector_t lblock,
1282 		   struct buffer_head *bh_map, int create)
1283 {
1284 	struct gfs2_inode *ip = GFS2_I(inode);
1285 	loff_t pos = (loff_t)lblock << inode->i_blkbits;
1286 	loff_t length = bh_map->b_size;
1287 	struct metapath mp = { .mp_aheight = 1, };
1288 	struct iomap iomap = { };
1289 	int ret;
1290 
1291 	clear_buffer_mapped(bh_map);
1292 	clear_buffer_new(bh_map);
1293 	clear_buffer_boundary(bh_map);
1294 	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1295 
1296 	if (create) {
1297 		ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
1298 		if (!ret && iomap.type == IOMAP_HOLE)
1299 			ret = gfs2_iomap_alloc(inode, &iomap, &mp);
1300 		release_metapath(&mp);
1301 	} else {
1302 		ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
1303 		release_metapath(&mp);
1304 	}
1305 	if (ret)
1306 		goto out;
1307 
1308 	if (iomap.length > bh_map->b_size) {
1309 		iomap.length = bh_map->b_size;
1310 		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1311 	}
1312 	if (iomap.addr != IOMAP_NULL_ADDR)
1313 		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1314 	bh_map->b_size = iomap.length;
1315 	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1316 		set_buffer_boundary(bh_map);
1317 	if (iomap.flags & IOMAP_F_NEW)
1318 		set_buffer_new(bh_map);
1319 
1320 out:
1321 	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1322 	return ret;
1323 }
1324 
1325 /*
1326  * Deprecated: do not use in new code
1327  */
1328 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
1329 {
1330 	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
1331 	int ret;
1332 	int create = *new;
1333 
1334 	BUG_ON(!extlen);
1335 	BUG_ON(!dblock);
1336 	BUG_ON(!new);
1337 
1338 	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
1339 	ret = gfs2_block_map(inode, lblock, &bh, create);
1340 	*extlen = bh.b_size >> inode->i_blkbits;
1341 	*dblock = bh.b_blocknr;
1342 	if (buffer_new(&bh))
1343 		*new = 1;
1344 	else
1345 		*new = 0;
1346 	return ret;
1347 }
1348 
1349 /*
1350  * NOTE: Never call gfs2_block_zero_range with an open transaction because it
1351  * uses iomap write to perform its actions, which begin their own transactions
1352  * (iomap_begin, page_prepare, etc.)
1353  */
1354 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1355 				 unsigned int length)
1356 {
1357 	BUG_ON(current->journal_info);
1358 	return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
1359 }
1360 
1361 #define GFS2_JTRUNC_REVOKES 8192
1362 
1363 /**
1364  * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1365  * @inode: The inode being truncated
1366  * @oldsize: The original (larger) size
1367  * @newsize: The new smaller size
1368  *
1369  * With jdata files, we have to journal a revoke for each block which is
1370  * truncated. As a result, we need to split this into separate transactions
1371  * if the number of pages being truncated gets too large.
1372  */
1373 
1374 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1375 {
1376 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1377 	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1378 	u64 chunk;
1379 	int error;
1380 
1381 	while (oldsize != newsize) {
1382 		struct gfs2_trans *tr;
1383 		unsigned int offs;
1384 
1385 		chunk = oldsize - newsize;
1386 		if (chunk > max_chunk)
1387 			chunk = max_chunk;
1388 
1389 		offs = oldsize & ~PAGE_MASK;
1390 		if (offs && chunk > PAGE_SIZE)
1391 			chunk = offs + ((chunk - offs) & PAGE_MASK);
1392 
1393 		truncate_pagecache(inode, oldsize - chunk);
1394 		oldsize -= chunk;
1395 
1396 		tr = current->journal_info;
1397 		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1398 			continue;
1399 
1400 		gfs2_trans_end(sdp);
1401 		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1402 		if (error)
1403 			return error;
1404 	}
1405 
1406 	return 0;
1407 }
1408 
1409 static int trunc_start(struct inode *inode, u64 newsize)
1410 {
1411 	struct gfs2_inode *ip = GFS2_I(inode);
1412 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1413 	struct buffer_head *dibh = NULL;
1414 	int journaled = gfs2_is_jdata(ip);
1415 	u64 oldsize = inode->i_size;
1416 	int error;
1417 
1418 	if (!gfs2_is_stuffed(ip)) {
1419 		unsigned int blocksize = i_blocksize(inode);
1420 		unsigned int offs = newsize & (blocksize - 1);
1421 		if (offs) {
1422 			error = gfs2_block_zero_range(inode, newsize,
1423 						      blocksize - offs);
1424 			if (error)
1425 				return error;
1426 		}
1427 	}
1428 	if (journaled)
1429 		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1430 	else
1431 		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1432 	if (error)
1433 		return error;
1434 
1435 	error = gfs2_meta_inode_buffer(ip, &dibh);
1436 	if (error)
1437 		goto out;
1438 
1439 	gfs2_trans_add_meta(ip->i_gl, dibh);
1440 
1441 	if (gfs2_is_stuffed(ip))
1442 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1443 	else
1444 		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1445 
1446 	i_size_write(inode, newsize);
1447 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1448 	gfs2_dinode_out(ip, dibh->b_data);
1449 
1450 	if (journaled)
1451 		error = gfs2_journaled_truncate(inode, oldsize, newsize);
1452 	else
1453 		truncate_pagecache(inode, newsize);
1454 
1455 out:
1456 	brelse(dibh);
1457 	if (current->journal_info)
1458 		gfs2_trans_end(sdp);
1459 	return error;
1460 }
1461 
1462 int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
1463 			 struct iomap *iomap)
1464 {
1465 	struct metapath mp = { .mp_aheight = 1, };
1466 	int ret;
1467 
1468 	ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1469 	if (!ret && iomap->type == IOMAP_HOLE)
1470 		ret = gfs2_iomap_alloc(inode, iomap, &mp);
1471 	release_metapath(&mp);
1472 	return ret;
1473 }
1474 
1475 /**
1476  * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1477  * @ip: inode
1478  * @rg_gh: holder of resource group glock
1479  * @bh: buffer head to sweep
1480  * @start: starting point in bh
1481  * @end: end point in bh
1482  * @meta: true if bh points to metadata (rather than data)
1483  * @btotal: place to keep count of total blocks freed
1484  *
1485  * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1486  * free, and free them all. However, we do it one rgrp at a time. If this
1487  * block has references to multiple rgrps, we break it into individual
1488  * transactions. This allows other processes to use the rgrps while we're
1489  * focused on a single one, for better concurrency / performance.
1490  * At every transaction boundary, we rewrite the inode into the journal.
1491  * That way the bitmaps are kept consistent with the inode and we can recover
1492  * if we're interrupted by power-outages.
1493  *
1494  * Returns: 0, or return code if an error occurred.
1495  *          *btotal has the total number of blocks freed
1496  */
1497 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1498 			      struct buffer_head *bh, __be64 *start, __be64 *end,
1499 			      bool meta, u32 *btotal)
1500 {
1501 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1502 	struct gfs2_rgrpd *rgd;
1503 	struct gfs2_trans *tr;
1504 	__be64 *p;
1505 	int blks_outside_rgrp;
1506 	u64 bn, bstart, isize_blks;
1507 	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1508 	int ret = 0;
1509 	bool buf_in_tr = false; /* buffer was added to transaction */
1510 
1511 more_rgrps:
1512 	rgd = NULL;
1513 	if (gfs2_holder_initialized(rd_gh)) {
1514 		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1515 		gfs2_assert_withdraw(sdp,
1516 			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1517 	}
1518 	blks_outside_rgrp = 0;
1519 	bstart = 0;
1520 	blen = 0;
1521 
1522 	for (p = start; p < end; p++) {
1523 		if (!*p)
1524 			continue;
1525 		bn = be64_to_cpu(*p);
1526 
1527 		if (rgd) {
1528 			if (!rgrp_contains_block(rgd, bn)) {
1529 				blks_outside_rgrp++;
1530 				continue;
1531 			}
1532 		} else {
1533 			rgd = gfs2_blk2rgrpd(sdp, bn, true);
1534 			if (unlikely(!rgd)) {
1535 				ret = -EIO;
1536 				goto out;
1537 			}
1538 			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1539 						 0, rd_gh);
1540 			if (ret)
1541 				goto out;
1542 
1543 			/* Must be done with the rgrp glock held: */
1544 			if (gfs2_rs_active(&ip->i_res) &&
1545 			    rgd == ip->i_res.rs_rbm.rgd)
1546 				gfs2_rs_deltree(&ip->i_res);
1547 		}
1548 
1549 		/* The size of our transactions will be unknown until we
1550 		   actually process all the metadata blocks that relate to
1551 		   the rgrp. So we estimate. We know it can't be more than
1552 		   the dinode's i_blocks and we don't want to exceed the
1553 		   journal flush threshold, sd_log_thresh2. */
1554 		if (current->journal_info == NULL) {
1555 			unsigned int jblocks_rqsted, revokes;
1556 
1557 			jblocks_rqsted = rgd->rd_length + RES_DINODE +
1558 				RES_INDIRECT;
1559 			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1560 			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1561 				jblocks_rqsted +=
1562 					atomic_read(&sdp->sd_log_thresh2);
1563 			else
1564 				jblocks_rqsted += isize_blks;
1565 			revokes = jblocks_rqsted;
1566 			if (meta)
1567 				revokes += end - start;
1568 			else if (ip->i_depth)
1569 				revokes += sdp->sd_inptrs;
1570 			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1571 			if (ret)
1572 				goto out_unlock;
1573 			down_write(&ip->i_rw_mutex);
1574 		}
1575 		/* check if we will exceed the transaction blocks requested */
1576 		tr = current->journal_info;
1577 		if (tr->tr_num_buf_new + RES_STATFS +
1578 		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1579 			/* We set blks_outside_rgrp to ensure the loop will
1580 			   be repeated for the same rgrp, but with a new
1581 			   transaction. */
1582 			blks_outside_rgrp++;
1583 			/* This next part is tricky. If the buffer was added
1584 			   to the transaction, we've already set some block
1585 			   pointers to 0, so we better follow through and free
1586 			   them, or we will introduce corruption (so break).
1587 			   This may be impossible, or at least rare, but I
1588 			   decided to cover the case regardless.
1589 
1590 			   If the buffer was not added to the transaction
1591 			   (this call), doing so would exceed our transaction
1592 			   size, so we need to end the transaction and start a
1593 			   new one (so goto). */
1594 
1595 			if (buf_in_tr)
1596 				break;
1597 			goto out_unlock;
1598 		}
1599 
1600 		gfs2_trans_add_meta(ip->i_gl, bh);
1601 		buf_in_tr = true;
1602 		*p = 0;
1603 		if (bstart + blen == bn) {
1604 			blen++;
1605 			continue;
1606 		}
1607 		if (bstart) {
1608 			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1609 			(*btotal) += blen;
1610 			gfs2_add_inode_blocks(&ip->i_inode, -blen);
1611 		}
1612 		bstart = bn;
1613 		blen = 1;
1614 	}
1615 	if (bstart) {
1616 		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1617 		(*btotal) += blen;
1618 		gfs2_add_inode_blocks(&ip->i_inode, -blen);
1619 	}
1620 out_unlock:
1621 	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1622 					    outside the rgrp we just processed,
1623 					    do it all over again. */
1624 		if (current->journal_info) {
1625 			struct buffer_head *dibh;
1626 
1627 			ret = gfs2_meta_inode_buffer(ip, &dibh);
1628 			if (ret)
1629 				goto out;
1630 
1631 			/* Every transaction boundary, we rewrite the dinode
1632 			   to keep its di_blocks current in case of failure. */
1633 			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1634 				current_time(&ip->i_inode);
1635 			gfs2_trans_add_meta(ip->i_gl, dibh);
1636 			gfs2_dinode_out(ip, dibh->b_data);
1637 			brelse(dibh);
1638 			up_write(&ip->i_rw_mutex);
1639 			gfs2_trans_end(sdp);
1640 			buf_in_tr = false;
1641 		}
1642 		gfs2_glock_dq_uninit(rd_gh);
1643 		cond_resched();
1644 		goto more_rgrps;
1645 	}
1646 out:
1647 	return ret;
1648 }
1649 
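/*
 * Compare the first h components of mp->mp_list against @list; used to
 * detect whether the current walk position still lies on the start or end
 * boundary path of the range being punched.
 */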
1650 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1651 {
1652 	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1653 		return false;
1654 	return true;
1655 }
1656 
1657 /**
1658  * find_nonnull_ptr - find a non-null pointer given a metapath and height
1659  * @mp: starting metapath
1660  * @h: desired height to search
1661  *
1662  * Assumes the metapath is valid (with buffers) out to height h.
1663  * Returns: true if a non-null pointer was found in the metapath buffer
1664  *          false if all remaining pointers are NULL in the buffer
1665  */
1666 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1667 			     unsigned int h,
1668 			     __u16 *end_list, unsigned int end_aligned)
1669 {
1670 	struct buffer_head *bh = mp->mp_bh[h];
1671 	__be64 *first, *ptr, *end;
1672 
1673 	first = metaptr1(h, mp);
1674 	ptr = first + mp->mp_list[h];
1675 	end = (__be64 *)(bh->b_data + bh->b_size);
1676 	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1677 		bool keep_end = h < end_aligned;
1678 		end = first + end_list[h] + keep_end;
1679 	}
1680 
1681 	while (ptr < end) {
1682 		if (*ptr) { /* if we have a non-null pointer */
1683 			mp->mp_list[h] = ptr - first;
1684 			h++;
1685 			if (h < GFS2_MAX_META_HEIGHT)
1686 				mp->mp_list[h] = 0;
1687 			return true;
1688 		}
1689 		ptr++;
1690 	}
1691 	return false;
1692 }
1693 
1694 enum dealloc_states {
1695 	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
1696 	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
1697 	DEALLOC_FILL_MP = 2,  /* Fill in the metapath to the given height. */
1698 	DEALLOC_DONE = 3,       /* process complete */
1699 };
1700 
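/*
 * Work out which pointers in the buffer at @height fall inside the range
 * being deallocated: normally the whole buffer, clamped to start_list /
 * end_list while the walk is still on the boundary paths of the hole.
 */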
1701 static inline void
1702 metapointer_range(struct metapath *mp, int height,
1703 		  __u16 *start_list, unsigned int start_aligned,
1704 		  __u16 *end_list, unsigned int end_aligned,
1705 		  __be64 **start, __be64 **end)
1706 {
1707 	struct buffer_head *bh = mp->mp_bh[height];
1708 	__be64 *first;
1709 
1710 	first = metaptr1(height, mp);
1711 	*start = first;
1712 	if (mp_eq_to_hgt(mp, start_list, height)) {
1713 		bool keep_start = height < start_aligned;
1714 		*start = first + start_list[height] + keep_start;
1715 	}
1716 	*end = (__be64 *)(bh->b_data + bh->b_size);
1717 	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1718 		bool keep_end = height < end_aligned;
1719 		*end = first + end_list[height] + keep_end;
1720 	}
1721 }
1722 
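/*
 * walk_done - check if we have walked past the end of a buffer's range
 *
 * Returns true once mp->mp_list[height] points past the last in-range
 * pointer at this height: past end_list for a bounded hole, or past the
 * end of the buffer (sd_diptrs pointers in the dinode, sd_inptrs in an
 * indirect block) otherwise.
 */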
walk_done(struct gfs2_sbd * sdp,struct metapath * mp,int height,__u16 * end_list,unsigned int end_aligned)1723 static inline bool walk_done(struct gfs2_sbd *sdp,
1724 			     struct metapath *mp, int height,
1725 			     __u16 *end_list, unsigned int end_aligned)
1726 {
1727 	__u16 end;
1728 
1729 	if (end_list) {
1730 		bool keep_end = height < end_aligned;
1731 		if (!mp_eq_to_hgt(mp, end_list, height))
1732 			return false;
1733 		end = end_list[height] + keep_end;
1734 	} else
1735 		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1736 	return mp->mp_list[height] >= end;
1737 }
1738 
1739 /**
1740  * punch_hole - deallocate blocks in a file
1741  * @ip: inode to truncate
1742  * @offset: the start of the hole
1743  * @length: the size of the hole (or 0 for truncate)
1744  *
1745  * Punch a hole into a file or truncate a file at a given position.  This
1746  * function operates in whole blocks (@offset and @length are rounded
1747  * accordingly); partially filled blocks at either end must be zeroed by the caller.
1748  *
1749  * This function works from the bottom up, and from the right to the left. In
1750  * other words, it strips off the highest layer (data) before stripping any of
1751  * the metadata. Doing it this way is best in case the operation is interrupted
1752  * by power failure, etc.  The dinode is rewritten in every transaction to
1753  * guarantee integrity.
 *
 * Returns: errno
1754  */
punch_hole(struct gfs2_inode * ip,u64 offset,u64 length)1755 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1756 {
1757 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1758 	u64 maxsize = sdp->sd_heightsize[ip->i_height];
1759 	struct metapath mp = {};
1760 	struct buffer_head *dibh, *bh;
1761 	struct gfs2_holder rd_gh;
1762 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1763 	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1764 	__u16 start_list[GFS2_MAX_META_HEIGHT];
1765 	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1766 	unsigned int start_aligned, end_aligned;
1767 	unsigned int strip_h = ip->i_height - 1;
1768 	u32 btotal = 0;
1769 	int ret, state;
1770 	int mp_h; /* metapath buffers are read in to this height */
1771 	u64 prev_bnr = 0;
1772 	__be64 *start, *end;
1773 
1774 	if (offset >= maxsize) {
1775 		/*
1776 		 * The starting point lies beyond the allocated metadata;
1777 		 * there are no blocks to deallocate.
1778 		 */
1779 		return 0;
1780 	}
1781 
1782 	/*
1783 	 * The start position of the hole is defined by lblock, start_list, and
1784 	 * start_aligned.  The end position of the hole is defined by lend,
1785 	 * end_list, and end_aligned.
1786 	 *
1787 	 * start_aligned and end_aligned define down to which height the start
1788 	 * and end positions are aligned to the metadata tree (i.e., the
1789 	 * position is a multiple of the metadata granularity at the height
1790 	 * above).  This determines at which heights additional meta pointers
1791 	 * need to be preserved for the remaining data.
1792 	 */
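
	/*
	 * Hypothetical example, assuming 4KiB blocks (509 pointers per
	 * indirect block): for a height-3 file, a hole starting at byte
	 * offset 2000000000 gives lblock = 488282 = 1 * 509^2 + 450 * 509
	 * + 151, hence a start_list of { 1, 450, 151 }: pointer 1 in the
	 * dinode, pointer 450 in the first-level indirect block, pointer
	 * 151 in the second-level one.
	 */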
1793 
1794 	if (length) {
1795 		u64 end_offset = offset + length;
1796 		u64 lend;
1797 
1798 		/*
1799 		 * Clip the end at the maximum file size for the given height:
1800 		 * that's how far the metadata goes; files bigger than that
1801 		 * will have additional layers of indirection.
1802 		 */
1803 		if (end_offset > maxsize)
1804 			end_offset = maxsize;
1805 		lend = end_offset >> bsize_shift;
1806 
1807 		if (lblock >= lend)
1808 			return 0;
1809 
1810 		find_metapath(sdp, lend, &mp, ip->i_height);
1811 		end_list = __end_list;
1812 		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1813 
1814 		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1815 			if (end_list[mp_h])
1816 				break;
1817 		}
1818 		end_aligned = mp_h;
1819 	}
1820 
1821 	find_metapath(sdp, lblock, &mp, ip->i_height);
1822 	memcpy(start_list, mp.mp_list, sizeof(start_list));
1823 
1824 	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1825 		if (start_list[mp_h])
1826 			break;
1827 	}
1828 	start_aligned = mp_h;
1829 
1830 	ret = gfs2_meta_inode_buffer(ip, &dibh);
1831 	if (ret)
1832 		return ret;
1833 
1834 	mp.mp_bh[0] = dibh;
1835 	ret = lookup_metapath(ip, &mp);
1836 	if (ret)
1837 		goto out_metapath;
1838 
1839 	/* issue read-ahead on metadata */
1840 	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1841 		metapointer_range(&mp, mp_h, start_list, start_aligned,
1842 				  end_list, end_aligned, &start, &end);
1843 		gfs2_metapath_ra(ip->i_gl, start, end);
1844 	}
1845 
1846 	if (mp.mp_aheight == ip->i_height)
1847 		state = DEALLOC_MP_FULL; /* We have a complete metapath */
1848 	else
1849 		state = DEALLOC_FILL_MP; /* deal with partial metapath */
1850 
1851 	ret = gfs2_rindex_update(sdp);
1852 	if (ret)
1853 		goto out_metapath;
1854 
1855 	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1856 	if (ret)
1857 		goto out_metapath;
1858 	gfs2_holder_mark_uninitialized(&rd_gh);
1859 
1860 	mp_h = strip_h;
1861 
1862 	while (state != DEALLOC_DONE) {
1863 		switch (state) {
1864 		/* Truncate a full metapath at the given strip height.
1865 		 * Note that strip_h == mp_h in order to be in this state. */
1866 		case DEALLOC_MP_FULL:
1867 			bh = mp.mp_bh[mp_h];
1868 			gfs2_assert_withdraw(sdp, bh);
1869 			if (gfs2_assert_withdraw(sdp,
1870 						 prev_bnr != bh->b_blocknr)) {
1871 				fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
1872 					 "s_h:%u, mp_h:%u\n",
1873 				       (unsigned long long)ip->i_no_addr,
1874 				       prev_bnr, ip->i_height, strip_h, mp_h);
1875 			}
1876 			prev_bnr = bh->b_blocknr;
1877 
1878 			if (gfs2_metatype_check(sdp, bh,
1879 						(mp_h ? GFS2_METATYPE_IN :
1880 							GFS2_METATYPE_DI))) {
1881 				ret = -EIO;
1882 				goto out;
1883 			}
1884 
1885 			/*
1886 			 * Below, passing end_aligned as 0 gives us the
1887 			 * metapointer range excluding the end point: the end
1888 			 * point is the first metapath we must not deallocate!
1889 			 */
1890 
1891 			metapointer_range(&mp, mp_h, start_list, start_aligned,
1892 					  end_list, 0 /* end_aligned */,
1893 					  &start, &end);
1894 			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1895 						 start, end,
1896 						 mp_h != ip->i_height - 1,
1897 						 &btotal);
1898 
1899 			/* If we hit an error or have just swept the dinode
1900 			   buffer, exit. */
1901 			if (ret || !mp_h) {
1902 				state = DEALLOC_DONE;
1903 				break;
1904 			}
1905 			state = DEALLOC_MP_LOWER;
1906 			break;
1907 
1908 		/* lower the metapath strip height */
1909 		case DEALLOC_MP_LOWER:
1910 			/* We're done with the current buffer, so release it,
1911 			   unless it's the dinode buffer. Then back up to the
1912 			   previous pointer. */
1913 			if (mp_h) {
1914 				brelse(mp.mp_bh[mp_h]);
1915 				mp.mp_bh[mp_h] = NULL;
1916 			}
1917 			/* If we can't get any lower in height, we've stripped
1918 			   off all we can. Next step is to back up and start
1919 			   stripping the previous level of metadata. */
1920 			if (mp_h == 0) {
1921 				strip_h--;
1922 				memcpy(mp.mp_list, start_list, sizeof(start_list));
1923 				mp_h = strip_h;
1924 				state = DEALLOC_FILL_MP;
1925 				break;
1926 			}
1927 			mp.mp_list[mp_h] = 0;
1928 			mp_h--; /* search one metadata height down */
1929 			mp.mp_list[mp_h]++;
1930 			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1931 				break;
1932 			/* Here we've found a part of the metapath that is not
1933 			 * allocated. We need to search at that height for the
1934 			 * next non-null pointer. */
1935 			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1936 				state = DEALLOC_FILL_MP;
1937 				mp_h++;
1938 			}
1939 			/* No more non-null pointers at this height. Back up
1940 			   to the previous height and try again. */
1941 			break; /* loop around in the same state */
1942 
1943 		/* Fill the metapath with buffers to the given height. */
1944 		case DEALLOC_FILL_MP:
1945 			/* Fill the buffers out to the current height. */
1946 			ret = fillup_metapath(ip, &mp, mp_h);
1947 			if (ret < 0)
1948 				goto out;
1949 
1950 			/* On the first pass, issue read-ahead on metadata. */
1951 			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1952 				unsigned int height = mp.mp_aheight - 1;
1953 
1954 				/* No read-ahead for data blocks. */
1955 				if (mp.mp_aheight - 1 == strip_h)
1956 					height--;
1957 
1958 				for (; height >= mp.mp_aheight - ret; height--) {
1959 					metapointer_range(&mp, height,
1960 							  start_list, start_aligned,
1961 							  end_list, end_aligned,
1962 							  &start, &end);
1963 					gfs2_metapath_ra(ip->i_gl, start, end);
1964 				}
1965 			}
1966 
1967 			/* If buffers found for the entire strip height */
1968 			if (mp.mp_aheight - 1 == strip_h) {
1969 				state = DEALLOC_MP_FULL;
1970 				break;
1971 			}
1972 			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1973 				mp_h = mp.mp_aheight - 1;
1974 
1975 			/* If we find a non-null block pointer, crawl a bit
1976 			   higher up in the metapath and try again, otherwise
1977 			   we need to look lower for a new starting point. */
1978 			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1979 				mp_h++;
1980 			else
1981 				state = DEALLOC_MP_LOWER;
1982 			break;
1983 		}
1984 	}
1985 
1986 	if (btotal) {
1987 		if (current->journal_info == NULL) {
1988 			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1989 					       RES_QUOTA, 0);
1990 			if (ret)
1991 				goto out;
1992 			down_write(&ip->i_rw_mutex);
1993 		}
1994 		gfs2_statfs_change(sdp, 0, +btotal, 0);
1995 		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1996 				  ip->i_inode.i_gid);
1997 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1998 		gfs2_trans_add_meta(ip->i_gl, dibh);
1999 		gfs2_dinode_out(ip, dibh->b_data);
2000 		up_write(&ip->i_rw_mutex);
2001 		gfs2_trans_end(sdp);
2002 	}
2003 
2004 out:
2005 	if (gfs2_holder_initialized(&rd_gh))
2006 		gfs2_glock_dq_uninit(&rd_gh);
2007 	if (current->journal_info) {
2008 		up_write(&ip->i_rw_mutex);
2009 		gfs2_trans_end(sdp);
2010 		cond_resched();
2011 	}
2012 	gfs2_quota_unhold(ip);
2013 out_metapath:
2014 	release_metapath(&mp);
2015 	return ret;
2016 }
2017 
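/*
 * trunc_end - finish off a truncate or hole punch
 *
 * Clears GFS2_DIF_TRUNC_IN_PROG in a small transaction; if the file has
 * shrunk to size zero, also resets the height and allocation goal and
 * clears the tail of the dinode block.
 */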
trunc_end(struct gfs2_inode * ip)2018 static int trunc_end(struct gfs2_inode *ip)
2019 {
2020 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2021 	struct buffer_head *dibh;
2022 	int error;
2023 
2024 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2025 	if (error)
2026 		return error;
2027 
2028 	down_write(&ip->i_rw_mutex);
2029 
2030 	error = gfs2_meta_inode_buffer(ip, &dibh);
2031 	if (error)
2032 		goto out;
2033 
2034 	if (!i_size_read(&ip->i_inode)) {
2035 		ip->i_height = 0;
2036 		ip->i_goal = ip->i_no_addr;
2037 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2038 		gfs2_ordered_del_inode(ip);
2039 	}
2040 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2041 	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2042 
2043 	gfs2_trans_add_meta(ip->i_gl, dibh);
2044 	gfs2_dinode_out(ip, dibh->b_data);
2045 	brelse(dibh);
2046 
2047 out:
2048 	up_write(&ip->i_rw_mutex);
2049 	gfs2_trans_end(sdp);
2050 	return error;
2051 }
2052 
2053 /**
2054  * do_shrink - make a file smaller
2055  * @inode: the inode
2056  * @newsize: the size to make the file
2057  *
2058  * Called with an exclusive lock on @inode. The @newsize must
2059  * be equal to or smaller than the current inode size.
2060  *
2061  * Returns: errno
2062  */
2063 
do_shrink(struct inode * inode,u64 newsize)2064 static int do_shrink(struct inode *inode, u64 newsize)
2065 {
2066 	struct gfs2_inode *ip = GFS2_I(inode);
2067 	int error;
2068 
2069 	error = trunc_start(inode, newsize);
2070 	if (error < 0)
2071 		return error;
2072 	if (gfs2_is_stuffed(ip))
2073 		return 0;
2074 
2075 	error = punch_hole(ip, newsize, 0);
2076 	if (error == 0)
2077 		error = trunc_end(ip);
2078 
2079 	return error;
2080 }
2081 
gfs2_trim_blocks(struct inode * inode)2082 void gfs2_trim_blocks(struct inode *inode)
2083 {
2084 	int ret;
2085 
2086 	ret = do_shrink(inode, inode->i_size);
2087 	WARN_ON(ret != 0);
2088 }
2089 
2090 /**
2091  * do_grow - Touch and update inode size
2092  * @inode: The inode
2093  * @size: The new size
2094  *
2095  * This function updates the timestamps on the inode and
2096  * may also increase the size of the inode. This function
2097  * must not be called with @size any smaller than the current
2098  * inode size.
2099  *
2100  * Although it is not strictly required to unstuff files here,
2101  * earlier versions of GFS2 had a bug in the stuffed file reading
2102  * code which would result in a buffer overrun if the size is larger
2103  * than the max stuffed file size. In order to prevent this from
2104  * occurring, such files are unstuffed, but in other cases we can
2105  * just update the inode size directly.
2106  *
2107  * Returns: 0 on success, or -ve on error
2108  */
2109 
do_grow(struct inode * inode,u64 size)2110 static int do_grow(struct inode *inode, u64 size)
2111 {
2112 	struct gfs2_inode *ip = GFS2_I(inode);
2113 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2114 	struct gfs2_alloc_parms ap = { .target = 1, };
2115 	struct buffer_head *dibh;
2116 	int error;
2117 	int unstuff = 0;
2118 
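	/*
	 * Note: gfs2_max_stuffed_size() is the block size minus the on-disk
	 * dinode header, e.g. roughly 4096 - 232 = 3864 bytes of stuffed
	 * data with 4KiB blocks.
	 */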
2119 	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2120 		error = gfs2_quota_lock_check(ip, &ap);
2121 		if (error)
2122 			return error;
2123 
2124 		error = gfs2_inplace_reserve(ip, &ap);
2125 		if (error)
2126 			goto do_grow_qunlock;
2127 		unstuff = 1;
2128 	}
2129 
2130 	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2131 				 (unstuff &&
2132 				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2133 				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2134 				  0 : RES_QUOTA), 0);
2135 	if (error)
2136 		goto do_grow_release;
2137 
2138 	if (unstuff) {
2139 		error = gfs2_unstuff_dinode(ip, NULL);
2140 		if (error)
2141 			goto do_end_trans;
2142 	}
2143 
2144 	error = gfs2_meta_inode_buffer(ip, &dibh);
2145 	if (error)
2146 		goto do_end_trans;
2147 
2148 	truncate_setsize(inode, size);
2149 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2150 	gfs2_trans_add_meta(ip->i_gl, dibh);
2151 	gfs2_dinode_out(ip, dibh->b_data);
2152 	brelse(dibh);
2153 
2154 do_end_trans:
2155 	gfs2_trans_end(sdp);
2156 do_grow_release:
2157 	if (unstuff) {
2158 		gfs2_inplace_release(ip);
2159 do_grow_qunlock:
2160 		gfs2_quota_unlock(ip);
2161 	}
2162 	return error;
2163 }
2164 
2165 /**
2166  * gfs2_setattr_size - make a file a given size
2167  * @inode: the inode
2168  * @newsize: the size to make the file
2169  *
2170  * The file size can grow, shrink, or stay the same size. This
2171  * is called holding i_rwsem and an exclusive glock on the inode
2172  * in question.
2173  *
2174  * Returns: errno
2175  */
2176 
gfs2_setattr_size(struct inode * inode,u64 newsize)2177 int gfs2_setattr_size(struct inode *inode, u64 newsize)
2178 {
2179 	struct gfs2_inode *ip = GFS2_I(inode);
2180 	int ret;
2181 
2182 	BUG_ON(!S_ISREG(inode->i_mode));
2183 
2184 	ret = inode_newsize_ok(inode, newsize);
2185 	if (ret)
2186 		return ret;
2187 
2188 	inode_dio_wait(inode);
2189 
2190 	ret = gfs2_rsqa_alloc(ip);
2191 	if (ret)
2192 		goto out;
2193 
2194 	if (newsize >= inode->i_size) {
2195 		ret = do_grow(inode, newsize);
2196 		goto out;
2197 	}
2198 
2199 	ret = do_shrink(inode, newsize);
2200 out:
2201 	gfs2_rsqa_delete(ip, NULL);
2202 	return ret;
2203 }
2204 
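/*
 * gfs2_truncatei_resume - finish a truncate that was interrupted
 *
 * Called when an inode is found with GFS2_DIF_TRUNC_IN_PROG still set
 * (e.g. after a crash in the middle of a truncate): deallocates everything
 * beyond the current file size and completes the truncate via trunc_end().
 */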
gfs2_truncatei_resume(struct gfs2_inode * ip)2205 int gfs2_truncatei_resume(struct gfs2_inode *ip)
2206 {
2207 	int error;
2208 	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2209 	if (!error)
2210 		error = trunc_end(ip);
2211 	return error;
2212 }
2213 
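/*
 * gfs2_file_dealloc - deallocate all of a file's blocks
 *
 * Equivalent to punching a hole over the whole file; used when the inode
 * itself is being deallocated.
 */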
gfs2_file_dealloc(struct gfs2_inode * ip)2214 int gfs2_file_dealloc(struct gfs2_inode *ip)
2215 {
2216 	return punch_hole(ip, 0, 0);
2217 }
2218 
2219 /**
2220  * gfs2_free_journal_extents - Free cached journal bmap info
2221  * @jd: The journal
2222  *
2223  */
2224 
gfs2_free_journal_extents(struct gfs2_jdesc * jd)2225 void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2226 {
2227 	struct gfs2_journal_extent *jext;
2228 
2229 	while(!list_empty(&jd->extent_list)) {
2230 		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
2231 		list_del(&jext->list);
2232 		kfree(jext);
2233 	}
2234 }
2235 
2236 /**
2237  * gfs2_add_jextent - Add or merge a new extent to extent cache
2238  * @jd: The journal descriptor
2239  * @lblock: The logical block at start of new extent
2240  * @dblock: The physical block at start of new extent
2241  * @blocks: Size of extent in fs blocks
2242  *
2243  * Returns: 0 on success or -ENOMEM
2244  */
2245 
gfs2_add_jextent(struct gfs2_jdesc * jd,u64 lblock,u64 dblock,u64 blocks)2246 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2247 {
2248 	struct gfs2_journal_extent *jext;
2249 
2250 	if (!list_empty(&jd->extent_list)) {
2251 		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
2252 		if ((jext->dblock + jext->blocks) == dblock) {
2253 			jext->blocks += blocks;
2254 			return 0;
2255 		}
2256 	}
2257 
2258 	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2259 	if (jext == NULL)
2260 		return -ENOMEM;
2261 	jext->dblock = dblock;
2262 	jext->lblock = lblock;
2263 	jext->blocks = blocks;
2264 	list_add_tail(&jext->list, &jd->extent_list);
2265 	jd->nr_extents++;
2266 	return 0;
2267 }
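
/*
 * For example, two calls such as gfs2_add_jextent(jd, 0, 100, 4) followed
 * by gfs2_add_jextent(jd, 4, 104, 4) leave a single 8-block extent, since
 * the second range starts at the physical block where the first one ends.
 */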
2268 
2269 /**
2270  * gfs2_map_journal_extents - Cache journal bmap info
2271  * @sdp: The super block
2272  * @jd: The journal to map
2273  *
2274  * Create a reusable "extent" mapping from all logical
2275  * blocks to all physical blocks for the given journal.  This will save
2276  * us time when writing journal blocks.  Most journals will have only one
2277  * extent that maps all their logical blocks.  That's because mkfs.gfs2
2278  * arranges the journal blocks sequentially to maximize performance.
2279  * So a single extent maps the journal from its first block to its last.
2280  * However, gfs2_jadd can happen while file activity is happening, so
2281  * those journals may not be sequential.  Less likely is the case where
2282  * the users created their own journals by mounting the metafs and
2283  * laying it out.  But it's still possible.  These journals might have
2284  * several extents.
2285  *
2286  * Returns: 0 on success, or error on failure
2287  */
2288 
gfs2_map_journal_extents(struct gfs2_sbd * sdp,struct gfs2_jdesc * jd)2289 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2290 {
2291 	u64 lblock = 0;
2292 	u64 lblock_stop;
2293 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2294 	struct buffer_head bh;
2295 	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2296 	u64 size;
2297 	int rc;
2298 	ktime_t start, end;
2299 
2300 	start = ktime_get();
2301 	lblock_stop = i_size_read(jd->jd_inode) >> shift;
2302 	size = (lblock_stop - lblock) << shift;
2303 	jd->nr_extents = 0;
2304 	WARN_ON(!list_empty(&jd->extent_list));
2305 
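	/*
	 * An on-stack buffer_head is used to query gfs2_block_map(): b_size
	 * is set to the remaining length so that each call maps as large an
	 * extent as possible, and the returned b_size says how much of the
	 * journal that extent covers.
	 */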
2306 	do {
2307 		bh.b_state = 0;
2308 		bh.b_blocknr = 0;
2309 		bh.b_size = size;
2310 		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2311 		if (rc || !buffer_mapped(&bh))
2312 			goto fail;
2313 		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2314 		if (rc)
2315 			goto fail;
2316 		size -= bh.b_size;
2317 		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2318 	} while(size > 0);
2319 
2320 	end = ktime_get();
2321 	fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2322 		jd->nr_extents, ktime_ms_delta(end, start));
2323 	return 0;
2324 
2325 fail:
2326 	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2327 		rc, jd->jd_jid,
2328 		(unsigned long long)(i_size_read(jd->jd_inode) - size),
2329 		jd->nr_extents);
2330 	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2331 		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2332 		bh.b_state, (unsigned long long)bh.b_size);
2333 	gfs2_free_journal_extents(jd);
2334 	return rc;
2335 }
2336 
2337 /**
2338  * gfs2_write_alloc_required - figure out if a write will require an allocation
2339  * @ip: the file being written to
2340  * @offset: the offset to write to
2341  * @len: the number of bytes being written
2342  *
2343  * Returns: 1 if an alloc is required, 0 otherwise
2344  */
2345 
gfs2_write_alloc_required(struct gfs2_inode * ip,u64 offset,unsigned int len)2346 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2347 			      unsigned int len)
2348 {
2349 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2350 	struct buffer_head bh;
2351 	unsigned int shift;
2352 	u64 lblock, lblock_stop, size;
2353 	u64 end_of_file;
2354 
2355 	if (!len)
2356 		return 0;
2357 
2358 	if (gfs2_is_stuffed(ip)) {
2359 		if (offset + len > gfs2_max_stuffed_size(ip))
2360 			return 1;
2361 		return 0;
2362 	}
2363 
2364 	shift = sdp->sd_sb.sb_bsize_shift;
2365 	BUG_ON(gfs2_is_dir(ip));
2366 	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2367 	lblock = offset >> shift;
2368 	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2369 	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2370 		return 1;
2371 
2372 	size = (lblock_stop - lblock) << shift;
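	/* Walk the range extent by extent; any unmapped extent means the
	   write will need an allocation. */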
2373 	do {
2374 		bh.b_state = 0;
2375 		bh.b_size = size;
2376 		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2377 		if (!buffer_mapped(&bh))
2378 			return 1;
2379 		size -= bh.b_size;
2380 		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2381 	} while(size > 0);
2382 
2383 	return 0;
2384 }
2385 
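/*
 * stuffed_zero_range - zero a byte range of a stuffed file
 *
 * A stuffed file keeps its data in the dinode block itself, so zeroing a
 * range is a memset into the tail of that block, clamped to the current
 * file size.
 */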
stuffed_zero_range(struct inode * inode,loff_t offset,loff_t length)2386 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2387 {
2388 	struct gfs2_inode *ip = GFS2_I(inode);
2389 	struct buffer_head *dibh;
2390 	int error;
2391 
2392 	if (offset >= inode->i_size)
2393 		return 0;
2394 	if (offset + length > inode->i_size)
2395 		length = inode->i_size - offset;
2396 
2397 	error = gfs2_meta_inode_buffer(ip, &dibh);
2398 	if (error)
2399 		return error;
2400 	gfs2_trans_add_meta(ip->i_gl, dibh);
2401 	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2402 	       length);
2403 	brelse(dibh);
2404 	return 0;
2405 }
2406 
gfs2_journaled_truncate_range(struct inode * inode,loff_t offset,loff_t length)2407 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2408 					 loff_t length)
2409 {
2410 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2411 	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2412 	int error;
2413 
2414 	while (length) {
2415 		struct gfs2_trans *tr;
2416 		loff_t chunk;
2417 		unsigned int offs;
2418 
2419 		chunk = length;
2420 		if (chunk > max_chunk)
2421 			chunk = max_chunk;
2422 
2423 		offs = offset & ~PAGE_MASK;
2424 		if (offs && chunk > PAGE_SIZE)
2425 			chunk = offs + ((chunk - offs) & PAGE_MASK);
2426 
2427 		truncate_pagecache_range(inode, offset, chunk);
2428 		offset += chunk;
2429 		length -= chunk;
2430 
2431 		tr = current->journal_info;
2432 		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2433 			continue;
2434 
2435 		gfs2_trans_end(sdp);
2436 		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2437 		if (error)
2438 			return error;
2439 	}
2440 	return 0;
2441 }
2442 
__gfs2_punch_hole(struct file * file,loff_t offset,loff_t length)2443 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2444 {
2445 	struct inode *inode = file_inode(file);
2446 	struct gfs2_inode *ip = GFS2_I(inode);
2447 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2448 	unsigned int blocksize = i_blocksize(inode);
2449 	loff_t start, end;
2450 	int error;
2451 
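	/*
	 * For unstuffed files, zero out any partial blocks at both ends of
	 * the hole first; punch_hole() below only deals in whole blocks.
	 */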
2452 	if (!gfs2_is_stuffed(ip)) {
2453 		unsigned int start_off, end_len;
2454 
2455 		start_off = offset & (blocksize - 1);
2456 		end_len = (offset + length) & (blocksize - 1);
2457 		if (start_off) {
2458 			unsigned int len = length;
2459 			if (length > blocksize - start_off)
2460 				len = blocksize - start_off;
2461 			error = gfs2_block_zero_range(inode, offset, len);
2462 			if (error)
2463 				goto out;
2464 			if (start_off + length < blocksize)
2465 				end_len = 0;
2466 		}
2467 		if (end_len) {
2468 			error = gfs2_block_zero_range(inode,
2469 				offset + length - end_len, end_len);
2470 			if (error)
2471 				goto out;
2472 		}
2473 	}
2474 
2475 	start = round_down(offset, blocksize);
2476 	end = round_up(offset + length, blocksize) - 1;
2477 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
2478 	if (error)
2479 		return error;
2480 
2481 	if (gfs2_is_jdata(ip))
2482 		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2483 					 GFS2_JTRUNC_REVOKES);
2484 	else
2485 		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2486 	if (error)
2487 		return error;
2488 
2489 	if (gfs2_is_stuffed(ip)) {
2490 		error = stuffed_zero_range(inode, offset, length);
2491 		if (error)
2492 			goto out;
2493 	}
2494 
2495 	if (gfs2_is_jdata(ip)) {
2496 		BUG_ON(!current->journal_info);
2497 		gfs2_journaled_truncate_range(inode, offset, length);
2498 	} else
2499 		truncate_pagecache_range(inode, offset, offset + length - 1);
2500 
2501 	file_update_time(file);
2502 	mark_inode_dirty(inode);
2503 
2504 	if (current->journal_info)
2505 		gfs2_trans_end(sdp);
2506 
2507 	if (!gfs2_is_stuffed(ip))
2508 		error = punch_hole(ip, offset, length);
2509 
2510 out:
2511 	if (current->journal_info)
2512 		gfs2_trans_end(sdp);
2513 	return error;
2514 }
2515