/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "log.h"
#include "inode.h"
#include "trace_gfs2.h"

#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */
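
/*
 * For example, the byte 0xE4 (binary 11 10 01 00, entries stored from the
 * low-order bits up) describes four blocks: block 0 free, block 1 used
 * data, block 2 an unlinked inode, and block 3 used metadata.
 */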

struct gfs2_extent {
	struct gfs2_rbm rbm;
	u32 len;
};

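/*
 * valid_change is indexed by [new_state * 4 + cur_state] (see gfs2_setbit):
 * each row is a proposed new state (the row labels spell "new"), each
 * column the current state. A block may only be freed from a non-free
 * state, only allocated (as data or metadata) from the free state, and
 * only marked unlinked from the used-metadata state (i.e. a dinode).
 */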
static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap,
			 const struct gfs2_alloc_parms *ap);


/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 *
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	unsigned int buflen = bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = bi->bi_bh->b_data + bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
			rbm->offset, cur_state, new_state);
		pr_warn("rgrp=0x%llx bi_start=0x%x\n",
			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
		pr_warn("bi_offset=0x%x bi_len=0x%x\n",
			bi->bi_offset, bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && bi->bi_clone) {
		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}

/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 *
 * Returns: The two bit block state of the requested bit
 */

static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
	const u8 *byte;
	unsigned int bit;

	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (*byte >> bit) & GFS2_BIT_MASK;
}

/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for; this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0, 2, 4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
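 *
 * For example, a search for GFS2_BLKST_FREE (0) in the byte 0x05 (entries,
 * low bits first: 01, 01, 00, 00) computes tmp = 0x05 ^ 0xff = 0xfa, then
 * tmp & (tmp >> 1) = 0x78, and masking with 0x55 leaves 0x50: bits 4 and 6
 * mark entries 2 and 3, the two free blocks.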
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];
	tmp &= (tmp >> 1);
	tmp &= mask;
	return tmp;
}

/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

	if (blk >= startblk + rs->rs_free)
		return 1;
	if (blk + len - 1 < startblk)
		return -1;
	return 0;
}

/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
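	/*
	 * Two bits per block: "goal << 1" is the goal's bit position, so
	 * "& 63" yields its offset within a 64-bit word and "goal >> 5"
	 * the index of that word (32 entries per u64).
	 */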
	u32 spoint = (goal << 1) & ((8 * sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	while (tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8 * (len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}

/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;

	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	rbm->bii = 0;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first bitmap block */
	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
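	/*
	 * sd_blocks_per_bitmap is the number of blocks covered by a bitmap
	 * block containing only a gfs2_meta_header; after the adjustment
	 * above, the first (larger header) block can be treated identically.
	 */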
	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	return 0;
}

/**
 * gfs2_rbm_incr - increment an rbm structure
 * @rbm: The rbm with rgd already set correctly
 *
 * This function takes an existing rbm structure and increments it to the next
 * viable block offset.
 *
 * Returns: If incrementing the offset would cause the rbm to go past the
 *          end of the rgrp, true is returned, otherwise false.
 *
 */

static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
{
	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
		rbm->offset++;
		return false;
	}
	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
		return true;

	rbm->offset = 0;
	rbm->bii++;
	return false;
}

/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */

static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
	u32 n;
	u8 res;

	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm);
		if (res != GFS2_BLKST_FREE)
			return true;
		(*len)--;
		if (*len == 0)
			return true;
		if (gfs2_rbm_incr(rbm))
			return true;
	}

	return false;
}

/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;
	struct gfs2_bitmap *bi;

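	/*
	 * Up to three leading blocks are checked one at a time until the
	 * position is byte aligned; whole bytes are then scanned with
	 * memchr_inv(), and any trailing blocks are again checked one at
	 * a time.
	 */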
	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		bi = rbm_bi(&rbm);
		start = bi->bi_bh->b_data;
		if (bi->bi_clone)
			start = bi->bi_clone;
		end = start + bi->bi_bh->b_size;
		start += bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
			n_unaligned = 0;
			break;
		}
		if (ptr) {
			n_unaligned = 3;
			break;
		}
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;
}

/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
{
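	/*
	 * state1..state3 are the sought state shifted to each of the
	 * remaining two-bit entry positions within a byte, so that all
	 * four entries of each byte can be compared directly.
	 */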
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}

static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

void check_and_update_goal(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
		ip->i_goal = ip->i_no_addr;
}

void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int x;

	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
	}
}

/**
 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
 * @ip: the inode for this reservation
 */
int gfs2_rs_alloc(struct gfs2_inode *ip)
{
	int error = 0;

	down_write(&ip->i_rw_mutex);
	if (ip->i_res)
		goto out;

	ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
	if (!ip->i_res) {
		error = -ENOMEM;
		goto out;
	}

	RB_CLEAR_NODE(&ip->i_res->rs_node);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}

static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}

/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
static void __rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
				 rs->rs_free - 1;
		struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
		struct gfs2_bitmap *start, *last;

		/* return reserved blocks to the rgrp */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		/* The rgrp extent failure point is likely not to increase;
		   it will only do so if the freed blocks are somehow
		   contiguous with a span of free blocks that follows. Still,
		   it will force the number to be recalculated later. */
		rgd->rd_extfail_pt += rs->rs_free;
		rs->rs_free = 0;
		if (gfs2_rbm_from_block(&last_rbm, last_block))
			return;
		start = rbm_bi(&rs->rs_rbm);
		last = rbm_bi(&last_rbm);
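		/*
		 * Returning the blocks may make previously full bitmaps
		 * worth searching again, so clear GBF_FULL on every bitmap
		 * the reservation spanned.
		 */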
		do
			clear_bit(GBF_FULL, &start->bi_flags);
		while (start++ != last);
	}
}

/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
		__rs_deltree(rs);
		spin_unlock(&rgd->rd_rsspin);
	}
}

/**
 * gfs2_rs_delete - delete a multi-block reservation
 * @ip: The inode for this reservation
 * @wcount: The inode's write count, or NULL
 *
 */
void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
		gfs2_rs_deltree(ip->i_res);
		BUG_ON(ip->i_res->rs_free);
		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
		ip->i_res = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}

void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			spin_lock(&gl->gl_lockref.lock);
			gl->gl_object = NULL;
			spin_unlock(&gl->gl_lockref.lock);
			gfs2_rgrp_brelse(rgd);
			gfs2_glock_add_to_lru(gl);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		return_all_reservations(rgd);
		kfree(rgd->rd_bits);
		rgd->rd_bits = NULL;
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}

static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	pr_info("ri_length = %u\n", rgd->rd_length);
	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	pr_info("ri_data = %u\n", rgd->rd_data);
	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
}

/**
 * gfs2_compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	int error, rgrps;

	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}

static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}

/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error) {
		rgd->rd_gl->gl_object = rgd;
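		/*
		 * Record the page-aligned span of the metadata address space
		 * occupied by this rgrp's header and bitmap blocks, so that
		 * glock sync/invalidate can presumably act on just that range.
		 */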
		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
						    rgd->rd_length) * bsize) - 1;
		return 0;
	}

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	rgd->rd_bits = NULL;
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}

/**
 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
 * @sdp: the GFS2 superblock
 *
 * The purpose of this function is to select a subset of the resource groups
 * and mark them as PREFERRED. We do it in such a way that each node prefers
 * to use a unique set of rgrps to minimize glock contention.
 */
static void set_rgrp_preferences(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd, *first;
	int i;

	/* Skip an initial number of rgrps, based on this node's journal ID.
	   That should start each node out on its own set. */
	rgd = gfs2_rgrpd_get_first(sdp);
	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
		rgd = gfs2_rgrpd_get_next(rgd);
	first = rgd;

	do {
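		/*
		 * Mark this rgrp preferred, then skip ahead one rgrp per
		 * journal so that each node prefers an interleaved set
		 * disjoint from the other nodes' sets.
		 */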
		rgd->rd_flags |= GFS2_RDF_PREFERRED;
		for (i = 0; i < sdp->sd_journals; i++) {
			rgd = gfs2_rgrpd_get_next(rgd);
			if (!rgd || rgd == first)
				break;
		}
	} while (rgd && rgd != first);
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	do {
		error = read_rindex_entry(ip);
	} while (error == 0);

	if (error < 0)
		return error;

	if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
		fs_err(sdp, "no resource groups found in the file system.\n");
		return -ENOENT;
	}
	set_rgrp_preferences(sdp);

	sdp->sd_rindex_uptodate = 1;
	return 0;
}

/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}

static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}

static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrp *str = buf;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}

static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}

static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}

static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
}

static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;

	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		goal = 0;
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}


/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
 *
 * Returns: errno
 */

static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
		/* max out the rgrp allocation failure point */
		rgd->rd_extfail_pt = rgd->rd_free;
	}
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	} else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)) {
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}

static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}

int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
		return 0;
	return gfs2_rgrp_bh_get(rgd);
}

/**
 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: The resource group
 *
 */

void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
{
	int x, length = rgd->rd_length;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (bi->bi_bh) {
			brelse(bi->bi_bh);
			bi->bi_bh = NULL;
		}
	}

}

/**
 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
 * @gh: The glock holder for the resource group
 *
 */

void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
		test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);

	if (rgd && demote_requested)
		gfs2_rgrp_brelse(rgd);
}

int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 blk;
	sector_t start = 0;
	sector_t nr_blks = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
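		/*
		 * (x | (x >> 1)) & 0x55 sets a bit at each even position whose
		 * two-bit entry is non-zero (in use). So with @bh given, diff
		 * flags blocks that are free in @bh but still allocated in the
		 * clone, i.e. blocks freed since the clone was taken; without
		 * it, diff simply flags every free block.
		 */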
		if (bh) {
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		while (diff) {
			if (diff & 1) {
				if (nr_blks == 0)
					goto start_new_extent;
				if ((start + nr_blks) != blk) {
					if (nr_blks >= minlen) {
						rv = sb_issue_discard(sb,
							start, nr_blks,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_blks;
					}
					nr_blks = 0;
start_new_extent:
					start = blk;
				}
				nr_blks++;
			}
			diff >>= 2;
			blk++;
		}
	}
	if (nr_blks >= minlen) {
		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_blks;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}

/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */

int gfs2_fitrim(struct file *filp, void __user *argp)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	int ret = 0;
	u64 amt;
	u64 trimmed = 0;
	u64 start, end, minlen;
	unsigned int x;
	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&r, argp, sizeof(r)))
		return -EFAULT;

	ret = gfs2_rindex_update(sdp);
	if (ret)
		return ret;

	start = r.start >> bs_shift;
	end = start + (r.len >> bs_shift);
	minlen = max_t(u64, r.minlen,
		       q->limits.discard_granularity) >> bs_shift;

	if (end <= start || minlen > sdp->sd_max_rg_data)
		return -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, end, 0);

	if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
	    && (start > rgd_end->rd_data0 + rgd_end->rd_data))
		return -EINVAL; /* start is beyond the end of the fs */

	while (1) {

		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;

		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi, minlen,
						&amt);
				if (ret) {
					gfs2_glock_dq_uninit(&gh);
					goto out;
				}
				trimmed += amt;
			}

			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
			if (ret == 0) {
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_meta(rgd->rd_gl, bh);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
				gfs2_trans_end(sdp);
			}
		}
		gfs2_glock_dq_uninit(&gh);

		if (rgd == rgd_end)
			break;

		rgd = gfs2_rgrpd_get_next(rgd);
	}

out:
	r.len = trimmed << bs_shift;
	if (copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

	return ret;
}

/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 *
 */
static void rs_insert(struct gfs2_inode *ip)
{
	struct rb_node **newn, *parent = NULL;
	int rc;
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

	BUG_ON(gfs2_rs_active(rs));

	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
	while (*newn) {
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);

		parent = *newn;
		rc = rs_cmp(fsblock, rs->rs_free, cur);
		if (rc > 0)
			newn = &((*newn)->rb_right);
		else if (rc < 0)
			newn = &((*newn)->rb_left);
		else {
			spin_unlock(&rgd->rd_rsspin);
			WARN_ON(1);
			return;
		}
	}

	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}

/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @ap: the allocation parameters
 *
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   const struct gfs2_alloc_parms *ap)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = ip->i_res;
	u32 extlen;
	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
	int ret;
	struct inode *inode = &ip->i_inode;

	if (S_ISDIR(inode->i_mode))
		extlen = 1;
	else {
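		/*
		 * Base the reservation size on the inode's allocation size
		 * hint or the current request, whichever is larger, clamped
		 * to at least the minimum reservation size and at most the
		 * unreserved free blocks counted above.
		 */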
		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	}
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true, ap);
	if (ret == 0) {
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs->rs_inum = ip->i_no_addr;
		rs_insert(ip);
	} else {
		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
			rgd->rd_last_alloc = 0;
	}
}

/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
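		/*
		 * The range overlaps at least one reservation: step past the
		 * end of each overlapping reservation in turn (ignoring our
		 * own) until the range starting at block is clear.
		 */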
1597 while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
1598 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1599 n = n->rb_right;
1600 if (n == NULL)
1601 break;
1602 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1603 }
1604 }
1605
1606 spin_unlock(&rgd->rd_rsspin);
1607 return block;
1608 }
1609
1610 /**
1611 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1612 * @rbm: The current position in the resource group
1613 * @ip: The inode for which we are searching for blocks
1614 * @minext: The minimum extent length
1615 * @maxext: A pointer to the maximum extent structure
1616 *
1617 * This checks the current position in the rgrp to see whether there is
1618 * a reservation covering this block. If not then this function is a
1619 * no-op. If there is, then the position is moved to the end of the
1620 * contiguous reservation(s) so that we are pointing at the first
1621 * non-reserved block.
1622 *
1623 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1624 */
1625
gfs2_reservation_check_and_update(struct gfs2_rbm * rbm,const struct gfs2_inode * ip,u32 minext,struct gfs2_extent * maxext)1626 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1627 const struct gfs2_inode *ip,
1628 u32 minext,
1629 struct gfs2_extent *maxext)
1630 {
1631 u64 block = gfs2_rbm_to_block(rbm);
1632 u32 extlen = 1;
1633 u64 nblock;
1634 int ret;
1635
1636 /*
1637 * If we have a minimum extent length, then skip over any extent
1638 * which is less than the min extent length in size.
1639 */
1640 if (minext) {
1641 extlen = gfs2_free_extlen(rbm, minext);
1642 if (extlen <= maxext->len)
1643 goto fail;
1644 }
1645
1646 /*
1647 * Check the extent which has been found against the reservations
1648 * and skip if parts of it are already reserved
1649 */
1650 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
1651 if (nblock == block) {
1652 if (!minext || extlen >= minext)
1653 return 0;
1654
1655 if (extlen > maxext->len) {
1656 maxext->len = extlen;
1657 maxext->rbm = *rbm;
1658 }
1659 fail:
1660 nblock = block + extlen;
1661 }
1662 ret = gfs2_rbm_from_block(rbm, nblock);
1663 if (ret < 0)
1664 return ret;
1665 return 1;
1666 }
1667
1668 /**
1669 * gfs2_rbm_find - Look for blocks of a particular state
1670 * @rbm: Value/result starting position and final position
1671 * @state: The state which we want to find
1672 * @minext: Pointer to the requested extent length (NULL for a single block)
1673 * This is updated to be the actual reservation size.
1674 * @ip: If set, check for reservations
1675 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1676 * around until we've reached the starting point.
1677 * @ap: the allocation parameters
1678 *
1679 * Side effects:
1680 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1681 * has no free blocks in it.
1682 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1683 * has come up short on a free block search.
1684 *
1685 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1686 */
1687
gfs2_rbm_find(struct gfs2_rbm * rbm,u8 state,u32 * minext,const struct gfs2_inode * ip,bool nowrap,const struct gfs2_alloc_parms * ap)1688 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1689 const struct gfs2_inode *ip, bool nowrap,
1690 const struct gfs2_alloc_parms *ap)
1691 {
1692 struct buffer_head *bh;
1693 int initial_bii;
1694 u32 initial_offset;
1695 int first_bii = rbm->bii;
1696 u32 first_offset = rbm->offset;
1697 u32 offset;
1698 u8 *buffer;
1699 int n = 0;
1700 int iters = rbm->rgd->rd_length;
1701 int ret;
1702 struct gfs2_bitmap *bi;
1703 struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
1704
1705 /* If we are not starting at the beginning of a bitmap, then we
1706 * need to add one to the bitmap count to ensure that we search
1707 * the starting bitmap twice.
1708 */
1709 if (rbm->offset != 0)
1710 iters++;
1711
1712 while(1) {
1713 bi = rbm_bi(rbm);
1714 if (test_bit(GBF_FULL, &bi->bi_flags) &&
1715 (state == GFS2_BLKST_FREE))
1716 goto next_bitmap;
1717
1718 bh = bi->bi_bh;
1719 buffer = bh->b_data + bi->bi_offset;
1720 WARN_ON(!buffer_uptodate(bh));
1721 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1722 buffer = bi->bi_clone + bi->bi_offset;
1723 initial_offset = rbm->offset;
1724 offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
1725 if (offset == BFITNOENT)
1726 goto bitmap_full;
1727 rbm->offset = offset;
1728 if (ip == NULL)
1729 return 0;
1730
1731 initial_bii = rbm->bii;
1732 ret = gfs2_reservation_check_and_update(rbm, ip,
1733 minext ? *minext : 0,
1734 &maxext);
1735 if (ret == 0)
1736 return 0;
1737 if (ret > 0) {
1738 n += (rbm->bii - initial_bii);
1739 goto next_iter;
1740 }
1741 if (ret == -E2BIG) {
1742 rbm->bii = 0;
1743 rbm->offset = 0;
1744 n += (rbm->bii - initial_bii);
1745 goto res_covered_end_of_rgrp;
1746 }
1747 return ret;
1748
1749 bitmap_full: /* Mark bitmap as full and fall through */
1750 if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
1751 set_bit(GBF_FULL, &bi->bi_flags);
1752
1753 next_bitmap: /* Find next bitmap in the rgrp */
1754 rbm->offset = 0;
1755 rbm->bii++;
1756 if (rbm->bii == rbm->rgd->rd_length)
1757 rbm->bii = 0;
1758 res_covered_end_of_rgrp:
1759 if ((rbm->bii == 0) && nowrap)
1760 break;
1761 n++;
1762 next_iter:
1763 if (n >= iters)
1764 break;
1765 }
1766
1767 if (minext == NULL || state != GFS2_BLKST_FREE)
1768 return -ENOSPC;
1769
1770 /* If the extent was too small, and it's smaller than the smallest
1771 to have failed before, remember for future reference that it's
1772 useless to search this rgrp again for this amount or more. */
1773 if ((first_offset == 0) && (first_bii == 0) &&
1774 (*minext < rbm->rgd->rd_extfail_pt))
1775 rbm->rgd->rd_extfail_pt = *minext;
1776
1777 /* If the maximum extent we found is big enough to fulfill the
1778 minimum requirements, use it anyway. */
1779 if (maxext.len) {
1780 *rbm = maxext.rbm;
1781 *minext = maxext.len;
1782 return 0;
1783 }
1784
1785 return -ENOSPC;
1786 }
1787
1788 /**
1789 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1790 * @rgd: The rgrp
1791 * @last_unlinked: block address of the last dinode we unlinked
1792 * @skip: block address we should explicitly not unlink
1793 *
1794 * Returns: 0 if no error
1795 * The inode, if one has been found, in inode.
1796 */
1797
try_rgrp_unlink(struct gfs2_rgrpd * rgd,u64 * last_unlinked,u64 skip)1798 static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1799 {
1800 u64 block;
1801 struct gfs2_sbd *sdp = rgd->rd_sbd;
1802 struct gfs2_glock *gl;
1803 struct gfs2_inode *ip;
1804 int error;
1805 int found = 0;
1806 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1807
1808 while (1) {
1809 down_write(&sdp->sd_log_flush_lock);
1810 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
1811 true, NULL);
1812 up_write(&sdp->sd_log_flush_lock);
1813 if (error == -ENOSPC)
1814 break;
1815 if (WARN_ON_ONCE(error))
1816 break;
1817
1818 block = gfs2_rbm_to_block(&rbm);
1819 if (gfs2_rbm_from_block(&rbm, block + 1))
1820 break;
1821 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1822 continue;
1823 if (block == skip)
1824 continue;
1825 *last_unlinked = block;
1826
1827 error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
1828 if (error)
1829 continue;
1830
1831 /* If the inode is already in cache, we can ignore it here
1832 * because the existing inode disposal code will deal with
1833 * it when all refs have gone away. Accessing gl_object like
1834 * this is not safe in general. Here it is ok because we do
1835 * not dereference the pointer, and we only need an approx
1836 * answer to whether it is NULL or not.
1837 */
1838 ip = gl->gl_object;
1839
1840 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1841 gfs2_glock_put(gl);
1842 else
1843 found++;
1844
1845 /* Limit reclaim to sensible number of tasks */
1846 if (found > NR_CPUS)
1847 return;
1848 }
1849
1850 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1851 return;
1852 }

/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not end up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple: we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */

static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
{
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_lkstats *st;
	u64 r_dcount, l_dcount;
	u64 l_srttb, a_srttb = 0;
	s64 srttb_diff;
	u64 sqr_diff;
	u64 var;
	int cpu, nonzero = 0;

	preempt_disable();
	for_each_present_cpu(cpu) {
		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
		if (st->stats[GFS2_LKS_SRTTB]) {
			a_srttb += st->stats[GFS2_LKS_SRTTB];
			nonzero++;
		}
	}
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
	if (nonzero)
		do_div(a_srttb, nonzero);
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	preempt_enable();

	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];

	if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
		return false;

	srttb_diff = a_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;

	var *= 2;
	if (l_dcount < 8 || r_dcount < 8)
		var *= 2;
	if (loops == 1)
		var *= 2;

	return ((srttb_diff < 0) && (sqr_diff > var));
}
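
/*
 * Illustrative sketch (hypothetical helper, not called anywhere in this
 * file): the test in gfs2_rgrp_congested() boils down to the significance
 * check below. The real function gathers its inputs from the per-cpu and
 * per-glock lock statistics; all names here are made up for the example.
 */
static inline bool example_srtt_significantly_worse(s64 avg_srttb,
						    s64 local_srttb,
						    u64 var_sum, int loops,
						    u64 l_samples,
						    u64 r_samples)
{
	s64 diff = avg_srttb - local_srttb;	/* negative: local is slower */
	u64 sqr_diff = (u64)(diff * diff);
	u64 margin = var_sum * 2;		/* base margin */

	if (l_samples < 8 || r_samples < 8)
		margin *= 2;	/* few samples: demand a bigger difference */
	if (loops == 1)
		margin *= 2;	/* later pass: be even less eager to reject */

	/* Congested only if the local SRTTB exceeds the average by a
	 * statistically significant amount. */
	return diff < 0 && sqr_diff > margin;
}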

/**
 * gfs2_rgrp_used_recently - test if an rgrp has been recently used
 * @rs: The block reservation with the rgrp to test
 * @msecs: The time limit in milliseconds
 *
 * Returns: True if the rgrp glock has been used within the time limit
 */
static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
				    u64 msecs)
{
	u64 tdiff;

	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
				      rs->rs_rbm.rgd->rd_gl->gl_dstamp));

	return tdiff > (msecs * 1000 * 1000);
}

static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 skip;

	get_random_bytes(&skip, sizeof(skip));
	return skip % sdp->sd_rgrps;
}

static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
{
	struct gfs2_rgrpd *rgd = *pos;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	rgd = gfs2_rgrpd_get_next(rgd);
	if (rgd == NULL)
		rgd = gfs2_rgrpd_get_first(sdp);
	*pos = rgd;
	if (rgd != begin) /* If we didn't wrap */
		return true;
	return false;
}

/**
 * fast_to_acquire - determine if a resource group will be fast to acquire
 * @rgd: The rgrp to test
 *
 * If this is one of our preferred rgrps, it should be quicker to acquire,
 * because we tried to set ourselves up as dlm lock master.
 */
static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
{
	struct gfs2_glock *gl = rgd->rd_gl;

	if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
	    !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		return 1;
	if (rgd->rd_flags & GFS2_RDF_PREFERRED)
		return 1;
	return 0;
}

/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @ap: the allocation parameters
 *
 * We try our best to find an rgrp that has at least ap->target blocks
 * available. After a couple of passes (loops == 2), the prospects of finding
 * such an rgrp diminish. At this stage, we return the first rgrp that has
 * at least ap->min_target blocks available. Either way, we set ap->allowed
 * to the number of blocks available in the chosen rgrp.
 *
 * Returns: 0 on success,
 *          -ENOSPC if a suitable rgrp can't be found
 *          errno otherwise
 */

int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;
	u32 skip = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, ap->target))
		return -EINVAL;
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
	} else {
		check_and_update_goal(ip);
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 1;

		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 0;
			if (skip && skip--)
				goto next_rgrp;
			if (!gfs2_rs_active(rs)) {
				if (loops == 0 &&
				    !fast_to_acquire(rs->rs_rbm.rgd))
					goto next_rgrp;
				if ((loops < 2) &&
				    gfs2_rgrp_used_recently(rs, 1000) &&
				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
					goto next_rgrp;
			}
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (unlikely(error))
				return error;
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto skip_rgrp;
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}

		/* Skip unusable resource groups */
		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
						 GFS2_RDF_ERROR)) ||
		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
			goto skip_rgrp;

		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);

		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
			goto check_rgrp;

		/* If rgrp has enough free space, use it */
		if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
		    (loops == 2 && ap->min_target &&
		     rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
			ip->i_rgd = rs->rs_rbm.rgd;
			ap->allowed = ip->i_rgd->rd_free_clone;
			return 0;
		}
check_rgrp:
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
					ip->i_no_addr);
skip_rgrp:
		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(rs);

		/* Unlock rgrp if required */
		if (!rg_locked)
			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
next_rgrp:
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
			continue;
		if (skip)
			continue;

		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		 * trying again.
		 */
		loops++;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				return error;
		}
		/* Flushing the log may release space */
		if (loops == 2)
			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
	}

	return -ENOSPC;
}

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */
void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = ip->i_res;

	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}
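
/*
 * Illustrative sketch of the reserve/allocate/release sequence (this
 * helper is hypothetical and not part of this file; real callers also
 * open a transaction and perform quota checks around the allocation):
 */
static int example_allocate_blocks(struct gfs2_inode *ip, unsigned int target)
{
	struct gfs2_alloc_parms ap = { .target = target, };
	unsigned int n = target;	/* value/result extent length */
	u64 bn;
	int error;

	error = gfs2_inplace_reserve(ip, &ap);	/* pick and lock an rgrp */
	if (error)
		return error;
	/* On success, n holds how many blocks were actually allocated,
	 * starting at block bn. No dinode here, so generation may be NULL. */
	error = gfs2_alloc_blocks(ip, &bn, &n, false, NULL);
	gfs2_inplace_release(ip);		/* drop the rgrp glock */
	return error;
}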

/**
 * gfs2_get_block_type - Determine the type of a block in an rgrp
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */
static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	int ret;

	ret = gfs2_rbm_from_block(&rbm, block);
	WARN_ON_ONCE(ret != 0);

	return gfs2_testbit(&rbm);
}

/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 *
 * Add the bitmap buffer to the transaction.
 * Set the found bits to the new state to change the blocks'
 * allocation state.
 */
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			      unsigned int *n)
{
	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
	const unsigned int elen = *n;
	u64 block;
	int ret;

	*n = 1;
	block = gfs2_rbm_to_block(rbm);
	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	block++;
	while (*n < elen) {
		ret = gfs2_rbm_from_block(&pos, block);
		if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
			break;
		gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
		(*n)++;
		block++;
	}
}

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */
static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;
	struct gfs2_bitmap *bi, *bi_prev = NULL;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	gfs2_rbm_from_block(&rbm, bstart);
	while (blen--) {
		bi = rbm_bi(&rbm);
		if (bi != bi_prev) {
			if (!bi->bi_clone) {
				/* Lazily set up a clone of the bitmap so
				 * that freed blocks are not re-allocated
				 * before the log has been flushed */
				bi->bi_clone = kmalloc(bi->bi_bh->b_size,
						       GFP_NOFS | __GFP_NOFAIL);
				memcpy(bi->bi_clone + bi->bi_offset,
				       bi->bi_bh->b_data + bi->bi_offset,
				       bi->bi_len);
			}
			gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
			bi_prev = bi;
		}
		gfs2_setbit(&rbm, false, new_state);
		gfs2_rbm_incr(&rbm);
	}

	return rbm.rgd;
}

/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 */
void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved, rgd->rd_extfail_pt);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
}

static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}

/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */
static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
			/* We used up our block reservation, so we should
			   reserve more blocks next time. */
			atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
		}
		__rs_deltree(rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}
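
/*
 * Worked example (hypothetical numbers): with a 10-block reservation
 * (rs_free == 10) and an 8-block allocation taken from its start,
 * rlen = min(10, 8) = 8, so rs_free drops to 2, rd_reserved drops by 8,
 * and rs_rbm advances 8 blocks; the reservation survives. Allocating
 * all 10 blocks instead leaves rs_free == 0, so the reservation is
 * removed from the tree and rs_sizehint is bumped for next time.
 */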

/**
 * gfs2_set_alloc_start - Set starting point for block allocation
 * @rbm: The rbm which will be set to the required location
 * @ip: The gfs2 inode
 * @dinode: Flag to say if allocation includes a new inode
 *
 * This sets the starting point from the reservation if one is active;
 * otherwise it falls back to guessing a start point based on the
 * inode's goal block or the last allocation point in the rgrp.
 */
static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
				 const struct gfs2_inode *ip, bool dinode)
{
	u64 goal;

	if (gfs2_rs_active(ip->i_res)) {
		*rbm = ip->i_res->rs_rbm;
		return;
	}

	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;

	gfs2_rbm_from_block(rbm, goal);
}

/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 block; /* block, within the file system scope */
	int error;

	gfs2_set_alloc_start(&rbm, ip, dinode);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false, NULL);

	if (error == -ENOSPC) {
		gfs2_set_alloc_start(&rbm, ip, dinode);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false,
				      NULL);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
			rbm.rgd->rd_extfail_pt);
		goto rgrp_error;
	}

	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_meta(ip->i_gl, dibh);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		pr_warn("nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)	/* zero is reserved, skip it */
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, *nblocks);

	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}

/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */
void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */
void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
}

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	if (gfs2_get_block_type(rgd, no_addr) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
		rgd = ip->i_rgd;
	else
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
		return;
	}
	ip->i_rgd = rgd;

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		/* Grow the array in increments of ten entries */
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, 0,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */
void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}
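
/*
 * Illustrative sketch of the rlist API (this helper is hypothetical and
 * not part of this file; error handling and the actual freeing work are
 * omitted): gather the rgrps covered by a set of blocks, acquire all of
 * their glocks, do the work, then drop the locks and free the list.
 */
static int example_lock_rgrps(struct gfs2_inode *ip, const u64 *blocks,
			      unsigned int nr)
{
	struct gfs2_rgrp_list rlist;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	for (x = 0; x < nr; x++)		/* dedupes rgrps internally */
		gfs2_rlist_add(ip, &rlist, blocks[x]);
	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);	/* init the holders */
	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error == 0) {
		/* ... modify the bitmaps, e.g. via rgblk_free() ... */
		gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
	}
	gfs2_rlist_free(&rlist);
	return error;
}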