// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */
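
/*
 * Overview (informal sketch, not from the original source): the root of a
 * da-btree always lives at block 0 of the fork (args->geo->leafblk for
 * directories), intermediate nodes hold an array of (hashval, before)
 * entries where "before" points at the child covering hashvals up to and
 * including "hashval", and the leaves are attr leaf or dir2/dir3 leafn
 * blocks. Blocks at the same level are chained through forw/back sibling
 * pointers; the split/join code below maintains that shape.
 */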

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
		xfs_da_state_blk_t *existing_root,
		xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
		xfs_da_state_blk_t *existing_blk,
		xfs_da_state_blk_t *split_blk,
		xfs_da_state_blk_t *blk_to_add,
		int treelevel,
		int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
		xfs_da_state_blk_t *node_blk_1,
		xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
		xfs_da_state_blk_t *old_node_blk,
		xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
		xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
		xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
		xfs_da_state_blk_t *src_node_blk,
		xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
		xfs_da_state_blk_t *drop_blk,
		xfs_da_state_blk_t *save_blk);


kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
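
/*
 * Typical caller pattern, as a hedged sketch (field names assume the
 * xfs_da_state layout used by the attr/dir code; see xfs_da_btree.h):
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	...
 *	xfs_da_state_free(state);
 */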

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}

/*
 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
 * accessible on v5 filesystems. This header format is common across da node,
 * attr leaf and dir leaf blocks.
 */
xfs_failaddr_t
xfs_da3_blkinfo_verify(
	struct xfs_buf		*bp,
	struct xfs_da3_blkinfo	*hdr3)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_da_blkinfo	*hdr = &hdr3->hdr;

	if (!xfs_verify_magic16(bp, hdr->magic))
		return __this_address;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
			return __this_address;
	}

	return NULL;
}

static xfs_failaddr_t
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;
	xfs_failaddr_t		fa;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
	if (fa)
		return fa;

	if (ichdr.level == 0)
		return __this_address;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return __this_address;
	if (ichdr.count == 0)
		return __this_address;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return __this_address;

	/* XXX: hash order check? */

	return NULL;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_da3_node_hdr	*hdr3 = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_da3_node_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	xfs_failaddr_t		fa;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_verifier_error(bp, -EFSBADCRC,
					__this_address);
			break;
		}
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		fa = xfs_da3_node_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
		break;
	}
}

/* Verify the structure of a da3 block. */
static xfs_failaddr_t
xfs_da3_node_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
	case XFS_DA_NODE_MAGIC:
		return xfs_da3_node_verify(bp);
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		return bp->b_ops->verify_struct(bp);
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		return bp->b_ops->verify_struct(bp);
	default:
		return __this_address;
	}
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.name = "xfs_da3_node",
	.magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
		     cpu_to_be16(XFS_DA3_NODE_MAGIC) },
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
	.verify_struct = xfs_da3_node_verify_struct,
};
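
/*
 * Note: callers normally reach the verifiers above through
 * xfs_da3_node_read() below, which hands &xfs_da3_node_buf_ops to
 * xfs_da_read_buf() and then re-types the buffer once the magic of the
 * block that actually came off disk is known.
 */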

int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
				which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp && *bpp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
					tp->t_mountp, info, sizeof(*info));
			xfs_trans_brelse(tp, *bpp);
			*bpp = NULL;
			return -EFSCORRUPTED;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return 0;
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again. The new
			 * extrablk will be consumed by xfs_da3_node_split if
			 * the node is split.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * xfs_da3_node_split() should have consumed any extra blocks we added
	 * during a double leaf split in the attr fork. This is guaranteed as
	 * we can't be here if the attr fork only has a single leaf block.
	 */
	ASSERT(state->extravalid == 0 ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error)
		goto out;

	/*
	 * Update pointers to the node which used to be block 0 and just got
	 * bumped because of the addition of a new root node. Note that the
	 * original block 0 could be at any position in the list of blocks in
	 * the tree.
	 *
	 * Note: the magic numbers and sibling pointers are in the same physical
	 * place for both v2 and v3 headers (by design). Hence it doesn't matter
	 * which version of the xfs_da_intnode structure we use here as the
	 * result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
			xfs_buf_mark_corrupt(oldblk->bp);
			error = -EFSCORRUPTED;
			goto out;
		}
		node = addblk->bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
			xfs_buf_mark_corrupt(oldblk->bp);
			error = -EFSCORRUPTED;
			goto out;
		}
		node = addblk->bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
out:
	addblk->bp = NULL;
	return error;
}

/*
 * Split the root. We have to create a new root and point to the two
 * parts (the split old root) that we just created. Copy block zero to
 * the EOF, extending the inode in the process.
 */
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
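	/*
	 * Informal sketch of the index handling below: after the rebalance,
	 * oldblk->index still names the slot found by node_lookup_int().
	 * If that slot stayed in the old block (index <= count), bumping
	 * the index first makes the insert-BEFORE semantics of
	 * xfs_da3_node_add() place the new entry just AFTER the one we
	 * looked up; otherwise the insertion point migrated to the new
	 * block (the rebalance set up newblk->index) and we adjust there.
	 */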
	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
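/*
 * For illustration (numbers hypothetical): with 96 entries in blk1 and 0
 * in blk2, count = (96 - 0) / 2 = 48 entries move from the high end of
 * blk1 to blk2; a negative count would move entries the other way, and
 * count == 0 means the two nodes are already balanced.
 */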
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				dp->d_ops->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top. If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}

#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root. Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = dp->d_ops->node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor. Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it (*action = 0).
 * If the block is empty, fill in the state structure and set *action to 2.
 * If it can be collapsed, fill in the state structure and set *action to 1.
 * If nothing can be done, set *action to 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block. We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return error;
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare. We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
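	/*
	 * Worked example (numbers hypothetical): with node_ents == 128 and
	 * 50 live entries here, count = 128 - 32 - 50 = 46, so a sibling
	 * holding at most 46 entries can absorb this block and still leave
	 * at least 25% of the merged node free.
	 */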
	count = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;
		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = dp->d_ops->node_tree_p(drop_node);
	save_btree = dp->d_ops->node_tree_p(save_node);
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				dp->d_ops->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend. This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	unsigned int		expected_level = 0;
	uint16_t		magic;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = args->geo->leafblk;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		magic = be16_to_cpu(curr->magic);

		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (magic == XFS_DIR2_LEAFN_MAGIC ||
		    magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		}

		if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		}

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		}

		/* Check the level from the root. */
		if (blkno == args->geo->leafblk)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		} else
			expected_level--;

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search. (note: small blocks will skip loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashval's, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}
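		/*
		 * At this point probe is either max (every entry hashes
		 * below the search value) or the index of the first entry
		 * whose hashval is >= the search value; the binary search
		 * above only had to get within 4 entries of that spot.
		 */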

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}

		/* We can't point back to the root. */
		if (blkno == args->geo->leafblk) {
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
					dp->i_mount);
			return -EFSCORRUPTED;
		}
	}

	if (expected_level != 0) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, dp->i_mount);
		return -EFSCORRUPTED;
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return -EFSCORRUPTED;
		}
		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return error;
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = -ENOATTR;
			}
		}
		break;
	}
	*result = retval;
	return 0;
}

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode	*dp,
	struct xfs_buf		*node1_bp,
	struct xfs_buf		*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}
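
	/*
	 * Informal sketch of the two cases handled below, where "new" is
	 * spliced next to "old" and the arrows are the forw/back sibling
	 * links:
	 *
	 *	link before:	... <-> new <-> old <-> ...
	 *	link after:	... <-> old <-> new <-> ...
	 */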

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						sizeof(*tmp_info) - 1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						sizeof(*tmp_info) - 1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}

/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvalues) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_intnode	*node;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active - 1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}
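
	/*
	 * Note that running off either end of the tree is reported through
	 * *result rather than the return value, so callers doing sequential
	 * scans can tell "no more blocks" apart from a real I/O error.
	 */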

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Read the next child block into a local buffer.
		 */
		error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
					  args->whichfork);
		if (error)
			return error;

		/*
		 * Release the old block (if it's dirty, the trans doesn't
		 * actually let go) and swap the local buffer into the path
		 * structure. This ensures failure of the above read doesn't set
		 * a NULL buffer in an active slot in the path.
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);
		blk->blkno = blkno;
		blk->bp = bp;

		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			node = (xfs_da_intnode_t *)info;
			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}


/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
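/*
 * Worked example (illustrative only): for the five-byte name "abcde" the
 * unrolled loop below executes once,
 *
 *	hash = ('a' << 21) ^ ('b' << 14) ^ ('c' << 7) ^ 'd' ^ rol32(0, 28);
 *
 * which leaves namelen == 1, so the switch folds in the final byte and
 * returns:
 *
 *	'e' ^ rol32(hash, 7);
 */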
xfs_dahash_t
xfs_da_hashname(const uint8_t *name, int namelen)
{
	xfs_dahash_t	hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
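
/*
 * These are the default, case-sensitive name operations: hash the raw
 * bytes and require an exact match.  Case-insensitive directory
 * configurations supply their own xfs_nameops vector (defined elsewhere)
 * whose compname can also report XFS_CMP_CASE for a case-only match.
 */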

int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping the new blocks as a single contiguous extent.
	 */
	nmap = 1;
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w) | XFS_BMAPI_METADATA | XFS_BMAPI_CONTIG,
			args->total, &map, &nmap);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t	b;
		int		c;

		/*
		 * If we didn't get it, and the request might still succeed
		 * if fragmented, retry without the CONTIG flag, looping
		 * until the whole range is mapped.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, 0);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = min(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w) | XFS_BMAPI_METADATA,
					args->total, &mapp[mapi], &nmap);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
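		/*
		 * Note that if the allocator ever returns no mapping we fall
		 * out of the loop with a short total; the block count check
		 * below turns that into -ENOSPC.
		 */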
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
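/*
 * Note: "ahead of the file" means within the fork's da btree address
 * space.  The search for free space starts at args->geo->leafblk, which
 * for directories is the fixed high file offset where leaf/node blocks
 * live; the attribute geometry places it at the start of the fork.
 */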
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}

/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed, since removing it
 * cannot cause a bmap btree split.
 */
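/*
 * In outline (as implemented below):
 *   1. copy the last block's contents over the doomed block;
 *   2. repoint the moved block's list siblings at its new location;
 *   3. walk down from the root to the parent of the old location and
 *      repoint its entry at the new location;
 *   4. hand the now-free last block back to the caller to unmap.
 */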
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
		btree = dp->d_ops->node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
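	/*
	 * dead_level and dead_hash now describe the moved block; they are
	 * what we search on when hunting for its parent entry below.
	 */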
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		dead_blkno,
	struct xfs_buf		*dead_buf)
{
	struct xfs_inode	*dp;
	int			done, error, w, count;
	struct xfs_trans	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}

/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK)
			return 0;
		if (off != mapp[i].br_startoff)
			return 0;
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}

/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
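/*
 * In the multiple map case the caller therefore owns the allocation and
 * must kmem_free() it once *mapp no longer points at the caller's own
 * single-entry map (see the out_free paths in the callers below).
 */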
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_NOFS);
		if (!map)
			return -ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}

/*
 * Map the block we are given, ready for reading.  There are three possible
 * return values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	<0 - negative errno if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;

	/*
	 * Caller doesn't have a mapping.  mappedbno == -1 means treat a
	 * hole as corruption; -2 means don't complain if we land in one.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}
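
	/*
	 * At this point irecs[0..nirecs) describe the candidate mapping,
	 * whether read from the bmap btree or constructed from the caller's
	 * known daddr; verify it fully covers [bno, bno + nfsb) before
	 * converting it to a buffer map.
	 */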
	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
		if (unlikely(error == -EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				    mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

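	/*
	 * Bias the buffer's LRU reference count so that dir/attr btree
	 * blocks tend to stay cached longer than single-use data buffers.
	 * This is a caching hint, not a correctness requirement.
	 */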
	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}