1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_da_btree.h"
31 #include "xfs_bmap_btree.h"
32 #include "xfs_alloc_btree.h"
33 #include "xfs_ialloc_btree.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_inode_item.h"
39 #include "xfs_alloc.h"
40 #include "xfs_btree.h"
41 #include "xfs_bmap.h"
42 #include "xfs_attr.h"
43 #include "xfs_attr_leaf.h"
44 #include "xfs_dir2_data.h"
45 #include "xfs_dir2_leaf.h"
46 #include "xfs_dir2_block.h"
47 #include "xfs_dir2_node.h"
48 #include "xfs_error.h"
49 
50 /*
51  * xfs_da_btree.c
52  *
53  * Routines to implement directories as Btrees of hashed names.
54  */
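/*
 * A rough layout sketch of an intermediate node, inferred only from the
 * fields this file touches (see xfs_da_btree.h for the authoritative
 * definitions):
 *
 *	xfs_da_intnode_t
 *	    hdr.info.forw/back	sibling chain of blocks at this level
 *	    hdr.info.magic	XFS_DA_NODE_MAGIC
 *	    hdr.count		entries in use in btree[]
 *	    hdr.level		height above the leaves (level 1 nodes
 *				point directly at leaf blocks)
 *	    btree[]		{ hashval, before } pairs kept sorted by
 *				hashval; "before" is the child block
 *				covering hashvals up to (roughly) that
 *				entry's hashval.
 */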
55 
56 /*========================================================================
57  * Function prototypes for the kernel.
58  *========================================================================*/
59 
60 /*
61  * Routines used for growing the Btree.
62  */
63 STATIC int xfs_da_root_split(xfs_da_state_t *state,
64 					    xfs_da_state_blk_t *existing_root,
65 					    xfs_da_state_blk_t *new_child);
66 STATIC int xfs_da_node_split(xfs_da_state_t *state,
67 					    xfs_da_state_blk_t *existing_blk,
68 					    xfs_da_state_blk_t *split_blk,
69 					    xfs_da_state_blk_t *blk_to_add,
70 					    int treelevel,
71 					    int *result);
72 STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
73 					 xfs_da_state_blk_t *node_blk_1,
74 					 xfs_da_state_blk_t *node_blk_2);
75 STATIC void xfs_da_node_add(xfs_da_state_t *state,
76 				   xfs_da_state_blk_t *old_node_blk,
77 				   xfs_da_state_blk_t *new_node_blk);
78 
79 /*
80  * Routines used for shrinking the Btree.
81  */
82 STATIC int xfs_da_root_join(xfs_da_state_t *state,
83 					   xfs_da_state_blk_t *root_blk);
84 STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
85 STATIC void xfs_da_node_remove(xfs_da_state_t *state,
86 					      xfs_da_state_blk_t *drop_blk);
87 STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
88 					 xfs_da_state_blk_t *src_node_blk,
89 					 xfs_da_state_blk_t *dst_node_blk);
90 
91 /*
92  * Utility routines.
93  */
94 STATIC uint	xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
95 STATIC int	xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
96 STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra);
97 STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
98 				  xfs_da_state_blk_t *drop_blk,
99 				  xfs_da_state_blk_t *save_blk);
100 STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);
101 
102 /*========================================================================
103  * Routines used for growing the Btree.
104  *========================================================================*/
105 
106 /*
107  * Create the initial contents of an intermediate node.
108  */
109 int
110 xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
111 				 xfs_dabuf_t **bpp, int whichfork)
112 {
113 	xfs_da_intnode_t *node;
114 	xfs_dabuf_t *bp;
115 	int error;
116 	xfs_trans_t *tp;
117 
118 	tp = args->trans;
119 	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
120 	if (error)
121 		return(error);
122 	ASSERT(bp != NULL);
123 	node = bp->data;
124 	node->hdr.info.forw = 0;
125 	node->hdr.info.back = 0;
126 	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
127 	node->hdr.info.pad = 0;
128 	node->hdr.count = 0;
129 	node->hdr.level = cpu_to_be16(level);
130 
131 	xfs_da_log_buf(tp, bp,
132 		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
133 
134 	*bpp = bp;
135 	return(0);
136 }
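/*
 * A minimal usage sketch for xfs_da_node_create() -- a hypothetical
 * caller, mirroring what xfs_da_root_split() does further down:
 * allocate the block, create the empty node, then fill in and log the
 * entries yourself (only the header is logged by node_create).
 *
 *	error = xfs_da_grow_inode(args, &blkno);
 *	if (!error)
 *		error = xfs_da_node_create(args, blkno, level,
 *					   &bp, args->whichfork);
 *	if (error)
 *		return error;
 *	node = bp->data;
 *	node->btree[0].hashval = cpu_to_be32(hashval);
 *	node->btree[0].before = cpu_to_be32(child_blkno);
 *	node->hdr.count = cpu_to_be16(1);
 *	xfs_da_log_buf(args->trans, bp,
 *		XFS_DA_LOGRANGE(node, node->btree, sizeof(node->btree[0])));
 */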
137 
138 /*
139  * Split a leaf node, rebalance, then possibly split
140  * intermediate nodes, rebalance, etc.
141  */
142 int							/* error */
143 xfs_da_split(xfs_da_state_t *state)
144 {
145 	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
146 	xfs_da_intnode_t *node;
147 	xfs_dabuf_t *bp;
148 	int max, action, error, i;
149 
150 	/*
151 	 * Walk back up the tree splitting/inserting/adjusting as necessary.
152 	 * If we need to insert and there isn't room, split the node, then
153 	 * decide which fragment to insert the new block from below into.
154 	 * Note that we may split the root this way, but we need more fixup.
155 	 */
156 	max = state->path.active - 1;
157 	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
158 	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
159 	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
160 
161 	addblk = &state->path.blk[max];		/* initial dummy value */
162 	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
163 		oldblk = &state->path.blk[i];
164 		newblk = &state->altpath.blk[i];
165 
166 		/*
167 		 * If this is a leaf node, allocate a new leaf and
168 		 * rebalance the entries across the two.  If it is an
169 		 * intermediate node, the layer below has already been
170 		 * split; decide whether this node must split as well.
171 		 */
172 		switch (oldblk->magic) {
173 		case XFS_ATTR_LEAF_MAGIC:
174 			error = xfs_attr_leaf_split(state, oldblk, newblk);
175 			if ((error != 0) && (error != ENOSPC)) {
176 				return(error);	/* GROT: attr is inconsistent */
177 			}
178 			if (!error) {
179 				addblk = newblk;
180 				break;
181 			}
182 			/*
183 			 * Entry wouldn't fit, split the leaf again.
184 			 */
185 			state->extravalid = 1;
186 			if (state->inleaf) {
187 				state->extraafter = 0;	/* before newblk */
188 				error = xfs_attr_leaf_split(state, oldblk,
189 							    &state->extrablk);
190 			} else {
191 				state->extraafter = 1;	/* after newblk */
192 				error = xfs_attr_leaf_split(state, newblk,
193 							    &state->extrablk);
194 			}
195 			if (error)
196 				return(error);	/* GROT: attr inconsistent */
197 			addblk = newblk;
198 			break;
199 		case XFS_DIR2_LEAFN_MAGIC:
200 			error = xfs_dir2_leafn_split(state, oldblk, newblk);
201 			if (error)
202 				return error;
203 			addblk = newblk;
204 			break;
205 		case XFS_DA_NODE_MAGIC:
206 			error = xfs_da_node_split(state, oldblk, newblk, addblk,
207 							 max - i, &action);
208 			xfs_da_buf_done(addblk->bp);
209 			addblk->bp = NULL;
210 			if (error)
211 				return(error);	/* GROT: dir is inconsistent */
212 			/*
213 			 * Record the newly split block for the next time thru?
214 			 */
215 			if (action)
216 				addblk = newblk;
217 			else
218 				addblk = NULL;
219 			break;
220 		}
221 
222 		/*
223 		 * Update the btree to show the new hashval for this child.
224 		 */
225 		xfs_da_fixhashpath(state, &state->path);
226 		/*
227 		 * If we won't need this block again, it's getting dropped
228 		 * from the active path by the loop control, so we need
229 		 * to mark it done now.
230 		 */
231 		if (i > 0 || !addblk)
232 			xfs_da_buf_done(oldblk->bp);
233 	}
234 	if (!addblk)
235 		return(0);
236 
237 	/*
238 	 * Split the root node.
239 	 */
240 	ASSERT(state->path.active == 0);
241 	oldblk = &state->path.blk[0];
242 	error = xfs_da_root_split(state, oldblk, addblk);
243 	if (error) {
244 		xfs_da_buf_done(oldblk->bp);
245 		xfs_da_buf_done(addblk->bp);
246 		addblk->bp = NULL;
247 		return(error);	/* GROT: dir is inconsistent */
248 	}
249 
250 	/*
251 	 * Update pointers to the node which used to be block 0 and
252 	 * just got bumped because of the addition of a new root node.
253 	 * There might be three blocks involved if a double split occurred,
254 	 * and the original block 0 could be at any position in the list.
255 	 */
256 
257 	node = oldblk->bp->data;
258 	if (node->hdr.info.forw) {
259 		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
260 			bp = addblk->bp;
261 		} else {
262 			ASSERT(state->extravalid);
263 			bp = state->extrablk.bp;
264 		}
265 		node = bp->data;
266 		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
267 		xfs_da_log_buf(state->args->trans, bp,
268 		    XFS_DA_LOGRANGE(node, &node->hdr.info,
269 		    sizeof(node->hdr.info)));
270 	}
271 	node = oldblk->bp->data;
272 	if (node->hdr.info.back) {
273 		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
274 			bp = addblk->bp;
275 		} else {
276 			ASSERT(state->extravalid);
277 			bp = state->extrablk.bp;
278 		}
279 		node = bp->data;
280 		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
281 		xfs_da_log_buf(state->args->trans, bp,
282 		    XFS_DA_LOGRANGE(node, &node->hdr.info,
283 		    sizeof(node->hdr.info)));
284 	}
285 	xfs_da_buf_done(oldblk->bp);
286 	xfs_da_buf_done(addblk->bp);
287 	addblk->bp = NULL;
288 	return(0);
289 }
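/*
 * A worked pass through the loop above, assuming a three-deep path
 * (root, node, leaf) in which every block is full: the leaf split
 * yields addblk; the intermediate node at path.blk[1] splits to absorb
 * it and yields a new addblk; the loop then exits with addblk still
 * set, so xfs_da_root_split() grows the tree by one level.  The root
 * must stay at a fixed block number, which is why root_split relocates
 * the old root's contents rather than the new children.
 */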
290 
291 /*
292  * Split the root.  We have to create a new root and point to the two
293  * parts (the split old root) that we just created.  Copy block zero to
294  * the EOF, extending the inode in the process.
295  */
296 STATIC int						/* error */
297 xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
298 				 xfs_da_state_blk_t *blk2)
299 {
300 	xfs_da_intnode_t *node, *oldroot;
301 	xfs_da_args_t *args;
302 	xfs_dablk_t blkno;
303 	xfs_dabuf_t *bp;
304 	int error, size;
305 	xfs_inode_t *dp;
306 	xfs_trans_t *tp;
307 	xfs_mount_t *mp;
308 	xfs_dir2_leaf_t *leaf;
309 
310 	/*
311 	 * Copy the existing (incorrect) block from the root node position
312 	 * to a free space somewhere.
313 	 */
314 	args = state->args;
315 	ASSERT(args != NULL);
316 	error = xfs_da_grow_inode(args, &blkno);
317 	if (error)
318 		return(error);
319 	dp = args->dp;
320 	tp = args->trans;
321 	mp = state->mp;
322 	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
323 	if (error)
324 		return(error);
325 	ASSERT(bp != NULL);
326 	node = bp->data;
327 	oldroot = blk1->bp->data;
328 	if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) {
329 		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
330 			     (char *)oldroot);
331 	} else {
332 		ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
333 		leaf = (xfs_dir2_leaf_t *)oldroot;
334 		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
335 			     (char *)leaf);
336 	}
337 	memcpy(node, oldroot, size);
338 	xfs_da_log_buf(tp, bp, 0, size - 1);
339 	xfs_da_buf_done(blk1->bp);
340 	blk1->bp = bp;
341 	blk1->blkno = blkno;
342 
343 	/*
344 	 * Set up the new root node.
345 	 */
346 	error = xfs_da_node_create(args,
347 		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
348 		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
349 	if (error)
350 		return(error);
351 	node = bp->data;
352 	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
353 	node->btree[0].before = cpu_to_be32(blk1->blkno);
354 	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
355 	node->btree[1].before = cpu_to_be32(blk2->blkno);
356 	node->hdr.count = cpu_to_be16(2);
357 
358 #ifdef DEBUG
359 	if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) {
360 		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
361 		       blk1->blkno < mp->m_dirfreeblk);
362 		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
363 		       blk2->blkno < mp->m_dirfreeblk);
364 	}
365 #endif
366 
367 	/* Header is already logged by xfs_da_node_create */
368 	xfs_da_log_buf(tp, bp,
369 		XFS_DA_LOGRANGE(node, node->btree,
370 			sizeof(xfs_da_node_entry_t) * 2));
371 	xfs_da_buf_done(bp);
372 
373 	return(0);
374 }
375 
376 /*
377  * Split the node, rebalance, then add the new entry.
378  */
379 STATIC int						/* error */
380 xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
381 				 xfs_da_state_blk_t *newblk,
382 				 xfs_da_state_blk_t *addblk,
383 				 int treelevel, int *result)
384 {
385 	xfs_da_intnode_t *node;
386 	xfs_dablk_t blkno;
387 	int newcount, error;
388 	int useextra;
389 
390 	node = oldblk->bp->data;
391 	ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
392 
393 	/*
394 	 * With V2 dirs the extra block is data or freespace.
395 	 */
396 	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
397 	newcount = 1 + useextra;
398 	/*
399 	 * Do we have to split the node?
400 	 */
401 	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
402 		/*
403 		 * Allocate a new node, add to the doubly linked chain of
404 		 * nodes, then move some of our excess entries into it.
405 		 */
406 		error = xfs_da_grow_inode(state->args, &blkno);
407 		if (error)
408 			return(error);	/* GROT: dir is inconsistent */
409 
410 		error = xfs_da_node_create(state->args, blkno, treelevel,
411 					   &newblk->bp, state->args->whichfork);
412 		if (error)
413 			return(error);	/* GROT: dir is inconsistent */
414 		newblk->blkno = blkno;
415 		newblk->magic = XFS_DA_NODE_MAGIC;
416 		xfs_da_node_rebalance(state, oldblk, newblk);
417 		error = xfs_da_blk_link(state, oldblk, newblk);
418 		if (error)
419 			return(error);
420 		*result = 1;
421 	} else {
422 		*result = 0;
423 	}
424 
425 	/*
426 	 * Insert the new entry(s) into the correct block
427 	 * (updating last hashval in the process).
428 	 *
429 	 * xfs_da_node_add() inserts BEFORE the given index,
430 	 * and as a result of using node_lookup_int() we always
431 	 * point to a valid entry (not after one), but a split
432 	 * operation always results in a new block whose hashvals
433 	 * FOLLOW the current block.
434 	 *
435 	 * If we had double-split op below us, then add the extra block too.
436 	 */
437 	node = oldblk->bp->data;
438 	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
439 		oldblk->index++;
440 		xfs_da_node_add(state, oldblk, addblk);
441 		if (useextra) {
442 			if (state->extraafter)
443 				oldblk->index++;
444 			xfs_da_node_add(state, oldblk, &state->extrablk);
445 			state->extravalid = 0;
446 		}
447 	} else {
448 		newblk->index++;
449 		xfs_da_node_add(state, newblk, addblk);
450 		if (useextra) {
451 			if (state->extraafter)
452 				newblk->index++;
453 			xfs_da_node_add(state, newblk, &state->extrablk);
454 			state->extravalid = 0;
455 		}
456 	}
457 
458 	return(0);
459 }
460 
461 /*
462  * Balance the btree elements between two intermediate nodes,
463  * usually one full and one empty.
464  *
465  * NOTE: if blk2 is empty, then it will get the upper half of blk1.
466  */
467 STATIC void
468 xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
469 				     xfs_da_state_blk_t *blk2)
470 {
471 	xfs_da_intnode_t *node1, *node2, *tmpnode;
472 	xfs_da_node_entry_t *btree_s, *btree_d;
473 	int count, tmp;
474 	xfs_trans_t *tp;
475 
476 	node1 = blk1->bp->data;
477 	node2 = blk2->bp->data;
478 	/*
479 	 * Figure out how many entries need to move, and in which direction.
480 	 * Swap the nodes around if that makes it simpler.
481 	 */
482 	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
483 	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
484 	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
485 	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
486 		tmpnode = node1;
487 		node1 = node2;
488 		node2 = tmpnode;
489 	}
490 	ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC);
491 	ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC);
492 	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
493 	if (count == 0)
494 		return;
495 	tp = state->args->trans;
496 	/*
497 	 * Two cases: high-to-low and low-to-high.
498 	 */
499 	if (count > 0) {
500 		/*
501 		 * Move elements in node2 up to make a hole.
502 		 */
503 		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
504 			tmp *= (uint)sizeof(xfs_da_node_entry_t);
505 			btree_s = &node2->btree[0];
506 			btree_d = &node2->btree[count];
507 			memmove(btree_d, btree_s, tmp);
508 		}
509 
510 		/*
511 		 * Move the req'd B-tree elements from high in node1 to
512 		 * low in node2.
513 		 */
514 		be16_add_cpu(&node2->hdr.count, count);
515 		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
516 		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
517 		btree_d = &node2->btree[0];
518 		memcpy(btree_d, btree_s, tmp);
519 		be16_add_cpu(&node1->hdr.count, -count);
520 	} else {
521 		/*
522 		 * Move the req'd B-tree elements from low in node2 to
523 		 * high in node1.
524 		 */
525 		count = -count;
526 		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
527 		btree_s = &node2->btree[0];
528 		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
529 		memcpy(btree_d, btree_s, tmp);
530 		be16_add_cpu(&node1->hdr.count, count);
531 		xfs_da_log_buf(tp, blk1->bp,
532 			XFS_DA_LOGRANGE(node1, btree_d, tmp));
533 
534 		/*
535 		 * Move elements in node2 down to fill the hole.
536 		 */
537 		tmp  = be16_to_cpu(node2->hdr.count) - count;
538 		tmp *= (uint)sizeof(xfs_da_node_entry_t);
539 		btree_s = &node2->btree[count];
540 		btree_d = &node2->btree[0];
541 		memmove(btree_d, btree_s, tmp);
542 		be16_add_cpu(&node2->hdr.count, -count);
543 	}
544 
545 	/*
546 	 * Log header of node 1 and all current bits of node 2.
547 	 */
548 	xfs_da_log_buf(tp, blk1->bp,
549 		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
550 	xfs_da_log_buf(tp, blk2->bp,
551 		XFS_DA_LOGRANGE(node2, &node2->hdr,
552 			sizeof(node2->hdr) +
553 			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
554 
555 	/*
556 	 * Record the last hashval from each block for upward propagation.
557 	 * (note: don't use the swapped node pointers)
558 	 */
559 	node1 = blk1->bp->data;
560 	node2 = blk2->bp->data;
561 	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
562 	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
563 
564 	/*
565 	 * Adjust the expected index for insertion.
566 	 */
567 	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
568 		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
569 		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
570 	}
571 }
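/*
 * Worked example for the rebalance above: with node1 holding 6 entries
 * and node2 empty, count = (6 - 0) / 2 = 3, so node1's entries 3..5 are
 * copied to node2[0..2] -- node2 receives the upper (higher hashval)
 * half, as the NOTE above says.  With node1 at 2 entries and node2 at
 * 8, count = -3 and three entries flow from node2 into node1 instead.
 */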
572 
573 /*
574  * Add a new entry to an intermediate node.
575  */
576 STATIC void
577 xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
578 			       xfs_da_state_blk_t *newblk)
579 {
580 	xfs_da_intnode_t *node;
581 	xfs_da_node_entry_t *btree;
582 	int tmp;
583 	xfs_mount_t *mp;
584 
585 	node = oldblk->bp->data;
586 	mp = state->mp;
587 	ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
588 	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
589 	ASSERT(newblk->blkno != 0);
590 	if (state->args->whichfork == XFS_DATA_FORK)
591 		ASSERT(newblk->blkno >= mp->m_dirleafblk &&
592 		       newblk->blkno < mp->m_dirfreeblk);
593 
594 	/*
595 	 * We may need to make some room before we insert the new node.
596 	 */
597 	tmp = 0;
598 	btree = &node->btree[ oldblk->index ];
599 	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
600 		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
601 		memmove(btree + 1, btree, tmp);
602 	}
603 	btree->hashval = cpu_to_be32(newblk->hashval);
604 	btree->before = cpu_to_be32(newblk->blkno);
605 	xfs_da_log_buf(state->args->trans, oldblk->bp,
606 		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
607 	be16_add_cpu(&node->hdr.count, 1);
608 	xfs_da_log_buf(state->args->trans, oldblk->bp,
609 		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
610 
611 	/*
612 	 * Copy the last hash value from the oldblk to propagate upwards.
613 	 */
614 	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
615 }
616 
617 /*========================================================================
618  * Routines used for shrinking the Btree.
619  *========================================================================*/
620 
621 /*
622  * Deallocate an empty leaf node, remove it from its parent,
623  * possibly deallocating that block, etc...
624  */
625 int
626 xfs_da_join(xfs_da_state_t *state)
627 {
628 	xfs_da_state_blk_t *drop_blk, *save_blk;
629 	int action, error;
630 
631 	action = 0;
632 	drop_blk = &state->path.blk[ state->path.active-1 ];
633 	save_blk = &state->altpath.blk[ state->path.active-1 ];
634 	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
635 	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
636 	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
637 
638 	/*
639 	 * Walk back up the tree joining/deallocating as necessary.
640 	 * When we stop dropping blocks, break out.
641 	 */
642 	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
643 		 state->path.active--) {
644 		/*
645 		 * See if we can combine the block with a neighbor.
646 		 *   (action == 0) => no options, just leave
647 		 *   (action == 1) => coalesce, then unlink
648 		 *   (action == 2) => block empty, unlink it
649 		 */
650 		switch (drop_blk->magic) {
651 		case XFS_ATTR_LEAF_MAGIC:
652 			error = xfs_attr_leaf_toosmall(state, &action);
653 			if (error)
654 				return(error);
655 			if (action == 0)
656 				return(0);
657 			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
658 			break;
659 		case XFS_DIR2_LEAFN_MAGIC:
660 			error = xfs_dir2_leafn_toosmall(state, &action);
661 			if (error)
662 				return error;
663 			if (action == 0)
664 				return 0;
665 			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
666 			break;
667 		case XFS_DA_NODE_MAGIC:
668 			/*
669 			 * Remove the offending node, fixup hashvals,
670 			 * check for a toosmall neighbor.
671 			 */
672 			xfs_da_node_remove(state, drop_blk);
673 			xfs_da_fixhashpath(state, &state->path);
674 			error = xfs_da_node_toosmall(state, &action);
675 			if (error)
676 				return(error);
677 			if (action == 0)
678 				return 0;
679 			xfs_da_node_unbalance(state, drop_blk, save_blk);
680 			break;
681 		}
682 		xfs_da_fixhashpath(state, &state->altpath);
683 		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
684 		xfs_da_state_kill_altpath(state);
685 		if (error)
686 			return(error);
687 		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
688 							 drop_blk->bp);
689 		drop_blk->bp = NULL;
690 		if (error)
691 			return(error);
692 	}
693 	/*
694 	 * We joined all the way to the top.  If it turns out that
695 	 * we only have one entry in the root, make the child block
696 	 * the new root.
697 	 */
698 	xfs_da_node_remove(state, drop_blk);
699 	xfs_da_fixhashpath(state, &state->path);
700 	error = xfs_da_root_join(state, &state->path.blk[0]);
701 	return(error);
702 }
703 
704 /*
705  * We have only one entry in the root.  Copy the only remaining child of
706  * the old root to block 0 as the new root node.
707  */
708 STATIC int
709 xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
710 {
711 	xfs_da_intnode_t *oldroot;
712 	/* REFERENCED */
713 	xfs_da_blkinfo_t *blkinfo;
714 	xfs_da_args_t *args;
715 	xfs_dablk_t child;
716 	xfs_dabuf_t *bp;
717 	int error;
718 
719 	args = state->args;
720 	ASSERT(args != NULL);
721 	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
722 	oldroot = root_blk->bp->data;
723 	ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC);
724 	ASSERT(!oldroot->hdr.info.forw);
725 	ASSERT(!oldroot->hdr.info.back);
726 
727 	/*
728 	 * If the root has more than one child, then don't do anything.
729 	 */
730 	if (be16_to_cpu(oldroot->hdr.count) > 1)
731 		return(0);
732 
733 	/*
734 	 * Read in the (only) child block, then copy those bytes into
735 	 * the root block's buffer and free the original child block.
736 	 */
737 	child = be32_to_cpu(oldroot->btree[0].before);
738 	ASSERT(child != 0);
739 	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
740 					     args->whichfork);
741 	if (error)
742 		return(error);
743 	ASSERT(bp != NULL);
744 	blkinfo = bp->data;
745 	if (be16_to_cpu(oldroot->hdr.level) == 1) {
746 		ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIR2_LEAFN_MAGIC ||
747 		       be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC);
748 	} else {
749 		ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC);
750 	}
751 	ASSERT(!blkinfo->forw);
752 	ASSERT(!blkinfo->back);
753 	memcpy(root_blk->bp->data, bp->data, state->blocksize);
754 	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
755 	error = xfs_da_shrink_inode(args, child, bp);
756 	return(error);
757 }
758 
759 /*
760  * Check a node block and its neighbors to see if the block should be
761  * collapsed into one or the other neighbor.  Always keep the block
762  * with the smaller block number.
763  * If the current block is over 50% full, don't try to join it, return 0.
764  * If the block is empty, fill in the state structure and return 2.
765  * If it can be collapsed, fill in the state structure and return 1.
766  * If nothing can be done, return 0.
767  */
768 STATIC int
769 xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
770 {
771 	xfs_da_intnode_t *node;
772 	xfs_da_state_blk_t *blk;
773 	xfs_da_blkinfo_t *info;
774 	int count, forward, error, retval, i;
775 	xfs_dablk_t blkno;
776 	xfs_dabuf_t *bp;
777 
778 	/*
779 	 * Check for the degenerate case of the block being over 50% full.
780 	 * If so, it's not worth even looking to see if we might be able
781 	 * to coalesce with a sibling.
782 	 */
783 	blk = &state->path.blk[ state->path.active-1 ];
784 	info = blk->bp->data;
785 	ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC);
786 	node = (xfs_da_intnode_t *)info;
787 	count = be16_to_cpu(node->hdr.count);
788 	if (count > (state->node_ents >> 1)) {
789 		*action = 0;	/* blk over 50%, don't try to join */
790 		return(0);
791 	}
792 
793 	/*
794 	 * Check for the degenerate case of the block being empty.
795 	 * If the block is empty, we'll simply delete it, no need to
796 	 * coalesce it with a sibling block.  We choose (arbitrarily)
797 	 * to merge with the forward block unless it is NULL.
798 	 */
799 	if (count == 0) {
800 		/*
801 		 * Make altpath point to the block we want to keep and
802 		 * path point to the block we want to drop (this one).
803 		 */
804 		forward = (info->forw != 0);
805 		memcpy(&state->altpath, &state->path, sizeof(state->path));
806 		error = xfs_da_path_shift(state, &state->altpath, forward,
807 						 0, &retval);
808 		if (error)
809 			return(error);
810 		if (retval) {
811 			*action = 0;
812 		} else {
813 			*action = 2;
814 		}
815 		return(0);
816 	}
817 
818 	/*
819 	 * Examine each sibling block to see if we can coalesce with
820 	 * at least 25% free space to spare.  We need to figure out
821 	 * whether to merge with the forward or the backward block.
822 	 * We prefer coalescing with the lower numbered sibling so as
823 	 * to shrink a directory over time.
824 	 */
825 	/* start with smaller blk num */
826 	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
827 	for (i = 0; i < 2; forward = !forward, i++) {
828 		if (forward)
829 			blkno = be32_to_cpu(info->forw);
830 		else
831 			blkno = be32_to_cpu(info->back);
832 		if (blkno == 0)
833 			continue;
834 		error = xfs_da_read_buf(state->args->trans, state->args->dp,
835 					blkno, -1, &bp, state->args->whichfork);
836 		if (error)
837 			return(error);
838 		ASSERT(bp != NULL);
839 
840 		node = (xfs_da_intnode_t *)info;
841 		count  = state->node_ents;
842 		count -= state->node_ents >> 2;
843 		count -= be16_to_cpu(node->hdr.count);
844 		node = bp->data;
845 		ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
846 		count -= be16_to_cpu(node->hdr.count);
847 		xfs_da_brelse(state->args->trans, bp);
848 		if (count >= 0)
849 			break;	/* fits with at least 25% to spare */
850 	}
851 	if (i >= 2) {
852 		*action = 0;
853 		return(0);
854 	}
855 
856 	/*
857 	 * Make altpath point to the block we want to keep (the lower
858 	 * numbered block) and path point to the block we want to drop.
859 	 */
860 	memcpy(&state->altpath, &state->path, sizeof(state->path));
861 	if (blkno < blk->blkno) {
862 		error = xfs_da_path_shift(state, &state->altpath, forward,
863 						 0, &retval);
864 		if (error) {
865 			return(error);
866 		}
867 		if (retval) {
868 			*action = 0;
869 			return(0);
870 		}
871 	} else {
872 		error = xfs_da_path_shift(state, &state->path, forward,
873 						 0, &retval);
874 		if (error) {
875 			return(error);
876 		}
877 		if (retval) {
878 			*action = 0;
879 			return(0);
880 		}
881 	}
882 	*action = 1;
883 	return(0);
884 }
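/*
 * Arithmetic sketch for the coalesce test above, assuming a purely
 * illustrative node_ents of 64: a merged block may use at most
 * 64 - (64 >> 2) = 48 entries, so with 20 entries here and 25 in the
 * sibling, count = 64 - 16 - 20 - 25 = 3 >= 0 and the two blocks join.
 */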
885 
886 /*
887  * Walk back up the tree adjusting hash values as necessary,
888  * when we stop making changes, return.
889  */
890 void
891 xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
892 {
893 	xfs_da_state_blk_t *blk;
894 	xfs_da_intnode_t *node;
895 	xfs_da_node_entry_t *btree;
896 	xfs_dahash_t lasthash=0;
897 	int level, count;
898 
899 	level = path->active-1;
900 	blk = &path->blk[ level ];
901 	switch (blk->magic) {
902 	case XFS_ATTR_LEAF_MAGIC:
903 		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
904 		if (count == 0)
905 			return;
906 		break;
907 	case XFS_DIR2_LEAFN_MAGIC:
908 		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
909 		if (count == 0)
910 			return;
911 		break;
912 	case XFS_DA_NODE_MAGIC:
913 		lasthash = xfs_da_node_lasthash(blk->bp, &count);
914 		if (count == 0)
915 			return;
916 		break;
917 	}
918 	for (blk--, level--; level >= 0; blk--, level--) {
919 		node = blk->bp->data;
920 		ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
921 		btree = &node->btree[ blk->index ];
922 		if (be32_to_cpu(btree->hashval) == lasthash)
923 			break;
924 		blk->hashval = lasthash;
925 		btree->hashval = cpu_to_be32(lasthash);
926 		xfs_da_log_buf(state->args->trans, blk->bp,
927 				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
928 
929 		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
930 	}
931 }
932 
933 /*
934  * Remove an entry from an intermediate node.
935  */
936 STATIC void
937 xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
938 {
939 	xfs_da_intnode_t *node;
940 	xfs_da_node_entry_t *btree;
941 	int tmp;
942 
943 	node = drop_blk->bp->data;
944 	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
945 	ASSERT(drop_blk->index >= 0);
946 
947 	/*
948 	 * Copy over the offending entry, or just zero it out.
949 	 */
950 	btree = &node->btree[drop_blk->index];
951 	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
952 		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
953 		tmp *= (uint)sizeof(xfs_da_node_entry_t);
954 		memmove(btree, btree + 1, tmp);
955 		xfs_da_log_buf(state->args->trans, drop_blk->bp,
956 		    XFS_DA_LOGRANGE(node, btree, tmp));
957 		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
958 	}
959 	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
960 	xfs_da_log_buf(state->args->trans, drop_blk->bp,
961 	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
962 	be16_add_cpu(&node->hdr.count, -1);
963 	xfs_da_log_buf(state->args->trans, drop_blk->bp,
964 	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
965 
966 	/*
967 	 * Copy the last hash value from the block to propagate upwards.
968 	 */
969 	btree--;
970 	drop_blk->hashval = be32_to_cpu(btree->hashval);
971 }
972 
973 /*
974  * Unbalance the btree elements between two intermediate nodes,
975  * move all Btree elements from one node into another.
976  */
977 STATIC void
978 xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
979 				     xfs_da_state_blk_t *save_blk)
980 {
981 	xfs_da_intnode_t *drop_node, *save_node;
982 	xfs_da_node_entry_t *btree;
983 	int tmp;
984 	xfs_trans_t *tp;
985 
986 	drop_node = drop_blk->bp->data;
987 	save_node = save_blk->bp->data;
988 	ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
989 	ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
990 	tp = state->args->trans;
991 
992 	/*
993 	 * If the dying block has lower hashvals, then move all the
994 	 * elements in the remaining block up to make a hole.
995 	 */
996 	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
997 	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
998 	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
999 	{
1000 		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
1001 		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1002 		memmove(btree, &save_node->btree[0], tmp);
1003 		btree = &save_node->btree[0];
1004 		xfs_da_log_buf(tp, save_blk->bp,
1005 			XFS_DA_LOGRANGE(save_node, btree,
1006 				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
1007 				sizeof(xfs_da_node_entry_t)));
1008 	} else {
1009 		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1010 		xfs_da_log_buf(tp, save_blk->bp,
1011 			XFS_DA_LOGRANGE(save_node, btree,
1012 				be16_to_cpu(drop_node->hdr.count) *
1013 				sizeof(xfs_da_node_entry_t)));
1014 	}
1015 
1016 	/*
1017 	 * Move all the B-tree elements from drop_blk to save_blk.
1018 	 */
1019 	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1020 	memcpy(btree, &drop_node->btree[0], tmp);
1021 	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1022 
1023 	xfs_da_log_buf(tp, save_blk->bp,
1024 		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1025 			sizeof(save_node->hdr)));
1026 
1027 	/*
1028 	 * Save the last hashval in the remaining block for upward propagation.
1029 	 */
1030 	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
1031 }
1032 
1033 /*========================================================================
1034  * Routines used for finding things in the Btree.
1035  *========================================================================*/
1036 
1037 /*
1038  * Walk down the Btree looking for a particular filename, filling
1039  * in the state structure as we go.
1040  *
1041  * We will set the state structure to point to each of the elements
1042  * in each of the nodes where either the hashval is or should be.
1043  *
1044  * We support duplicate hashvals, so for each entry in the current
1045  * node that could contain the desired hashval, descend.  This is a
1046  * pruned depth-first tree search.
1047  */
1048 int							/* error */
1049 xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1050 {
1051 	xfs_da_state_blk_t *blk;
1052 	xfs_da_blkinfo_t *curr;
1053 	xfs_da_intnode_t *node;
1054 	xfs_da_node_entry_t *btree;
1055 	xfs_dablk_t blkno;
1056 	int probe, span, max, error, retval;
1057 	xfs_dahash_t hashval, btreehashval;
1058 	xfs_da_args_t *args;
1059 
1060 	args = state->args;
1061 
1062 	/*
1063 	 * Descend thru the B-tree searching each level for the right
1064 	 * node to use, until the right hashval is found.
1065 	 */
1066 	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
1067 	for (blk = &state->path.blk[0], state->path.active = 1;
1068 			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1069 			 blk++, state->path.active++) {
1070 		/*
1071 		 * Read the next node down in the tree.
1072 		 */
1073 		blk->blkno = blkno;
1074 		error = xfs_da_read_buf(args->trans, args->dp, blkno,
1075 					-1, &blk->bp, args->whichfork);
1076 		if (error) {
1077 			blk->blkno = 0;
1078 			state->path.active--;
1079 			return(error);
1080 		}
1081 		curr = blk->bp->data;
1082 		blk->magic = be16_to_cpu(curr->magic);
1083 		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
1084 		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1085 		       blk->magic == XFS_ATTR_LEAF_MAGIC);
1086 
1087 		/*
1088 		 * Search an intermediate node for a match.
1089 		 */
1090 		if (blk->magic == XFS_DA_NODE_MAGIC) {
1091 			node = blk->bp->data;
1092 			max = be16_to_cpu(node->hdr.count);
1093 			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
1094 
1095 			/*
1096 			 * Binary search.  (note: small blocks will skip loop)
1097 			 */
1098 			probe = span = max / 2;
1099 			hashval = args->hashval;
1100 			for (btree = &node->btree[probe]; span > 4;
1101 				   btree = &node->btree[probe]) {
1102 				span /= 2;
1103 				btreehashval = be32_to_cpu(btree->hashval);
1104 				if (btreehashval < hashval)
1105 					probe += span;
1106 				else if (btreehashval > hashval)
1107 					probe -= span;
1108 				else
1109 					break;
1110 			}
1111 			ASSERT((probe >= 0) && (probe < max));
1112 			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
1113 
1114 			/*
1115 			 * Since we may have duplicate hashvals, find the first
1116 			 * matching hashval in the node.
1117 			 */
1118 			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
1119 				btree--;
1120 				probe--;
1121 			}
1122 			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
1123 				btree++;
1124 				probe++;
1125 			}
1126 
1127 			/*
1128 			 * Pick the right block to descend on.
1129 			 */
1130 			if (probe == max) {
1131 				blk->index = max-1;
1132 				blkno = be32_to_cpu(node->btree[max-1].before);
1133 			} else {
1134 				blk->index = probe;
1135 				blkno = be32_to_cpu(btree->before);
1136 			}
1137 		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1138 			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1139 			break;
1140 		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1141 			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1142 			break;
1143 		}
1144 	}
1145 
1146 	/*
1147 	 * A leaf block that ends in the hashval that we are interested in
1148 	 * (final hashval == search hashval) means that the next block may
1149  * contain more entries with the same hashval; shift upward to the
1150 	 * next leaf and keep searching.
1151 	 */
1152 	for (;;) {
1153 		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1154 			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1155 							&blk->index, state);
1156 		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1157 			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
1158 			blk->index = args->index;
1159 			args->blkno = blk->blkno;
1160 		} else {
1161 			ASSERT(0);
1162 			return XFS_ERROR(EFSCORRUPTED);
1163 		}
1164 		if (((retval == ENOENT) || (retval == ENOATTR)) &&
1165 		    (blk->hashval == args->hashval)) {
1166 			error = xfs_da_path_shift(state, &state->path, 1, 1,
1167 							 &retval);
1168 			if (error)
1169 				return(error);
1170 			if (retval == 0) {
1171 				continue;
1172 			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1173 				/* path_shift() gives ENOENT */
1174 				retval = XFS_ERROR(ENOATTR);
1175 			}
1176 		}
1177 		break;
1178 	}
1179 	*result = retval;
1180 	return(0);
1181 }
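/*
 * A sketch of what a successful descent leaves behind, assuming a
 * two-level tree: path.blk[0] is the root node with .index at the
 * entry we descended through, path.blk[1] is the leaf that holds (or
 * should hold) the name, and path.active == 2.
 */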
1182 
1183 /*========================================================================
1184  * Utility routines.
1185  *========================================================================*/
1186 
1187 /*
1188  * Link a new block into a doubly linked list of blocks (of whatever type).
1189  */
1190 int							/* error */
1191 xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1192 			       xfs_da_state_blk_t *new_blk)
1193 {
1194 	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
1195 	xfs_da_args_t *args;
1196 	int before=0, error;
1197 	xfs_dabuf_t *bp;
1198 
1199 	/*
1200 	 * Set up environment.
1201 	 */
1202 	args = state->args;
1203 	ASSERT(args != NULL);
1204 	old_info = old_blk->bp->data;
1205 	new_info = new_blk->bp->data;
1206 	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1207 	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1208 	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1209 	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
1210 	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
1211 	ASSERT(old_blk->magic == new_blk->magic);
1212 
1213 	switch (old_blk->magic) {
1214 	case XFS_ATTR_LEAF_MAGIC:
1215 		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1216 		break;
1217 	case XFS_DIR2_LEAFN_MAGIC:
1218 		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
1219 		break;
1220 	case XFS_DA_NODE_MAGIC:
1221 		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
1222 		break;
1223 	}
1224 
1225 	/*
1226 	 * Link blocks in appropriate order.
1227 	 */
1228 	if (before) {
1229 		/*
1230 		 * Link new block in before existing block.
1231 		 */
1232 		new_info->forw = cpu_to_be32(old_blk->blkno);
1233 		new_info->back = old_info->back;
1234 		if (old_info->back) {
1235 			error = xfs_da_read_buf(args->trans, args->dp,
1236 						be32_to_cpu(old_info->back),
1237 						-1, &bp, args->whichfork);
1238 			if (error)
1239 				return(error);
1240 			ASSERT(bp != NULL);
1241 			tmp_info = bp->data;
1242 			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
1243 			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1244 			tmp_info->forw = cpu_to_be32(new_blk->blkno);
1245 			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1246 			xfs_da_buf_done(bp);
1247 		}
1248 		old_info->back = cpu_to_be32(new_blk->blkno);
1249 	} else {
1250 		/*
1251 		 * Link new block in after existing block.
1252 		 */
1253 		new_info->forw = old_info->forw;
1254 		new_info->back = cpu_to_be32(old_blk->blkno);
1255 		if (old_info->forw) {
1256 			error = xfs_da_read_buf(args->trans, args->dp,
1257 						be32_to_cpu(old_info->forw),
1258 						-1, &bp, args->whichfork);
1259 			if (error)
1260 				return(error);
1261 			ASSERT(bp != NULL);
1262 			tmp_info = bp->data;
1263 			ASSERT(tmp_info->magic == old_info->magic);
1264 			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1265 			tmp_info->back = cpu_to_be32(new_blk->blkno);
1266 			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1267 			xfs_da_buf_done(bp);
1268 		}
1269 		old_info->forw = cpu_to_be32(new_blk->blkno);
1270 	}
1271 
1272 	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1273 	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1274 	return(0);
1275 }
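/*
 * A diagram of the "link after" case above, inserting new block N
 * after old block O whose forward sibling (if any) is F:
 *
 *	before:  ... <-> O <-> F <-> ...
 *	after:   ... <-> O <-> N <-> F <-> ...
 *
 * i.e. N->forw = old O->forw, N->back = O, F->back = N, O->forw = N.
 */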
1276 
1277 /*
1278  * Compare two intermediate nodes for "order".
1279  */
1280 STATIC int
1281 xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
1282 {
1283 	xfs_da_intnode_t *node1, *node2;
1284 
1285 	node1 = node1_bp->data;
1286 	node2 = node2_bp->data;
1287 	ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) &&
1288 	       (be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC));
1289 	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
1290 	    ((be32_to_cpu(node2->btree[0].hashval) <
1291 	      be32_to_cpu(node1->btree[0].hashval)) ||
1292 	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
1293 	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
1294 		return(1);
1295 	}
1296 	return(0);
1297 }
1298 
1299 /*
1300  * Pick up the last hashvalue from an intermediate node.
1301  */
1302 STATIC uint
1303 xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
1304 {
1305 	xfs_da_intnode_t *node;
1306 
1307 	node = bp->data;
1308 	ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
1309 	if (count)
1310 		*count = be16_to_cpu(node->hdr.count);
1311 	if (!node->hdr.count)
1312 		return(0);
1313 	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1314 }
1315 
1316 /*
1317  * Unlink a block from a doubly linked list of blocks.
1318  */
1319 STATIC int						/* error */
1320 xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1321 				 xfs_da_state_blk_t *save_blk)
1322 {
1323 	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
1324 	xfs_da_args_t *args;
1325 	xfs_dabuf_t *bp;
1326 	int error;
1327 
1328 	/*
1329 	 * Set up environment.
1330 	 */
1331 	args = state->args;
1332 	ASSERT(args != NULL);
1333 	save_info = save_blk->bp->data;
1334 	drop_info = drop_blk->bp->data;
1335 	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1336 	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1337 	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1338 	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
1339 	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
1340 	ASSERT(save_blk->magic == drop_blk->magic);
1341 	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1342 	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
1343 	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1344 	       (be32_to_cpu(drop_info->back) == save_blk->blkno));
1345 
1346 	/*
1347 	 * Unlink the leaf block from the doubly linked chain of leaves.
1348 	 */
1349 	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1350 		save_info->back = drop_info->back;
1351 		if (drop_info->back) {
1352 			error = xfs_da_read_buf(args->trans, args->dp,
1353 						be32_to_cpu(drop_info->back),
1354 						-1, &bp, args->whichfork);
1355 			if (error)
1356 				return(error);
1357 			ASSERT(bp != NULL);
1358 			tmp_info = bp->data;
1359 			ASSERT(tmp_info->magic == save_info->magic);
1360 			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1361 			tmp_info->forw = cpu_to_be32(save_blk->blkno);
1362 			xfs_da_log_buf(args->trans, bp, 0,
1363 						    sizeof(*tmp_info) - 1);
1364 			xfs_da_buf_done(bp);
1365 		}
1366 	} else {
1367 		save_info->forw = drop_info->forw;
1368 		if (drop_info->forw) {
1369 			error = xfs_da_read_buf(args->trans, args->dp,
1370 						be32_to_cpu(drop_info->forw),
1371 						-1, &bp, args->whichfork);
1372 			if (error)
1373 				return(error);
1374 			ASSERT(bp != NULL);
1375 			tmp_info = bp->data;
1376 			ASSERT(tmp_info->magic == save_info->magic);
1377 			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1378 			tmp_info->back = cpu_to_be32(save_blk->blkno);
1379 			xfs_da_log_buf(args->trans, bp, 0,
1380 						    sizeof(*tmp_info) - 1);
1381 			xfs_da_buf_done(bp);
1382 		}
1383 	}
1384 
1385 	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1386 	return(0);
1387 }
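/*
 * The matching diagram for the unlink above, dropping block D that sat
 * forward of the kept block S, with Y the block beyond D (if any):
 *
 *	before:  ... <-> S <-> D <-> Y <-> ...
 *	after:   ... <-> S <-> Y <-> ...
 */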
1388 
1389 /*
1390  * Move a path "forward" or "!forward" one block at the current level.
1391  *
1392  * This routine will adjust a "path" to point to the next block
1393  * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1394  * Btree, including updating pointers to the intermediate nodes between
1395  * the new bottom and the root.
1396  */
1397 int							/* error */
1398 xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1399 				 int forward, int release, int *result)
1400 {
1401 	xfs_da_state_blk_t *blk;
1402 	xfs_da_blkinfo_t *info;
1403 	xfs_da_intnode_t *node;
1404 	xfs_da_args_t *args;
1405 	xfs_dablk_t blkno=0;
1406 	int level, error;
1407 
1408 	/*
1409 	 * Roll up the Btree looking for the first block where our
1410 	 * current index is not at the edge of the block.  Note that
1411 	 * we skip the bottom layer because we want the sibling block.
1412 	 */
1413 	args = state->args;
1414 	ASSERT(args != NULL);
1415 	ASSERT(path != NULL);
1416 	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1417 	level = (path->active-1) - 1;	/* skip bottom layer in path */
1418 	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1419 		ASSERT(blk->bp != NULL);
1420 		node = blk->bp->data;
1421 		ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
1422 		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
1423 			blk->index++;
1424 			blkno = be32_to_cpu(node->btree[blk->index].before);
1425 			break;
1426 		} else if (!forward && (blk->index > 0)) {
1427 			blk->index--;
1428 			blkno = be32_to_cpu(node->btree[blk->index].before);
1429 			break;
1430 		}
1431 	}
1432 	if (level < 0) {
1433 		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
1434 		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1435 		return(0);
1436 	}
1437 
1438 	/*
1439 	 * Roll down the edge of the subtree until we reach the
1440 	 * same depth we were at originally.
1441 	 */
1442 	for (blk++, level++; level < path->active; blk++, level++) {
1443 		/*
1444 		 * Release the old block.
1445 		 * (if it's dirty, trans won't actually let go)
1446 		 */
1447 		if (release)
1448 			xfs_da_brelse(args->trans, blk->bp);
1449 
1450 		/*
1451 		 * Read the next child block.
1452 		 */
1453 		blk->blkno = blkno;
1454 		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
1455 						     &blk->bp, args->whichfork);
1456 		if (error)
1457 			return(error);
1458 		ASSERT(blk->bp != NULL);
1459 		info = blk->bp->data;
1460 		ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC ||
1461 		       be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC ||
1462 		       be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
1463 		blk->magic = be16_to_cpu(info->magic);
1464 		if (blk->magic == XFS_DA_NODE_MAGIC) {
1465 			node = (xfs_da_intnode_t *)info;
1466 			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1467 			if (forward)
1468 				blk->index = 0;
1469 			else
1470 				blk->index = be16_to_cpu(node->hdr.count)-1;
1471 			blkno = be32_to_cpu(node->btree[blk->index].before);
1472 		} else {
1473 			ASSERT(level == path->active-1);
1474 			blk->index = 0;
1475 			switch(blk->magic) {
1476 			case XFS_ATTR_LEAF_MAGIC:
1477 				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
1478 								      NULL);
1479 				break;
1480 			case XFS_DIR2_LEAFN_MAGIC:
1481 				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
1482 								       NULL);
1483 				break;
1484 			default:
1485 				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
1486 				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
1487 				break;
1488 			}
1489 		}
1490 	}
1491 	*result = 0;
1492 	return(0);
1493 }
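/*
 * A walk-through of a forward shift, assuming a three-deep path whose
 * leaf's parent is already at its last entry while the root is not:
 * the first loop starts one level above the leaf, finds the parent at
 * its edge, steps up to the root and bumps root.index by one; the
 * second loop then rolls down the leftmost (index 0) edge of that new
 * subtree back to leaf depth.
 */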
1494 
1495 
1496 /*========================================================================
1497  * Utility routines.
1498  *========================================================================*/
1499 
1500 /*
1501  * Implement a simple hash on a character string.
1502  * Rotate the hash value by 7 bits, then XOR each character in.
1503  * This is implemented with some source-level loop unrolling.
1504  */
1505 xfs_dahash_t
1506 xfs_da_hashname(const uchar_t *name, int namelen)
1507 {
1508 	xfs_dahash_t hash;
1509 
1510 	/*
1511 	 * Do four characters at a time as long as we can.
1512 	 */
1513 	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1514 		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1515 		       (name[3] << 0) ^ rol32(hash, 7 * 4);
1516 
1517 	/*
1518 	 * Now do the rest of the characters.
1519 	 */
1520 	switch (namelen) {
1521 	case 3:
1522 		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1523 		       rol32(hash, 7 * 3);
1524 	case 2:
1525 		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1526 	case 1:
1527 		return (name[0] << 0) ^ rol32(hash, 7 * 1);
1528 	default: /* case 0: */
1529 		return hash;
1530 	}
1531 }
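/*
 * The unrolling above is equivalent, for 8-bit characters, to this
 * simple byte-at-a-time form (a sketch, not part of the build):
 *
 *	xfs_dahash_t hash = 0;
 *	while (namelen--)
 *		hash = *name++ ^ rol32(hash, 7);
 *	return hash;
 *
 * since for values below 256 a left shift by 7, 14 or 21 bits and a
 * 32-bit rotate by the same amount coincide.
 */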
1532 
1533 enum xfs_dacmp
1534 xfs_da_compname(
1535 	struct xfs_da_args *args,
1536 	const char 	*name,
1537 	int 		len)
1538 {
1539 	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1540 					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1541 }
1542 
1543 static xfs_dahash_t
1544 xfs_default_hashname(
1545 	struct xfs_name	*name)
1546 {
1547 	return xfs_da_hashname(name->name, name->len);
1548 }
1549 
1550 const struct xfs_nameops xfs_default_nameops = {
1551 	.hashname	= xfs_default_hashname,
1552 	.compname	= xfs_da_compname
1553 };
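/*
 * Usage note (the ops pointer itself lives outside this file): callers
 * are expected to reach hashname/compname through a per-mount ops
 * table -- mp->m_dirnameops in kernels of this vintage -- rather than
 * calling these directly, which is what allows a case-insensitive
 * hashname/compname pair to be substituted per mount.
 */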
1554 
1555 /*
1556  * Add a block to the btree ahead of the file.
1557  * Return the new block number to the caller.
1558  */
1559 int
1560 xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
1561 {
1562 	xfs_fileoff_t bno, b;
1563 	xfs_bmbt_irec_t map;
1564 	xfs_bmbt_irec_t	*mapp;
1565 	xfs_inode_t *dp;
1566 	int nmap, error, w, count, c, got, i, mapi;
1567 	xfs_trans_t *tp;
1568 	xfs_mount_t *mp;
1569 	xfs_drfsbno_t	nblks;
1570 
1571 	dp = args->dp;
1572 	mp = dp->i_mount;
1573 	w = args->whichfork;
1574 	tp = args->trans;
1575 	nblks = dp->i_d.di_nblocks;
1576 
1577 	/*
1578 	 * For new directories adjust the file offset and block count.
1579 	 */
1580 	if (w == XFS_DATA_FORK) {
1581 		bno = mp->m_dirleafblk;
1582 		count = mp->m_dirblkfsbs;
1583 	} else {
1584 		bno = 0;
1585 		count = 1;
1586 	}
1587 	/*
1588 	 * Find a spot in the file space to put the new block.
1589 	 */
1590 	if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w)))
1591 		return error;
1592 	if (w == XFS_DATA_FORK)
1593 		ASSERT(bno >= mp->m_dirleafblk && bno < mp->m_dirfreeblk);
1594 	/*
1595 	 * Try mapping it in one filesystem block.
1596 	 */
1597 	nmap = 1;
1598 	ASSERT(args->firstblock != NULL);
1599 	if ((error = xfs_bmapi(tp, dp, bno, count,
1600 			xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
1601 			XFS_BMAPI_CONTIG,
1602 			args->firstblock, args->total, &map, &nmap,
1603 			args->flist, NULL))) {
1604 		return error;
1605 	}
1606 	ASSERT(nmap <= 1);
1607 	if (nmap == 1) {
1608 		mapp = &map;
1609 		mapi = 1;
1610 	}
1611 	/*
1612 	 * If we didn't get it and the block might work if fragmented,
1613 	 * try without the CONTIG flag.  Loop until we get it all.
1614 	 */
1615 	else if (nmap == 0 && count > 1) {
1616 		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
1617 		for (b = bno, mapi = 0; b < bno + count; ) {
1618 			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
1619 			c = (int)(bno + count - b);
1620 			if ((error = xfs_bmapi(tp, dp, b, c,
1621 					xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
1622 					XFS_BMAPI_METADATA,
1623 					args->firstblock, args->total,
1624 					&mapp[mapi], &nmap, args->flist,
1625 					NULL))) {
1626 				kmem_free(mapp);
1627 				return error;
1628 			}
1629 			if (nmap < 1)
1630 				break;
1631 			mapi += nmap;
1632 			b = mapp[mapi - 1].br_startoff +
1633 			    mapp[mapi - 1].br_blockcount;
1634 		}
1635 	} else {
1636 		mapi = 0;
1637 		mapp = NULL;
1638 	}
1639 	/*
1640 	 * Count the blocks we got, make sure it matches the total.
1641 	 */
1642 	for (i = 0, got = 0; i < mapi; i++)
1643 		got += mapp[i].br_blockcount;
1644 	if (got != count || mapp[0].br_startoff != bno ||
1645 	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
1646 	    bno + count) {
1647 		if (mapp != &map)
1648 			kmem_free(mapp);
1649 		return XFS_ERROR(ENOSPC);
1650 	}
1651 	if (mapp != &map)
1652 		kmem_free(mapp);
1653 	/* account for newly allocated blocks in reserved blocks total */
1654 	args->total -= dp->i_d.di_nblocks - nblks;
1655 	*new_blkno = (xfs_dablk_t)bno;
1656 	return 0;
1657 }
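
/*
 * The allocation strategy of xfs_da_grow_inode above, reduced to a
 * standalone sketch: first try one contiguous mapping, then fall back
 * to gathering piecewise extents until [bno, bno + count) is covered.
 * alloc_extent() is a hypothetical allocator standing in for xfs_bmapi;
 * this stub maps at most 4 blocks per call and never succeeds in
 * contiguous mode, so the fallback path is exercised.
 */
#include <stdlib.h>

struct extent { long off; long len; };

static long alloc_extent(long off, long len, int contig, struct extent *out)
{
	long got = contig ? 0 : (len < 4 ? len : 4);

	out->off = off;
	out->len = got;
	return got;
}

static int grow_range(long bno, long count, struct extent **mapp, int *mapi)
{
	struct extent *m = malloc(sizeof(*m) * count);
	long b;
	int n = 0;

	if (!m)
		return -1;
	if (alloc_extent(bno, count, 1, &m[0]) == count) {
		n = 1;			/* got it in one piece */
	} else {
		for (b = bno; b < bno + count; n++) {
			if (alloc_extent(b, bno + count - b, 0, &m[n]) == 0) {
				free(m);
				return -1;	/* hole: treat as ENOSPC */
			}
			b = m[n].off + m[n].len;
		}
	}
	*mapp = m;
	*mapi = n;
	return 0;
}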
1658 
1659 /*
1660  * Ick.  We need to always be able to remove a btree block, even
1661  * if there's no space reservation because the filesystem is full.
1662  * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
1663  * It swaps the target block with the last block in the file.  The
1664  * last block in the file can always be removed, since removing it
1665  * can't cause a bmap btree split.
1666  */
1667 STATIC int
1668 xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1669 		      xfs_dabuf_t **dead_bufp)
1670 {
1671 	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
1672 	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
1673 	xfs_fileoff_t lastoff;
1674 	xfs_inode_t *ip;
1675 	xfs_trans_t *tp;
1676 	xfs_mount_t *mp;
1677 	int error, w, entno, level, dead_level;
1678 	xfs_da_blkinfo_t *dead_info, *sib_info;
1679 	xfs_da_intnode_t *par_node, *dead_node;
1680 	xfs_dir2_leaf_t *dead_leaf2;
1681 	xfs_dahash_t dead_hash;
1682 
1683 	dead_buf = *dead_bufp;
1684 	dead_blkno = *dead_blknop;
1685 	tp = args->trans;
1686 	ip = args->dp;
1687 	w = args->whichfork;
1688 	ASSERT(w == XFS_DATA_FORK);
1689 	mp = ip->i_mount;
1690 	lastoff = mp->m_dirfreeblk;
1691 	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
1692 	if (error)
1693 		return error;
1694 	if (unlikely(lastoff == 0)) {
1695 		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
1696 				 mp);
1697 		return XFS_ERROR(EFSCORRUPTED);
1698 	}
1699 	/*
1700 	 * Read the last block in the btree space.
1701 	 */
1702 	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
1703 	if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
1704 		return error;
1705 	/*
1706 	 * Copy the last block into the dead buffer and log it.
1707 	 */
1708 	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
1709 	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
1710 	dead_info = dead_buf->data;
1711 	/*
1712 	 * Get values from the moved block.
1713 	 */
1714 	if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) {
1715 		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
1716 		dead_level = 0;
1717 		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
1718 	} else {
1719 		ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC);
1720 		dead_node = (xfs_da_intnode_t *)dead_info;
1721 		dead_level = be16_to_cpu(dead_node->hdr.level);
1722 		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
1723 	}
1724 	sib_buf = par_buf = NULL;
1725 	/*
1726 	 * If the moved block has a left sibling, fix up the pointers.
1727 	 */
1728 	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1729 		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1730 			goto done;
1731 		sib_info = sib_buf->data;
1732 		if (unlikely(
1733 		    be32_to_cpu(sib_info->forw) != last_blkno ||
1734 		    sib_info->magic != dead_info->magic)) {
1735 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
1736 					 XFS_ERRLEVEL_LOW, mp);
1737 			error = XFS_ERROR(EFSCORRUPTED);
1738 			goto done;
1739 		}
1740 		sib_info->forw = cpu_to_be32(dead_blkno);
1741 		xfs_da_log_buf(tp, sib_buf,
1742 			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1743 					sizeof(sib_info->forw)));
1744 		xfs_da_buf_done(sib_buf);
1745 		sib_buf = NULL;
1746 	}
1747 	/*
1748 	 * If the moved block has a right sibling, fix up the pointers.
1749 	 */
1750 	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1751 		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1752 			goto done;
1753 		sib_info = sib_buf->data;
1754 		if (unlikely(
1755 		       be32_to_cpu(sib_info->back) != last_blkno ||
1756 		       sib_info->magic != dead_info->magic)) {
1757 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
1758 					 XFS_ERRLEVEL_LOW, mp);
1759 			error = XFS_ERROR(EFSCORRUPTED);
1760 			goto done;
1761 		}
1762 		sib_info->back = cpu_to_be32(dead_blkno);
1763 		xfs_da_log_buf(tp, sib_buf,
1764 			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1765 					sizeof(sib_info->back)));
1766 		xfs_da_buf_done(sib_buf);
1767 		sib_buf = NULL;
1768 	}
1769 	par_blkno = mp->m_dirleafblk;
1770 	level = -1;
1771 	/*
1772 	 * Walk down the tree looking for the parent of the moved block.
1773 	 */
1774 	for (;;) {
1775 		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1776 			goto done;
1777 		par_node = par_buf->data;
1778 		if (unlikely(
1779 		    be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC ||
1780 		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
1781 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
1782 					 XFS_ERRLEVEL_LOW, mp);
1783 			error = XFS_ERROR(EFSCORRUPTED);
1784 			goto done;
1785 		}
1786 		level = be16_to_cpu(par_node->hdr.level);
1787 		for (entno = 0;
1788 		     entno < be16_to_cpu(par_node->hdr.count) &&
1789 		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
1790 		     entno++)
1791 			continue;
1792 		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
1793 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
1794 					 XFS_ERRLEVEL_LOW, mp);
1795 			error = XFS_ERROR(EFSCORRUPTED);
1796 			goto done;
1797 		}
1798 		par_blkno = be32_to_cpu(par_node->btree[entno].before);
1799 		if (level == dead_level + 1)
1800 			break;
1801 		xfs_da_brelse(tp, par_buf);
1802 		par_buf = NULL;
1803 	}
1804 	/*
1805 	 * We're in the right parent block.
1806 	 * Look for the right entry.
1807 	 */
1808 	for (;;) {
1809 		for (;
1810 		     entno < be16_to_cpu(par_node->hdr.count) &&
1811 		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
1812 		     entno++)
1813 			continue;
1814 		if (entno < be16_to_cpu(par_node->hdr.count))
1815 			break;
1816 		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1817 		xfs_da_brelse(tp, par_buf);
1818 		par_buf = NULL;
1819 		if (unlikely(par_blkno == 0)) {
1820 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
1821 					 XFS_ERRLEVEL_LOW, mp);
1822 			error = XFS_ERROR(EFSCORRUPTED);
1823 			goto done;
1824 		}
1825 		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1826 			goto done;
1827 		par_node = par_buf->data;
1828 		if (unlikely(
1829 		    be16_to_cpu(par_node->hdr.level) != level ||
1830 		    be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) {
1831 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
1832 					 XFS_ERRLEVEL_LOW, mp);
1833 			error = XFS_ERROR(EFSCORRUPTED);
1834 			goto done;
1835 		}
1836 		entno = 0;
1837 	}
1838 	/*
1839 	 * Update the parent entry pointing to the moved block.
1840 	 */
1841 	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1842 	xfs_da_log_buf(tp, par_buf,
1843 		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1844 				sizeof(par_node->btree[entno].before)));
1845 	xfs_da_buf_done(par_buf);
1846 	xfs_da_buf_done(dead_buf);
1847 	*dead_blknop = last_blkno;
1848 	*dead_bufp = last_buf;
1849 	return 0;
1850 done:
1851 	if (par_buf)
1852 		xfs_da_brelse(tp, par_buf);
1853 	if (sib_buf)
1854 		xfs_da_brelse(tp, sib_buf);
1855 	xfs_da_brelse(tp, last_buf);
1856 	return error;
1857 }
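
/*
 * The core of the swap-lastblock trick above, as a standalone sketch:
 * copy the tail block over the block being freed, then repair the
 * neighbours' sibling pointers so they address the copy's new home.
 * The kernel version also fixes the parent btree entry and logs every
 * change; this illustration covers only the sibling fixup.
 */
#include <string.h>

struct blk {
	struct blk	*forw;		/* right sibling */
	struct blk	*back;		/* left sibling */
	char		payload[64];
};

/* Move *last into the slot occupied by *dead; the old tail is now free. */
static void swap_lastblock(struct blk *dead, struct blk *last)
{
	memcpy(dead, last, sizeof(*dead));
	if (dead->back)
		dead->back->forw = dead;	/* left sibling points here now */
	if (dead->forw)
		dead->forw->back = dead;	/* right sibling points here now */
}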
1858 
1859 /*
1860  * Remove a btree block from a directory or attribute.
1861  */
1862 int
1863 xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
1864 		    xfs_dabuf_t *dead_buf)
1865 {
1866 	xfs_inode_t *dp;
1867 	int done, error, w, count;
1868 	xfs_trans_t *tp;
1869 	xfs_mount_t *mp;
1870 
1871 	dp = args->dp;
1872 	w = args->whichfork;
1873 	tp = args->trans;
1874 	mp = dp->i_mount;
1875 	if (w == XFS_DATA_FORK)
1876 		count = mp->m_dirblkfsbs;
1877 	else
1878 		count = 1;
1879 	for (;;) {
1880 		/*
1881 		 * Remove extents.  If we get ENOSPC for a dir we have to move
1882 		 * the last block to the place we want to kill.
1883 		 */
1884 		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
1885 				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
1886 				0, args->firstblock, args->flist, NULL,
1887 				&done)) == ENOSPC) {
1888 			if (w != XFS_DATA_FORK)
1889 				break;
1890 			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
1891 					&dead_buf)))
1892 				break;
1893 		} else {
1894 			break;
1895 		}
1896 	}
1897 	xfs_da_binval(tp, dead_buf);
1898 	return error;
1899 }
1900 
1901 /*
1902  * See if the mapping(s) for this btree block are valid, i.e.
1903  * don't contain holes, are logically contiguous, and cover the whole range.
1904  */
1905 STATIC int
1906 xfs_da_map_covers_blocks(
1907 	int		nmap,
1908 	xfs_bmbt_irec_t	*mapp,
1909 	xfs_dablk_t	bno,
1910 	int		count)
1911 {
1912 	int		i;
1913 	xfs_fileoff_t	off;
1914 
1915 	for (i = 0, off = bno; i < nmap; i++) {
1916 		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
1917 		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
1918 			return 0;
1919 		}
1920 		if (off != mapp[i].br_startoff) {
1921 			return 0;
1922 		}
1923 		off += mapp[i].br_blockcount;
1924 	}
1925 	return off == bno + count;
1926 }
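
/*
 * The contiguity check above, restated with a worked example: the
 * mappings must start at bno, butt up against each other with no
 * holes, and end exactly at bno + count.  The struct below is a
 * simplified stand-in for xfs_bmbt_irec_t.
 */
#include <assert.h>

struct irec { long startoff; long blockcount; int is_hole; };

static int covers_blocks(int nmap, struct irec *mapp, long bno, long count)
{
	long off = bno;
	int i;

	for (i = 0; i < nmap; i++) {
		if (mapp[i].is_hole || off != mapp[i].startoff)
			return 0;
		off += mapp[i].blockcount;
	}
	return off == bno + count;
}

static void covers_blocks_example(void)
{
	struct irec ok[]  = { { 8, 2, 0 }, { 10, 6, 0 } };	/* covers [8,16) */
	struct irec gap[] = { { 8, 2, 0 }, { 12, 4, 0 } };	/* hole at 10,11 */

	assert(covers_blocks(2, ok, 8, 8));
	assert(!covers_blocks(2, gap, 8, 8));
}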
1927 
1928 /*
1929  * Make a dabuf.
1930  * Used for get_buf, read_buf, read_bufr, and reada_buf.
1931  */
1932 STATIC int
1933 xfs_da_do_buf(
1934 	xfs_trans_t	*trans,
1935 	xfs_inode_t	*dp,
1936 	xfs_dablk_t	bno,
1937 	xfs_daddr_t	*mappedbnop,
1938 	xfs_dabuf_t	**bpp,
1939 	int		whichfork,
1940 	int		caller,
1941 	inst_t		*ra)
1942 {
1943 	xfs_buf_t	*bp = NULL;
1944 	xfs_buf_t	**bplist;
1945 	int		error=0;
1946 	int		i;
1947 	xfs_bmbt_irec_t	map;
1948 	xfs_bmbt_irec_t	*mapp;
1949 	xfs_daddr_t	mappedbno;
1950 	xfs_mount_t	*mp;
1951 	int		nbplist=0;
1952 	int		nfsb;
1953 	int		nmap;
1954 	xfs_dabuf_t	*rbp;
1955 
1956 	mp = dp->i_mount;
1957 	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
1958 	mappedbno = *mappedbnop;
1959 	/*
1960 	 * Caller doesn't have a mapping.  -2 means don't complain
1961 	 * if we land in a hole.
1962 	 */
1963 	if (mappedbno == -1 || mappedbno == -2) {
1964 		/*
1965 		 * Optimize the one-block case.
1966 		 */
1967 		if (nfsb == 1) {
1968 			xfs_fsblock_t	fsb;
1969 
1970 			if ((error =
1971 			    xfs_bmapi_single(trans, dp, whichfork, &fsb,
1972 				    (xfs_fileoff_t)bno))) {
1973 				return error;
1974 			}
1975 			mapp = &map;
1976 			if (fsb == NULLFSBLOCK) {
1977 				nmap = 0;
1978 			} else {
1979 				map.br_startblock = fsb;
1980 				map.br_startoff = (xfs_fileoff_t)bno;
1981 				map.br_blockcount = 1;
1982 				nmap = 1;
1983 			}
1984 		} else {
1985 			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
1986 			nmap = nfsb;
1987 			if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
1988 					nfsb,
1989 					XFS_BMAPI_METADATA |
1990 						xfs_bmapi_aflag(whichfork),
1991 					NULL, 0, mapp, &nmap, NULL, NULL)))
1992 				goto exit0;
1993 		}
1994 	} else {
1995 		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
1996 		map.br_startoff = (xfs_fileoff_t)bno;
1997 		map.br_blockcount = nfsb;
1998 		mapp = &map;
1999 		nmap = 1;
2000 	}
2001 	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
2002 		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
2003 		if (unlikely(error == EFSCORRUPTED)) {
2004 			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2005 				cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
2006 					(long long)bno);
2007 				cmn_err(CE_ALERT, "dir: inode %lld\n",
2008 					(long long)dp->i_ino);
2009 				for (i = 0; i < nmap; i++) {
2010 					cmn_err(CE_ALERT,
2011 						"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d\n",
2012 						i,
2013 						(long long)mapp[i].br_startoff,
2014 						(long long)mapp[i].br_startblock,
2015 						(long long)mapp[i].br_blockcount,
2016 						mapp[i].br_state);
2017 				}
2018 			}
2019 			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2020 					 XFS_ERRLEVEL_LOW, mp);
2021 		}
2022 		goto exit0;
2023 	}
2024 	if (caller != 3 && nmap > 1) {
2025 		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
2026 		nbplist = 0;
2027 	} else
2028 		bplist = NULL;
2029 	/*
2030 	 * Turn the mapping(s) into buffer(s).
2031 	 */
2032 	for (i = 0; i < nmap; i++) {
2033 		int	nmapped;
2034 
2035 		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
2036 		if (i == 0)
2037 			*mappedbnop = mappedbno;
2038 		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
2039 		switch (caller) {
2040 		case 0:
2041 			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
2042 				mappedbno, nmapped, 0);
2043 			error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO);
2044 			break;
2045 		case 1:
2046 		case 2:
2047 			bp = NULL;
2048 			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
2049 				mappedbno, nmapped, 0, &bp);
2050 			break;
2051 		case 3:
2052 			xfs_baread(mp->m_ddev_targp, mappedbno, nmapped);
2053 			error = 0;
2054 			bp = NULL;
2055 			break;
2056 		}
2057 		if (error) {
2058 			if (bp)
2059 				xfs_trans_brelse(trans, bp);
2060 			goto exit1;
2061 		}
2062 		if (!bp)
2063 			continue;
2064 		if (caller == 1) {
2065 			if (whichfork == XFS_ATTR_FORK) {
2066 				XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
2067 						XFS_ATTR_BTREE_REF);
2068 			} else {
2069 				XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
2070 						XFS_DIR_BTREE_REF);
2071 			}
2072 		}
2073 		if (bplist) {
2074 			bplist[nbplist++] = bp;
2075 		}
2076 	}
2077 	/*
2078 	 * Build a dabuf structure.
2079 	 */
2080 	if (bplist) {
2081 		rbp = xfs_da_buf_make(nbplist, bplist, ra);
2082 	} else if (bp)
2083 		rbp = xfs_da_buf_make(1, &bp, ra);
2084 	else
2085 		rbp = NULL;
2086 	/*
2087 	 * For read_buf, check the magic number.
2088 	 */
2089 	if (caller == 1) {
2090 		xfs_dir2_data_t		*data;
2091 		xfs_dir2_free_t		*free;
2092 		xfs_da_blkinfo_t	*info;
2093 		uint			magic, magic1;
2094 
2095 		info = rbp->data;
2096 		data = rbp->data;
2097 		free = rbp->data;
2098 		magic = be16_to_cpu(info->magic);
2099 		magic1 = be32_to_cpu(data->hdr.magic);
2100 		if (unlikely(
2101 		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2102 				   (magic != XFS_ATTR_LEAF_MAGIC) &&
2103 				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
2104 				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
2105 				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2106 				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
2107 				   (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC),
2108 				mp, XFS_ERRTAG_DA_READ_BUF,
2109 				XFS_RANDOM_DA_READ_BUF))) {
2110 			xfs_buftrace("DA READ ERROR", rbp->bps[0]);
2111 			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2112 					     XFS_ERRLEVEL_LOW, mp, info);
2113 			error = XFS_ERROR(EFSCORRUPTED);
2114 			xfs_da_brelse(trans, rbp);
2115 			nbplist = 0;
2116 			goto exit1;
2117 		}
2118 	}
2119 	if (bplist) {
2120 		kmem_free(bplist);
2121 	}
2122 	if (mapp != &map) {
2123 		kmem_free(mapp);
2124 	}
2125 	if (bpp)
2126 		*bpp = rbp;
2127 	return 0;
2128 exit1:
2129 	if (bplist) {
2130 		for (i = 0; i < nbplist; i++)
2131 			xfs_trans_brelse(trans, bplist[i]);
2132 		kmem_free(bplist);
2133 	}
2134 exit0:
2135 	if (mapp != &map)
2136 		kmem_free(mapp);
2137 	if (bpp)
2138 		*bpp = NULL;
2139 	return error;
2140 }
2141 
2142 /*
2143  * Get a buffer for the dir/attr block.
2144  */
2145 int
2146 xfs_da_get_buf(
2147 	xfs_trans_t	*trans,
2148 	xfs_inode_t	*dp,
2149 	xfs_dablk_t	bno,
2150 	xfs_daddr_t	mappedbno,
2151 	xfs_dabuf_t	**bpp,
2152 	int		whichfork)
2153 {
2154 	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0,
2155 						 (inst_t *)__return_address);
2156 }
2157 
2158 /*
2159  * Get a buffer for the dir/attr block, fill in the contents.
2160  */
2161 int
2162 xfs_da_read_buf(
2163 	xfs_trans_t	*trans,
2164 	xfs_inode_t	*dp,
2165 	xfs_dablk_t	bno,
2166 	xfs_daddr_t	mappedbno,
2167 	xfs_dabuf_t	**bpp,
2168 	int		whichfork)
2169 {
2170 	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1,
2171 		(inst_t *)__return_address);
2172 }
2173 
2174 /*
2175  * Readahead the dir/attr block.
2176  */
2177 xfs_daddr_t
2178 xfs_da_reada_buf(
2179 	xfs_trans_t	*trans,
2180 	xfs_inode_t	*dp,
2181 	xfs_dablk_t	bno,
2182 	int		whichfork)
2183 {
2184 	xfs_daddr_t	rval;
2185 
2186 	rval = -1;
2187 	if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3,
2188 			(inst_t *)__return_address))
2189 		return -1;
2190 	else
2191 		return rval;
2192 }
2193 
2194 kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
2195 kmem_zone_t *xfs_dabuf_zone;		/* dabuf zone */
2196 
2197 /*
2198  * Allocate a dir-state structure.
2199  * We don't put them on the stack since they're large.
2200  */
2201 xfs_da_state_t *
2202 xfs_da_state_alloc(void)
2203 {
2204 	return kmem_zone_zalloc(xfs_da_state_zone, KM_SLEEP);
2205 }
2206 
2207 /*
2208  * Kill the altpath contents of a da-state structure.
2209  */
2210 STATIC void
2211 xfs_da_state_kill_altpath(xfs_da_state_t *state)
2212 {
2213 	int	i;
2214 
2215 	for (i = 0; i < state->altpath.active; i++) {
2216 		if (state->altpath.blk[i].bp) {
2217 			if (state->altpath.blk[i].bp != state->path.blk[i].bp)
2218 				xfs_da_buf_done(state->altpath.blk[i].bp);
2219 			state->altpath.blk[i].bp = NULL;
2220 		}
2221 	}
2222 	state->altpath.active = 0;
2223 }
2224 
2225 /*
2226  * Free a da-state structure.
2227  */
2228 void
2229 xfs_da_state_free(xfs_da_state_t *state)
2230 {
2231 	int	i;
2232 
2233 	xfs_da_state_kill_altpath(state);
2234 	for (i = 0; i < state->path.active; i++) {
2235 		if (state->path.blk[i].bp)
2236 			xfs_da_buf_done(state->path.blk[i].bp);
2237 	}
2238 	if (state->extravalid && state->extrablk.bp)
2239 		xfs_da_buf_done(state->extrablk.bp);
2240 #ifdef DEBUG
2241 	memset((char *)state, 0, sizeof(*state));
2242 #endif /* DEBUG */
2243 	kmem_zone_free(xfs_da_state_zone, state);
2244 }
2245 
2246 #ifdef XFS_DABUF_DEBUG
2247 xfs_dabuf_t	*xfs_dabuf_global_list;
2248 static DEFINE_SPINLOCK(xfs_dabuf_global_lock);
2249 #endif
2250 
2251 /*
2252  * Create a dabuf.
2253  */
2254 /* ARGSUSED */
2255 STATIC xfs_dabuf_t *
2256 xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
2257 {
2258 	xfs_buf_t	*bp;
2259 	xfs_dabuf_t	*dabuf;
2260 	int		i;
2261 	int		off;
2262 
2263 	if (nbuf == 1)
2264 		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_SLEEP);
2265 	else
2266 		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_SLEEP);
2267 	dabuf->dirty = 0;
2268 #ifdef XFS_DABUF_DEBUG
2269 	dabuf->ra = ra;
2270 	dabuf->target = XFS_BUF_TARGET(bps[0]);
2271 	dabuf->blkno = XFS_BUF_ADDR(bps[0]);
2272 #endif
2273 	if (nbuf == 1) {
2274 		dabuf->nbuf = 1;
2275 		bp = bps[0];
2276 		dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
2277 		dabuf->data = XFS_BUF_PTR(bp);
2278 		dabuf->bps[0] = bp;
2279 	} else {
2280 		dabuf->nbuf = nbuf;
2281 		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
2282 			dabuf->bps[i] = bp = bps[i];
2283 			dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
2284 		}
2285 		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
2286 		for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
2287 			bp = bps[i];
2288 			memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
2289 				XFS_BUF_COUNT(bp));
2290 		}
2291 	}
2292 #ifdef XFS_DABUF_DEBUG
2293 	{
2294 		xfs_dabuf_t	*p;
2295 
2296 		spin_lock(&xfs_dabuf_global_lock);
2297 		for (p = xfs_dabuf_global_list; p; p = p->next) {
2298 			ASSERT(p->blkno != dabuf->blkno ||
2299 			       p->target != dabuf->target);
2300 		}
2301 		dabuf->prev = NULL;
2302 		if (xfs_dabuf_global_list)
2303 			xfs_dabuf_global_list->prev = dabuf;
2304 		dabuf->next = xfs_dabuf_global_list;
2305 		xfs_dabuf_global_list = dabuf;
2306 		spin_unlock(&xfs_dabuf_global_lock);
2307 	}
2308 #endif
2309 	return dabuf;
2310 }
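
/*
 * What xfs_da_buf_make does in the multi-buffer case, sketched in
 * isolation: when one logical block is backed by several discontiguous
 * buffers, gather their contents into a single contiguous shadow copy
 * so callers can use plain pointer arithmetic across the whole block.
 * xfs_da_buf_clean below performs the inverse scatter for dirty data.
 */
#include <stdlib.h>
#include <string.h>

struct chunk { char *data; size_t len; };

static char *gather(struct chunk *bps, int nbuf, size_t *total)
{
	char	*shadow;
	size_t	off = 0;
	int	i;

	for (i = 0, *total = 0; i < nbuf; i++)
		*total += bps[i].len;
	shadow = malloc(*total);
	if (!shadow)
		return NULL;
	for (i = 0; i < nbuf; i++) {
		memcpy(shadow + off, bps[i].data, bps[i].len);
		off += bps[i].len;
	}
	return shadow;
}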
2311 
2312 /*
2313  * Un-dirty a dabuf.
2314  */
2315 STATIC void
2316 xfs_da_buf_clean(xfs_dabuf_t *dabuf)
2317 {
2318 	xfs_buf_t	*bp;
2319 	int		i;
2320 	int		off;
2321 
2322 	if (dabuf->dirty) {
2323 		ASSERT(dabuf->nbuf > 1);
2324 		dabuf->dirty = 0;
2325 		for (i = off = 0; i < dabuf->nbuf;
2326 				i++, off += XFS_BUF_COUNT(bp)) {
2327 			bp = dabuf->bps[i];
2328 			memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
2329 				XFS_BUF_COUNT(bp));
2330 		}
2331 	}
2332 }
2333 
2334 /*
2335  * Release a dabuf.
2336  */
2337 void
2338 xfs_da_buf_done(xfs_dabuf_t *dabuf)
2339 {
2340 	ASSERT(dabuf);
2341 	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2342 	if (dabuf->dirty)
2343 		xfs_da_buf_clean(dabuf);
2344 	if (dabuf->nbuf > 1)
2345 		kmem_free(dabuf->data);
2346 #ifdef XFS_DABUF_DEBUG
2347 	{
2348 		spin_lock(&xfs_dabuf_global_lock);
2349 		if (dabuf->prev)
2350 			dabuf->prev->next = dabuf->next;
2351 		else
2352 			xfs_dabuf_global_list = dabuf->next;
2353 		if (dabuf->next)
2354 			dabuf->next->prev = dabuf->prev;
2355 		spin_unlock(&xfs_dabuf_global_lock);
2356 	}
2357 	memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf));
2358 #endif
2359 	if (dabuf->nbuf == 1)
2360 		kmem_zone_free(xfs_dabuf_zone, dabuf);
2361 	else
2362 		kmem_free(dabuf);
2363 }
2364 
2365 /*
2366  * Log transaction from a dabuf.
2367  */
2368 void
2369 xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
2370 {
2371 	xfs_buf_t	*bp;
2372 	uint		f;
2373 	int		i;
2374 	uint		l;
2375 	int		off;
2376 
2377 	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2378 	if (dabuf->nbuf == 1) {
2379 		ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0]));
2380 		xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
2381 		return;
2382 	}
2383 	dabuf->dirty = 1;
2384 	ASSERT(first <= last);
2385 	for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
2386 		bp = dabuf->bps[i];
2387 		f = off;
2388 		l = f + XFS_BUF_COUNT(bp) - 1;
2389 		if (f < first)
2390 			f = first;
2391 		if (l > last)
2392 			l = last;
2393 		if (f <= l)
2394 			xfs_trans_log_buf(tp, bp, f - off, l - off);
2395 		/*
2396 		 * B_DONE is set by xfs_trans_log_buf.
2397 		 * If we don't set it on a new buffer (get, not read)
2398 		 * and nothing is ever logged in it, it stays unset;
2399 		 * at commit the buffer is released into the cache
2400 		 * and a subsequent read will fail.
2401 		 */
2402 		else if (!(XFS_BUF_ISDONE(bp)))
2403 			XFS_BUF_DONE(bp);
2404 	}
2405 	ASSERT(last < off);
2406 }
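
/*
 * The range-splitting arithmetic above, sketched on its own: a byte
 * range [first, last] over the concatenated buffers is clamped against
 * each buffer's window [off, off + len - 1], and only the overlapping
 * part is logged, translated to buffer-local offsets.  log_range() is
 * a hypothetical stand-in for xfs_trans_log_buf.
 */
#include <stdio.h>

static void log_range(int buf, unsigned int f, unsigned int l)
{
	printf("buffer %d: log bytes %u..%u\n", buf, f, l);
}

static void log_split(const unsigned int *lens, int nbuf,
		      unsigned int first, unsigned int last)
{
	unsigned int	off = 0, f, l;
	int		i;

	for (i = 0; i < nbuf; i++) {
		f = off > first ? off : first;
		l = off + lens[i] - 1 < last ? off + lens[i] - 1 : last;
		if (f <= l)
			log_range(i, f - off, l - off);
		off += lens[i];
	}
}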
2407 
2408 /*
2409  * Release dabuf from a transaction.
2410  * Have to free up the dabuf before the buffers are released,
2411  * since the synchronization on the dabuf is really the lock on the buffer.
2412  */
2413 void
2414 xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2415 {
2416 	xfs_buf_t	*bp;
2417 	xfs_buf_t	**bplist;
2418 	int		i;
2419 	int		nbuf;
2420 
2421 	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2422 	if ((nbuf = dabuf->nbuf) == 1) {
2423 		bplist = &bp;
2424 		bp = dabuf->bps[0];
2425 	} else {
2426 		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2427 		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2428 	}
2429 	xfs_da_buf_done(dabuf);
2430 	for (i = 0; i < nbuf; i++)
2431 		xfs_trans_brelse(tp, bplist[i]);
2432 	if (bplist != &bp)
2433 		kmem_free(bplist);
2434 }
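
/*
 * The release ordering used above (and by xfs_da_binval below), in
 * isolation: since the dabuf's only synchronization is the lock on its
 * underlying buffers, the buffer pointers are snapshotted first, the
 * wrapper is destroyed, and only then are the buffers released.  The
 * types and helpers here are hypothetical stand-ins.
 */
#include <stdlib.h>
#include <string.h>

struct buf { int locked; };
struct wrapper { int nbuf; struct buf *bps[8]; };

static void release_buf(struct buf *bp) { bp->locked = 0; }
static void wrapper_done(struct wrapper *w) { free(w); }

static void wrapper_release(struct wrapper *w)
{
	struct buf	*one, **list;
	int		i, n = w->nbuf;

	if (n == 1) {
		one = w->bps[0];
		list = &one;
	} else {
		list = malloc(n * sizeof(*list));
		if (!list)
			return;		/* error handling elided in sketch */
		memcpy(list, w->bps, n * sizeof(*list));
	}
	wrapper_done(w);	/* w is gone; the snapshot keeps us safe */
	for (i = 0; i < n; i++)
		release_buf(list[i]);
	if (list != &one)
		free(list);
}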
2435 
2436 /*
2437  * Invalidate dabuf from a transaction.
2438  */
2439 void
2440 xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2441 {
2442 	xfs_buf_t	*bp;
2443 	xfs_buf_t	**bplist;
2444 	int		i;
2445 	int		nbuf;
2446 
2447 	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2448 	if ((nbuf = dabuf->nbuf) == 1) {
2449 		bplist = &bp;
2450 		bp = dabuf->bps[0];
2451 	} else {
2452 		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2453 		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2454 	}
2455 	xfs_da_buf_done(dabuf);
2456 	for (i = 0; i < nbuf; i++)
2457 		xfs_trans_binval(tp, bplist[i]);
2458 	if (bplist != &bp)
2459 		kmem_free(bplist);
2460 }
2461 
2462 /*
2463  * Get the first daddr from a dabuf.
2464  */
2465 xfs_daddr_t
2466 xfs_da_blkno(xfs_dabuf_t *dabuf)
2467 {
2468 	ASSERT(dabuf->nbuf);
2469 	ASSERT(dabuf->data);
2470 	return XFS_BUF_ADDR(dabuf->bps[0]);
2471 }
2472