• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "ext4_utils.h"
18 #include "allocate.h"
19 
20 #include <sparse/sparse.h>
21 
22 #include <stdio.h>
23 #include <stdlib.h>
24 
/* A contiguous run of blocks within a single block group, linked into a
 * doubly-linked list (struct region_list, declared in allocate.h). */
struct region {
	u32 block;		/* absolute block number of the first block in the run */
	u32 len;		/* length of the run, in blocks */
	int bg;			/* index of the block group the run was allocated from */
	struct region *next;
	struct region *prev;
};
32 
/* Per-block-group bookkeeping: buffers for the on-disk bitmaps and inode
 * table, plus allocator state for blocks and inodes in the group. */
struct block_group_info {
	u32 first_block;	/* absolute block number of the group's first block */
	int header_blocks;	/* blocks used by bitmaps, inode table and (optionally) sb + descriptors */
	int data_blocks_used;	/* blocks handed out for data from this group */
	int has_superblock;	/* nonzero if this group carries a superblock copy */
	u8 *bitmaps;		/* backing buffer: block bitmap then inode bitmap */
	u8 *block_bitmap;	/* points at bitmaps */
	u8 *inode_bitmap;	/* points at bitmaps + one block */
	u8 *inode_table;	/* lazily created by allocate_bg_inode_table() */
	u32 free_blocks;
	u32 first_free_block;	/* group-relative index of the next free block */
	u32 free_inodes;
	u32 first_free_inode;	/* 1-based index of the next free inode */
	u16 flags;		/* EXT4_BG_* flags */
	u16 used_dirs;		/* count of directory inodes in this group */
};
49 
/* Node of a singly-linked cache mapping an inode to its extended-attribute
 * block, so repeated get_xattr_block_for_inode() calls reuse one block. */
struct xattr_list_element {
	struct ext4_inode *inode;
	struct ext4_xattr_header *header;
	struct xattr_list_element *next;
};
55 
create_allocation()56 struct block_allocation *create_allocation()
57 {
58 	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
59 	alloc->list.first = NULL;
60 	alloc->list.last = NULL;
61 	alloc->oob_list.first = NULL;
62 	alloc->oob_list.last = NULL;
63 	alloc->list.iter = NULL;
64 	alloc->list.partial_iter = 0;
65 	alloc->oob_list.iter = NULL;
66 	alloc->oob_list.partial_iter = 0;
67 	alloc->filename = NULL;
68 	alloc->next = NULL;
69 	return alloc;
70 }
71 
xattr_list_find(struct ext4_inode * inode)72 static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
73 {
74 	struct xattr_list_element *element;
75 	for (element = aux_info.xattrs; element != NULL; element = element->next) {
76 		if (element->inode == inode)
77 			return element->header;
78 	}
79 	return NULL;
80 }
81 
xattr_list_insert(struct ext4_inode * inode,struct ext4_xattr_header * header)82 static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
83 {
84 	struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
85 	element->inode = inode;
86 	element->header = header;
87 	element->next = aux_info.xattrs;
88 	aux_info.xattrs = element;
89 }
90 
region_list_remove(struct region_list * list,struct region * reg)91 static void region_list_remove(struct region_list *list, struct region *reg)
92 {
93 	if (reg->prev)
94 		reg->prev->next = reg->next;
95 
96 	if (reg->next)
97 		reg->next->prev = reg->prev;
98 
99 	if (list->first == reg)
100 		list->first = reg->next;
101 
102 	if (list->last == reg)
103 		list->last = reg->prev;
104 
105 	reg->next = NULL;
106 	reg->prev = NULL;
107 }
108 
region_list_append(struct region_list * list,struct region * reg)109 static void region_list_append(struct region_list *list, struct region *reg)
110 {
111 	if (list->first == NULL) {
112 		list->first = reg;
113 		list->last = reg;
114 		list->iter = reg;
115 		list->partial_iter = 0;
116 		reg->prev = NULL;
117 	} else {
118 		list->last->next = reg;
119 		reg->prev = list->last;
120 		list->last = reg;
121 	}
122 	reg->next = NULL;
123 }
124 
#if 0
/* Debug helper: prints every region from reg to the end of its list.
   (Fixed while disabled: the printf call was missing its semicolon, and
   u32 fields were printed with %d; %p requires a void * argument.) */
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %u-%u (%u)\n", (void *)reg,
			   reg->block, reg->block + reg->len - 1, reg->len);
	}
}

/* Debug helper: prints both the main and out-of-band region lists. */
static void dump_region_lists(struct block_allocation *alloc) {

	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif
143 
print_blocks(FILE * f,struct block_allocation * alloc)144 void print_blocks(FILE* f, struct block_allocation *alloc)
145 {
146 	struct region *reg;
147 	for (reg = alloc->list.first; reg; reg = reg->next) {
148 		if (reg->len == 1) {
149 			fprintf(f, " %d", reg->block);
150 		} else {
151 			fprintf(f, " %d-%d", reg->block, reg->block + reg->len - 1);
152 		}
153 	}
154 	fputc('\n', f);
155 }
156 
/* Appends a new region of len blocks starting at block (belonging to
   block group bg_num) to alloc's main region list.  Aborts via
   critical_error_errno() on out-of-memory. */
void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	/* region_list_append() initializes reg->prev. */
	region_list_append(&alloc->list, reg);
}
169 
allocate_bg_inode_table(struct block_group_info * bg)170 static void allocate_bg_inode_table(struct block_group_info *bg)
171 {
172 	if (bg->inode_table != NULL)
173 		return;
174 
175 	u32 block = bg->first_block + 2;
176 
177 	if (bg->has_superblock)
178 		block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;
179 
180 	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
181 	if (bg->inode_table == NULL)
182 		critical_error_errno("calloc");
183 
184 	sparse_file_add_data(ext4_sparse_file, bg->inode_table,
185 			aux_info.inode_table_blocks	* info.block_size, block);
186 
187 	bg->flags &= ~EXT4_BG_INODE_UNINIT;
188 }
189 
/* Sets bit 'bit' in bitmap.  Returns 1 if it was already set, else 0. */
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	u32 index = bit / 8;
	u8 mask = 1 << (bit % 8);

	if (bitmap[index] & mask)
		return 1;

	bitmap[index] |= mask;
	return 0;
}
198 
/* Sets all eight bits of the byte containing bit 'bit'.  Returns the
   byte's previous value: nonzero means some bit was already set. */
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	u32 index = bit / 8;
	int previous = bitmap[index];

	bitmap[index] = 0xFF;
	return previous;
}
205 
/* Marks the num blocks starting at group-relative block 'start' as used,
   and accounts for them in the block group free block info.  Returns -1
   if the group has too few free blocks or a block is already reserved;
   note that partially set bits are NOT rolled back on failure. */
static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	if (num > bg->free_blocks)
		return -1;

	/* Set single bits until block is byte-aligned within the bitmap. */
	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	/* Set whole bytes at a time; (num & ~7) rounds num down to a
	   multiple of 8 so the loop never overruns the request. */
	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	/* Set the trailing partial byte bit by bit. */
	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	bg->free_blocks -= num;
	/* Only advance the free frontier when reserving from it directly. */
	if (start == bg->first_free_block)
		bg->first_free_block = start + num;

	return 0;
}
243 
/* Returns the num_blocks most recently reserved blocks of bg to the free
   pool by clearing their bitmap bits and rewinding the free-block
   frontier.  Assumes those blocks were the last ones reserved. */
static void free_blocks(struct block_group_info *bg, u32 num_blocks)
{
	u32 i;

	for (i = 1; i <= num_blocks; i++) {
		u32 block = bg->first_free_block - i;
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	}

	bg->free_blocks += num_blocks;
	bg->first_free_block -= num_blocks;
}
253 
254 /* Reduces an existing allocation by len blocks by return the last blocks
255    to the free pool in their block group. Assumes that the blocks being
256    returned were the last ones allocated out of the block group */
reduce_allocation(struct block_allocation * alloc,u32 len)257 void reduce_allocation(struct block_allocation *alloc, u32 len)
258 {
259 	while (len) {
260 		struct region *last_reg = alloc->list.last;
261 
262 		if (last_reg->len > len) {
263 			free_blocks(&aux_info.bgs[last_reg->bg], len);
264 			last_reg->len -= len;
265 			len = 0;
266 		} else {
267 			struct region *reg = alloc->list.last->prev;
268 			free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
269 			len -= last_reg->len;
270 			if (reg) {
271 				reg->next = NULL;
272 			} else {
273 				alloc->list.first = NULL;
274 				alloc->list.last = NULL;
275 				alloc->list.iter = NULL;
276 				alloc->list.partial_iter = 0;
277 			}
278 			free(last_reg);
279 		}
280 	}
281 }
282 
init_bg(struct block_group_info * bg,unsigned int i)283 static void init_bg(struct block_group_info *bg, unsigned int i)
284 {
285 	int header_blocks = 2 + aux_info.inode_table_blocks;
286 
287 	bg->has_superblock = ext4_bg_has_super_block(i);
288 
289 	if (bg->has_superblock)
290 		header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
291 
292 	bg->bitmaps = calloc(info.block_size, 2);
293 	bg->block_bitmap = bg->bitmaps;
294 	bg->inode_bitmap = bg->bitmaps + info.block_size;
295 
296 	bg->header_blocks = header_blocks;
297 	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;
298 
299 	u32 block = bg->first_block;
300 	if (bg->has_superblock)
301 		block += 1 + aux_info.bg_desc_blocks +  info.bg_desc_reserve_blocks;
302 	sparse_file_add_data(ext4_sparse_file, bg->bitmaps, 2 * info.block_size,
303 			block);
304 
305 	bg->data_blocks_used = 0;
306 	bg->free_blocks = info.blocks_per_group;
307 	bg->first_free_block = 0;
308 	bg->free_inodes = info.inodes_per_group;
309 	bg->first_free_inode = 1;
310 	bg->flags = EXT4_BG_INODE_UNINIT;
311 
312 	if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
313 		error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
314 
315 	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
316 		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
317 		reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
318 	}
319 }
320 
block_allocator_init()321 void block_allocator_init()
322 {
323 	unsigned int i;
324 
325 	aux_info.bgs = calloc(sizeof(struct block_group_info), aux_info.groups);
326 	if (aux_info.bgs == NULL)
327 		critical_error_errno("calloc");
328 
329 	for (i = 0; i < aux_info.groups; i++)
330 		init_bg(&aux_info.bgs[i], i);
331 }
332 
block_allocator_free()333 void block_allocator_free()
334 {
335 	unsigned int i;
336 
337 	for (i = 0; i < aux_info.groups; i++) {
338 		free(aux_info.bgs[i].bitmaps);
339 		free(aux_info.bgs[i].inode_table);
340 	}
341 	free(aux_info.bgs);
342 }
343 
/* Reserves len contiguous blocks from the free frontier of block group
   bg_num.  Returns the absolute block number of the first reserved block,
   or EXT4_ALLOCATE_FAILED if the group cannot satisfy the request. */
static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
{
	struct block_group_info *bg = &aux_info.bgs[bg_num];
	u32 first_free;

	if (get_free_blocks(bg_num) < len)
		return EXT4_ALLOCATE_FAILED;

	first_free = bg->first_free_block;
	if (reserve_blocks(bg, first_free, len) < 0) {
		error("failed to reserve %u blocks in block group %u\n", len, bg_num);
		return EXT4_ALLOCATE_FAILED;
	}

	bg->data_blocks_used += len;

	/* Convert the group-relative index to an absolute block number. */
	return bg->first_block + first_free;
}
360 
361 /* Allocate a single block and return its block number */
allocate_block()362 u32 allocate_block()
363 {
364 	unsigned int i;
365 	for (i = 0; i < aux_info.groups; i++) {
366 		u32 block = ext4_allocate_blocks_from_block_group(1, i);
367 
368 		if (block != EXT4_ALLOCATE_FAILED)
369 			return block;
370 	}
371 
372 	return EXT4_ALLOCATE_FAILED;
373 }
374 
/* Chooses the block group that best fits a request for len contiguous
   blocks: the smallest group that can hold all of len, or, failing that,
   the group with the most free blocks.  Allocates as much of len as fits
   there and returns it as a newly malloc'd region, or NULL if no group
   has any free space or the reservation fails. */
static struct region *ext4_allocate_best_fit_partial(u32 len)
{
	unsigned int i;
	unsigned int found_bg = 0;
	u32 found_bg_len = 0;

	for (i = 0; i < aux_info.groups; i++) {
		u32 bg_len = aux_info.bgs[i].free_blocks;

		/* Prefer the tightest group that fits the whole request;
		   otherwise track the group with the most free blocks. */
		if ((len <= bg_len && (found_bg_len == 0 || bg_len < found_bg_len)) ||
		    (len > found_bg_len && bg_len > found_bg_len)) {
			found_bg = i;
			found_bg_len = bg_len;
		}
	}

	if (found_bg_len) {
		u32 allocate_len = min(len, found_bg_len);
		struct region *reg;
		u32 block = ext4_allocate_blocks_from_block_group(allocate_len, found_bg);
		if (block == EXT4_ALLOCATE_FAILED) {
			/* Fix: unsigned values were printed with %d. */
			error("failed to allocate %u blocks in block group %u", allocate_len, found_bg);
			return NULL;
		}
		reg = malloc(sizeof(struct region));
		/* Fix: the result was used unchecked. */
		if (reg == NULL)
			critical_error_errno("malloc");
		reg->block = block;
		reg->len = allocate_len;
		reg->next = NULL;
		reg->prev = NULL;
		reg->bg = found_bg;
		return reg;
	} else {
		error("failed to allocate %u blocks, out of space?", len);
	}

	return NULL;
}
412 
/* Allocates len blocks as a doubly-linked chain of one or more regions,
   repeatedly grabbing the best-fitting contiguous run until the request
   is satisfied.  Returns the head of the chain, or NULL on failure.
   NOTE(review): on partial failure the regions already allocated are
   neither freed nor returned to the free pool — preserved as-is. */
static struct region *ext4_allocate_best_fit(u32 len)
{
	struct region *head = NULL;
	struct region *tail = NULL;

	while (len > 0) {
		struct region *reg = ext4_allocate_best_fit_partial(len);
		if (reg == NULL)
			return NULL;

		if (head == NULL)
			head = reg;
		if (tail != NULL) {
			tail->next = reg;
			reg->prev = tail;
		}

		tail = reg;
		len -= reg->len;
	}

	return head;
}
438 
439 /* Allocate len blocks.  The blocks may be spread across multiple block groups,
440    and are returned in a linked list of the blocks in each block group.  The
441    allocation algorithm is:
442       1.  If the remaining allocation is larger than any available contiguous region,
443           allocate the largest contiguous region and loop
444       2.  Otherwise, allocate the smallest contiguous region that it fits in
445 */
allocate_blocks(u32 len)446 struct block_allocation *allocate_blocks(u32 len)
447 {
448 	struct region *reg = ext4_allocate_best_fit(len);
449 
450 	if (reg == NULL)
451 		return NULL;
452 
453 	struct block_allocation *alloc = create_allocation();
454 	alloc->list.first = reg;
455 	alloc->list.last = reg;
456 	alloc->list.iter = alloc->list.first;
457 	alloc->list.partial_iter = 0;
458 	return alloc;
459 }
460 
461 /* Returns the number of discontiguous regions used by an allocation */
block_allocation_num_regions(struct block_allocation * alloc)462 int block_allocation_num_regions(struct block_allocation *alloc)
463 {
464 	unsigned int i;
465 	struct region *reg = alloc->list.first;
466 
467 	for (i = 0; reg != NULL; reg = reg->next)
468 		i++;
469 
470 	return i;
471 }
472 
block_allocation_len(struct block_allocation * alloc)473 int block_allocation_len(struct block_allocation *alloc)
474 {
475 	unsigned int i;
476 	struct region *reg = alloc->list.first;
477 
478 	for (i = 0; reg != NULL; reg = reg->next)
479 		i += reg->len;
480 
481 	return i;
482 }
483 
484 /* Returns the block number of the block'th block in an allocation */
get_block(struct block_allocation * alloc,u32 block)485 u32 get_block(struct block_allocation *alloc, u32 block)
486 {
487 	struct region *reg = alloc->list.iter;
488 	block += alloc->list.partial_iter;
489 
490 	for (; reg; reg = reg->next) {
491 		if (block < reg->len)
492 			return reg->block + block;
493 		block -= reg->len;
494 	}
495 	return EXT4_ALLOCATE_FAILED;
496 }
497 
/* Like get_block(), but indexes into the out-of-band region list.
   Returns EXT4_ALLOCATE_FAILED when block is past the end. */
u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg;
	u32 offset = block + alloc->oob_list.partial_iter;

	for (reg = alloc->oob_list.iter; reg != NULL; reg = reg->next) {
		if (offset < reg->len)
			return reg->block + offset;
		offset -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}
510 
/* Gets the starting block and length in blocks of the first region
   of an allocation */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	/* Assumes list.iter is non-NULL, i.e. !last_region(alloc). */
	*block = alloc->list.iter->block;
	/* Exclude blocks already consumed within this region. */
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}
518 
/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	/* Assumes list.iter is non-NULL; iter becomes NULL past the last
	   region, which last_region() reports. */
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}
525 
/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	/* bg is assumed to be a valid group index (< aux_info.groups). */
	return aux_info.bgs[bg].free_blocks;
}
531 
/* Returns nonzero once the allocation's iterator has advanced past the
   last region (see get_next_region()). */
int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}
536 
/* Resets the allocation's iterator back to the first region. */
void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}
542 
/* Splits alloc's main list len blocks past the current iterator position,
   inserting a new region for the second half when the split point falls
   inside a region.  Advances the iterator to the start of the second half
   and returns the region where iteration began, or NULL if fewer than len
   blocks remain. */
static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	/* Skip whole regions fully covered by len. */
	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		/* Fix: the result was used unchecked. */
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		/* Fix: keep the doubly-linked list consistent — the old
		   successor's prev still pointed at reg, and list.last went
		   stale when splitting the tail region. */
		if (new->next)
			new->next->prev = new;
		else if (alloc->list.last == reg)
			alloc->list.last = new;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		return reg;
	}
}
576 
/* Splits an allocation into two allocations.  The returned allocation will
   point to the first half, and the original allocation ptr will point to the
   second half.  Returns NULL if fewer than len blocks remain past the
   current position. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}
590 
/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	/* Move every region between the old and the new iterator position
	   from the main list onto the out-of-band list. */
	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}
609 
advance_list_ptr(struct region_list * list,int blocks)610 static int advance_list_ptr(struct region_list *list, int blocks)
611 {
612 	struct region *reg = list->iter;
613 
614 	while (reg != NULL && blocks > 0) {
615 		if (reg->len > list->partial_iter + blocks) {
616 			list->partial_iter += blocks;
617 			return 0;
618 		}
619 
620 		blocks -= (reg->len - list->partial_iter);
621 		list->partial_iter = 0;
622 		reg = reg->next;
623 	}
624 
625 	if (blocks > 0)
626 		return -1;
627 
628 	return 0;
629 }
630 
/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	/* Returns 0 on success, -1 if fewer than 'blocks' blocks remain. */
	return advance_list_ptr(&alloc->list, blocks);
}
636 
/* Move the out-of-band allocation pointer forward; returns 0 on success,
   -1 if fewer than 'blocks' blocks remain. */
int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}
641 
/* Allocates len additional blocks and appends the resulting regions to
   alloc's out-of-band list.  Returns 0 on success, -1 on failure. */
int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	/* Fix: region_list_append() clears reg->next, so the previous
	   for-loop (reg = reg->next) stopped after the first region and
	   dropped the rest of the chain.  Save the next pointer first. */
	while (reg) {
		struct region *next = reg->next;
		region_list_append(&alloc->oob_list, reg);
		reg = next;
	}

	return 0;
}
656 
657 /* Returns an ext4_inode structure for an inode number */
get_inode(u32 inode)658 struct ext4_inode *get_inode(u32 inode)
659 {
660 	inode -= 1;
661 	int bg = inode / info.inodes_per_group;
662 	inode %= info.inodes_per_group;
663 
664 	allocate_bg_inode_table(&aux_info.bgs[bg]);
665 	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
666 		info.inode_size);
667 }
668 
get_xattr_block_for_inode(struct ext4_inode * inode)669 struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
670 {
671 	struct ext4_xattr_header *block = xattr_list_find(inode);
672 	if (block != NULL)
673 		return block;
674 
675 	u32 block_num = allocate_block();
676 	block = calloc(info.block_size, 1);
677 	if (block == NULL) {
678 		error("get_xattr: failed to allocate %d", info.block_size);
679 		return NULL;
680 	}
681 
682 	block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
683 	block->h_refcount = cpu_to_le32(1);
684 	block->h_blocks = cpu_to_le32(1);
685 	inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
686 	inode->i_file_acl_lo = cpu_to_le32(block_num);
687 
688 	int result = sparse_file_add_data(ext4_sparse_file, block, info.block_size, block_num);
689 	if (result != 0) {
690 		error("get_xattr: sparse_file_add_data failure %d", result);
691 		free(block);
692 		return NULL;
693 	}
694 	xattr_list_insert(inode, block);
695 	return block;
696 }
697 
698 /* Mark the first len inodes in a block group as used */
reserve_inodes(int bg,u32 num)699 u32 reserve_inodes(int bg, u32 num)
700 {
701 	unsigned int i;
702 	u32 inode;
703 
704 	if (get_free_inodes(bg) < num)
705 		return EXT4_ALLOCATE_FAILED;
706 
707 	for (i = 0; i < num; i++) {
708 		inode = aux_info.bgs[bg].first_free_inode + i - 1;
709 		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
710 	}
711 
712 	inode = aux_info.bgs[bg].first_free_inode;
713 
714 	aux_info.bgs[bg].first_free_inode += num;
715 	aux_info.bgs[bg].free_inodes -= num;
716 
717 	return inode;
718 }
719 
720 /* Returns the first free inode number
721    TODO: Inodes should be allocated in the block group of the data? */
allocate_inode()722 u32 allocate_inode()
723 {
724 	unsigned int bg;
725 	u32 inode;
726 
727 	for (bg = 0; bg < aux_info.groups; bg++) {
728 		inode = reserve_inodes(bg, 1);
729 		if (inode != EXT4_ALLOCATE_FAILED)
730 			return bg * info.inodes_per_group + inode;
731 	}
732 
733 	return EXT4_ALLOCATE_FAILED;
734 }
735 
/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	/* bg is assumed to be a valid group index (< aux_info.groups). */
	return aux_info.bgs[bg].free_inodes;
}
741 
/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	/* Inode numbers are 1-based, hence the - 1 before dividing. */
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}
748 
/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}
754 
/* Returns the flags for a block group */
u16 get_bg_flags(int bg)
{
	/* EXT4_BG_* bits; see init_bg() and allocate_bg_inode_table(). */
	return aux_info.bgs[bg].flags;
}
760 
761 /* Frees the memory used by a linked list of allocation regions */
free_alloc(struct block_allocation * alloc)762 void free_alloc(struct block_allocation *alloc)
763 {
764 	struct region *reg;
765 
766 	reg = alloc->list.first;
767 	while (reg) {
768 		struct region *next = reg->next;
769 		free(reg);
770 		reg = next;
771 	}
772 
773 	reg = alloc->oob_list.first;
774 	while (reg) {
775 		struct region *next = reg->next;
776 		free(reg);
777 		reg = next;
778 	}
779 
780 	free(alloc);
781 }
782