/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ext4_utils.h"
#include "allocate.h"

#include <sparse/sparse.h>

#include <stdio.h>
#include <stdlib.h>

/* A doubly-linked list of block regions, with an iterator for walking it */
struct region_list {
	struct region *first;
	struct region *last;
	struct region *iter;
	u32 partial_iter;
};

/* The blocks backing a file: a main list plus an out-of-band (metadata) list */
struct block_allocation {
	struct region_list list;
	struct region_list oob_list;
};

/* A contiguous run of len blocks starting at block, within block group bg */
struct region {
	u32 block;
	u32 len;
	int bg;
	struct region *next;
	struct region *prev;
};

/* Per-block-group bookkeeping while the filesystem image is being built */
struct block_group_info {
	u32 first_block;
	int header_blocks;
	int data_blocks_used;
	int has_superblock;
	u8 *bitmaps;
	u8 *block_bitmap;
	u8 *inode_bitmap;
	u8 *inode_table;
	u32 free_blocks;
	u32 first_free_block;
	u32 free_inodes;
	u32 first_free_inode;
	u16 flags;
	u16 used_dirs;
};

struct xattr_list_element {
	struct ext4_inode *inode;
	struct ext4_xattr_header *header;
	struct xattr_list_element *next;
};

struct block_allocation *create_allocation()
{
	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
	if (alloc == NULL)
		critical_error_errno("malloc");
	alloc->list.first = NULL;
	alloc->list.last = NULL;
	alloc->oob_list.first = NULL;
	alloc->oob_list.last = NULL;
	alloc->list.iter = NULL;
	alloc->list.partial_iter = 0;
	alloc->oob_list.iter = NULL;
	alloc->oob_list.partial_iter = 0;
	return alloc;
}

static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
{
	struct xattr_list_element *element;
	for (element = aux_info.xattrs; element != NULL; element = element->next) {
		if (element->inode == inode)
			return element->header;
	}
	return NULL;
}

static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
{
	struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
	if (element == NULL)
		critical_error_errno("malloc");
	element->inode = inode;
	element->header = header;
	element->next = aux_info.xattrs;
	aux_info.xattrs = element;
}

static void region_list_remove(struct region_list *list, struct region *reg)
{
	if (reg->prev)
		reg->prev->next = reg->next;

	if (reg->next)
		reg->next->prev = reg->prev;

	if (list->first == reg)
		list->first = reg->next;

	if (list->last == reg)
		list->last = reg->prev;

	reg->next = NULL;
	reg->prev = NULL;
}

static void region_list_append(struct region_list *list, struct region *reg)
{
	if (list->first == NULL) {
		list->first = reg;
		list->last = reg;
		list->iter = reg;
		list->partial_iter = 0;
		reg->prev = NULL;
	} else {
		list->last->next = reg;
		reg->prev = list->last;
		list->last = reg;
	}
	reg->next = NULL;
}

#if 0
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %d-%d (%d)\n", reg,
				reg->bg * info.blocks_per_group + reg->block,
				reg->bg * info.blocks_per_group + reg->block + reg->len - 1,
				reg->len);
	}
}

static void dump_region_lists(struct block_allocation *alloc) {

	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif

void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}

static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	sparse_file_add_data(ext4_sparse_file, bg->inode_table,
			aux_info.inode_table_blocks * info.block_size, block);

	bg->flags &= ~EXT4_BG_INODE_UNINIT;
}

/* Sets a single bit in a bitmap; returns nonzero if it was already set */
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	if (bitmap[bit / 8] & 1 << (bit % 8))
		return 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return 0;
}

/* Sets all 8 bits of the byte containing bit (bit must be byte-aligned);
   returns nonzero if any of them were already set */
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	int ret = bitmap[bit / 8];
	bitmap[bit / 8] = 0xFF;
	return ret;
}
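
/* Worked example of the bitmap math above (comment only): bit 21 lives in
   byte 21 / 8 = 2 at position 21 % 8 = 5, so bitmap_set_bit(bitmap, 21)
   ORs bitmap[2] with 1 << 5 (0x20), while bitmap_set_8_bits(bitmap, 16)
   stamps bitmap[2] = 0xFF, covering bits 16-23 at once. */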

/* Marks num blocks, starting at block number start within a block group, as
   used, and accounts for them in the block group's free block counts. */
static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	if (num > bg->free_blocks)
		return -1;

	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	bg->free_blocks -= num;
	if (start == bg->first_free_block)
		bg->first_free_block = start + num;

	return 0;
}
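
/* The three loops in reserve_blocks form a head/body/tail walk over the
   bitmap. For example (illustrative only), start = 5 and num = 20 reserve
   blocks 5-24: the first loop sets bits 5-7 one at a time until the block
   number is byte-aligned, the second stamps a whole byte for bits 8-15,
   and the third sets the remaining bits 16-24 individually. */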

/* Returns the last num_blocks blocks reserved from a block group to its
   free pool */
static void free_blocks(struct block_group_info *bg, u32 num_blocks)
{
	unsigned int i;
	u32 block = bg->first_free_block - 1;
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	bg->first_free_block -= num_blocks;
}

/* Reduces an existing allocation by len blocks by returning the last blocks
   to the free pool in their block group. Assumes that the blocks being
   returned were the last ones allocated out of the block group */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;

		if (last_reg->len > len) {
			free_blocks(&aux_info.bgs[last_reg->bg], len);
			last_reg->len -= len;
			len = 0;
		} else {
			struct region *reg = alloc->list.last->prev;
			free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				alloc->list.last = reg;
			} else {
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}
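
/* Worked example (illustrative only): given an allocation holding the
   regions [block 100, len 3] -> [block 200, len 8], reduce_allocation(alloc,
   10) frees all 8 blocks of the last region, then 2 more from the first,
   leaving a single region [block 100, len 1]. */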

static void init_bg(struct block_group_info *bg, unsigned int i)
{
	int header_blocks = 2 + aux_info.inode_table_blocks;

	bg->has_superblock = ext4_bg_has_super_block(i);

	if (bg->has_superblock)
		header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;

	bg->bitmaps = calloc(info.block_size, 2);
	if (bg->bitmaps == NULL)
		critical_error_errno("calloc");
	bg->block_bitmap = bg->bitmaps;
	bg->inode_bitmap = bg->bitmaps + info.block_size;

	bg->header_blocks = header_blocks;
	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;

	u32 block = bg->first_block;
	if (bg->has_superblock)
		block += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
	sparse_file_add_data(ext4_sparse_file, bg->bitmaps, 2 * info.block_size,
			block);

	bg->data_blocks_used = 0;
	bg->free_blocks = info.blocks_per_group;
	bg->first_free_block = 0;
	bg->free_inodes = info.inodes_per_group;
	bg->first_free_inode = 1;
	bg->flags = EXT4_BG_INODE_UNINIT;

	if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
		error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);

	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
		reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
	}
}
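
/* The on-disk layout of a block group implied by init_bg and
   allocate_bg_inode_table (groups without a superblock copy skip the
   first three entries):

     | superblock | bg descriptors | reserved GDT blocks |
     | block bitmap | inode bitmap | inode table | data blocks ... |
*/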

void block_allocator_init()
{
	unsigned int i;

	aux_info.bgs = calloc(aux_info.groups, sizeof(struct block_group_info));
	if (aux_info.bgs == NULL)
		critical_error_errno("calloc");

	for (i = 0; i < aux_info.groups; i++)
		init_bg(&aux_info.bgs[i], i);
}

void block_allocator_free()
{
	unsigned int i;

	for (i = 0; i < aux_info.groups; i++) {
		free(aux_info.bgs[i].bitmaps);
		free(aux_info.bgs[i].inode_table);
	}
	free(aux_info.bgs);
}

static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
{
	if (get_free_blocks(bg_num) < len)
		return EXT4_ALLOCATE_FAILED;

	u32 block = aux_info.bgs[bg_num].first_free_block;
	struct block_group_info *bg = &aux_info.bgs[bg_num];
	if (reserve_blocks(bg, bg->first_free_block, len) < 0) {
		error("failed to reserve %u blocks in block group %u\n", len, bg_num);
		return EXT4_ALLOCATE_FAILED;
	}

	aux_info.bgs[bg_num].data_blocks_used += len;

	return bg->first_block + block;
}

/* Allocate a single block and return its block number */
u32 allocate_block()
{
	unsigned int i;
	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(1, i);

		if (block != EXT4_ALLOCATE_FAILED)
			return block;
	}

	return EXT4_ALLOCATE_FAILED;
}

static struct region *ext4_allocate_best_fit_partial(u32 len)
{
	unsigned int i;
	unsigned int found_bg = 0;
	u32 found_bg_len = 0;

	for (i = 0; i < aux_info.groups; i++) {
		u32 bg_len = aux_info.bgs[i].free_blocks;

		/* Prefer the smallest group that can hold all of len; failing
		   that, track the group with the most free blocks */
		if ((len <= bg_len && (found_bg_len == 0 || bg_len < found_bg_len)) ||
		    (len > found_bg_len && bg_len > found_bg_len)) {
			found_bg = i;
			found_bg_len = bg_len;
		}
	}

	if (found_bg_len) {
		u32 allocate_len = min(len, found_bg_len);
		struct region *reg;
		u32 block = ext4_allocate_blocks_from_block_group(allocate_len, found_bg);
		if (block == EXT4_ALLOCATE_FAILED) {
			error("failed to allocate %u blocks in block group %u", allocate_len, found_bg);
			return NULL;
		}
		reg = malloc(sizeof(struct region));
		if (reg == NULL)
			critical_error_errno("malloc");
		reg->block = block;
		reg->len = allocate_len;
		reg->next = NULL;
		reg->prev = NULL;
		reg->bg = found_bg;
		return reg;
	} else {
		error("failed to allocate %u blocks, out of space?", len);
	}

	return NULL;
}

static struct region *ext4_allocate_best_fit(u32 len)
{
	struct region *first_reg = NULL;
	struct region *prev_reg = NULL;
	struct region *reg;

	while (len > 0) {
		reg = ext4_allocate_best_fit_partial(len);
		if (reg == NULL)
			return NULL;

		if (first_reg == NULL)
			first_reg = reg;

		if (prev_reg) {
			prev_reg->next = reg;
			reg->prev = prev_reg;
		}

		prev_reg = reg;
		len -= reg->len;
	}

	return first_reg;
}

/* Allocate len blocks.  The blocks may be spread across multiple block groups,
   and are returned in a linked list of the blocks in each block group.  The
   allocation algorithm is:
      1.  If the remaining allocation is larger than any available contiguous region,
          allocate the largest contiguous region and loop
      2.  Otherwise, allocate the smallest contiguous region that fits it
*/
struct block_allocation *allocate_blocks(u32 len)
{
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL)
		return NULL;

	struct block_allocation *alloc = create_allocation();
	alloc->list.first = reg;
	/* reg may be a chain of regions; last must point at its tail */
	while (reg->next != NULL)
		reg = reg->next;
	alloc->list.last = reg;
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}
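
#if 0
/* Usage sketch (not compiled, in the spirit of the #if 0 dump helpers
   above): allocate the blocks for a 1 MiB file and walk the resulting
   regions with the iterator API. The file size is an assumption made
   for the example. */
static void example_allocate_and_walk(void)
{
	u32 len = 1024 * 1024 / info.block_size;
	struct block_allocation *alloc = allocate_blocks(len);
	if (alloc == NULL)
		return;

	for (rewind_alloc(alloc); !last_region(alloc); get_next_region(alloc)) {
		u32 block, region_len;
		/* Each region is one contiguous run of blocks */
		get_region(alloc, &block, &region_len);
		printf("region: start block %u, %u blocks\n", block, region_len);
	}

	free_alloc(alloc);
}
#endif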

/* Returns the number of discontiguous regions used by an allocation */
int block_allocation_num_regions(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i++;

	return i;
}

/* Returns the total length in blocks of an allocation */
int block_allocation_len(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i += reg->len;

	return i;
}

/* Returns the block number of the block'th block in an allocation, counting
   from the current iterator position */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->list.iter;
	block += alloc->list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->oob_list.iter;
	block += alloc->oob_list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Gets the starting block of the current region of an allocation, and the
   number of blocks remaining in it past the iterator position */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}

/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}

/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}

/* Returns nonzero once the iterator has moved past the last region */
int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}

/* Resets the iterator to the first region of an allocation */
void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}

static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		return reg;
	}
}

/* Splits the blocks at the allocation's iterator into a len-block chunk;
   returns the first region of that chunk and leaves the iterator pointing
   just past it. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}
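
/* Worked example (illustrative only): with the iterator at the start of a
   single region [block 100, len 10], split_allocation(alloc, 4) cuts it
   into [100, len 4] -> [104, len 6], returns the [100, len 4] region, and
   leaves the iterator on [104, len 6]. */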

/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}
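
/* Example flow (an assumption about how a caller might use this, not a
   quote of one): to set aside one block of extent metadata ahead of the
   data, a caller could do
       advance_blocks(alloc, data_blocks_written);
       reserve_oob_blocks(alloc, 1);
   which carves one block out of the main list at the iterator position
   and moves it to the oob list, where get_oob_block() will find it. */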

static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			list->iter = reg;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
	}

	if (blocks > 0)
		return -1;

	/* Keep the list's iterator in sync with how far we walked */
	list->iter = reg;
	return 0;
}

/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}

int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}

int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	while (reg) {
		/* region_list_append clears reg->next, so save it first */
		struct region *next = reg->next;
		region_list_append(&alloc->oob_list, reg);
		reg = next;
	}

	return 0;
}

/* Returns an ext4_inode structure for an inode number */
struct ext4_inode *get_inode(u32 inode)
{
	inode -= 1;
	int bg = inode / info.inodes_per_group;
	inode %= info.inodes_per_group;

	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
		info.inode_size);
}
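
/* Worked example of the index math above (illustrative only): with
   inodes_per_group = 8192, inode number 8195 maps to block group
   (8195 - 1) / 8192 = 1, entry (8195 - 1) % 8192 = 2 of that group's
   inode table. Inode numbers are 1-based; table entries are 0-based. */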

struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
{
	struct ext4_xattr_header *block = xattr_list_find(inode);
	if (block != NULL)
		return block;

	u32 block_num = allocate_block();
	if (block_num == EXT4_ALLOCATE_FAILED) {
		error("get_xattr: failed to allocate a block");
		return NULL;
	}
	block = calloc(info.block_size, 1);
	if (block == NULL) {
		error("get_xattr: failed to allocate %d", info.block_size);
		return NULL;
	}

	block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
	block->h_refcount = cpu_to_le32(1);
	block->h_blocks = cpu_to_le32(1);
	/* i_blocks is counted in 512-byte sectors */
	inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
	inode->i_file_acl_lo = cpu_to_le32(block_num);

	int result = sparse_file_add_data(ext4_sparse_file, block, info.block_size, block_num);
	if (result != 0) {
		error("get_xattr: sparse_file_add_data failure %d", result);
		free(block);
		return NULL;
	}
	xattr_list_insert(inode, block);
	return block;
}

/* Marks the next num free inodes in a block group as used and returns the
   number (1-based, within the group) of the first one */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}

/* Returns the first free inode number
   TODO: Inodes should be allocated in the block group of the data? */
u32 allocate_inode()
{
	unsigned int bg;
	u32 inode;

	for (bg = 0; bg < aux_info.groups; bg++) {
		inode = reserve_inodes(bg, 1);
		if (inode != EXT4_ALLOCATE_FAILED)
			return bg * info.inodes_per_group + inode;
	}

	return EXT4_ALLOCATE_FAILED;
}

/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}

/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}

/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}

/* Returns the flags for a block group */
u16 get_bg_flags(int bg)
{
	return aux_info.bgs[bg].flags;
}

/* Frees the memory used by a linked list of allocation regions */
void free_alloc(struct block_allocation *alloc)
{
	struct region *reg;

	reg = alloc->list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	reg = alloc->oob_list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	free(alloc);
}