/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ext4_utils.h"
#include "allocate.h"
#include "ext4.h"

#include <sparse/sparse.h>

#include <stdio.h>
#include <stdlib.h>

/* Doubly-linked list of regions with an iteration pointer.  partial_iter
   counts how many blocks of the region at iter have already been consumed. */
struct region_list {
	struct region *first;
	struct region *last;
	struct region *iter;
	u32 partial_iter;
};

/* An allocation: the main list of data regions plus an out-of-band list
   reserved for metadata (indirect or extent blocks). */
struct block_allocation {
	struct region_list list;
	struct region_list oob_list;
};

/* A contiguous run of len blocks starting at block, in block group bg */
struct region {
	u32 block;
	u32 len;
	int bg;
	struct region *next;
	struct region *prev;
};

/* Per-block-group bookkeeping */
struct block_group_info {
	u32 first_block;
	int header_blocks;
	int data_blocks_used;
	int has_superblock;
	u8 *bitmaps;
	u8 *block_bitmap;
	u8 *inode_bitmap;
	u8 *inode_table;
	u32 free_blocks;
	u32 first_free_block;
	u32 free_inodes;
	u32 first_free_inode;
	u16 flags;
	u16 used_dirs;
};

/* Maps an inode to the xattr block allocated for it */
struct xattr_list_element {
	struct ext4_inode *inode;
	struct ext4_xattr_header *header;
	struct xattr_list_element *next;
};

struct block_allocation *create_allocation()
{
	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
	if (alloc == NULL)
		critical_error_errno("malloc");
	alloc->list.first = NULL;
	alloc->list.last = NULL;
	alloc->oob_list.first = NULL;
	alloc->oob_list.last = NULL;
	alloc->list.iter = NULL;
	alloc->list.partial_iter = 0;
	alloc->oob_list.iter = NULL;
	alloc->oob_list.partial_iter = 0;
	return alloc;
}

static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
{
	struct xattr_list_element *element;
	for (element = aux_info.xattrs; element != NULL; element = element->next) {
		if (element->inode == inode)
			return element->header;
	}
	return NULL;
}

static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
{
	struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
	if (element == NULL)
		critical_error_errno("malloc");
	element->inode = inode;
	element->header = header;
	element->next = aux_info.xattrs;
	aux_info.xattrs = element;
}

static void region_list_remove(struct region_list *list, struct region *reg)
{
	if (reg->prev)
		reg->prev->next = reg->next;

	if (reg->next)
		reg->next->prev = reg->prev;

	if (list->first == reg)
		list->first = reg->next;

	if (list->last == reg)
		list->last = reg->prev;

	reg->next = NULL;
	reg->prev = NULL;
}

static void region_list_append(struct region_list *list, struct region *reg)
{
	if (list->first == NULL) {
		list->first = reg;
		list->last = reg;
		list->iter = reg;
		list->partial_iter = 0;
		reg->prev = NULL;
	} else {
		list->last->next = reg;
		reg->prev = list->last;
		list->last = reg;
	}
	reg->next = NULL;
}

#if 0
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %d-%d (%d)\n", reg,
				reg->bg * info.blocks_per_group + reg->block,
				reg->bg * info.blocks_per_group + reg->block + reg->len - 1,
				reg->len);
	}
}

static void dump_region_lists(struct block_allocation *alloc) {

	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif

void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}

static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	/* Skip the block bitmap and inode bitmap */
	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	sparse_file_add_data(info.sparse_file, bg->inode_table,
			aux_info.inode_table_blocks * info.block_size, block);

	bg->flags &= ~EXT4_BG_INODE_UNINIT;
}

/* Sets bit number bit in the bitmap.  Returns nonzero if it was already set. */
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	if (bitmap[bit / 8] & 1 << (bit % 8))
		return 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return 0;
}

/* Sets the whole byte containing bit number bit.  Returns the previous byte
   value, which is nonzero if any of the eight bits was already set. */
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	int ret = bitmap[bit / 8];
	bitmap[bit / 8] = 0xFF;
	return ret;
}

/* Marks num blocks starting at start in a block group as used, and accounts
   for them in the block group's free block info. */
static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	if (num > bg->free_blocks)
		return -1;

	/* Set bits one at a time until block is aligned to a byte boundary */
	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	/* Set whole bytes at a time */
	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	/* Set the remaining bits one at a time */
	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	bg->free_blocks -= num;
	if (start == bg->first_free_block)
		bg->first_free_block = start + num;

	return 0;
}
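
/* A worked sketch of the three loops above, fenced off from the build like
 * the dump helpers elsewhere in this file; the function name and the
 * start/num values are invented for the example.  With start = 6 and
 * num = 18 the loops split the run as:
 *
 *   blocks  6-7    leading loop, single bits until block % 8 == 0
 *   blocks  8-15   middle loop, one whole byte via bitmap_set_8_bits()
 *   blocks 16-23   trailing loop, single bits again; the conservative bound
 *                  (num & ~7) leaves this full byte to the bit-at-a-time
 *                  loop because the start was misaligned
 */
#if 0
static void reserve_blocks_example(void)
{
	u8 bitmap[4] = {0};
	u32 num = 18;
	u32 block = 6;
	unsigned int i;

	for (i = 0; i < num && block % 8 != 0; i++, block++)
		bitmap_set_bit(bitmap, block);
	for (; i + 8 <= (num & ~7); i += 8, block += 8)
		bitmap_set_8_bits(bitmap, block);
	for (; i < num; i++, block++)
		bitmap_set_bit(bitmap, block);

	/* bitmap is now { 0xC0, 0xFF, 0xFF, 0x00 }: bits 6..23 are set */
	printf("%02x %02x %02x %02x\n",
			bitmap[0], bitmap[1], bitmap[2], bitmap[3]);
}
#endif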

/* Returns the num_blocks blocks immediately below the block group's
   first free block to its free pool */
static void free_blocks(struct block_group_info *bg, u32 num_blocks)
{
	unsigned int i;
	u32 block = bg->first_free_block - 1;
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	bg->first_free_block -= num_blocks;
}

/* Reduces an existing allocation by len blocks by returning the last blocks
   to the free pool in their block group.  Assumes that the blocks being
   returned were the last ones allocated out of the block group */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;

		if (last_reg->len > len) {
			free_blocks(&aux_info.bgs[last_reg->bg], len);
			last_reg->len -= len;
			len = 0;
		} else {
			struct region *reg = alloc->list.last->prev;
			free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				alloc->list.last = reg;
			} else {
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}
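
/* Illustrative note (the region lengths are invented): shrinking an
 * allocation of [ len 8 ] -> [ len 6 ] by 10 blocks.  The whole 6-block tail
 * region is freed and unlinked, which is why the list's last pointer must be
 * pulled back above, then 4 more blocks come off the new tail, leaving
 * [ len 4 ].
 */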

static void init_bg(struct block_group_info *bg, unsigned int i)
{
	int header_blocks = 2 + aux_info.inode_table_blocks;

	bg->has_superblock = ext4_bg_has_super_block(i);

	if (bg->has_superblock)
		header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;

	bg->bitmaps = calloc(info.block_size, 2);
	if (bg->bitmaps == NULL)
		critical_error_errno("calloc");
	bg->block_bitmap = bg->bitmaps;
	bg->inode_bitmap = bg->bitmaps + info.block_size;

	bg->header_blocks = header_blocks;
	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;

	u32 block = bg->first_block;
	if (bg->has_superblock)
		block += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
	sparse_file_add_data(info.sparse_file, bg->bitmaps, 2 * info.block_size,
			block);

	bg->data_blocks_used = 0;
	bg->free_blocks = info.blocks_per_group;
	bg->first_free_block = 0;
	bg->free_inodes = info.inodes_per_group;
	bg->first_free_inode = 1;
	bg->flags = EXT4_BG_INODE_UNINIT;

	if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
		error("failed to reserve %d blocks in block group %u\n", bg->header_blocks, i);

	/* Mark as used any blocks in the last block group that lie past the
	   end of the filesystem */
	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
		u32 overrun = bg->first_block + info.blocks_per_group
				- aux_info.len_blocks;
		reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
	}
}
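
/* Layout that init_bg() lays down for a block group, in block offsets from
 * first_block (a descriptive note, not in the original source).  The first
 * three entries exist only in groups where ext4_bg_has_super_block() is true:
 *
 *   +0          superblock copy
 *   +1          block group descriptor table (bg_desc_blocks)
 *   ...         reserved GDT blocks (bg_desc_reserve_blocks)
 *   next        block bitmap
 *   next + 1    inode bitmap
 *   next + 2    inode table (inode_table_blocks, written lazily)
 *   remainder   data blocks
 */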

void block_allocator_init()
{
	unsigned int i;

	aux_info.bgs = calloc(aux_info.groups, sizeof(struct block_group_info));
	if (aux_info.bgs == NULL)
		critical_error_errno("calloc");

	for (i = 0; i < aux_info.groups; i++)
		init_bg(&aux_info.bgs[i], i);
}

void block_allocator_free()
{
	unsigned int i;

	for (i = 0; i < aux_info.groups; i++) {
		free(aux_info.bgs[i].bitmaps);
		free(aux_info.bgs[i].inode_table);
	}
	free(aux_info.bgs);
}

/* Allocates a run of len contiguous blocks from the given block group.
   Returns the first block number, or EXT4_ALLOCATE_FAILED */
static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
{
	if (get_free_blocks(bg_num) < len)
		return EXT4_ALLOCATE_FAILED;

	struct block_group_info *bg = &aux_info.bgs[bg_num];
	u32 block = bg->first_free_block;
	if (reserve_blocks(bg, block, len) < 0) {
		error("failed to reserve %u blocks in block group %d\n", len, bg_num);
		return EXT4_ALLOCATE_FAILED;
	}

	bg->data_blocks_used += len;

	return bg->first_block + block;
}

static struct region *ext4_allocate_contiguous_blocks(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(len, i);

		if (block != EXT4_ALLOCATE_FAILED) {
			reg = malloc(sizeof(struct region));
			if (reg == NULL)
				critical_error_errno("malloc");
			reg->block = block;
			reg->len = len;
			reg->next = NULL;
			reg->prev = NULL;
			reg->bg = i;
			return reg;
		}
	}

	return NULL;
}

/* Allocate a single block and return its block number */
u32 allocate_block()
{
	unsigned int i;
	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(1, i);

		if (block != EXT4_ALLOCATE_FAILED)
			return block;
	}

	return EXT4_ALLOCATE_FAILED;
}

static struct region *ext4_allocate_partial(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		if (aux_info.bgs[i].data_blocks_used == 0) {
			u32 bg_len = aux_info.bgs[i].free_blocks;
			u32 block;

			if (len <= bg_len) {
				/* If the remainder of the request fits in a single
				   block group, fall back to the regular allocator,
				   which may place it in a partially used group */
				reg = ext4_allocate_contiguous_blocks(len);
			} else {
				/* Otherwise swallow this empty block group whole */
				block = ext4_allocate_blocks_from_block_group(bg_len, i);

				if (block == EXT4_ALLOCATE_FAILED) {
					error("failed to allocate %u blocks in block group %u", bg_len, i);
					return NULL;
				}

				reg = malloc(sizeof(struct region));
				if (reg == NULL)
					critical_error_errno("malloc");
				reg->block = block;
				reg->len = bg_len;
				reg->next = NULL;
				reg->prev = NULL;
				reg->bg = i;
			}

			return reg;
		}
	}
	return NULL;
}

static struct region *ext4_allocate_multiple_contiguous_blocks(u32 len)
{
	struct region *first_reg = NULL;
	struct region *prev_reg = NULL;
	struct region *reg;

	while (len > 0) {
		reg = ext4_allocate_partial(len);
		if (reg == NULL)
			return NULL;

		if (first_reg == NULL)
			first_reg = reg;

		if (prev_reg) {
			prev_reg->next = reg;
			reg->prev = prev_reg;
		}

		prev_reg = reg;
		len -= reg->len;
	}

	return first_reg;
}

struct region *do_allocate(u32 len)
{
	struct region *reg = ext4_allocate_contiguous_blocks(len);

	if (reg == NULL)
		reg = ext4_allocate_multiple_contiguous_blocks(len);

	return reg;
}

/* Allocate len blocks.  The blocks may be spread across multiple block groups,
   and are returned in a linked list of the regions in each block group.  The
   allocation algorithm is:
      1.  Try to allocate the entire length contiguously, trying each block
          group in turn
      2.  If the request doesn't fit in any block group, allocate as many
          blocks as possible into each block group that is completely empty
      3.  Put the last set of blocks in the first block group they fit in
*/
struct block_allocation *allocate_blocks(u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL)
		return NULL;

	struct block_allocation *alloc = create_allocation();
	alloc->list.first = reg;
	while (reg->next != NULL)
		reg = reg->next;
	alloc->list.last = reg;
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}
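
/* Illustrative sketch, fenced off from the build; the length and the printf
 * are invented for the example.  It allocates a run of blocks and walks the
 * resulting regions with the iterator API defined below. */
#if 0
static void allocate_blocks_example(void)
{
	struct block_allocation *alloc = allocate_blocks(1000);
	u32 block, len;

	if (alloc == NULL)
		return;

	for (rewind_alloc(alloc); !last_region(alloc); get_next_region(alloc)) {
		get_region(alloc, &block, &len);
		printf("region: %u blocks starting at block %u\n", len, block);
	}

	free_alloc(alloc);
}
#endif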

/* Returns the number of discontiguous regions used by an allocation */
int block_allocation_num_regions(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i++;

	return i;
}

/* Returns the total length in blocks of an allocation */
int block_allocation_len(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i += reg->len;

	return i;
}

/* Returns the block number of the block'th block in an allocation, counting
   from the current iteration pointer */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->list.iter;
	block += alloc->list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}
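
/* Illustrative note (numbers invented): partial_iter records how far into
 * the region at iter the pointer has advanced, so get_block(alloc, 0) always
 * names the next unconsumed block.  If the first region is 10 blocks
 * starting at block 100:
 *
 *   get_block(alloc, 0)       -> 100
 *   advance_blocks(alloc, 4)  -> iter stays put, partial_iter becomes 4
 *   get_block(alloc, 0)       -> 104
 */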

u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->oob_list.iter;
	block += alloc->oob_list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Gets the starting block and remaining length in blocks of the region at
   an allocation's iteration pointer */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}

/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}

/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}

/* Returns true once the iteration pointer has walked past the last region */
int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}

void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}

static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		if (new->next)
			new->next->prev = new;
		else
			alloc->list.last = new;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		return reg;
	}
}
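
/* Illustrative note (block numbers invented): do_split_allocation() walks
 * len blocks forward from the iteration pointer and, if the boundary lands
 * inside a region, truncates it and chains a new region holding the tail.
 * Splitting a 10-block region [ 100-109 ] at len = 4:
 *
 *   before:  [ 100-109, len 10 ]                       iter -> that region
 *   after:   [ 100-103, len 4 ] -> [ 104-109, len 6 ]  iter -> the tail
 *
 * The return value is the region where iteration started, i.e. the head of
 * the first half.
 */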

/* Splits an allocation into two halves at len blocks past the current
   iteration pointer.  Returns the head region of the first half; the
   allocation's iteration pointer is left at the start of the second half. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}

/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}
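
/* Illustrative note (block numbers invented): reserving 2 oob blocks out of
 * a fresh 10-block allocation [ 100-109 ] splits the run at the iteration
 * pointer and migrates the first half to the oob list:
 *
 *   main list:  [ 102-109 ]      (iter here)
 *   oob list:   [ 100-101 ]
 */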

static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
		/* Keep the iteration pointer in sync when crossing a region */
		list->iter = reg;
	}

	if (blocks > 0)
		return -1;

	return 0;
}

/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}

int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}

/* Allocates len blocks and appends them to the allocation's oob list */
int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = do_allocate(len);
	struct region *next;

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	/* region_list_append() clears reg->next, so save it first */
	for (; reg; reg = next) {
		next = reg->next;
		region_list_append(&alloc->oob_list, reg);
	}

	return 0;
}

/* Returns an ext4_inode structure for an inode number */
struct ext4_inode *get_inode(u32 inode)
{
	inode -= 1;
	int bg = inode / info.inodes_per_group;
	inode %= info.inodes_per_group;

	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
			info.inode_size);
}
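
/* Illustrative note (numbers invented): inode numbers are 1-based, so with
 * inodes_per_group = 8192, inode 8194 resolves to:
 *
 *   (8194 - 1) / 8192 = 1   block group
 *   (8194 - 1) % 8192 = 1   index into that group's inode table
 */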

struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
{
	struct ext4_xattr_header *block = xattr_list_find(inode);
	if (block != NULL)
		return block;

	u32 block_num = allocate_block();
	if (block_num == EXT4_ALLOCATE_FAILED) {
		error("get_xattr: failed to allocate a block");
		return NULL;
	}

	block = calloc(info.block_size, 1);
	if (block == NULL) {
		error("get_xattr: failed to allocate %d", info.block_size);
		return NULL;
	}

	block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
	block->h_refcount = cpu_to_le32(1);
	block->h_blocks = cpu_to_le32(1);
	/* i_blocks counts 512-byte sectors, not filesystem blocks */
	inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
	inode->i_file_acl_lo = cpu_to_le32(block_num);

	int result = sparse_file_add_data(info.sparse_file, block, info.block_size, block_num);
	if (result != 0) {
		error("get_xattr: sparse_file_add_data failure %d", result);
		free(block);
		return NULL;
	}
	xattr_list_insert(inode, block);
	return block;
}

/* Marks num inodes starting at the block group's first free inode as used,
   and returns the group-relative (1-based) number of the first one */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		/* Inode numbers are 1-based; bitmap bits are 0-based */
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}

/* Returns the first free inode number
   TODO: Inodes should be allocated in the block group of the data? */
u32 allocate_inode()
{
	unsigned int bg;
	u32 inode;

	for (bg = 0; bg < aux_info.groups; bg++) {
		inode = reserve_inodes(bg, 1);
		if (inode != EXT4_ALLOCATE_FAILED)
			return bg * info.inodes_per_group + inode;
	}

	return EXT4_ALLOCATE_FAILED;
}
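
/* Illustrative note: reserve_inodes() returns a 1-based index within the
 * group, so the global inode number is bg * inodes_per_group + index; block
 * group 0 therefore hands out inodes 1, 2, 3, ... as ext4 expects. */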

/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}

/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}

/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}

/* Returns the flags for a block group */
u16 get_bg_flags(int bg)
{
	return aux_info.bgs[bg].flags;
}

/* Frees the memory used by a linked list of allocation regions */
void free_alloc(struct block_allocation *alloc)
{
	struct region *reg;

	reg = alloc->list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	reg = alloc->oob_list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	free(alloc);
}