/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocate.h"

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#include <sparse/sparse.h>

#include "ext4_utils/ext4_utils.h"

struct xattr_list_element {
	struct ext4_inode *inode;
	struct ext4_xattr_header *header;
	struct xattr_list_element *next;
};

struct block_allocation *create_allocation()
{
	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
	if (alloc == NULL)
		critical_error_errno("malloc");
	alloc->list.first = NULL;
	alloc->list.last = NULL;
	alloc->oob_list.first = NULL;
	alloc->oob_list.last = NULL;
	alloc->list.iter = NULL;
	alloc->list.partial_iter = 0;
	alloc->oob_list.iter = NULL;
	alloc->oob_list.partial_iter = 0;
	alloc->filename = NULL;
	alloc->next = NULL;
	return alloc;
}

static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
{
	struct xattr_list_element *element;
	for (element = aux_info.xattrs; element != NULL; element = element->next) {
		if (element->inode == inode)
			return element->header;
	}
	return NULL;
}

static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
{
	struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
	if (element == NULL)
		critical_error_errno("malloc");
	element->inode = inode;
	element->header = header;
	element->next = aux_info.xattrs;
	aux_info.xattrs = element;
}

static void region_list_remove(struct region_list *list, struct region *reg)
{
	if (reg->prev)
		reg->prev->next = reg->next;

	if (reg->next)
		reg->next->prev = reg->prev;

	if (list->first == reg)
		list->first = reg->next;

	if (list->last == reg)
		list->last = reg->prev;

	reg->next = NULL;
	reg->prev = NULL;
}

void region_list_append(struct region_list *list, struct region *reg)
{
	if (list->first == NULL) {
		list->first = reg;
		list->last = reg;
		list->iter = reg;
		list->partial_iter = 0;
		reg->prev = NULL;
	} else {
		list->last->next = reg;
		reg->prev = list->last;
		list->last = reg;
	}
	reg->next = NULL;
}

void region_list_merge(struct region_list *list1, struct region_list *list2)
{
	if (list1->first == NULL) {
		list1->first = list2->first;
		list1->last = list2->last;
		list1->iter = list2->first;
		list1->partial_iter = 0;
		list1->first->prev = NULL;
	} else {
		list1->last->next = list2->first;
		list2->first->prev = list1->last;
		list1->last = list2->last;
	}
}

#if 0
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %d-%d (%d)\n", reg,
				reg->block, reg->block + reg->len - 1, reg->len);
	}
}

static void dump_region_lists(struct block_allocation *alloc)
{
	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif

void print_blocks(FILE* f, struct block_allocation *alloc, char separator)
{
	struct region *reg;
	fputc(' ', f);
	for (reg = alloc->list.first; reg; reg = reg->next) {
		if (reg->len == 1) {
			fprintf(f, "%d", reg->block);
		} else {
			fprintf(f, "%d-%d", reg->block, reg->block + reg->len - 1);
		}
		fputc(separator, f);
	}
	fputc('\n', f);
}

void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}

static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	sparse_file_add_data(ext4_sparse_file, bg->inode_table,
			aux_info.inode_table_blocks * info.block_size, block);

	bg->flags &= ~EXT4_BG_INODE_UNINIT;
}

/* Sets one bit in a bitmap; returns 1 if it was already set */
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	if (bitmap[bit / 8] & 1 << (bit % 8))
		return 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return 0;
}

/* Sets the whole byte containing bit; returns the byte's previous value */
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	int ret = bitmap[bit / 8];
	bitmap[bit / 8] = 0xFF;
	return ret;
}
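
/* Worked example (illustrative, not from the original source): for bit 11,
 * 11 / 8 == 1 and 11 % 8 == 3, so bitmap_set_bit() ORs mask 1 << 3 == 0x08
 * into bitmap[1] and reports whether it was already set. bitmap_set_8_bits()
 * is the byte-at-a-time fast path used by reserve_blocks() once the position
 * is byte-aligned: it fills bitmap[bit / 8] with 0xFF and returns the byte's
 * previous value, which is nonzero if any of those eight blocks was already
 * reserved. */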

/* Marks the first num blocks in a block group as used, and accounts
   for them in the block group free block info. */
static int reserve_blocks(struct block_group_info *bg, u32 bg_num, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	/* Set bits one at a time until the position is byte-aligned */
	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	/* Set whole bytes at a time */
	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	/* Set any remaining bits one at a time */
	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	bg->free_blocks -= num;

	return 0;
}

static void free_blocks(struct block_group_info *bg, u32 block, u32 num_blocks)
{
	unsigned int i;

	if (num_blocks == 0)
		return;
	/* Clear the bitmap bits, walking backwards from the last block */
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	block++;
	/* Shrink the chunk that covered the freed range, if one exists */
	for (i = bg->chunk_count; i > 0;) {
		--i;
		if (bg->chunks[i].len >= num_blocks && bg->chunks[i].block <= block) {
			if (bg->chunks[i].block == block) {
				bg->chunks[i].block += num_blocks;
				bg->chunks[i].len -= num_blocks;
			} else if (bg->chunks[i].block + bg->chunks[i].len == block + num_blocks) {
				bg->chunks[i].len -= num_blocks;
			}
			break;
		}
	}
}

/* Reduces an existing allocation by len blocks by returning the last blocks
   to the free pool in their block group. Assumes that the blocks being
   returned were the last ones allocated out of the block group */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;
		struct block_group_info *bg = &aux_info.bgs[last_reg->bg];

		if (last_reg->len > len) {
			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, len);
			last_reg->len -= len;
			len = 0;
		} else {
			struct region *reg = alloc->list.last->prev;
			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				/* last_reg is about to be freed; keep the list tail valid */
				alloc->list.last = reg;
			} else {
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}

static void init_bg(struct block_group_info *bg, unsigned int i)
{
	int header_blocks = 2 + aux_info.inode_table_blocks;

	bg->has_superblock = ext4_bg_has_super_block(i);

	if (bg->has_superblock)
		header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;

	bg->bitmaps = calloc(info.block_size, 2);
	bg->block_bitmap = bg->bitmaps;
	bg->inode_bitmap = bg->bitmaps + info.block_size;

	bg->header_blocks = header_blocks;
	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;

	u32 block = bg->first_block;
	if (bg->has_superblock)
		block += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
	sparse_file_add_data(ext4_sparse_file, bg->bitmaps, 2 * info.block_size,
			block);

	bg->data_blocks_used = 0;
	bg->free_blocks = info.blocks_per_group;
	bg->free_inodes = info.inodes_per_group;
	bg->first_free_inode = 1;
	bg->flags = EXT4_BG_INODE_UNINIT;

	bg->chunk_count = 0;
	bg->max_chunk_count = 1;
	bg->chunks = (struct region*) calloc(bg->max_chunk_count, sizeof(struct region));

	if (reserve_blocks(bg, i, 0, bg->header_blocks) < 0)
		error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
	// Add empty starting delimiter chunk
	reserve_bg_chunk(i, bg->header_blocks, 0);

	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
		reserve_blocks(bg, i, info.blocks_per_group - overrun, overrun);
		// Add empty ending delimiter chunk
		reserve_bg_chunk(i, info.blocks_per_group - overrun, 0);
	} else {
		reserve_bg_chunk(i, info.blocks_per_group - 1, 0);
	}
}

void block_allocator_init()
{
	unsigned int i;

	aux_info.bgs = calloc(aux_info.groups, sizeof(struct block_group_info));
	if (aux_info.bgs == NULL)
		critical_error_errno("calloc");

	for (i = 0; i < aux_info.groups; i++)
		init_bg(&aux_info.bgs[i], i);
}

void block_allocator_free()
{
	unsigned int i;

	for (i = 0; i < aux_info.groups; i++) {
		free(aux_info.bgs[i].bitmaps);
		free(aux_info.bgs[i].inode_table);
	}
	free(aux_info.bgs);
}

/* Allocate a single block and return its block number */
u32 allocate_block()
{
	u32 block;
	struct block_allocation *blk_alloc = allocate_blocks(1);
	if (!blk_alloc) {
		return EXT4_ALLOCATE_FAILED;
	}
	block = blk_alloc->list.first->block;
	free_alloc(blk_alloc);
	return block;
}

static struct region *ext4_allocate_best_fit_partial(u32 len)
{
	unsigned int i;
	int j;
	unsigned int found_bg = 0, found_prev_chunk = 0, found_block = 0;
	u32 found_allocate_len = 0;
	bool minimize = false;
	struct block_group_info *bgs = aux_info.bgs;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		for (j = 1; j < bgs[i].chunk_count; j++) {
			u32 hole_start, hole_size;
			hole_start = bgs[i].chunks[j-1].block + bgs[i].chunks[j-1].len;
			hole_size = bgs[i].chunks[j].block - hole_start;
			if (hole_size == len) {
				// Perfect fit, i.e. right between 2 chunks; no need to keep searching
				found_bg = i;
				found_prev_chunk = j - 1;
				found_block = hole_start;
				found_allocate_len = hole_size;
				goto done;
			} else if (hole_size > len && (found_allocate_len == 0 || (found_allocate_len > hole_size))) {
				// Remember the smallest hole that still fits the whole request
				found_bg = i;
				found_prev_chunk = j - 1;
				found_block = hole_start;
				found_allocate_len = hole_size;
				minimize = true;
			} else if (!minimize) {
				// No hole fits the whole request yet; track the largest hole seen
				if (found_allocate_len < hole_size) {
					found_bg = i;
					found_prev_chunk = j - 1;
					found_block = hole_start;
					found_allocate_len = hole_size;
				}
			}
		}
	}

	if (found_allocate_len == 0) {
		error("failed to allocate %u blocks, out of space?", len);
		return NULL;
	}
	if (found_allocate_len > len) found_allocate_len = len;
done:
	// reclaim allocated space in chunk
	bgs[found_bg].chunks[found_prev_chunk].len += found_allocate_len;
	if (reserve_blocks(&bgs[found_bg],
			found_bg,
			found_block,
			found_allocate_len) < 0) {
		error("failed to reserve %u blocks in block group %u\n", found_allocate_len, found_bg);
		return NULL;
	}
	bgs[found_bg].data_blocks_used += found_allocate_len;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = found_block + bgs[found_bg].first_block;
	reg->len = found_allocate_len;
	reg->next = NULL;
	reg->prev = NULL;
	reg->bg = found_bg;
	return reg;
}

static struct region *ext4_allocate_best_fit(u32 len)
{
	struct region *first_reg = NULL;
	struct region *prev_reg = NULL;
	struct region *reg;

	while (len > 0) {
		reg = ext4_allocate_best_fit_partial(len);
		if (reg == NULL)
			return NULL;

		if (first_reg == NULL)
			first_reg = reg;

		if (prev_reg) {
			prev_reg->next = reg;
			reg->prev = prev_reg;
		}

		prev_reg = reg;
		len -= reg->len;
	}

	return first_reg;
}

/* Allocate len blocks. The blocks may be spread across multiple block groups,
   and are returned in a linked list of the blocks in each block group. The
   allocation algorithm is:
      1. If the remaining allocation is larger than any available contiguous region,
         allocate the largest contiguous region and loop
      2. Otherwise, allocate the smallest contiguous region that it fits in
*/
struct block_allocation *allocate_blocks(u32 len)
{
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL)
		return NULL;

	struct block_allocation *alloc = create_allocation();
	alloc->list.first = reg;
	while (reg->next != NULL)
		reg = reg->next;
	alloc->list.last = reg;
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}

/* Returns the number of discontiguous regions used by an allocation */
int block_allocation_num_regions(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i++;

	return i;
}

/* Returns the total length in blocks of an allocation */
int block_allocation_len(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i += reg->len;

	return i;
}
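
/* Illustrative usage of the allocation API above; a minimal sketch, kept
 * disabled like the dump helpers earlier in this file. The function name is
 * made up for illustration, and block_allocator_init() is assumed to have
 * already been called by the mkfs setup code. */
#if 0
static void example_allocation_usage(void)
{
	/* Ask for 16 blocks; best-fit may return them as several regions */
	struct block_allocation *alloc = allocate_blocks(16);
	if (alloc == NULL)
		return;

	printf("got %d blocks in %d discontiguous regions\n",
			block_allocation_len(alloc),
			block_allocation_num_regions(alloc));

	/* Frees the region list; the blocks themselves stay reserved in
	   their block groups' bitmaps */
	free_alloc(alloc);
}
#endif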

/* Returns the block number of the block'th block in an allocation */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->list.iter;
	block += alloc->list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->oob_list.iter;
	block += alloc->oob_list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Gets the starting block and remaining length in blocks of the current
   region of an allocation (the region at the iteration pointer) */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}

/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}

/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}

/* Returns true once the region iterator has walked off the end of the list */
int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}

/* Resets the region iterator to the start of the allocation */
void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}
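
/* Sketch of walking an allocation region by region with the iterator above
 * (illustrative only, disabled like the other examples; the function name is
 * made up). */
#if 0
static void example_region_walk(struct block_allocation *alloc)
{
	u32 start, len;

	rewind_alloc(alloc);
	for (; !last_region(alloc); get_next_region(alloc)) {
		get_region(alloc, &start, &len);
		printf("region: blocks %u-%u (%u)\n", start, start + len - 1, len);
	}
}
#endif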

static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		return reg;
	}
}

/* Splits an allocation into two allocations. The returned allocation will
   point to the first half, and the original allocation ptr will point to the
   second half. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}

/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}

static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
		/* Keep the iterator in step with the region we advanced into, so
		   later get_region()/get_*_block() calls count from the right place */
		list->iter = reg;
	}

	if (blocks > 0)
		return -1;

	return 0;
}

/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}

int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}
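
/* Sketch of carving out-of-band (metadata) blocks from an allocation
 * (illustrative, disabled; the function name is made up). The first blocks
 * at the iteration pointer move to the oob list, where get_oob_block() and
 * advance_oob_blocks() address them independently of the main data list. */
#if 0
static void example_oob_usage(struct block_allocation *alloc)
{
	/* Reserve 2 blocks for extent/indirect metadata */
	if (reserve_oob_blocks(alloc, 2) < 0)
		return;

	printf("first oob block: %u\n", get_oob_block(alloc, 0));

	/* Step the oob iterator forward by one block */
	advance_oob_blocks(alloc, 1);
}
#endif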

int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	for (; reg; reg = reg->next)
		region_list_append(&alloc->oob_list, reg);

	return 0;
}

/* Returns an ext4_inode structure for an inode number */
struct ext4_inode *get_inode(u32 inode)
{
	inode -= 1;
	int bg = inode / info.inodes_per_group;
	inode %= info.inodes_per_group;

	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
			info.inode_size);
}

struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
{
	struct ext4_xattr_header *block = xattr_list_find(inode);
	if (block != NULL)
		return block;

	u32 block_num = allocate_block();
	if (block_num == EXT4_ALLOCATE_FAILED) {
		error("get_xattr: failed to allocate a block");
		return NULL;
	}
	block = calloc(info.block_size, 1);
	if (block == NULL) {
		error("get_xattr: failed to allocate %d", info.block_size);
		return NULL;
	}

	block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
	block->h_refcount = cpu_to_le32(1);
	block->h_blocks = cpu_to_le32(1);
	inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
	inode->i_file_acl_lo = cpu_to_le32(block_num);

	int result = sparse_file_add_data(ext4_sparse_file, block, info.block_size, block_num);
	if (result != 0) {
		error("get_xattr: sparse_file_add_data failure %d", result);
		free(block);
		return NULL;
	}
	xattr_list_insert(inode, block);
	return block;
}

/* Marks the first num inodes in a block group as used */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}

/* Returns the first free inode number
   TODO: Inodes should be allocated in the block group of the data? */
u32 allocate_inode()
{
	unsigned int bg;
	u32 inode;

	for (bg = 0; bg < aux_info.groups; bg++) {
		inode = reserve_inodes(bg, 1);
		if (inode != EXT4_ALLOCATE_FAILED)
			return bg * info.inodes_per_group + inode;
	}

	return EXT4_ALLOCATE_FAILED;
}

/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}

/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}
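
/* Sketch of the inode-side flow above (illustrative, disabled; the function
 * name and locals are made up). allocate_inode() hands back a 1-based global
 * inode number; get_inode() maps it to the in-memory table entry, and new
 * directories additionally bump their block group's directory count. */
#if 0
static void example_inode_usage(void)
{
	u32 inode_num = allocate_inode();
	if (inode_num == EXT4_ALLOCATE_FAILED)
		return;

	struct ext4_inode *inode = get_inode(inode_num);
	inode->i_links_count = cpu_to_le16(2);

	/* Account for the new directory in its block group */
	add_directory(inode_num);
}
#endif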

/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}

/* Returns the flags for a block group */
u16 get_bg_flags(int bg)
{
	return aux_info.bgs[bg].flags;
}

/* Frees the memory used by a linked list of allocation regions */
void free_alloc(struct block_allocation *alloc)
{
	struct region *reg;

	reg = alloc->list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	reg = alloc->oob_list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	free(alloc);
}

void reserve_bg_chunk(int bg, u32 start_block, u32 size)
{
	struct block_group_info *bgs = aux_info.bgs;
	int chunk_count;
	if (bgs[bg].chunk_count == bgs[bg].max_chunk_count) {
		bgs[bg].max_chunk_count *= 2;
		bgs[bg].chunks = realloc(bgs[bg].chunks, bgs[bg].max_chunk_count * sizeof(struct region));
		if (!bgs[bg].chunks)
			critical_error("realloc failed");
	}
	chunk_count = bgs[bg].chunk_count;
	bgs[bg].chunks[chunk_count].block = start_block;
	bgs[bg].chunks[chunk_count].len = size;
	bgs[bg].chunks[chunk_count].bg = bg;
	bgs[bg].chunk_count++;
}

int reserve_blocks_for_allocation(struct block_allocation *alloc)
{
	struct region *reg;
	struct block_group_info *bgs = aux_info.bgs;

	if (!alloc)
		return 0;
	reg = alloc->list.first;
	while (reg != NULL) {
		if (reserve_blocks(&bgs[reg->bg], reg->bg, reg->block - bgs[reg->bg].first_block, reg->len) < 0) {
			return -1;
		}
		reg = reg->next;
	}
	return 0;
}