/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>

#include "ext4_utils.h"
#include "allocate.h"
#include "backed_block.h"
#include "ext4.h"

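/* A doubly linked list of regions. iter and partial_iter form a cursor:
   iter points at the current region and partial_iter is the block offset
   into it. */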
struct region_list {
	struct region *first;
	struct region *last;
	struct region *iter;
	u32 partial_iter;
};

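/* An allocation: the main list of data regions, plus an out-of-band list
   holding blocks reserved for metadata (indirect or extent blocks) */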
struct block_allocation {
	struct region_list list;
	struct region_list oob_list;
};

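/* A contiguous run of len blocks starting at filesystem block number block,
   allocated out of block group bg */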
struct region {
	u32 block;
	u32 len;
	int bg;
	struct region *next;
	struct region *prev;
};

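/* Per-block-group state: in-memory copies of the block and inode bitmaps
   and the inode table, plus free block and inode accounting */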
struct block_group_info {
	u32 first_block;
	int header_blocks;
	int data_blocks_used;
	int has_superblock;
	u8 *bitmaps;
	u8 *block_bitmap;
	u8 *inode_bitmap;
	u8 *inode_table;
	u32 free_blocks;
	u32 first_free_block;
	u32 free_inodes;
	u32 first_free_inode;
	u16 used_dirs;
};

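/* Creates and initializes an empty block allocation */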
struct block_allocation *create_allocation()
{
	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
	if (alloc == NULL)
		critical_error_errno("malloc");
	alloc->list.first = NULL;
	alloc->list.last = NULL;
	alloc->oob_list.first = NULL;
	alloc->oob_list.last = NULL;
	alloc->list.iter = NULL;
	alloc->list.partial_iter = 0;
	alloc->oob_list.iter = NULL;
	alloc->oob_list.partial_iter = 0;
	return alloc;
}

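/* Unlinks a region from a region list, fixing up the list's first and
   last pointers if necessary */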
static void region_list_remove(struct region_list *list, struct region *reg)
{
	if (reg->prev)
		reg->prev->next = reg->next;

	if (reg->next)
		reg->next->prev = reg->prev;

	if (list->first == reg)
		list->first = reg->next;

	if (list->last == reg)
		list->last = reg->prev;

	reg->next = NULL;
	reg->prev = NULL;
}

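/* Appends a region to the end of a region list, initializing the list's
   cursor if the list was empty */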
static void region_list_append(struct region_list *list, struct region *reg)
{
	if (list->first == NULL) {
		list->first = reg;
		list->last = reg;
		list->iter = reg;
		list->partial_iter = 0;
		reg->prev = NULL;
	} else {
		list->last->next = reg;
		reg->prev = list->last;
		list->last = reg;
	}
	reg->next = NULL;
}

#if 0
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %d-%d (%d)\n", reg,
				reg->bg * info.blocks_per_group + reg->block,
				reg->bg * info.blocks_per_group + reg->block + reg->len - 1,
				reg->len);
	}
}

static void dump_region_lists(struct block_allocation *alloc)
{
	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif

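/* Appends a new region of len blocks starting at block in block group
   bg_num to an allocation's main region list */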
void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}

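/* Lazily allocates the in-memory inode table for a block group and queues
   it to be written at the block group's inode table location */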
static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	queue_data_block(bg->inode_table, aux_info.inode_table_blocks
			* info.block_size, block);
}

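/* Sets a single bit in a bitmap; returns 1 if the bit was already set */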
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	if (bitmap[bit / 8] & 1 << (bit % 8))
		return 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return 0;
}

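/* Sets a byte-aligned run of 8 bits in a bitmap; returns nonzero if any
   of them were already set */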
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	int ret = bitmap[bit / 8];
	bitmap[bit / 8] = 0xFF;
	return ret;
}

/* Marks num blocks starting at start in a block group as used, and accounts
   for them in the block group free block info. */
static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	if (num > bg->free_blocks)
		return -1;

	/* Set bits one at a time until the position is byte aligned */
	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	/* Set whole bytes at a time */
	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	/* Set any remaining bits */
	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	bg->free_blocks -= num;
	if (start == bg->first_free_block)
		bg->first_free_block = start + num;

	return 0;
}

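/* Returns the last num_blocks reserved blocks of a block group to its free
   pool. Assumes they were the most recently reserved blocks. */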
static void free_blocks(struct block_group_info *bg, u32 num_blocks)
{
	unsigned int i;
	u32 block = bg->first_free_block - 1;
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	bg->first_free_block -= num_blocks;
}

/* Reduces an existing allocation by len blocks by returning the last blocks
   to the free pool in their block group. Assumes that the blocks being
   returned were the last ones allocated out of the block group */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;

		if (last_reg->len > len) {
			free_blocks(&aux_info.bgs[last_reg->bg], len);
			last_reg->len -= len;
			len = 0;
		} else {
			struct region *reg = alloc->list.last->prev;
			free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				/* Keep the last pointer valid before last_reg is freed */
				alloc->list.last = reg;
			} else {
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}

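/* Initializes one block group: allocates its bitmaps, computes its header
   layout, queues the bitmaps for writing, and reserves the header blocks
   and any blocks that lie past the end of the filesystem */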
static void init_bg(struct block_group_info *bg, unsigned int i)
{
	int header_blocks = 2 + aux_info.inode_table_blocks;

	bg->has_superblock = ext4_bg_has_super_block(i);

	if (bg->has_superblock)
		header_blocks += 1 + aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks;

	bg->bitmaps = calloc(info.block_size, 2);
	if (bg->bitmaps == NULL)
		critical_error_errno("calloc");
	bg->block_bitmap = bg->bitmaps;
	bg->inode_bitmap = bg->bitmaps + info.block_size;

	bg->header_blocks = header_blocks;
	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;

	u32 block = bg->first_block;
	if (bg->has_superblock)
		block += 1 + aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks;
	queue_data_block(bg->bitmaps, 2 * info.block_size, block);

	bg->data_blocks_used = 0;
	bg->free_blocks = info.blocks_per_group;
	bg->first_free_block = 0;
	bg->free_inodes = info.inodes_per_group;
	bg->first_free_inode = 1;

	if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
		error("failed to reserve %u blocks in block group %u", bg->header_blocks, i);

	/* If the last block group runs past the end of the filesystem, mark the
	   overrun blocks as used. Compare before subtracting so the unsigned
	   difference cannot wrap. */
	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
		reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
	}
}

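/* Allocates and initializes the bookkeeping for every block group in the
   filesystem */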
void block_allocator_init()
{
	unsigned int i;

	aux_info.bgs = calloc(aux_info.groups, sizeof(struct block_group_info));
	if (aux_info.bgs == NULL)
		critical_error_errno("calloc");

	for (i = 0; i < aux_info.groups; i++)
		init_bg(&aux_info.bgs[i], i);
}

void block_allocator_free()
{
	unsigned int i;

	for (i = 0; i < aux_info.groups; i++) {
		free(aux_info.bgs[i].bitmaps);
		free(aux_info.bgs[i].inode_table);
	}
	free(aux_info.bgs);
}

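/* Reserves len contiguous blocks from the start of a block group's free
   area. Returns the filesystem block number of the first reserved block,
   or EXT4_ALLOCATE_FAILED if the group does not have len free blocks. */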
static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
{
	if (get_free_blocks(bg_num) < len)
		return EXT4_ALLOCATE_FAILED;

	struct block_group_info *bg = &aux_info.bgs[bg_num];
	u32 block = bg->first_free_block;
	if (reserve_blocks(bg, bg->first_free_block, len) < 0) {
		error("failed to reserve %u blocks in block group %u", len, bg_num);
		return EXT4_ALLOCATE_FAILED;
	}

	bg->data_blocks_used += len;

	return bg->first_block + block;
}

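/* Searches the block groups in order for one that can hold len contiguous
   blocks, and allocates them from it. Returns NULL if no group can. */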
static struct region *ext4_allocate_contiguous_blocks(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(len, i);

		if (block != EXT4_ALLOCATE_FAILED) {
			reg = malloc(sizeof(struct region));
			if (reg == NULL)
				critical_error_errno("malloc");
			reg->block = block;
			reg->len = len;
			reg->next = NULL;
			reg->prev = NULL;
			reg->bg = i;
			return reg;
		}
	}

	return NULL;
}

/* Allocate a single block and return its block number */
u32 allocate_block()
{
	unsigned int i;
	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(1, i);

		if (block != EXT4_ALLOCATE_FAILED)
			return block;
	}

	return EXT4_ALLOCATE_FAILED;
}

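/* Allocates at most len blocks as a single region. If the remaining request
   fits in an empty block group it is handed to the contiguous allocator
   (which may place it in a partially used group); otherwise all the free
   blocks of the first completely empty block group are taken. */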
static struct region *ext4_allocate_partial(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		if (aux_info.bgs[i].data_blocks_used == 0) {
			u32 bg_len = aux_info.bgs[i].free_blocks;
			u32 block;

			if (len <= bg_len) {
				/* If the requested length would fit in a block group,
				   use the regular allocator to try to fit it in a partially
				   used block group */
				bg_len = len;
				reg = ext4_allocate_contiguous_blocks(len);
			} else {
				block = ext4_allocate_blocks_from_block_group(bg_len, i);

				if (block == EXT4_ALLOCATE_FAILED) {
					error("failed to allocate %u blocks in block group %u", bg_len, i);
					return NULL;
				}

				reg = malloc(sizeof(struct region));
				if (reg == NULL)
					critical_error_errno("malloc");
				reg->block = block;
				reg->len = bg_len;
				reg->next = NULL;
				reg->prev = NULL;
				reg->bg = i;
			}

			return reg;
		}
	}
	return NULL;
}

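/* Allocates len blocks as a chain of regions, each as large as possible,
   by repeatedly calling ext4_allocate_partial. Returns the head of the
   chain, or NULL on failure. */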
static struct region *ext4_allocate_multiple_contiguous_blocks(u32 len)
{
	struct region *first_reg = NULL;
	struct region *prev_reg = NULL;
	struct region *reg;

	while (len > 0) {
		reg = ext4_allocate_partial(len);
		if (reg == NULL)
			return NULL;

		if (first_reg == NULL)
			first_reg = reg;

		if (prev_reg) {
			prev_reg->next = reg;
			reg->prev = prev_reg;
		}

		prev_reg = reg;
		len -= reg->len;
	}

	return first_reg;
}

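/* Allocates len blocks, contiguously if possible, and otherwise as a chain
   of regions spread across block groups */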
struct region *do_allocate(u32 len)
{
	struct region *reg = ext4_allocate_contiguous_blocks(len);

	if (reg == NULL)
		reg = ext4_allocate_multiple_contiguous_blocks(len);

	return reg;
}

/* Allocate len blocks. The blocks may be spread across multiple block groups,
   and are returned in a linked list of the blocks in each block group. The
   allocation algorithm is:
      1. Try to allocate the entire request contiguously within a single
         block group
      2. If the request doesn't fit in any block group, allocate as many
         blocks as possible into each block group that is completely empty
      3. Put the last set of blocks in the first block group they fit in
*/
struct block_allocation *allocate_blocks(u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL)
		return NULL;

	struct block_allocation *alloc = create_allocation();
	alloc->list.first = reg;
	/* The allocation may span several regions, so walk to the real tail */
	while (reg->next != NULL)
		reg = reg->next;
	alloc->list.last = reg;
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}

/* Returns the number of discontiguous regions used by an allocation */
int block_allocation_num_regions(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i++;

	return i;
}

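/* Returns the total length in blocks of an allocation */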
int block_allocation_len(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i += reg->len;

	return i;
}

/* Returns the filesystem block number of the block'th block in an
   allocation, counting from the current iterator position */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->list.iter;
	block += alloc->list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

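/* Returns the filesystem block number of the block'th out-of-band block,
   counting from the oob list's iterator position */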
u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->oob_list.iter;
	block += alloc->oob_list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Gets the starting block and length in blocks of the current region
   of an allocation */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}

/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}

/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}

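/* Returns true once the iterator has moved past the last region */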
int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}

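/* Resets the iterator to the first region of an allocation */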
void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}

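/* Ensures there is a region boundary exactly len blocks past the iterator,
   splitting a region in two if necessary. Returns the region at the old
   iterator position and leaves the iterator at the start of the second
   half, or returns NULL if the allocation is too short. */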
static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		/* The split point is already at a region boundary; advance the
		   iterator past it so the caller sees the same state as in the
		   split case */
		tmp = alloc->list.iter;
		alloc->list.iter = reg;
		return tmp;
	}
}

/* Splits an allocation in two at len blocks past the current pointer. The
   returned region is the start of the first half, and the allocation's
   iterator is left pointing at the second half. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}

/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}

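/* Advances a region list's cursor by blocks, walking into following regions
   as needed. Returns -1 if the list has fewer than blocks blocks left. */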
static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
		/* Keep the cursor in step as we cross region boundaries */
		list->iter = reg;
	}

	if (blocks > 0)
		return -1;

	return 0;
}

/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}

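/* Move the out-of-band allocation pointer forward */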
int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}

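/* Allocates len more blocks and appends them to an allocation's
   out-of-band list */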
int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	/* region_list_append clears reg->next, so save it before appending */
	while (reg) {
		struct region *next = reg->next;
		region_list_append(&alloc->oob_list, reg);
		reg = next;
	}

	return 0;
}

/* Returns an ext4_inode structure for an inode number */
struct ext4_inode *get_inode(u32 inode)
{
	inode -= 1;
	int bg = inode / info.inodes_per_group;
	inode %= info.inodes_per_group;

	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
		info.inode_size);
}

/* Marks the first num free inodes in a block group as used, and returns the
   (1-based, group-relative) number of the first one, or
   EXT4_ALLOCATE_FAILED */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}

/* Returns the first free inode number
   TODO: Inodes should be allocated in the block group of the data? */
u32 allocate_inode()
{
	unsigned int bg;
	u32 inode;

	for (bg = 0; bg < aux_info.groups; bg++) {
		inode = reserve_inodes(bg, 1);
		if (inode != EXT4_ALLOCATE_FAILED)
			return bg * info.inodes_per_group + inode;
	}

	return EXT4_ALLOCATE_FAILED;
}

/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}

/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}

/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}

/* Frees the memory used by a linked list of allocation regions */
void free_alloc(struct block_allocation *alloc)
{
	struct region *reg;

	reg = alloc->list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	reg = alloc->oob_list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	free(alloc);
}