/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ext4_utils.h"
#include "allocate.h"
#include "backed_block.h"
#include "ext4.h"

#include <stdio.h>
#include <stdlib.h>

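/* A doubly linked list of regions, with an iterator for walking the list and
   a count of the blocks already consumed from the iterator's region */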
struct region_list {
	struct region *first;
	struct region *last;
	struct region *iter;
	u32 partial_iter;
};

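/* An allocation: a main list of data regions plus an out-of-band list of
   regions reserved for metadata (indirect or extent blocks) */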
struct block_allocation {
	struct region_list list;
	struct region_list oob_list;
};

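/* A contiguous run of len blocks starting at block, allocated out of
   block group bg */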
struct region {
	u32 block;
	u32 len;
	int bg;
	struct region *next;
	struct region *prev;
};

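/* Per-block-group bookkeeping: the group's location on disk, its in-memory
   block and inode bitmaps and inode table, and free block/inode accounting */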
struct block_group_info {
	u32 first_block;
	int header_blocks;
	int data_blocks_used;
	int has_superblock;
	u8 *bitmaps;
	u8 *block_bitmap;
	u8 *inode_bitmap;
	u8 *inode_table;
	u32 free_blocks;
	u32 first_free_block;
	u32 free_inodes;
	u32 first_free_inode;
	u16 used_dirs;
};

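/* Allocates and initializes an empty block_allocation */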
struct block_allocation *create_allocation()
{
	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
	if (alloc == NULL)
		critical_error_errno("malloc");
	alloc->list.first = NULL;
	alloc->list.last = NULL;
	alloc->oob_list.first = NULL;
	alloc->oob_list.last = NULL;
	alloc->list.iter = NULL;
	alloc->list.partial_iter = 0;
	alloc->oob_list.iter = NULL;
	alloc->oob_list.partial_iter = 0;
	return alloc;
}

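/* Unlinks a region from a region list, fixing up the list's first and last
   pointers if necessary */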
static void region_list_remove(struct region_list *list, struct region *reg)
{
	if (reg->prev)
		reg->prev->next = reg->next;

	if (reg->next)
		reg->next->prev = reg->prev;

	if (list->first == reg)
		list->first = reg->next;

	if (list->last == reg)
		list->last = reg->prev;

	reg->next = NULL;
	reg->prev = NULL;
}

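/* Appends a region to the end of a region list */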
static void region_list_append(struct region_list *list, struct region *reg)
{
	if (list->first == NULL) {
		list->first = reg;
		list->last = reg;
		list->iter = reg;
		list->partial_iter = 0;
		reg->prev = NULL;
	} else {
		list->last->next = reg;
		reg->prev = list->last;
		list->last = reg;
	}
	reg->next = NULL;
}

#if 0
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %d-%d (%d)\n", reg,
		       reg->bg * info.blocks_per_group + reg->block,
		       reg->bg * info.blocks_per_group + reg->block + reg->len - 1,
		       reg->len);
	}
}

static void dump_region_lists(struct block_allocation *alloc)
{
	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif

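/* Appends a new region covering len blocks starting at block in block group
   bg_num to an allocation's main list */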
void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}

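/* Lazily allocates a zeroed inode table for a block group and queues it to
   be written out at the group's inode table location */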
static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	queue_data_block(bg->inode_table,
			aux_info.inode_table_blocks * info.block_size, block);
}

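/* Queues zero-fill data for the inode table of every block group whose
   table was never allocated in memory */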
void init_unused_inode_tables(void)
{
	unsigned int i;
	u32 block;
	struct block_group_info *bg;

	for (i = 0; i < aux_info.groups; i++) {
		if (!aux_info.bgs[i].inode_table) {
			bg = &aux_info.bgs[i];
			block = bg->first_block + 2;
			if (bg->has_superblock)
				block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;
			queue_fill_block(0, aux_info.inode_table_blocks * info.block_size, block);
		}
	}
}

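/* Sets one bit in a bitmap.  Returns nonzero if the bit was already set */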
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	if (bitmap[bit / 8] & (1 << (bit % 8)))
		return 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return 0;
}

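/* Sets all 8 bits of the byte containing bit, which must be a multiple of 8.
   Returns nonzero if any of the bits were already set */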
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	int ret = bitmap[bit / 8];
	bitmap[bit / 8] = 0xFF;
	return ret;
}

/* Marks num blocks starting at start in a block group as used, and accounts
   for them in the block group's free block info. */
static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	if (num > bg->free_blocks)
		return -1;

	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	bg->free_blocks -= num;
	if (start == bg->first_free_block)
		bg->first_free_block = start + num;

	return 0;
}

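/* Returns the last num_blocks blocks allocated from a block group to its
   free pool */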
static void free_blocks(struct block_group_info *bg, u32 num_blocks)
{
	unsigned int i;
	u32 block = bg->first_free_block - 1;
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	bg->first_free_block -= num_blocks;
}

/* Reduces an existing allocation by len blocks by returning the last blocks
   to the free pool in their block group.  Assumes that the blocks being
   returned were the last ones allocated out of the block group */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;

		if (last_reg->len > len) {
			free_blocks(&aux_info.bgs[last_reg->bg], len);
			last_reg->len -= len;
			len = 0;
		} else {
			struct region *reg = alloc->list.last->prev;
			free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				alloc->list.last = reg;
			} else {
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}

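/* Initializes a block group: computes its layout, allocates its block and
   inode bitmaps, and reserves the header blocks and any blocks that fall
   past the end of the filesystem */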
static void init_bg(struct block_group_info *bg, unsigned int i)
{
	int header_blocks = 2 + aux_info.inode_table_blocks;

	bg->has_superblock = ext4_bg_has_super_block(i);

	if (bg->has_superblock)
		header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;

	bg->bitmaps = calloc(2, info.block_size);
	if (bg->bitmaps == NULL)
		critical_error_errno("calloc");
	bg->block_bitmap = bg->bitmaps;
	bg->inode_bitmap = bg->bitmaps + info.block_size;

	bg->header_blocks = header_blocks;
	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;

	u32 block = bg->first_block;
	if (bg->has_superblock)
		block += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
	queue_data_block(bg->bitmaps, 2 * info.block_size, block);

	bg->data_blocks_used = 0;
	bg->free_blocks = info.blocks_per_group;
	bg->first_free_block = 0;
	bg->free_inodes = info.inodes_per_group;
	bg->first_free_inode = 1;

	if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
		error("failed to reserve %d blocks in block group %u\n", bg->header_blocks, i);

	/* The subtraction below is unsigned, so test for overrun first */
	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
		reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
	}
}

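/* Allocates and initializes the bookkeeping for every block group */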
void block_allocator_init()
{
	unsigned int i;

	aux_info.bgs = calloc(aux_info.groups, sizeof(struct block_group_info));
	if (aux_info.bgs == NULL)
		critical_error_errno("calloc");

	for (i = 0; i < aux_info.groups; i++)
		init_bg(&aux_info.bgs[i], i);
}

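/* Frees the memory used by the block allocator's per-group bookkeeping */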
void block_allocator_free()
{
	unsigned int i;

	for (i = 0; i < aux_info.groups; i++) {
		free(aux_info.bgs[i].bitmaps);
		free(aux_info.bgs[i].inode_table);
	}
	free(aux_info.bgs);
}

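/* Reserves the next len free blocks in block group bg_num.  Returns the
   filesystem block number of the first reserved block, or
   EXT4_ALLOCATE_FAILED if the group does not have len free blocks */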
static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
{
	if (get_free_blocks(bg_num) < len)
		return EXT4_ALLOCATE_FAILED;

	u32 block = aux_info.bgs[bg_num].first_free_block;
	struct block_group_info *bg = &aux_info.bgs[bg_num];
	if (reserve_blocks(bg, bg->first_free_block, len) < 0) {
		error("failed to reserve %u blocks in block group %d\n", len, bg_num);
		return EXT4_ALLOCATE_FAILED;
	}

	aux_info.bgs[bg_num].data_blocks_used += len;

	return bg->first_block + block;
}

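/* Allocates a single contiguous region of len blocks in the first block
   group that can hold it */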
static struct region *ext4_allocate_contiguous_blocks(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(len, i);

		if (block != EXT4_ALLOCATE_FAILED) {
			reg = malloc(sizeof(struct region));
			if (reg == NULL)
				critical_error_errno("malloc");
			reg->block = block;
			reg->len = len;
			reg->next = NULL;
			reg->prev = NULL;
			reg->bg = i;
			return reg;
		}
	}

	return NULL;
}

/* Allocate a single block and return its block number */
u32 allocate_block()
{
	unsigned int i;
	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(1, i);

		if (block != EXT4_ALLOCATE_FAILED)
			return block;
	}

	return EXT4_ALLOCATE_FAILED;
}

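/* Allocates a single region of at most len blocks.  If the request would fit
   in a completely empty block group, falls through to the regular contiguous
   allocator; otherwise consumes all the free blocks of the first completely
   empty block group */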
static struct region *ext4_allocate_partial(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		if (aux_info.bgs[i].data_blocks_used == 0) {
			u32 bg_len = aux_info.bgs[i].free_blocks;
			u32 block;

			if (len <= bg_len) {
				/* If the requested length would fit in a block group,
				   use the regular allocator to try to fit it in a partially
				   used block group */
				reg = ext4_allocate_contiguous_blocks(len);
			} else {
				block = ext4_allocate_blocks_from_block_group(bg_len, i);

				if (block == EXT4_ALLOCATE_FAILED) {
					error("failed to allocate %u blocks in block group %u", bg_len, i);
					return NULL;
				}

				reg = malloc(sizeof(struct region));
				if (reg == NULL)
					critical_error_errno("malloc");
				reg->block = block;
				reg->len = bg_len;
				reg->next = NULL;
				reg->prev = NULL;
				reg->bg = i;
			}

			return reg;
		}
	}
	return NULL;
}

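/* Allocates len blocks as a chain of regions, taking as many contiguous
   blocks at a time as possible */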
static struct region *ext4_allocate_multiple_contiguous_blocks(u32 len)
{
	struct region *first_reg = NULL;
	struct region *prev_reg = NULL;
	struct region *reg;

	while (len > 0) {
		reg = ext4_allocate_partial(len);
		if (reg == NULL)
			return NULL;

		if (first_reg == NULL)
			first_reg = reg;

		if (prev_reg) {
			prev_reg->next = reg;
			reg->prev = prev_reg;
		}

		prev_reg = reg;
		len -= reg->len;
	}

	return first_reg;
}

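/* Allocates len blocks, contiguously if possible, otherwise as a chain of
   regions spread across block groups */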
struct region *do_allocate(u32 len)
{
	struct region *reg = ext4_allocate_contiguous_blocks(len);

	if (reg == NULL)
		reg = ext4_allocate_multiple_contiguous_blocks(len);

	return reg;
}

/* Allocate len blocks.  The blocks may be spread across multiple block
   groups, and are returned in a linked list of the regions in each block
   group.  The allocation algorithm is:
      1.  Try to allocate the entire length in a single block group
      2.  If the request doesn't fit in any block group, allocate as many
          blocks as possible into each block group that is completely empty
      3.  Put the last set of blocks in the first block group they fit in
*/
struct block_allocation *allocate_blocks(u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL)
		return NULL;

	struct block_allocation *alloc = create_allocation();
	alloc->list.first = reg;
	/* do_allocate() may return a chain of regions; last must point at
	   the tail of the chain */
	while (reg->next != NULL)
		reg = reg->next;
	alloc->list.last = reg;
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}

/* Returns the number of discontiguous regions used by an allocation */
int block_allocation_num_regions(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i++;

	return i;
}

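/* Returns the total number of blocks in an allocation */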
int block_allocation_len(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i += reg->len;

	return i;
}

/* Returns the block number of the block'th block in an allocation, counting
   from the current iterator position */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->list.iter;
	block += alloc->list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

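/* Like get_block(), but walks the out-of-band list instead */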
u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->oob_list.iter;
	block += alloc->oob_list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Gets the starting block and length in blocks of the current region
   of an allocation */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}

/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}

/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}

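/* Returns true once the allocation's iterator has moved past the last
   region */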
int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}

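/* Resets the allocation's iterator to the first region */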
void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}

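/* Ensures a region boundary falls exactly len blocks past the iterator,
   splitting a region if necessary.  Returns the region at the old iterator
   position and leaves the iterator pointing just past the split point */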
static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		/* The split point already falls on a region boundary; advance
		   the iterator past it, as in the splitting case above */
		tmp = alloc->list.iter;
		alloc->list.iter = reg;
		return tmp;
	}
}

/* Splits an allocation in two at len blocks past the current position.  The
   returned region is the start of the first half, and the allocation's
   pointer is left at the start of the second half */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}

/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}

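/* Advances a region list's iterator by blocks, crossing region boundaries as
   needed.  Returns -1 if the end of the list is reached first */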
static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			list->iter = reg;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
	}

	if (blocks > 0)
		return -1;

	/* Keep the iterator in sync when the advance crossed a region
	   boundary */
	list->iter = reg;
	return 0;
}

/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}

int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}

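/* Allocates len more blocks and appends them to an allocation's out-of-band
   list */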
int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	for (; reg; reg = reg->next)
		region_list_append(&alloc->oob_list, reg);

	return 0;
}

/* Returns an ext4_inode structure for an inode number */
struct ext4_inode *get_inode(u32 inode)
{
	inode -= 1;
	int bg = inode / info.inodes_per_group;
	inode %= info.inodes_per_group;

	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table +
		inode * info.inode_size);
}

/* Marks the first num inodes in a block group as used.  Returns the first
   reserved inode number, relative to the block group, or
   EXT4_ALLOCATE_FAILED */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		/* Inode numbers are 1-based; the bitmap is 0-based */
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}

/* Returns the first free inode number
   TODO: Inodes should be allocated in the block group of the data? */
u32 allocate_inode()
{
	unsigned int bg;
	u32 inode;

	for (bg = 0; bg < aux_info.groups; bg++) {
		inode = reserve_inodes(bg, 1);
		if (inode != EXT4_ALLOCATE_FAILED)
			return bg * info.inodes_per_group + inode;
	}

	return EXT4_ALLOCATE_FAILED;
}

/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}

/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}

/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}

/* Frees the memory used by a linked list of allocation regions */
void free_alloc(struct block_allocation *alloc)
{
	struct region *reg;

	reg = alloc->list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	reg = alloc->oob_list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	free(alloc);
}