• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * free.c
3  *
4  * Very simple linked-list based malloc()/free().
5  */
6 
7 #include <syslinux/firmware.h>
8 #include <stdlib.h>
9 #include <dprintf.h>
10 #include "malloc.h"
11 
12 #include <stdio.h>
13 
/*
 * Insert a block into the free chain for its heap, coalescing it with
 * the physically previous and/or next block when those are free and
 * adjacent in memory.  Only ah's arena header is assumed valid on entry.
 * Returns the free block that now contains the caller's block (ah
 * itself, or the predecessor it was merged into).
 */
static struct free_arena_header *
__free_block(struct free_arena_header *ah)
{
    struct free_arena_header *pah, *nah;
    /* Per-heap sentinel head node; ARENA_HEAP_GET selects the heap index */
    struct free_arena_header *head =
	&__core_malloc_head[ARENA_HEAP_GET(ah->a.attrs)];

    /* Capture the address-ordered neighbors before any relinking */
    pah = ah->a.prev;
    nah = ah->a.next;
    if ( ARENA_TYPE_GET(pah->a.attrs) == ARENA_TYPE_FREE &&
           (char *)pah+ARENA_SIZE_GET(pah->a.attrs) == (char *)ah ) {
        /* Coalesce into the previous block */
        ARENA_SIZE_SET(pah->a.attrs, ARENA_SIZE_GET(pah->a.attrs) +
		ARENA_SIZE_GET(ah->a.attrs));
        pah->a.next = nah;
        nah->a.prev = pah;

#ifdef DEBUG_MALLOC
        /* Poison the swallowed header so stale use is detectable */
        ARENA_TYPE_SET(ah->a.attrs, ARENA_TYPE_DEAD);
#endif

        /* The merged predecessor is now "our" block; pah is not used
           again below, but keep it consistent anyway */
        ah = pah;
        pah = ah->a.prev;
    } else {
        /* Need to add this block to the free chain */
        ARENA_TYPE_SET(ah->a.attrs, ARENA_TYPE_FREE);
        ah->a.tag = MALLOC_FREE;

        /* Push onto the head of this heap's free list */
        ah->next_free = head->next_free;
        ah->prev_free = head;
        head->next_free = ah;
        ah->next_free->prev_free = ah;
    }

    /* In either of the previous cases, we might be able to merge
       with the subsequent block... */
    if ( ARENA_TYPE_GET(nah->a.attrs) == ARENA_TYPE_FREE &&
           (char *)ah+ARENA_SIZE_GET(ah->a.attrs) == (char *)nah ) {
        ARENA_SIZE_SET(ah->a.attrs, ARENA_SIZE_GET(ah->a.attrs) +
		ARENA_SIZE_GET(nah->a.attrs));

        /* Remove the old block from the chains */
        nah->next_free->prev_free = nah->prev_free;
        nah->prev_free->next_free = nah->next_free;
        ah->a.next = nah->a.next;
        nah->a.next->a.prev = ah;

#ifdef DEBUG_MALLOC
        ARENA_TYPE_SET(nah->a.attrs, ARENA_TYPE_DEAD);
#endif
    }

    /* Return the block that contains the called block */
    return ah;
}
69 
bios_free(void * ptr)70 void bios_free(void *ptr)
71 {
72     struct free_arena_header *ah;
73 
74     ah = (struct free_arena_header *)
75         ((struct arena_header *)ptr - 1);
76 
77 #ifdef DEBUG_MALLOC
78     if (ah->a.magic != ARENA_MAGIC)
79 	dprintf("failed free() magic check: %p\n", ptr);
80 
81     if (ARENA_TYPE_GET(ah->a.attrs) != ARENA_TYPE_USED)
82 	dprintf("invalid arena type: %d\n", ARENA_TYPE_GET(ah->a.attrs));
83 #endif
84 
85     __free_block(ah);
86 }
87 
free(void * ptr)88 __export void free(void *ptr)
89 {
90     dprintf("free(%p) @ %p\n", ptr, __builtin_return_address(0));
91 
92     if ( !ptr )
93         return;
94 
95     sem_down(&__malloc_semaphore, 0);
96     firmware->mem->free(ptr);
97     sem_up(&__malloc_semaphore);
98 
99   /* Here we could insert code to return memory to the system. */
100 }
101 
102 /*
103  * This is used to insert a block which is not previously on the
104  * free list.  Only the a.size field of the arena header is assumed
105  * to be valid.
106  */
__inject_free_block(struct free_arena_header * ah)107 void __inject_free_block(struct free_arena_header *ah)
108 {
109     struct free_arena_header *head =
110 	&__core_malloc_head[ARENA_HEAP_GET(ah->a.attrs)];
111     struct free_arena_header *nah;
112     size_t a_end = (size_t) ah + ARENA_SIZE_GET(ah->a.attrs);
113     size_t n_end;
114 
115     dprintf("inject: %#zx bytes @ %p, heap %u (%p)\n",
116 	    ARENA_SIZE_GET(ah->a.attrs), ah,
117 	    ARENA_HEAP_GET(ah->a.attrs), head);
118 
119     sem_down(&__malloc_semaphore, 0);
120 
121     for (nah = head->a.next ; nah != head ; nah = nah->a.next) {
122         n_end = (size_t) nah + ARENA_SIZE_GET(nah->a.attrs);
123 
124         /* Is nah entirely beyond this block? */
125         if ((size_t) nah >= a_end)
126             break;
127 
128         /* Is this block entirely beyond nah? */
129         if ((size_t) ah >= n_end)
130             continue;
131 
132 	printf("conflict:ah: %p, a_end: %p, nah: %p, n_end: %p\n", ah, a_end, nah, n_end);
133 
134         /* Otherwise we have some sort of overlap - reject this block */
135 	sem_up(&__malloc_semaphore);
136         return;
137     }
138 
139     /* Now, nah should point to the successor block */
140     ah->a.next = nah;
141     ah->a.prev = nah->a.prev;
142     nah->a.prev = ah;
143     ah->a.prev->a.next = ah;
144 
145     __free_block(ah);
146 
147     sem_up(&__malloc_semaphore);
148 }
149 
150 /*
151  * Free all memory which is tagged with a specific tag.
152  */
__free_tagged(malloc_tag_t tag)153 static void __free_tagged(malloc_tag_t tag) {
154     struct free_arena_header *fp, *head;
155     int i;
156 
157     sem_down(&__malloc_semaphore, 0);
158 
159     for (i = 0; i < NHEAP; i++) {
160 	dprintf("__free_tagged(%u) heap %d\n", tag, i);
161 	head = &__core_malloc_head[i];
162 	for (fp = head->a.next ; fp != head ; fp = fp->a.next) {
163 	    if (ARENA_TYPE_GET(fp->a.attrs) == ARENA_TYPE_USED &&
164 		fp->a.tag == tag)
165 		fp = __free_block(fp);
166 	}
167     }
168 
169     sem_up(&__malloc_semaphore);
170     dprintf("__free_tagged(%u) done\n", tag);
171 }
172 
/*
 * COMBOOT teardown hook: release every allocation tagged MALLOC_MODULE.
 * The register frame is ignored.
 */
void comboot_cleanup_lowmem(com32sys_t *regs)
{
    (void)regs;		/* unused */
    __free_tagged(MALLOC_MODULE);
}
179