• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 
2 #include "upb/upb.int.h"
3 
4 #include <errno.h>
5 #include <stdarg.h>
6 #include <stddef.h>
7 #include <stdint.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 
12 #include "upb/port_def.inc"
13 
14 /* upb_status *****************************************************************/
15 
/* Resets |status| to the OK state with an empty message.
 * A NULL |status| is tolerated and ignored. */
void upb_status_clear(upb_status *status) {
  if (status == NULL) return;
  status->ok = true;
  status->msg[0] = '\0';
}
21 
upb_ok(const upb_status * status)22 bool upb_ok(const upb_status *status) { return status->ok; }
23 
upb_status_errmsg(const upb_status * status)24 const char *upb_status_errmsg(const upb_status *status) { return status->msg; }
25 
/* Marks |status| as failed and copies |msg| into its buffer, truncating if
 * it does not fit.  A NULL |status| is tolerated and ignored.
 *
 * Uses snprintf() rather than strncpy(): snprintf() always NUL-terminates
 * (for a non-zero buffer size) and does not zero-pad the remainder of the
 * buffer, so the manual terminator write is no longer needed. */
void upb_status_seterrmsg(upb_status *status, const char *msg) {
  if (!status) return;
  status->ok = false;
  snprintf(status->msg, sizeof(status->msg), "%s", msg);
}
32 
/* printf()-style variant of upb_status_seterrmsg(); forwards the varargs to
 * upb_status_vseterrf(). */
void upb_status_seterrf(upb_status *status, const char *fmt, ...) {
  va_list argp;
  va_start(argp, fmt);
  upb_status_vseterrf(status, fmt, argp);
  va_end(argp);
}
39 
/* va_list variant of upb_status_seterrf().  Formats the message directly
 * into the status buffer, truncating on overflow.  NULL |status| is a
 * no-op. */
void upb_status_vseterrf(upb_status *status, const char *fmt, va_list args) {
  if (status == NULL) return;
  status->ok = false;
  vsnprintf(status->msg, sizeof(status->msg), fmt, args);
  /* vsnprintf() terminates for us; this is belt-and-suspenders, matching the
   * other setters. */
  status->msg[UPB_STATUS_MAX_MESSAGE - 1] = '\0';
}
46 
/* Like upb_status_vseterrf(), but appends to any message already present
 * instead of replacing it. */
void upb_status_vappenderrf(upb_status *status, const char *fmt, va_list args) {
  size_t used;
  if (status == NULL) return;
  status->ok = false;
  used = strlen(status->msg);
  vsnprintf(status->msg + used, sizeof(status->msg) - used, fmt, args);
  status->msg[UPB_STATUS_MAX_MESSAGE - 1] = '\0';
}
55 
56 /* upb_alloc ******************************************************************/
57 
/* Default allocation backend: forwards to the C runtime's realloc()/free().
 * |alloc| and |oldsize| are ignored by this backend. */
static void *upb_global_allocfunc(upb_alloc *alloc, void *ptr, size_t oldsize,
                                  size_t size) {
  UPB_UNUSED(alloc);
  UPB_UNUSED(oldsize);
  if (size != 0) {
    return realloc(ptr, size);
  }
  free(ptr);
  return NULL;
}
69 
/* The process-wide default allocator, backed by upb_global_allocfunc()
 * (i.e. malloc/realloc/free). */
upb_alloc upb_alloc_global = {&upb_global_allocfunc};
71 
72 /* upb_arena ******************************************************************/
73 
74 /* Be conservative and choose 16 in case anyone is using SSE. */
75 
/* One allocation block in an arena group's freelist.  The usable memory
 * region follows this header; cleanup_ent records are stacked downward from
 * the end of the block (see upb_arena_addcleanup()). */
struct mem_block {
  struct mem_block *next;  /* Next block in the owning root's freelist. */
  uint32_t size;           /* Total block size in bytes, header included. */
  uint32_t cleanups;       /* Count of cleanup_ent records at the tail. */
  /* Data follows. */
};

/* A registered cleanup callback plus its user data, stored at the end of a
 * mem_block. */
typedef struct cleanup_ent {
  upb_cleanup_func *cleanup;
  void *ud;
} cleanup_ent;

/* Header size rounded up to 16 so block data stays 16-byte aligned
 * (conservative in case anyone is using SSE; see comment above). */
static const size_t memblock_reserve = UPB_ALIGN_UP(sizeof(mem_block), 16);
89 
/* Returns the root of the union-find tree containing |a|, compressing the
 * path as it walks (path splitting keeps time complexity down, see:
 *   https://en.wikipedia.org/wiki/Disjoint-set_data_structure). */
static upb_arena *arena_findroot(upb_arena *a) {
  upb_arena *parent = a->parent;
  while (parent != a) {
    /* Re-point |a| at its grandparent before stepping up (path splitting). */
    a->parent = parent->parent;
    a = parent;
    parent = a->parent;
  }
  return a;
}
100 
/* Links the freshly-obtained memory at |ptr| (at least |size| bytes) into
 * |root|'s freelist and makes it the active allocation region for arena |a|.
 * The mem_block header is placed at the start of |ptr|. */
static void upb_arena_addblock(upb_arena *a, upb_arena *root, void *ptr,
                               size_t size) {
  mem_block *block = ptr;

  /* The block is for arena |a|, but should appear in the freelist of |root|. */
  block->next = root->freelist;
  block->size = (uint32_t)size;
  block->cleanups = 0;
  root->freelist = block;
  a->last_size = block->size;
  /* Blocks are prepended, so the first block ever added remains the tail. */
  if (!root->freelist_tail) root->freelist_tail = block;

  /* Usable region starts after the (16-byte aligned) header and runs to the
   * end of the block. */
  a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
  a->head.end = UPB_PTR_AT(block, size, char);
  /* Cleanup records for |a| now accumulate at this block's tail. */
  a->cleanups = &block->cleanups;

  UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
}
119 
/* Obtains a new block large enough for |size| bytes (plus header) from the
 * root's block allocator and installs it as |a|'s active region.  Block
 * sizes grow geometrically (at least double the previous block).  Returns
 * false on allocation failure. */
static bool upb_arena_allocblock(upb_arena *a, size_t size) {
  upb_arena *root = arena_findroot(a);
  size_t alloc_size = UPB_MAX(size, a->last_size * 2) + memblock_reserve;
  mem_block *block = upb_malloc(root->block_alloc, alloc_size);

  if (block == NULL) return false;
  upb_arena_addblock(a, root, block, alloc_size);
  return true;
}
129 
/* Slow path for upb_arena_malloc(): the current block is exhausted, so grab
 * a fresh one and retry the allocation.  Returns NULL on out-of-memory. */
void *_upb_arena_slowmalloc(upb_arena *a, size_t size) {
  if (upb_arena_allocblock(a, size)) {
    UPB_ASSERT(_upb_arenahas(a) >= size);
    return upb_arena_malloc(a, size);
  }
  return NULL;  /* Out of memory. */
}
135 
/* upb_alloc vtable entry for arenas.  The cast is valid because the
 * upb_alloc header is the initial member of upb_arena, so the two alias. */
static void *upb_arena_doalloc(upb_alloc *alloc, void *ptr, size_t oldsize,
                               size_t size) {
  return upb_arena_realloc((upb_arena*)alloc, ptr, oldsize, size);
}
141 
142 /* Public Arena API ***********************************************************/
143 
/* Slow path for upb_arena_init(): the user-supplied region (if any) is too
 * small to hold the arena struct, so the first block is malloc'd from
 * |alloc| instead.  Returns NULL if |alloc| is NULL or allocation fails.
 * Note the incoming |mem|/|n| are intentionally discarded and replaced. */
upb_arena *arena_initslow(void *mem, size_t n, upb_alloc *alloc) {
  const size_t first_block_overhead = sizeof(upb_arena) + memblock_reserve;
  upb_arena *a;

  /* We need to malloc the initial block. */
  n = first_block_overhead + 256;
  if (!alloc || !(mem = upb_malloc(alloc, n))) {
    return NULL;
  }

  /* The arena struct itself lives at the very end of the allocation. */
  a = UPB_PTR_AT(mem, n - sizeof(*a), upb_arena);
  n -= sizeof(*a);

  a->head.alloc.func = &upb_arena_doalloc;
  a->block_alloc = alloc;
  a->parent = a;        /* A fresh arena is its own union-find root. */
  a->refcount = 1;
  a->freelist = NULL;
  a->freelist_tail = NULL;

  /* Initializes head.ptr/head.end/cleanups/last_size for us. */
  upb_arena_addblock(a, a, mem, n);

  return a;
}
168 
upb_arena_init(void * mem,size_t n,upb_alloc * alloc)169 upb_arena *upb_arena_init(void *mem, size_t n, upb_alloc *alloc) {
170   upb_arena *a;
171 
172   /* Round block size down to alignof(*a) since we will allocate the arena
173    * itself at the end. */
174   n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_arena));
175 
176   if (UPB_UNLIKELY(n < sizeof(upb_arena))) {
177     return arena_initslow(mem, n, alloc);
178   }
179 
180   a = UPB_PTR_AT(mem, n - sizeof(*a), upb_arena);
181 
182   a->head.alloc.func = &upb_arena_doalloc;
183   a->block_alloc = alloc;
184   a->parent = a;
185   a->refcount = 1;
186   a->last_size = UPB_MAX(128, n);
187   a->head.ptr = mem;
188   a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
189   a->freelist = NULL;
190   a->cleanups = NULL;
191 
192   return a;
193 }
194 
/* Runs all registered cleanup functions and frees every block in the group.
 * Must only be called on a root arena whose refcount has reached zero. */
static void arena_dofree(upb_arena *a) {
  mem_block *block = a->freelist;
  UPB_ASSERT(a->parent == a);
  UPB_ASSERT(a->refcount == 0);

  while (block) {
    /* Load first since we are deleting block. */
    mem_block *next = block->next;

    if (block->cleanups > 0) {
      /* Cleanup records are stacked at the block's tail; walk them from the
       * lowest record up to the end of the block. */
      cleanup_ent *end = UPB_PTR_AT(block, block->size, void);
      cleanup_ent *ptr = end - block->cleanups;

      for (; ptr < end; ptr++) {
        ptr->cleanup(ptr->ud);
      }
    }

    upb_free(a->block_alloc, block);
    block = next;
  }
}
217 
/* Drops one reference from the fused group containing |a|; when the last
 * reference is gone, runs cleanups and releases every block. */
void upb_arena_free(upb_arena *a) {
  upb_arena *root = arena_findroot(a);
  root->refcount--;
  if (root->refcount == 0) {
    arena_dofree(root);
  }
}
222 
/* Registers |func| to be called with |ud| when the arena group is freed.
 * The record is carved off the top (end) of the current block, shrinking the
 * space available for ordinary allocations.  Returns false on OOM. */
bool upb_arena_addcleanup(upb_arena *a, void *ud, upb_cleanup_func *func) {
  cleanup_ent *ent;

  /* |a->cleanups| is NULL when the arena has no block of its own yet (the
   * upb_arena_init() fast path). */
  if (!a->cleanups || _upb_arenahas(a) < sizeof(cleanup_ent)) {
    if (!upb_arena_allocblock(a, 128)) return false;  /* Out of memory. */
    UPB_ASSERT(_upb_arenahas(a) >= sizeof(cleanup_ent));
  }

  /* Bump the end pointer down and place the record in the reclaimed space;
   * the counter lives in the block header (see upb_arena_addblock()). */
  a->head.end -= sizeof(cleanup_ent);
  ent = (cleanup_ent*)a->head.end;
  (*a->cleanups)++;
  UPB_UNPOISON_MEMORY_REGION(ent, sizeof(cleanup_ent));

  ent->cleanup = func;
  ent->ud = ud;

  return true;
}
241 
/* Merges the lifetimes of |a1| and |a2|: after fusing, the whole group is
 * freed only when every arena in it has been freed.  Implemented as a
 * union-find union with the smaller tree joined under the larger. */
void upb_arena_fuse(upb_arena *a1, upb_arena *a2) {
  upb_arena *r1 = arena_findroot(a1);
  upb_arena *r2 = arena_findroot(a2);

  if (r1 == r2) return;  /* Already fused. */

  /* We want to join the smaller tree to the larger tree.
   * So swap first if they are backwards. */
  if (r1->refcount < r2->refcount) {
    upb_arena *tmp = r1;
    r1 = r2;
    r2 = tmp;
  }

  /* r1 takes over r2's freelist and refcount. */
  r1->refcount += r2->refcount;
  if (r2->freelist_tail) {
    UPB_ASSERT(r2->freelist_tail->next == NULL);
    /* Prepend r2's block list to r1's. */
    r2->freelist_tail->next = r1->freelist;
    r1->freelist = r2->freelist;
    /* BUGFIX: if r1 had no blocks of its own, the combined list's tail is
     * r2's tail.  Leaving r1->freelist_tail NULL would let a later
     * upb_arena_addblock() install a prepended block (whose ->next is
     * non-NULL) as the tail, breaking the assertion above on a subsequent
     * fuse and corrupting the splice. */
    if (!r1->freelist_tail) r1->freelist_tail = r2->freelist_tail;
  }
  r2->parent = r1;
}
265