Lines Matching +full:- +full:- +full:needed

57 	cmask = ~((2u << (last_idx - r)) - 1);  in get_randomIdx()
60 idx = 31 - a_clz_32(mask & cmask); in get_randomIdx()
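
The two lines above build a cutoff mask from a random offset r and then take the highest available bit above that cutoff. A minimal standalone sketch of the bit manipulation (the pick_idx name, the clz32 wrapper, and the -1 fallback are illustrative assumptions; the file's own handling of an empty result is not among the matched lines):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a_clz_32(): count leading zeros of a nonzero 32-bit word. */
    static int clz32(uint32_t x) { return __builtin_clz(x); }

    /* Highest set bit of `mask` strictly above index (last_idx - r).
     * Returns -1 when no such bit exists (an assumed fallback). */
    static int pick_idx(uint32_t mask, int last_idx, int r)
    {
        uint32_t cmask = ~((2u << (last_idx - r)) - 1);
        if (!(mask & cmask)) return -1;
        return 31 - clz32(mask & cmask);
    }

    int main(void)
    {
        /* mask 0b10110 (slots 1, 2, 4 available), last_idx 4, r 2:
         * the cutoff index is 2, so only slot 4 qualifies. */
        printf("%d\n", pick_idx(0x16, 4, 2)); /* prints 4 */
        return 0;
    }
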
117 if (!ctx.avail_meta_area_count && ctx.brk!=-1) { in alloc_meta()
125 ctx.brk += -ctx.brk & (pagesize-1); in alloc_meta()
129 ctx.brk = -1; in alloc_meta()
132 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ctx.brk, new - ctx.brk, "native_heap:meta"); in alloc_meta()
135 PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0); in alloc_meta()
137 ctx.avail_meta_areas = (void *)(new - pagesize); in alloc_meta()
145 MAP_PRIVATE|MAP_ANON, -1, 0); in alloc_meta()
148 ctx.avail_meta_area_count = (n-1)*(pagesize>>12); in alloc_meta()
152 if ((uintptr_t)p & (pagesize-1)) need_unprotect = 0; in alloc_meta()
157 ctx.avail_meta_area_count--; in alloc_meta()
160 ctx.meta_area_tail->next = (void *)p; in alloc_meta()
165 ctx.meta_area_tail->check = ctx.secret; in alloc_meta()
166 ctx.avail_meta_count = ctx.meta_area_tail->nslots in alloc_meta()
167 = (4096-sizeof(struct meta_area))/sizeof *m; in alloc_meta()
168 ctx.avail_meta = ctx.meta_area_tail->slots; in alloc_meta()
170 ctx.avail_meta_count--; in alloc_meta()
172 m->prev = m->next = 0; in alloc_meta()
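
Line 167 above sizes each 4096-byte meta area by how many struct meta slots fit after the meta_area header. A small sketch of that arithmetic, using simplified stand-in struct layouts loosely modeled on upstream mallocng (the variant's real definitions may differ):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the allocator's metadata records. */
    struct meta {
        struct meta *prev, *next;
        void *mem;
        volatile int avail_mask, freed_mask;
        uintptr_t flags;   /* stands in for the last_idx/freeable/sizeclass/maplen fields */
    };

    struct meta_area {
        uint64_t check;            /* canary compared against ctx.secret */
        struct meta_area *next;    /* singly linked list of 4k meta areas */
        int nslots;
        struct meta slots[];
    };

    int main(void)
    {
        /* Mirrors line 167: how many meta slots one 4096-byte area carries. */
        size_t nslots = (4096 - sizeof(struct meta_area)) / sizeof(struct meta);
        printf("meta slots per 4k area: %zu\n", nslots);
        return 0;
    }
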
181 uint32_t mask = m->avail_mask; in try_avail()
184 if (!m->freed_mask) { in try_avail()
189 m = m->next; in try_avail()
193 mask = m->freed_mask; in try_avail()
195 // skip fully-free group unless it's the only one in try_avail()
196 // or it's a permanently non-freeable group in try_avail()
197 if (mask == (2u<<m->last_idx)-1 && m->freeable) { in try_avail()
198 m = m->next; in try_avail()
200 mask = m->freed_mask; in try_avail()
203 // activate more slots in a not-fully-active group in try_avail()
204 // if needed, but only as a last resort. prefer using in try_avail()
205 // any other group with free slots. this avoids in try_avail()
206 // touching & dirtying as-yet-unused pages. in try_avail()
207 if (!(mask & ((2u<<m->mem->active_idx)-1))) { in try_avail()
208 if (m->next != m) { in try_avail()
209 m = m->next; in try_avail()
212 int cnt = m->mem->active_idx + 2; in try_avail()
213 int size = size_classes[m->sizeclass]*UNIT; in try_avail()
216 while ((span^(span+size-1)) < 4096) { in try_avail()
220 if (cnt > m->last_idx+1) in try_avail()
221 cnt = m->last_idx+1; in try_avail()
222 m->mem->active_idx = cnt-1; in try_avail()
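
Lines 212-222 above decide how many slots of a not-fully-active group to activate: keep adding slots as long as the next one does not spill onto a fresh page. The XOR test (a^b) < 4096 holds exactly when offsets a and b fall in the same 4096-byte page. A standalone sketch with made-up stride and limit values:

    #include <stdio.h>

    #define UNIT 16

    int main(void)
    {
        int size = 18*UNIT;     /* hypothetical slot stride for one class */
        int last_idx = 24;      /* hypothetical highest slot index */
        int cnt = 0 + 2;        /* mirrors line 212 with active_idx == 0 */
        int span = UNIT + size*cnt;

        /* Grow cnt while the next slot stays within an already-touched page. */
        while ((span^(span+size-1)) < 4096) {
            cnt++;
            span += size;
        }
        if (cnt > last_idx+1)   /* mirrors the cap at lines 220-221 */
            cnt = last_idx+1;

        printf("activate %d slots (span ends at offset %d)\n", cnt, span);
        return 0;
    }
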
227 decay_bounces(m->sizeclass); in try_avail()
230 int idx = get_randomIdx(mask, m->last_idx); in try_avail()
233 first = mask&-mask; in try_avail()
235 m->avail_mask = mask-first; in try_avail()
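
Lines 233-235 claim a slot by isolating the lowest set bit of the availability mask (mask & -mask) and subtracting it out. A tiny illustration of that idiom:

    #include <stdio.h>

    int main(void)
    {
        unsigned avail_mask = 0x2c;                /* slots 2, 3, 5 free (0b101100) */
        unsigned first = avail_mask & -avail_mask; /* 0b100: lowest free slot bit */
        avail_mask -= first;                       /* slot claimed; 0b101000 remains */
        printf("claimed bit %#x, mask now %#x\n", first, avail_mask);
        return 0;
    }
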
256 // lookup max number of slots fitting in power-of-two size in alloc_group()
279 // of freed maps, and inhibit use of low-count, odd-size in alloc_group()
280 // small mappings and single-slot groups if activated. in alloc_group()
301 size_t needed = size*cnt + UNIT; in alloc_group() local
302 needed += -needed & (pagesize-1); in alloc_group()
304 // produce an individually-mmapped allocation if usage is low, in alloc_group()
309 req += -req & (pagesize-1); in alloc_group()
312 needed = req; in alloc_group()
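
Lines 302 and 309 (and 125 earlier) use the same rounding idiom: adding -x & (pagesize-1) bumps x up to the next multiple of the page size while leaving exact multiples unchanged, provided pagesize is a power of two. For example:

    #include <stdio.h>

    int main(void)
    {
        size_t pagesize = 4096;
        size_t needed = 5*1000 + 16;       /* arbitrary example request: 5016 */
        needed += -needed & (pagesize-1);  /* round up to a page multiple */
        printf("%zu\n", needed);           /* prints 8192 */
        return 0;
    }
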
316 p = mmap(0, needed, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0); in alloc_group()
322 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, needed, "native_heap:brk"); in alloc_group()
324 m->maplen = needed>>12; in alloc_group()
326 active_idx = (4096-UNIT)/size-1; in alloc_group()
327 if (active_idx > cnt-1) active_idx = cnt-1; in alloc_group()
330 int j = size_to_class(UNIT+cnt*size-IB); in alloc_group()
331 int idx = alloc_slot(j, UNIT+cnt*size-IB); in alloc_group()
337 p = enframe(g, idx, UNIT*size_classes[j]-IB, ctx.mmap_counter); in alloc_group()
338 m->maplen = 0; in alloc_group()
339 p[-3] = (p[-3]&31) | (6<<5); in alloc_group()
341 p[UNIT+i*size-4] = 0; in alloc_group()
342 active_idx = cnt-1; in alloc_group()
345 m->avail_mask = (2u<<active_idx)-1; in alloc_group()
346 m->freed_mask = (2u<<(cnt-1))-1 - m->avail_mask; in alloc_group()
347 m->mem = (void *)p; in alloc_group()
348 m->mem->meta = encode_ptr(m, ctx.secret); in alloc_group()
349 m->mem->active_idx = active_idx; in alloc_group()
350 m->last_idx = cnt-1; in alloc_group()
351 m->freeable = 1; in alloc_group()
352 m->sizeclass = sc; in alloc_group()
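
Lines 345-346 initialize a fresh group so that slots 0 through active_idx are immediately available and the remaining slots up to cnt-1 sit in freed_mask until they are activated later. A short illustration with made-up counts:

    #include <stdio.h>

    int main(void)
    {
        int active_idx = 3, cnt = 10;
        unsigned avail_mask = (2u<<active_idx)-1;            /* 0x0f: slots 0-3 */
        unsigned freed_mask = (2u<<(cnt-1))-1 - avail_mask;  /* 0x3f0: slots 4-9 */
        printf("avail %#x freed %#x\n", avail_mask, freed_mask);
        return 0;
    }
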
362 if (!g) return -1; in alloc_slot()
364 g->avail_mask--; in alloc_slot()
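
Line 364 claims slot 0 of the newly created group: because a fresh group's avail_mask is a contiguous run of low one bits, decrementing it clears exactly bit 0. For instance:

    #include <stdio.h>

    int main(void)
    {
        unsigned avail_mask = 0x0f;  /* new group with slots 0-3 available */
        avail_mask--;                /* claim slot 0: mask is now 0x0e */
        printf("%#x\n", avail_mask);
        return 0;
    }
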
379 size_t needed = n + IB + UNIT; in malloc() local
380 void *p = mmap(0, needed, PROT_READ|PROT_WRITE, in malloc()
381 MAP_PRIVATE|MAP_ANON, -1, 0); in malloc()
385 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, needed, "native_heap:mmap"); in malloc()
391 munmap(p, needed); in malloc()
394 g->mem = p; in malloc()
395 g->mem->meta = encode_ptr(g, ctx.secret); in malloc()
396 g->last_idx = 0; in malloc()
397 g->freeable = 1; in malloc()
398 g->sizeclass = 63; in malloc()
399 g->maplen = (needed+4095)/4096; in malloc()
400 g->avail_mask = g->freed_mask = 0; in malloc()
401 // use a global counter to cycle offset in in malloc()
402 // individually-mmapped allocations. in malloc()
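
Lines 379-399 handle requests above the mmap threshold: the allocation gets its own page-rounded anonymous mapping, the page count is recorded in the meta record, and the mapping is labeled via prctl so it shows up by name in /proc/<pid>/maps. A rough standalone sketch of that path (the IB and UNIT values are placeholders, not the allocator's real header sizes, and PR_SET_VMA_ANON_NAME only takes effect on kernels that support it):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    int main(void)
    {
        size_t IB = 4, UNIT = 16;             /* assumed per-slot overhead */
        size_t n = 200000;                    /* example oversized request */
        size_t needed = n + IB + UNIT;

        void *p = mmap(0, needed, PROT_READ|PROT_WRITE,
            MAP_PRIVATE|MAP_ANON, -1, 0);
        if (p == MAP_FAILED) return 1;

    #ifdef PR_SET_VMA
        /* Name the mapping, as on line 385. */
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, needed, "native_heap:mmap");
    #endif

        size_t maplen = (needed + 4095) / 4096;  /* pages, as on line 399 */
        printf("mapped %zu pages at %p\n", maplen, p);

        munmap(p, needed);
        return 0;
    }
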
421 if (!ctx.active[sc|1] || (!ctx.active[sc|1]->avail_mask in malloc()
422 && !ctx.active[sc|1]->freed_mask)) in malloc()
430 mask = g ? g->avail_mask : 0; in malloc()
433 idx = get_randomIdx(mask, g->last_idx); in malloc()
437 g->avail_mask = mask-first; in malloc()
438 else if (a_cas(&g->avail_mask, mask, mask-first)!=mask) in malloc()
441 first = mask&-mask; in malloc()
444 g->avail_mask = mask-first; in malloc()
445 else if (a_cas(&g->avail_mask, mask, mask-first)!=mask) in malloc()
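
Lines 430-445 take a slot from the active group's avail_mask, either by direct assignment or by a compare-and-swap retry when other threads may race. A rough analogue using C11 atomics in place of the allocator's internal a_cas() (the claim_slot helper is an illustration, not the file's code):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Re-read the availability mask and retry until the lowest free bit is
     * removed without racing another thread. Returns the claimed slot index,
     * or -1 when the group has no free slot. */
    static int claim_slot(_Atomic uint32_t *avail_mask)
    {
        for (;;) {
            uint32_t mask = atomic_load(avail_mask);
            if (!mask) return -1;
            uint32_t first = mask & -mask;    /* lowest available slot bit */
            if (atomic_compare_exchange_weak(avail_mask, &mask, mask - first))
                return __builtin_ctz(first);
        }
    }

    int main(void)
    {
        _Atomic uint32_t mask = 0x1a;               /* slots 1, 3, 4 free */
        printf("claimed slot %d\n", claim_slot(&mask)); /* prints 1 */
        printf("claimed slot %d\n", claim_slot(&mask)); /* prints 3 */
        return 0;
    }
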
469 return g->sizeclass >= 48 || in is_allzero()
470 get_stride(g) < UNIT*size_classes[g->sizeclass]; in is_allzero()
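
is_allzero() reports whether a just-allocated slot is guaranteed to already be zero, which lets a calloc-style caller skip the memset. A hypothetical usage sketch (zalloc and the stubbed is_known_zero predicate are illustrative only; the real decision is what lines 469-470 encode):

    #include <stdlib.h>
    #include <string.h>

    /* Stub standing in for an is_allzero()-style predicate. */
    static int is_known_zero(void *p) { (void)p; return 0; }

    static void *zalloc(size_t n)
    {
        void *p = malloc(n);
        if (!p || is_known_zero(p)) return p;  /* fresh mappings need no memset */
        return memset(p, 0, n);
    }
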