Lines matching refs: b (uses of the variable b in the Linux kernel SLOB allocator, mm/slob.c)

207 static void slob_free_pages(void *b, int order)  in slob_free_pages()  argument
211 free_pages((unsigned long)b, order); in slob_free_pages()
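
The two matches at lines 207 and 211 cover the page-order free path: when SLOB has given an object its own pages, slob_free_pages() simply hands the whole block back to the page allocator with free_pages(). A rough user-space analogue of that pairing, assuming a 4 KiB page; the sk_* names are invented for this sketch and are not kernel API:

#include <stdlib.h>

#define SK_PAGE_SIZE 4096UL

/* counterpart of slob_new_pages(): one aligned block of 2^order "pages" */
void *sk_new_pages(int order)
{
        return aligned_alloc(SK_PAGE_SIZE, SK_PAGE_SIZE << order);
}

/* counterpart of slob_free_pages(): return the whole block in one call */
void sk_free_pages(void *b, int order)
{
        (void)order;            /* kernel: free_pages((unsigned long)b, order) */
        free(b);
}
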
273 slob_t *b = NULL; in slob_alloc() local
300 b = slob_page_alloc(sp, size, align); in slob_alloc()
301 if (!b) in slob_alloc()
315 if (!b) { in slob_alloc()
316 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); in slob_alloc()
317 if (!b) in slob_alloc()
319 sp = virt_to_page(b); in slob_alloc()
324 sp->freelist = b; in slob_alloc()
326 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); in slob_alloc()
328 b = slob_page_alloc(sp, size, align); in slob_alloc()
329 BUG_ON(!b); in slob_alloc()
332 if (unlikely((gfp & __GFP_ZERO) && b)) in slob_alloc()
333 memset(b, 0, size); in slob_alloc()
334 return b; in slob_alloc()
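
Taken together, the slob_alloc() matches (lines 273-334) trace the small-object path: try to carve the request out of an existing partially used page with slob_page_alloc(); if nothing fits, take a fresh page with slob_new_pages(), set its freelist to a single free block of SLOB_UNITS(PAGE_SIZE) units, and retry, which must now succeed (the BUG_ON at line 329); finally zero the object when __GFP_ZERO was passed. Below is a minimal user-space sketch of that control flow only, with a plain bump pointer per page instead of SLOB's unit-based free list; every sk_* name is an assumption made for the sketch:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SK_PAGE_SIZE 4096
#define SK_MAX_PAGES 16

struct sk_page {
        size_t used;                    /* bytes already handed out */
        unsigned char data[SK_PAGE_SIZE];
};

static struct sk_page *pages[SK_MAX_PAGES];
static int npages;

/* stand-in for slob_page_alloc(): carve from one page, or fail */
static void *sk_page_alloc(struct sk_page *sp, size_t size)
{
        if (sp->used + size > SK_PAGE_SIZE)
                return NULL;
        void *b = sp->data + sp->used;
        sp->used += size;
        return b;
}

static void *sk_alloc(size_t size, int zero)
{
        void *b = NULL;

        /* 1. try every existing partial page, as the list walk does */
        for (int i = 0; i < npages && !b; i++)
                b = sk_page_alloc(pages[i], size);

        /* 2. nothing fit: take a fresh page and retry, which must succeed */
        if (!b && npages < SK_MAX_PAGES) {
                struct sk_page *sp = calloc(1, sizeof(*sp));
                if (!sp)
                        return NULL;
                pages[npages++] = sp;
                b = sk_page_alloc(sp, size);
                assert(b);              /* kernel: BUG_ON(!b) */
        }

        /* 3. zero on request, like the __GFP_ZERO check at line 332 */
        if (zero && b)
                memset(b, 0, size);
        return b;
}

int main(void)
{
        char *p = sk_alloc(100, 1);
        printf("%p zeroed=%d\n", (void *)p, p && p[0] == 0);
        return 0;
}
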
343 slob_t *prev, *next, *b = (slob_t *)block; in slob_free() local
364 slob_free_pages(b, 0); in slob_free()
371 sp->freelist = b; in slob_free()
372 set_slob(b, units, in slob_free()
373 (void *)((unsigned long)(b + in slob_free()
391 if (b < (slob_t *)sp->freelist) { in slob_free()
392 if (b + units == sp->freelist) { in slob_free()
396 set_slob(b, units, sp->freelist); in slob_free()
397 sp->freelist = b; in slob_free()
401 while (b > next) { in slob_free()
406 if (!slob_last(prev) && b + units == next) { in slob_free()
408 set_slob(b, units, slob_next(next)); in slob_free()
410 set_slob(b, units, next); in slob_free()
412 if (prev + slob_units(prev) == b) { in slob_free()
413 units = slob_units(b) + slob_units(prev); in slob_free()
414 set_slob(prev, units, slob_next(b)); in slob_free()
416 set_slob(prev, slob_units(prev), b); in slob_free()
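
The slob_free() matches (lines 343-416) show the address-ordered free list with coalescing: a block freed below the current head either absorbs the head (when b + units == sp->freelist) or is linked in front of it; otherwise the list is walked while (b > next) and the block is merged with the next and/or previous entry whenever they are physically adjacent. A minimal user-space sketch of the same coalescing idea follows, using byte sizes rather than SLOB units and invented sk_* names; each freed block must be large enough to hold the list header:

#include <stddef.h>
#include <stdint.h>

struct sk_free {
        size_t size;                    /* bytes covered by this free block */
        struct sk_free *next;           /* next free block, higher address */
};

static struct sk_free *freelist;

static int sk_adjacent(struct sk_free *a, struct sk_free *b)
{
        return (uint8_t *)a + a->size == (uint8_t *)b;
}

void sk_free_block(void *block, size_t size)
{
        struct sk_free *b = block, *prev, *next;

        b->size = size;

        /* block belongs in front of the head, as at lines 391-397 */
        if (!freelist || b < freelist) {
                if (freelist && sk_adjacent(b, freelist)) {
                        b->size += freelist->size;      /* absorb old head */
                        b->next = freelist->next;
                } else {
                        b->next = freelist;
                }
                freelist = b;
                return;
        }

        /* otherwise walk until next lies past b, as at line 401 */
        prev = freelist;
        next = prev->next;
        while (next && b > next) {
                prev = next;
                next = next->next;
        }

        /* merge forward with next when they touch (lines 406-410) */
        if (next && sk_adjacent(b, next)) {
                b->size += next->size;
                b->next = next->next;
        } else {
                b->next = next;
        }

        /* merge backward with prev when they touch (lines 412-416) */
        if (sk_adjacent(prev, b)) {
                prev->size += b->size;
                prev->next = b->next;
        } else {
                prev->next = b;
        }
}
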
537 void *b; in slob_alloc_node() local
544 b = slob_alloc(c->size, flags, c->align, node); in slob_alloc_node()
545 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, in slob_alloc_node()
549 b = slob_new_pages(flags, get_order(c->size), node); in slob_alloc_node()
550 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, in slob_alloc_node()
555 if (b && c->ctor) in slob_alloc_node()
556 c->ctor(b); in slob_alloc_node()
558 kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); in slob_alloc_node()
559 return b; in slob_alloc_node()
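
slob_alloc_node() (lines 537-559) is the kmem_cache front end: objects smaller than a page come from slob_alloc(), larger ones get dedicated pages from slob_new_pages(get_order(c->size)), a tracepoint records the allocation either way, and the cache constructor c->ctor then runs on the new object. A sketch of that size-threshold dispatch plus constructor call, with malloc()/aligned_alloc() standing in for the two kernel back ends; the sk_* names are assumptions:

#include <stddef.h>
#include <stdlib.h>

#define SK_PAGE_SIZE 4096UL

struct sk_cache {
        size_t size;                    /* object size for this cache */
        void (*ctor)(void *);           /* optional constructor */
};

void *sk_cache_alloc(struct sk_cache *c)
{
        void *b;

        if (c->size < SK_PAGE_SIZE) {
                /* kernel: slob_alloc(c->size, flags, c->align, node) */
                b = malloc(c->size);
        } else {
                /* kernel: slob_new_pages(flags, get_order(c->size), node) */
                b = aligned_alloc(SK_PAGE_SIZE,
                                  (c->size + SK_PAGE_SIZE - 1) &
                                  ~(SK_PAGE_SIZE - 1));
        }

        /* run the constructor on the fresh object, as at lines 555-556 */
        if (b && c->ctor)
                c->ctor(b);
        return b;
}
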
583 static void __kmem_cache_free(void *b, int size) in __kmem_cache_free() argument
586 slob_free(b, size); in __kmem_cache_free()
588 slob_free_pages(b, get_order(size)); in __kmem_cache_free()
594 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); in kmem_rcu_free() local
596 __kmem_cache_free(b, slob_rcu->size); in kmem_rcu_free()
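
kmem_rcu_free() (lines 594-596) recovers the start of the object from a header that SLOB keeps in the object's tail: stepping back (slob_rcu->size - sizeof(struct slob_rcu)) bytes from the header yields the original pointer, which is then passed to __kmem_cache_free(). A tiny self-contained sketch of that tail-header arithmetic; the sk_* names are assumptions:

#include <assert.h>
#include <stdlib.h>

struct sk_rcu {
        size_t size;                    /* total object size, header included */
};

int main(void)
{
        size_t size = 256;
        void *obj = malloc(size);

        if (!obj)
                return 1;

        /* place the header in the last sizeof(struct sk_rcu) bytes */
        struct sk_rcu *tail = (struct sk_rcu *)((char *)obj + size - sizeof(*tail));
        tail->size = size;

        /* given only the tail header, recover the object start (line 594) */
        void *b = (char *)tail - (tail->size - sizeof(*tail));
        assert(b == obj);

        free(obj);
        return 0;
}
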
599 void kmem_cache_free(struct kmem_cache *c, void *b) in kmem_cache_free() argument
601 kmemleak_free_recursive(b, c->flags); in kmem_cache_free()
604 slob_rcu = b + (c->size - sizeof(struct slob_rcu)); in kmem_cache_free()
608 __kmem_cache_free(b, c->size); in kmem_cache_free()
611 trace_kmem_cache_free(_RET_IP_, b); in kmem_cache_free()
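
kmem_cache_free() (lines 599-611) mirrors the allocation side. In mm/slob.c, caches that defer destruction get a slob_rcu header written into the object's tail (line 604) and the real release happens later through kmem_rcu_free() and __kmem_cache_free(); otherwise __kmem_cache_free() runs immediately and routes sub-page objects to slob_free() and larger ones to slob_free_pages() (lines 583-588). The sketch below uses an ordinary pending list as a stand-in for call_rcu(), which has no direct user-space equivalent; all sk_* names are assumptions, and c->size must be at least sizeof(struct sk_rcu):

#include <stdlib.h>

struct sk_cache {
        size_t size;                    /* object size for this cache */
        int deferred;                   /* analogue of a deferred-free cache */
};

struct sk_rcu {
        struct sk_rcu *next;            /* pending-list linkage */
        size_t size;                    /* total object size */
};

static struct sk_rcu *pending;

/* stand-in for __kmem_cache_free(): slob_free() vs slob_free_pages()
 * both collapse to free() in user space */
static void sk_cache_free_now(void *b, size_t size)
{
        (void)size;
        free(b);
}

void sk_cache_free(struct sk_cache *c, void *b)
{
        if (c->deferred) {
                /* header at the tail, as at line 604 */
                struct sk_rcu *r =
                        (struct sk_rcu *)((char *)b + c->size - sizeof(*r));
                r->size = c->size;
                r->next = pending;
                pending = r;            /* kernel: hand off to call_rcu() */
        } else {
                sk_cache_free_now(b, c->size);
        }
}

/* later pass that plays the role of the RCU callback */
void sk_drain_pending(void)
{
        while (pending) {
                struct sk_rcu *r = pending;
                pending = r->next;
                /* recover the object start, as kmem_rcu_free() does */
                void *b = (char *)r - (r->size - sizeof(*r));
                sk_cache_free_now(b, r->size);
        }
}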