Lines matching refs:size in mm/slob.c (the Linux kernel's SLOB allocator). Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "member" mark matches in a parameter list or struct definition.
126 #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT) argument
135 int size; member
146 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
151 if (size > 1) { in set_slob()
152 s[0].units = size; in set_slob()
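Lines 126-152 are SLOB's free-list bookkeeping: sizes are measured in units of sizeof(slob_t), and a free block stores its size in its first unit and the page-relative offset of the next free block in its second; a one-unit block has no room for both, so it stores the negated offset instead. A minimal sketch of that encoding, condensed from the surrounding mm/slob.c code (slobidx_t is s16 on 4K pages; PAGE_MASK is given a 4K stand-in here):

#define PAGE_MASK (~(4096UL - 1))       /* stand-in for the kernel macro */

typedef short slobidx_t;
typedef struct slob_block { slobidx_t units; } slob_t;

static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
        /* 'next' is encoded as an offset from the page base. */
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t offset = next - base;

        if (size > 1) {         /* two slots: [size, next-offset] */
                s[0].units = size;
                s[1].units = offset;
        } else                  /* one slot: negative marks a 1-unit block */
                s[0].units = -offset;
}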
237 static void *slob_page_alloc(struct page *sp, size_t size, int align, in slob_page_alloc() argument
241 int delta = 0, units = SLOB_UNITS(size); in slob_page_alloc()
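slob_page_alloc() first converts the byte request to units (line 241), then scans the page's free list first-fit; when alignment is requested it computes a delta of units to skip so that the address align_offset bytes into the block is aligned (align_offset lets kmalloc() align its payload rather than its size prefix). A standalone demo of just that arithmetic; the address and constants are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a)      (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define SLOB_UNIT        2                      /* sizeof(slob_t) on 4K pages */
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1) / SLOB_UNIT)

int main(void)
{
        uintptr_t cur = 0x1004;         /* hypothetical free-block address */
        int align = 64, align_offset = 8;

        /* Align the address align_offset bytes into the block, not the
         * block start itself, as slob_page_alloc() does. */
        uintptr_t aligned = ALIGN(cur + align_offset, align) - align_offset;
        long delta = (long)(aligned - cur) / SLOB_UNIT;  /* units skipped */

        printf("100 bytes = %zu units\n", SLOB_UNITS((size_t)100));
        printf("aligned start %#lx, delta %ld units\n",
               (unsigned long)aligned, delta);
        return 0;
}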
301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node, in slob_alloc() argument
310 if (size < SLOB_BREAK1) in slob_alloc()
312 else if (size < SLOB_BREAK2) in slob_alloc()
330 if (sp->units < SLOB_UNITS(size)) in slob_alloc()
333 b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list); in slob_alloc()
370 b = slob_page_alloc(sp, size, align, align_offset, &_unused); in slob_alloc()
375 memset(b, 0, size); in slob_alloc()
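slob_alloc() keeps three global free lists segregated by request size (SLOB_BREAK1 is 256 and SLOB_BREAK2 is 1024 bytes in mm/slob.c, lines 310-312; the same test reappears in slob_free() at 417-419). It skips any page whose free-unit count cannot cover the request (line 330), falls back to a fresh page when the whole list fails (line 370), and zeroes the block afterwards for __GFP_ZERO (line 375). A compilable mirror of the class selection:

#include <stddef.h>

#define SLOB_BREAK1 256                 /* values as defined in mm/slob.c */
#define SLOB_BREAK2 1024

enum slob_list_id { SLOB_LIST_SMALL, SLOB_LIST_MEDIUM, SLOB_LIST_LARGE };

/* Mirror of the list selection at the top of slob_alloc(): segregating
 * requests keeps small allocations from chewing up pages that still
 * hold large contiguous free runs. */
static enum slob_list_id slob_list_for(size_t size)
{
        if (size < SLOB_BREAK1)
                return SLOB_LIST_SMALL;
        else if (size < SLOB_BREAK2)
                return SLOB_LIST_MEDIUM;
        return SLOB_LIST_LARGE;
}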
382 static void slob_free(void *block, int size) in slob_free() argument
392 BUG_ON(!size); in slob_free()
395 units = SLOB_UNITS(size); in slob_free()
417 if (size < SLOB_BREAK1) in slob_free()
419 else if (size < SLOB_BREAK2) in slob_free()
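slob_free() converts the byte count back to units the same way (line 395) and reuses the break points (lines 417-419) when a formerly full page becomes partially free and must rejoin the right list. Before that, it checks whether this free returns every unit in the page, in which case the page goes straight back to the page allocator. A small mirror of that test (sp->units in the kernel counts the page's free units):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE        4096
#define SLOB_UNIT        2              /* sizeof(slob_t) on 4K pages */
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1) / SLOB_UNIT)

/* Mirror of the early-exit in slob_free(): if freeing 'size' bytes makes
 * the whole page free, skip the free lists and release the page itself. */
static bool page_becomes_empty(int free_units_on_page, size_t size)
{
        return free_units_on_page + SLOB_UNITS(size) == SLOB_UNITS(PAGE_SIZE);
}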
469 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node() argument
480 if (size < PAGE_SIZE - minalign) { in __do_kmalloc_node()
487 if (is_power_of_2(size)) in __do_kmalloc_node()
488 align = max(minalign, (int) size); in __do_kmalloc_node()
490 if (!size) in __do_kmalloc_node()
493 m = slob_alloc(size + minalign, gfp, align, node, minalign); in __do_kmalloc_node()
497 *m = size; in __do_kmalloc_node()
501 size, size + minalign, gfp, node); in __do_kmalloc_node()
503 unsigned int order = get_order(size); in __do_kmalloc_node()
510 size, PAGE_SIZE << order, gfp, node); in __do_kmalloc_node()
513 kmemleak_alloc(ret, size, 1, gfp); in __do_kmalloc_node()
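SLOB's kmalloc() has no per-size caches, so __do_kmalloc_node() makes each object self-describing. For requests under PAGE_SIZE - minalign it allocates size + minalign bytes (line 493), records the request in the first word (line 497), and returns the address after that prefix; kfree() later reads the prefix back to know how much to release. Power-of-two sizes additionally get natural alignment (lines 487-488), and anything bigger goes straight to the page allocator at get_order(size) (line 503). A userspace sketch of the prefix bookkeeping only, with malloc standing in for slob_alloc() and my_kmalloc/my_kfree being hypothetical names:

#include <stdlib.h>

#define MINALIGN 8      /* stands in for ARCH_KMALLOC_MINALIGN */

static void *my_kmalloc(size_t size)
{
        /* Over-allocate by MINALIGN, stash the request size in the
         * prefix (mm/slob.c line 497: *m = size), return the payload. */
        unsigned int *m = malloc(size + MINALIGN);

        if (!m)
                return NULL;
        *m = (unsigned int)size;
        return (void *)((char *)m + MINALIGN);
}

static void my_kfree(void *p)
{
        /* Walk back to the prefix; in the kernel the recovered size
         * feeds slob_free(m, *m + minalign). */
        unsigned int *m = (unsigned int *)((char *)p - MINALIGN);

        free(m);
}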
517 void *__kmalloc(size_t size, gfp_t gfp) in __kmalloc() argument
519 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_); in __kmalloc()
523 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) in __kmalloc_track_caller() argument
525 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); in __kmalloc_track_caller()
529 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, in __kmalloc_node_track_caller() argument
532 return __do_kmalloc_node(size, gfp, node, caller); in __kmalloc_node_track_caller()
586 c->size += sizeof(struct slob_rcu); in __kmem_cache_create()
601 if (c->size < PAGE_SIZE) { in slob_alloc_node()
602 b = slob_alloc(c->size, flags, c->align, node, 0); in slob_alloc_node()
604 SLOB_UNITS(c->size) * SLOB_UNIT, in slob_alloc_node()
607 b = slob_new_pages(flags, get_order(c->size), node); in slob_alloc_node()
609 PAGE_SIZE << get_order(c->size), in slob_alloc_node()
618 kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); in slob_alloc_node()
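A SLOB "cache" is little more than a recorded size and alignment. slob_alloc_node() dispatches per object: sizes below PAGE_SIZE come from the unit allocator (line 602) and are charged as whole SLOB units in the trace event (line 604), while larger objects take whole pages (lines 607-609). The extra sizeof(struct slob_rcu) added at cache creation for SLAB_TYPESAFE_BY_RCU caches (line 586) reserves room for the deferred-free trailer shown further down. A compilable mirror of the charged sizes:

#include <stddef.h>

#define PAGE_SIZE        4096
#define SLOB_UNIT        2
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1) / SLOB_UNIT)

/* What the trace events in slob_alloc_node() report as the allocated
 * size: unit-rounded below PAGE_SIZE, page-order-rounded above it. */
static size_t charged_bytes(size_t c_size)
{
        if (c_size < PAGE_SIZE)
                return SLOB_UNITS(c_size) * SLOB_UNIT;

        size_t bytes = PAGE_SIZE;       /* PAGE_SIZE << get_order(c_size) */
        while (bytes < c_size)
                bytes <<= 1;
        return bytes;
}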
629 void *__kmalloc_node(size_t size, gfp_t gfp, int node) in __kmalloc_node() argument
631 return __do_kmalloc_node(size, gfp, node, _RET_IP_); in __kmalloc_node()
642 static void __kmem_cache_free(void *b, int size) in __kmem_cache_free() argument
644 if (size < PAGE_SIZE) in __kmem_cache_free()
645 slob_free(b, size); in __kmem_cache_free()
647 slob_free_pages(b, get_order(size)); in __kmem_cache_free()
653 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); in kmem_rcu_free()
655 __kmem_cache_free(b, slob_rcu->size); in kmem_rcu_free()
663 slob_rcu = b + (c->size - sizeof(struct slob_rcu)); in kmem_cache_free()
664 slob_rcu->size = c->size; in kmem_cache_free()
667 __kmem_cache_free(b, c->size); in kmem_cache_free()
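For SLAB_TYPESAFE_BY_RCU caches the free is deferred: kmem_cache_free() writes a struct slob_rcu trailer into the last bytes of the object (lines 663-664; the space was reserved back at line 586) and hands it to call_rcu(). After the grace period, kmem_rcu_free() walks from the trailer back to the object start (line 653) and frees by the recorded size (line 655). A sketch of just the pointer arithmetic, with a stand-in rcu_head:

struct rcu_head {                       /* stand-in for the kernel type */
        struct rcu_head *next;
        void (*func)(struct rcu_head *);
};

struct slob_rcu {                       /* trailer layout as in mm/slob.c */
        struct rcu_head head;
        int size;
};

/* Line 663: the trailer occupies the object's last bytes. */
static struct slob_rcu *rcu_trailer(void *obj, int cache_size)
{
        return (struct slob_rcu *)((char *)obj + cache_size
                                   - sizeof(struct slob_rcu));
}

/* Line 653: recover the object start from the trailer so the whole
 * object can be passed to __kmem_cache_free(b, slob_rcu->size). */
static void *object_from_trailer(struct slob_rcu *slob_rcu)
{
        return (char *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
}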
674 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
676 __kmem_cache_free_bulk(s, size, p); in kmem_cache_free_bulk()
680 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
683 return __kmem_cache_alloc_bulk(s, flags, size, p); in kmem_cache_alloc_bulk()
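SLOB has no fast bulk path: both entry points (lines 674-683) just forward to the generic fallbacks in mm/slab_common.c, which loop over the single-object entry points. A condensed sketch of what those fallbacks amount to (error handling simplified; kernel context assumed):

#include <linux/slab.h>

/* Roughly what __kmem_cache_free_bulk()/__kmem_cache_alloc_bulk() do:
 * plain loops, no batching. */
static void fallback_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++)
                kmem_cache_free(s, p[i]);
}

static int fallback_alloc_bulk(struct kmem_cache *s, gfp_t flags,
                               size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                p[i] = kmem_cache_alloc(s, flags);
                if (!p[i]) {
                        fallback_free_bulk(s, i, p);    /* unwind */
                        return 0;
                }
        }
        return (int)nr;
}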
704 .size = sizeof(struct kmem_cache),
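The last match is the bootstrap cache: struct kmem_cache instances must come from somewhere before any cache exists, so mm/slob.c declares one statically and installs it in kmem_cache_init(). The initializer, recalled from the same file (fields other than .size are approximate):

struct kmem_cache kmem_cache_boot = {
        .name = "kmem_cache",
        .size = sizeof(struct kmem_cache),
        .flags = SLAB_PANIC,
        .align = ARCH_KMALLOC_MINALIGN,
};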