Lines matching refs: flags (in mm/slab.c)
284 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
361 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone1()
368 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone2()
369 if (cachep->flags & SLAB_STORE_USER) in dbg_redzone2()
379 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); in dbg_userword()
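The dbg_redzone1()/dbg_redzone2()/dbg_userword() accessors at 361-379 are only valid for caches created with the matching debug flags. A minimal sketch of such a cache, assuming a debug-enabled SLAB build (CONFIG_DEBUG_SLAB); the object type, cache name, and init function are hypothetical:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {                            /* hypothetical object type */
        int a;
};

static struct kmem_cache *foo_cache;    /* hypothetical cache */

static int __init foo_cache_init(void)
{
        /*
         * SLAB_RED_ZONE and SLAB_STORE_USER set up the red-zone words and
         * the last-user field that dbg_redzone1/2() and dbg_userword()
         * above BUG_ON() about.
         */
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                      SLAB_RED_ZONE | SLAB_STORE_USER, NULL);
        return foo_cache ? 0 : -ENOMEM;
}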
527 size_t align, int flags, size_t *left_over, in cache_estimate() argument
548 if (flags & CFLGS_OFF_SLAB) { in cache_estimate()
700 unsigned long flags; in recheck_pfmemalloc_active() local
705 spin_lock_irqsave(&n->list_lock, flags); in recheck_pfmemalloc_active()
720 spin_unlock_irqrestore(&n->list_lock, flags); in recheck_pfmemalloc_active()
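The "unsigned long flags" locals here (and in slab_out_of_memory() further down) hold saved interrupt state for spin_lock_irqsave(), not cache or gfp flags. A generic sketch of that idiom, with a hypothetical lock and function:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */

static void example_critical_section(void)
{
        unsigned long flags;            /* saved IRQ state, not cache/gfp flags */

        spin_lock_irqsave(&example_lock, flags);
        /* ... touch data also reachable from interrupt context ... */
        spin_unlock_irqrestore(&example_lock, flags);
}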
724 gfp_t flags, bool force_refill) in __ac_get_obj() argument
733 if (gfp_pfmemalloc_allowed(flags)) { in __ac_get_obj()
771 struct array_cache *ac, gfp_t flags, bool force_refill) in ac_get_obj() argument
776 objp = __ac_get_obj(cachep, ac, flags, force_refill); in ac_get_obj()
849 gfp_t flags) in alternate_node_alloc() argument
855 gfp_t flags, int nodeid) in ____cache_alloc_node() argument
966 unsigned long flags; in drain_alien_cache() local
974 spin_lock_irqsave(&alc->lock, flags); in drain_alien_cache()
976 spin_unlock_irqrestore(&alc->lock, flags); in drain_alien_cache()
1519 unsigned long flags; in slab_out_of_memory() local
1537 spin_lock_irqsave(&n->list_lock, flags); in slab_out_of_memory()
1550 spin_unlock_irqrestore(&n->list_lock, flags); in slab_out_of_memory()
1570 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages() argument
1576 flags |= cachep->allocflags; in kmem_getpages()
1577 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_getpages()
1578 flags |= __GFP_RECLAIMABLE; in kmem_getpages()
1580 if (memcg_charge_slab(cachep, flags, cachep->gfporder)) in kmem_getpages()
1583 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); in kmem_getpages()
1586 slab_out_of_memory(cachep, flags, nodeid); in kmem_getpages()
1595 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_getpages()
1605 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { in kmem_getpages()
1606 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); in kmem_getpages()
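kmem_getpages() at 1570-1606 merges the caller's gfp mask with the cache's allocflags and honours SLAB_RECLAIM_ACCOUNT before hitting the page allocator. An illustrative-only helper (name and signature invented here) that mirrors that merge:

#include <linux/gfp.h>
#include <linux/types.h>

/*
 * Sketch of the gfp merge done in kmem_getpages(): the caller's mask is
 * OR-ed with the per-cache allocflags, and a cache created with
 * SLAB_RECLAIM_ACCOUNT additionally gets __GFP_RECLAIMABLE so its pages
 * are grouped with other reclaimable allocations.
 */
static gfp_t example_page_gfp(gfp_t caller_flags, gfp_t cache_allocflags,
                              bool reclaim_account)
{
        gfp_t gfp = caller_flags | cache_allocflags;

        if (reclaim_account)
                gfp |= __GFP_RECLAIMABLE;
        return gfp | __GFP_NOTRACK;     /* as added at line 1583 above */
}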
1626 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_freepages()
1740 if (cachep->flags & SLAB_RED_ZONE) { in print_objinfo()
1746 if (cachep->flags & SLAB_STORE_USER) { in print_objinfo()
1832 if (cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1844 if (cachep->flags & SLAB_RED_ZONE) { in slab_destroy_debugcheck()
1876 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { in slab_destroy()
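SLAB_DESTROY_BY_RCU, tested here in slab_destroy() and again at 2114-2118, defers returning slab pages to the page allocator until after an RCU grace period. A hedged sketch of creating such a cache (cache name and object size are made up):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *rcu_obj_cache;        /* hypothetical cache */

static int __init rcu_obj_cache_init(void)
{
        /*
         * With SLAB_DESTROY_BY_RCU the pages backing freed objects survive
         * until an RCU grace period elapses, so rcu_read_lock() walkers may
         * still dereference an object and must revalidate it after lookup.
         */
        rcu_obj_cache = kmem_cache_create("rcu_objs", 64, 0,
                                          SLAB_DESTROY_BY_RCU, NULL);
        return rcu_obj_cache ? 0 : -ENOMEM;
}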
1924 size_t size, size_t align, unsigned long flags) in calculate_slab_order() argument
1934 cache_estimate(gfporder, size, align, flags, &remainder, &num); in calculate_slab_order()
1942 if (flags & CFLGS_OFF_SLAB) { in calculate_slab_order()
1968 if (flags & SLAB_RECLAIM_ACCOUNT) in calculate_slab_order()
2048 unsigned long flags, const char *name, in kmem_cache_flags() argument
2051 return flags; in kmem_cache_flags()
2056 unsigned long flags, void (*ctor)(void *)) in __kmem_cache_alias() argument
2060 cachep = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
2095 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) in __kmem_cache_create() argument
2113 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; in __kmem_cache_create()
2114 if (!(flags & SLAB_DESTROY_BY_RCU)) in __kmem_cache_create()
2115 flags |= SLAB_POISON; in __kmem_cache_create()
2117 if (flags & SLAB_DESTROY_BY_RCU) in __kmem_cache_create()
2118 BUG_ON(flags & SLAB_POISON); in __kmem_cache_create()
2131 if (flags & SLAB_RED_ZONE) { in __kmem_cache_create()
2145 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); in __kmem_cache_create()
2162 if (flags & SLAB_RED_ZONE) { in __kmem_cache_create()
2167 if (flags & SLAB_STORE_USER) { in __kmem_cache_create()
2172 if (flags & SLAB_RED_ZONE) in __kmem_cache_create()
2201 !(flags & SLAB_NOLEAKTRACE)) in __kmem_cache_create()
2206 flags |= CFLGS_OFF_SLAB; in __kmem_cache_create()
2216 left_over = calculate_slab_order(cachep, size, cachep->align, flags); in __kmem_cache_create()
2227 if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) { in __kmem_cache_create()
2228 flags &= ~CFLGS_OFF_SLAB; in __kmem_cache_create()
2232 if (flags & CFLGS_OFF_SLAB) { in __kmem_cache_create()
2241 if (size % PAGE_SIZE == 0 && flags & SLAB_POISON) in __kmem_cache_create()
2242 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); in __kmem_cache_create()
2252 cachep->flags = flags; in __kmem_cache_create()
2254 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) in __kmem_cache_create()
2259 if (flags & CFLGS_OFF_SLAB) { in __kmem_cache_create()
2487 if (cachep->flags & SLAB_POISON) in cache_init_objs()
2489 if (cachep->flags & SLAB_STORE_USER) in cache_init_objs()
2492 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs()
2501 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) in cache_init_objs()
2504 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs()
2513 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) in cache_init_objs()
2525 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) in kmem_flagcheck() argument
2528 if (flags & GFP_DMA) in kmem_flagcheck()
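kmem_flagcheck() at 2525-2528 insists that a caller passing GFP_DMA uses a DMA-capable cache; for kmalloc() the gfp mask alone selects a SLAB_CACHE_DMA kmalloc cache. A small caller-side sketch (helper name invented):

#include <linux/slab.h>

/* Hypothetical DMA-capable buffer allocation via the dma kmalloc caches. */
static void *alloc_dma_buffer(size_t len)
{
        return kmalloc(len, GFP_KERNEL | GFP_DMA);
}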
2589 gfp_t flags, int nodeid, struct page *page) in cache_grow() argument
2600 BUG_ON(flags & GFP_SLAB_BUG_MASK); in cache_grow()
2601 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); in cache_grow()
2626 kmem_flagcheck(cachep, flags); in cache_grow()
2716 if (cachep->flags & SLAB_RED_ZONE) { in cache_free_debugcheck()
2721 if (cachep->flags & SLAB_STORE_USER) in cache_free_debugcheck()
2730 if (cachep->flags & SLAB_POISON) { in cache_free_debugcheck()
2751 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, in cache_alloc_refill() argument
2832 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); in cache_alloc_refill()
2847 return ac_get_obj(cachep, ac, flags, force_refill); in cache_alloc_refill()
2851 gfp_t flags) in cache_alloc_debugcheck_before() argument
2853 might_sleep_if(flags & __GFP_WAIT); in cache_alloc_debugcheck_before()
2855 kmem_flagcheck(cachep, flags); in cache_alloc_debugcheck_before()
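cache_alloc_debugcheck_before() only inspects the caller's gfp mask: might_sleep_if(flags & __GFP_WAIT) at 2853 fires for GFP_KERNEL but not for GFP_ATOMIC. A sketch of the caller-side choice (helper name and the atomic_ctx parameter are illustrative):

#include <linux/slab.h>
#include <linux/types.h>

/*
 * GFP_KERNEL contains __GFP_WAIT and may sleep; GFP_ATOMIC does not and
 * is safe in interrupt or spinlocked context. 'cache' is whatever
 * kmem_cache the caller owns.
 */
static void *alloc_obj(struct kmem_cache *cache, bool atomic_ctx)
{
        return kmem_cache_alloc(cache, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}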
2861 gfp_t flags, void *objp, unsigned long caller) in cache_alloc_debugcheck_after() argument
2867 if (cachep->flags & SLAB_POISON) { in cache_alloc_debugcheck_after()
2879 if (cachep->flags & SLAB_STORE_USER) in cache_alloc_debugcheck_after()
2882 if (cachep->flags & SLAB_RED_ZONE) { in cache_alloc_debugcheck_after()
2899 if (cachep->ctor && cachep->flags & SLAB_POISON) in cache_alloc_debugcheck_after()
2912 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) in slab_should_failslab() argument
2917 return should_failslab(cachep->object_size, flags, cachep->flags); in slab_should_failslab()
2920 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) in ____cache_alloc() argument
2931 objp = ac_get_obj(cachep, ac, flags, false); in ____cache_alloc()
2945 objp = cache_alloc_refill(cachep, flags, force_refill); in ____cache_alloc()
2970 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) in alternate_node_alloc() argument
2974 if (in_interrupt() || (flags & __GFP_THISNODE)) in alternate_node_alloc()
2977 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) in alternate_node_alloc()
2982 return ____cache_alloc_node(cachep, flags, nid_alloc); in alternate_node_alloc()
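alternate_node_alloc() and fallback_alloc() both bail out when __GFP_THISNODE is set, so that flag pins an allocation to a single node with no cross-node fallback. A caller-side sketch (helper name invented):

#include <linux/slab.h>

/* Ask for strictly node-local objects; on failure no other node is tried. */
static void *node_local_only(struct kmem_cache *cache, int nid)
{
        return kmem_cache_alloc_node(cache, GFP_KERNEL | __GFP_THISNODE, nid);
}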
2994 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) in fallback_alloc() argument
3000 enum zone_type high_zoneidx = gfp_zone(flags); in fallback_alloc()
3005 if (flags & __GFP_THISNODE) in fallback_alloc()
3008 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); in fallback_alloc()
3012 zonelist = node_zonelist(mempolicy_slab_node(), flags); in fallback_alloc()
3022 if (cpuset_zone_allowed_hardwall(zone, flags) && in fallback_alloc()
3026 flags | GFP_THISNODE, nid); in fallback_alloc()
3043 kmem_flagcheck(cache, flags); in fallback_alloc()
3052 if (cache_grow(cache, flags, nid, page)) { in fallback_alloc()
3054 flags | GFP_THISNODE, nid); in fallback_alloc()
3077 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, in ____cache_alloc_node() argument
3125 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); in ____cache_alloc_node()
3129 return fallback_alloc(cachep, flags); in ____cache_alloc_node()
3136 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, in slab_alloc_node() argument
3143 flags &= gfp_allowed_mask; in slab_alloc_node()
3145 lockdep_trace_alloc(flags); in slab_alloc_node()
3147 if (slab_should_failslab(cachep, flags)) in slab_alloc_node()
3150 cachep = memcg_kmem_get_cache(cachep, flags); in slab_alloc_node()
3152 cache_alloc_debugcheck_before(cachep, flags); in slab_alloc_node()
3160 ptr = fallback_alloc(cachep, flags); in slab_alloc_node()
3171 ptr = ____cache_alloc(cachep, flags); in slab_alloc_node()
3176 ptr = ____cache_alloc_node(cachep, flags, nodeid); in slab_alloc_node()
3179 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); in slab_alloc_node()
3180 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, in slab_alloc_node()
3181 flags); in slab_alloc_node()
3184 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); in slab_alloc_node()
3185 if (unlikely(flags & __GFP_ZERO)) in slab_alloc_node()
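The __GFP_ZERO handling at 3185 (and at 3250 in slab_alloc()) is what backs the zeroing wrappers in slab.h. A sketch using a hypothetical cache pointer:

#include <linux/slab.h>

static void zeroed_alloc_examples(struct kmem_cache *my_cache)
{
        void *a = kzalloc(128, GFP_KERNEL);                 /* kmalloc + __GFP_ZERO */
        void *b = kmem_cache_zalloc(my_cache, GFP_KERNEL);  /* kmem_cache_alloc + __GFP_ZERO */

        kfree(a);                       /* kfree(NULL) is a no-op */
        if (b)
                kmem_cache_free(my_cache, b);
}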
3193 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) in __do_cache_alloc() argument
3198 objp = alternate_node_alloc(cache, flags); in __do_cache_alloc()
3202 objp = ____cache_alloc(cache, flags); in __do_cache_alloc()
3209 objp = ____cache_alloc_node(cache, flags, numa_mem_id()); in __do_cache_alloc()
3217 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in __do_cache_alloc() argument
3219 return ____cache_alloc(cachep, flags); in __do_cache_alloc()
3225 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) in slab_alloc() argument
3230 flags &= gfp_allowed_mask; in slab_alloc()
3232 lockdep_trace_alloc(flags); in slab_alloc()
3234 if (slab_should_failslab(cachep, flags)) in slab_alloc()
3237 cachep = memcg_kmem_get_cache(cachep, flags); in slab_alloc()
3239 cache_alloc_debugcheck_before(cachep, flags); in slab_alloc()
3241 objp = __do_cache_alloc(cachep, flags); in slab_alloc()
3243 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); in slab_alloc()
3244 kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, in slab_alloc()
3245 flags); in slab_alloc()
3249 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); in slab_alloc()
3250 if (unlikely(flags & __GFP_ZERO)) in slab_alloc()
3362 kmemleak_free_recursive(objp, cachep->flags); in __cache_free()
3395 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in kmem_cache_alloc() argument
3397 void *ret = slab_alloc(cachep, flags, _RET_IP_); in kmem_cache_alloc()
3400 cachep->object_size, cachep->size, flags); in kmem_cache_alloc()
3408 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) in kmem_cache_alloc_trace() argument
3412 ret = slab_alloc(cachep, flags, _RET_IP_); in kmem_cache_alloc_trace()
3415 size, cachep->size, flags); in kmem_cache_alloc_trace()
3433 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) in kmem_cache_alloc_node() argument
3435 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); in kmem_cache_alloc_node()
3439 flags, nodeid); in kmem_cache_alloc_node()
3447 gfp_t flags, in kmem_cache_alloc_node_trace() argument
3453 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); in kmem_cache_alloc_node_trace()
3457 flags, nodeid); in kmem_cache_alloc_node_trace()
3464 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) in __do_kmalloc_node() argument
3468 cachep = kmalloc_slab(size, flags); in __do_kmalloc_node()
3471 return kmem_cache_alloc_node_trace(cachep, flags, node, size); in __do_kmalloc_node()
3474 void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node() argument
3476 return __do_kmalloc_node(size, flags, node, _RET_IP_); in __kmalloc_node()
3480 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, in __kmalloc_node_track_caller() argument
3483 return __do_kmalloc_node(size, flags, node, caller); in __kmalloc_node_track_caller()
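__kmalloc_node() and its track_caller variant are what kmalloc_node() resolves to, with the caller's gfp flags passed through unchanged. A caller-side sketch (helper name and size invented, node id illustrative):

#include <linux/slab.h>

static void *per_node_buffer(int nid)
{
        return kmalloc_node(512, GFP_KERNEL, nid);
}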
3494 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, in __do_kmalloc() argument
3500 cachep = kmalloc_slab(size, flags); in __do_kmalloc()
3503 ret = slab_alloc(cachep, flags, caller); in __do_kmalloc()
3506 size, cachep->size, flags); in __do_kmalloc()
3511 void *__kmalloc(size_t size, gfp_t flags) in __kmalloc() argument
3513 return __do_kmalloc(size, flags, _RET_IP_); in __kmalloc()
3517 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) in __kmalloc_track_caller() argument
3519 return __do_kmalloc(size, flags, caller); in __kmalloc_track_caller()
3533 unsigned long flags; in kmem_cache_free() local
3538 local_irq_save(flags); in kmem_cache_free()
3540 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) in kmem_cache_free()
3543 local_irq_restore(flags); in kmem_cache_free()
3561 unsigned long flags; in kfree() local
3567 local_irq_save(flags); in kfree()
3574 local_irq_restore(flags); in kfree()
4132 if (!(cachep->flags & SLAB_STORE_USER)) in leaks_show()
4134 if (!(cachep->flags & SLAB_RED_ZONE)) in leaks_show()