Lines matching refs: flags — each entry gives the source line number, the matching code, and the enclosing function; entries suffixed "argument" or "local" are the lines where flags is declared as a function parameter or a local variable.
122 return unlikely(s->flags & SLAB_DEBUG_FLAGS); in kmem_cache_debug()
130 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) in fixup_red_left()
355 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
361 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
373 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
409 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
417 unsigned long flags; in cmpxchg_double_slab() local
419 local_irq_save(flags); in cmpxchg_double_slab()
426 local_irq_restore(flags); in cmpxchg_double_slab()
430 local_irq_restore(flags); in cmpxchg_double_slab()
461 if (s->flags & SLAB_RED_ZONE) in size_from_object()
469 if (s->flags & SLAB_RED_ZONE) in restore_red_left()
576 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
605 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
615 page, page->objects, page->inuse, page->freelist, page->flags); in print_page_info()
659 if (s->flags & SLAB_RED_ZONE) in print_trailer()
667 if (s->flags & SLAB_RED_ZONE) in print_trailer()
676 if (s->flags & SLAB_STORE_USER) in print_trailer()
714 if (s->flags & SLAB_RED_ZONE) in init_object()
717 if (s->flags & __OBJECT_POISON) { in init_object()
722 if (s->flags & SLAB_RED_ZONE) in init_object()
805 if (s->flags & SLAB_STORE_USER) in check_pad_bytes()
828 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
860 if (s->flags & SLAB_RED_ZONE) { in check_object()
869 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
876 if (s->flags & SLAB_POISON) { in check_object()
877 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
993 if (s->flags & SLAB_TRACE) { in trace()
1014 if (!(s->flags & SLAB_STORE_USER)) in add_full()
1023 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
1070 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) in setup_object_debug()
1080 if (!(s->flags & SLAB_POISON)) in setup_page_debug()
1109 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in alloc_debug_processing()
1115 if (s->flags & SLAB_STORE_USER) in alloc_debug_processing()
1176 unsigned long uninitialized_var(flags); in free_debug_processing()
1179 spin_lock_irqsave(&n->list_lock, flags); in free_debug_processing()
1182 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1190 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1195 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
1214 spin_unlock_irqrestore(&n->list_lock, flags); in free_debug_processing()
1305 slab_flags_t flags, const char *name, in kmem_cache_flags() argument
1313 return flags | slub_debug; in kmem_cache_flags()
1330 flags |= slub_debug; in kmem_cache_flags()
1339 return flags; in kmem_cache_flags()
1364 slab_flags_t flags, const char *name, in kmem_cache_flags() argument
1367 return flags; in kmem_cache_flags()
1388 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) in kmalloc_large_node_hook() argument
1390 ptr = kasan_kmalloc_large(ptr, size, flags); in kmalloc_large_node_hook()
1392 kmemleak_alloc(ptr, size, 1, flags); in kmalloc_large_node_hook()
1404 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1413 unsigned long flags; in slab_free_hook() local
1415 local_irq_save(flags); in slab_free_hook()
1417 local_irq_restore(flags); in slab_free_hook()
1420 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1450 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad in slab_free_freelist_hook()
1489 gfp_t flags, int node, struct kmem_cache_order_objects oo) in alloc_slab_page() argument
1495 page = alloc_pages(flags, order); in alloc_slab_page()
1497 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
1499 if (page && charge_slab_page(page, flags, order, s)) { in alloc_slab_page()
1616 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1625 flags &= gfp_allowed_mask; in allocate_slab()
1627 if (gfpflags_allow_blocking(flags)) in allocate_slab()
1630 flags |= s->allocflags; in allocate_slab()
1636 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; in allocate_slab()
1643 alloc_gfp = flags; in allocate_slab()
1686 if (gfpflags_allow_blocking(flags)) in allocate_slab()
1696 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
1698 if (unlikely(flags & GFP_SLAB_BUG_MASK)) { in new_slab()
1699 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; in new_slab()
1700 flags &= ~GFP_SLAB_BUG_MASK; in new_slab()
1702 invalid_mask, &invalid_mask, flags, &flags); in new_slab()
1707 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); in new_slab()
1715 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in __free_slab()
1743 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { in free_slab()
1836 struct kmem_cache_cpu *c, gfp_t flags) in get_partial_node() argument
1856 if (!pfmemalloc_match(page, flags)) in get_partial_node()
1884 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, in get_any_partial() argument
1891 enum zone_type high_zoneidx = gfp_zone(flags); in get_any_partial()
1919 zonelist = node_zonelist(mempolicy_slab_node(), flags); in get_any_partial()
1925 if (n && cpuset_zone_allowed(zone, flags) && in get_any_partial()
1927 object = get_partial_node(s, n, c, flags); in get_any_partial()
1948 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, in get_partial() argument
1959 object = get_partial_node(s, get_node(s, searchnode), c, flags); in get_partial()
1963 return get_any_partial(s, flags, c); in get_partial()
2268 unsigned long flags; in put_cpu_partial() local
2273 local_irq_save(flags); in put_cpu_partial()
2275 local_irq_restore(flags); in put_cpu_partial()
2293 unsigned long flags; in put_cpu_partial() local
2295 local_irq_save(flags); in put_cpu_partial()
2297 local_irq_restore(flags); in put_cpu_partial()
2353 unsigned long flags; in slub_cpu_dead() local
2357 local_irq_save(flags); in slub_cpu_dead()
2359 local_irq_restore(flags); in slub_cpu_dead()
2394 unsigned long flags; in count_partial() local
2398 spin_lock_irqsave(&n->list_lock, flags); in count_partial()
2401 spin_unlock_irqrestore(&n->list_lock, flags); in count_partial()
2443 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, in new_slab_objects() argument
2450 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); in new_slab_objects()
2452 freelist = get_partial(s, flags, node, c); in new_slab_objects()
2457 page = new_slab(s, flags, node); in new_slab_objects()
2636 unsigned long flags; in __slab_alloc() local
2638 local_irq_save(flags); in __slab_alloc()
2649 local_irq_restore(flags); in __slab_alloc()
2838 unsigned long uninitialized_var(flags); in __slab_free()
2848 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
2880 spin_lock_irqsave(&n->list_lock, flags); in __slab_free()
2921 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
2936 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
3135 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
3142 s = slab_pre_alloc_hook(s, flags); in kmem_cache_alloc_bulk()
3161 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, in kmem_cache_alloc_bulk()
3179 if (unlikely(slab_want_init_on_alloc(flags, s))) { in kmem_cache_alloc_bulk()
3187 slab_post_alloc_hook(s, flags, size, p); in kmem_cache_alloc_bulk()
3191 slab_post_alloc_hook(s, flags, i, p); in kmem_cache_alloc_bulk()
3494 slab_flags_t flags = s->flags; in calculate_sizes() local
3511 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && in calculate_sizes()
3513 s->flags |= __OBJECT_POISON; in calculate_sizes()
3515 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
3523 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
3533 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || in calculate_sizes()
3548 if (flags & SLAB_STORE_USER) in calculate_sizes()
3556 kasan_cache_create(s, &size, &s->flags); in calculate_sizes()
3558 if (flags & SLAB_RED_ZONE) { in calculate_sizes()
3593 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
3596 if (s->flags & SLAB_CACHE_DMA32) in calculate_sizes()
3599 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
3613 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) in kmem_cache_open() argument
3615 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); in kmem_cache_open()
3628 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
3637 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) in kmem_cache_open()
3639 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
3785 void *__kmalloc(size_t size, gfp_t flags) in __kmalloc() argument
3791 return kmalloc_large(size, flags); in __kmalloc()
3793 s = kmalloc_slab(size, flags); in __kmalloc()
3798 ret = slab_alloc(s, flags, _RET_IP_); in __kmalloc()
3800 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); in __kmalloc()
3802 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc()
3809 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) in kmalloc_large_node() argument
3815 flags |= __GFP_COMP; in kmalloc_large_node()
3816 page = alloc_pages_node(node, flags, order); in kmalloc_large_node()
3823 return kmalloc_large_node_hook(ptr, size, flags); in kmalloc_large_node()
3826 void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node() argument
3832 ret = kmalloc_large_node(size, flags, node); in __kmalloc_node()
3836 flags, node); in __kmalloc_node()
3841 s = kmalloc_slab(size, flags); in __kmalloc_node()
3846 ret = slab_alloc_node(s, flags, node, _RET_IP_); in __kmalloc_node()
3848 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
3850 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc_node()
3887 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) { in __check_heap_object()
3980 unsigned long flags; in __kmem_cache_shrink() local
3989 spin_lock_irqsave(&n->list_lock, flags); in __kmem_cache_shrink()
4020 spin_unlock_irqrestore(&n->list_lock, flags); in __kmem_cache_shrink()
4272 slab_flags_t flags, void (*ctor)(void *)) in __kmem_cache_alias() argument
4276 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
4301 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) in __kmem_cache_create() argument
4305 err = kmem_cache_open(s, flags); in __kmem_cache_create()
4426 unsigned long flags; in validate_slab_node() local
4428 spin_lock_irqsave(&n->list_lock, flags); in validate_slab_node()
4438 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
4450 spin_unlock_irqrestore(&n->list_lock, flags); in validate_slab_node()
4500 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) in alloc_loc_track() argument
4507 l = (void *)__get_free_pages(flags, order); in alloc_loc_track()
4630 unsigned long flags; in list_locations() local
4636 spin_lock_irqsave(&n->list_lock, flags); in list_locations()
4641 spin_unlock_irqrestore(&n->list_lock, flags); in list_locations()
4788 char *buf, unsigned long flags) in show_slab_objects() argument
4799 if (flags & SO_CPU) { in show_slab_objects()
4813 if (flags & SO_TOTAL) in show_slab_objects()
4815 else if (flags & SO_OBJECTS) in show_slab_objects()
4826 if (flags & SO_TOTAL) in show_slab_objects()
4828 else if (flags & SO_OBJECTS) in show_slab_objects()
4850 if (flags & SO_ALL) { in show_slab_objects()
4855 if (flags & SO_TOTAL) in show_slab_objects()
4857 else if (flags & SO_OBJECTS) in show_slab_objects()
4868 if (flags & SO_PARTIAL) { in show_slab_objects()
4872 if (flags & SO_TOTAL) in show_slab_objects()
4874 else if (flags & SO_OBJECTS) in show_slab_objects()
5089 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
5095 s->flags &= ~SLAB_RECLAIM_ACCOUNT; in reclaim_account_store()
5097 s->flags |= SLAB_RECLAIM_ACCOUNT; in reclaim_account_store()
5104 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
5111 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
5124 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); in destroy_by_rcu_show()
5143 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); in sanity_checks_show()
5149 s->flags &= ~SLAB_CONSISTENCY_CHECKS; in sanity_checks_store()
5151 s->flags &= ~__CMPXCHG_DOUBLE; in sanity_checks_store()
5152 s->flags |= SLAB_CONSISTENCY_CHECKS; in sanity_checks_store()
5160 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
5174 s->flags &= ~SLAB_TRACE; in trace_store()
5176 s->flags &= ~__CMPXCHG_DOUBLE; in trace_store()
5177 s->flags |= SLAB_TRACE; in trace_store()
5185 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
5194 s->flags &= ~SLAB_RED_ZONE; in red_zone_store()
5196 s->flags |= SLAB_RED_ZONE; in red_zone_store()
5205 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
5214 s->flags &= ~SLAB_POISON; in poison_store()
5216 s->flags |= SLAB_POISON; in poison_store()
5225 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
5234 s->flags &= ~SLAB_STORE_USER; in store_user_store()
5236 s->flags &= ~__CMPXCHG_DOUBLE; in store_user_store()
5237 s->flags |= SLAB_STORE_USER; in store_user_store()
5265 if (!(s->flags & SLAB_STORE_USER)) in alloc_calls_show()
5273 if (!(s->flags & SLAB_STORE_USER)) in free_calls_show()
5283 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
5292 s->flags &= ~SLAB_FAILSLAB; in failslab_store()
5294 s->flags |= SLAB_FAILSLAB; in failslab_store()
5684 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
5686 if (s->flags & SLAB_CACHE_DMA32) in create_unique_id()
5688 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
5690 if (s->flags & SLAB_CONSISTENCY_CHECKS) in create_unique_id()
5692 if (s->flags & SLAB_ACCOUNT) in create_unique_id()
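Most of the matches above fall into a few recurring patterns: bitwise tests and updates on the cache's s->flags (a slab_flags_t holding SLAB_* behaviour and debug bits), gfp_t allocation masks threaded through the allocation paths, and an unsigned long flags local used to save and restore IRQ state around local_irq_save()/spin_lock_irqsave(). The sketch below is a minimal, self-contained userspace illustration of the first pattern only; the DEMO_* bits, the demo_cache struct and the cache_debug_enabled() helper are hypothetical stand-ins, not the kernel's definitions.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for a few slab_flags_t-style bits. */
	#define DEMO_RED_ZONE    (1UL << 0)   /* pad objects with red zones      */
	#define DEMO_POISON      (1UL << 1)   /* fill freed objects with poison  */
	#define DEMO_STORE_USER  (1UL << 2)   /* record alloc/free callers       */
	#define DEMO_DEBUG_FLAGS (DEMO_RED_ZONE | DEMO_POISON | DEMO_STORE_USER)

	struct demo_cache {
		unsigned long flags;          /* per-cache behaviour bits */
	};

	/* Mirrors the "s->flags & SLAB_DEBUG_FLAGS" style tests in the listing. */
	static bool cache_debug_enabled(const struct demo_cache *s)
	{
		return (s->flags & DEMO_DEBUG_FLAGS) != 0;
	}

	int main(void)
	{
		struct demo_cache s = { .flags = DEMO_RED_ZONE | DEMO_STORE_USER };

		if (cache_debug_enabled(&s) && (s.flags & DEMO_RED_ZONE))
			printf("red zoning enabled\n");

		/* Clearing a bit, as the sysfs *_store handlers in the listing do. */
		s.flags &= ~DEMO_RED_ZONE;
		printf("red zone bit now %s\n",
		       (s.flags & DEMO_RED_ZONE) ? "set" : "clear");
		return 0;
	}

The other two uses are unrelated meanings that merely share the identifier: gfp_t flags carries allocation constraints (e.g. masked with GFP_RECLAIM_MASK or ORed with __GFP_COMP), and the unsigned long flags locals exist only to hold the saved interrupt state for the spin_lock_irqsave()/local_irq_save() pairs visible throughout the listing.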