Lines matching references to the identifier c in mm/slub.c. In every hit below, c is SLUB's per-cpu state, struct kmem_cache_cpu *c. The leading number on each line is the mm/slub.c source line, and the trailing note names the enclosing function and whether c is a local variable or a function argument there. (The layout, with struct page cpu slabs, local_lock, and slub_get_cpu_ptr(), matches a kernel around v5.15.)
2319 struct kmem_cache_cpu *c; in init_kmem_cache_cpus() local
2322 c = per_cpu_ptr(s->cpu_slab, cpu); in init_kmem_cache_cpus()
2323 local_lock_init(&c->lock); in init_kmem_cache_cpus()
2324 c->tid = init_tid(cpu); in init_kmem_cache_cpus()
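The first group (2319-2324) is the per-cpu initialization loop. A hedged reconstruction of the whole helper; the signature and the for_each_possible_cpu() loop are recalled from the surrounding source, not shown in the matches above:

        static void init_kmem_cache_cpus(struct kmem_cache *s)
        {
                int cpu;
                struct kmem_cache_cpu *c;

                for_each_possible_cpu(cpu) {
                        c = per_cpu_ptr(s->cpu_slab, cpu);
                        local_lock_init(&c->lock);      /* 2323 */
                        c->tid = init_tid(cpu);         /* 2324: per-cpu transaction id */
                }
        }

The tid seeded here is what the lockless fastpaths later compare and bump; seeding it per cpu keeps tids from different cpus distinct.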
2546 struct kmem_cache_cpu *c) in unfreeze_partials_cpu() argument
2550 partial_page = slub_percpu_partial(c); in unfreeze_partials_cpu()
2551 c->partial = NULL; in unfreeze_partials_cpu()
2612 struct kmem_cache_cpu *c) { } in unfreeze_partials_cpu() argument
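Lines 2546-2551 detach the per-cpu partial list during a flush; line 2612 is the empty stub compiled when CONFIG_SLUB_CPU_PARTIAL is off. A reconstruction, with the partial_page local and the __unfreeze_partials() call recalled rather than taken from the matches:

        static void unfreeze_partials_cpu(struct kmem_cache *s,
                                          struct kmem_cache_cpu *c)
        {
                struct page *partial_page;

                /* take ownership of the percpu partial list (2550-2551) */
                partial_page = slub_percpu_partial(c);
                c->partial = NULL;

                if (partial_page)
                        __unfreeze_partials(s, partial_page);
        }

        /* !CONFIG_SLUB_CPU_PARTIAL stub (2612) */
        static inline void unfreeze_partials_cpu(struct kmem_cache *s,
                                                 struct kmem_cache_cpu *c) { }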
2616 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2624 page = c->page; in flush_slab()
2625 freelist = c->freelist; in flush_slab()
2627 c->page = NULL; in flush_slab()
2628 c->freelist = NULL; in flush_slab()
2629 c->tid = next_tid(c->tid); in flush_slab()
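flush_slab() (2616-2629) empties the cpu slab under the local lock; bumping tid here is what invalidates any concurrent cmpxchg fastpath that snapshotted the old value. Reconstructed, with the locking and the deactivate_slab() call recalled from the surrounding source:

        static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
        {
                unsigned long flags;
                struct page *page;
                void *freelist;

                local_lock_irqsave(&s->cpu_slab->lock, flags);

                /* snapshot and clear the cpu slab (2624-2629) */
                page = c->page;
                freelist = c->freelist;

                c->page = NULL;
                c->freelist = NULL;
                c->tid = next_tid(c->tid);      /* fail any in-flight fastpath cmpxchg */

                local_unlock_irqrestore(&s->cpu_slab->lock, flags);

                if (page)
                        deactivate_slab(s, page, freelist);
        }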
2641 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab() local
2642 void *freelist = c->freelist; in __flush_cpu_slab()
2643 struct page *page = c->page; in __flush_cpu_slab()
2645 c->page = NULL; in __flush_cpu_slab()
2646 c->freelist = NULL; in __flush_cpu_slab()
2647 c->tid = next_tid(c->tid); in __flush_cpu_slab()
2654 unfreeze_partials_cpu(s, c); in __flush_cpu_slab()
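__flush_cpu_slab() (2641-2654) does the same clearing for an arbitrary cpu without taking the local lock; as I recall it is only used on paths (cache shutdown, cpu going away) where the caller guarantees the target cpu is not concurrently using the cache. Reconstruction, deactivate_slab() recalled:

        static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
        {
                struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
                void *freelist = c->freelist;
                struct page *page = c->page;

                c->page = NULL;
                c->freelist = NULL;
                c->tid = next_tid(c->tid);

                if (page)
                        deactivate_slab(s, page, freelist);

                unfreeze_partials_cpu(s, c);    /* 2654: also drop the percpu partials */
        }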
2671 struct kmem_cache_cpu *c; in flush_cpu_slab() local
2677 c = this_cpu_ptr(s->cpu_slab); in flush_cpu_slab()
2679 if (c->page) in flush_cpu_slab()
2680 flush_slab(s, c); in flush_cpu_slab()
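flush_cpu_slab() (2671-2680) is, in this version of the file, a workqueue handler: flush_all() queues one work item per cpu and this runs on the target cpu itself, which is why it can use this_cpu_ptr(). The slub_flush_work plumbing below is recalled, not shown in the matches:

        static void flush_cpu_slab(struct work_struct *w)
        {
                struct kmem_cache *s;
                struct kmem_cache_cpu *c;
                struct slub_flush_work *sfw;

                sfw = container_of(w, struct slub_flush_work, work);

                s = sfw->s;
                c = this_cpu_ptr(s->cpu_slab);

                if (c->page)                    /* 2679 */
                        flush_slab(s, c);       /* 2680 */

                unfreeze_partials(s);
        }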
2687 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab() local
2689 return c->page || slub_percpu_partial(c); in has_cpu_slab()
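has_cpu_slab() (2687-2689) lets flush_all() skip queueing work on cpus that have nothing cached. The matches cover essentially the whole function; only the signature is filled in here:

        static bool has_cpu_slab(int cpu, struct kmem_cache *s)
        {
                struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

                return c->page || slub_percpu_partial(c);
        }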
2900 unsigned long addr, struct kmem_cache_cpu *c) in ___slab_alloc() argument
2910 page = READ_ONCE(c->page); in ___slab_alloc()
2947 if (unlikely(page != c->page)) { in ___slab_alloc()
2951 freelist = c->freelist; in ___slab_alloc()
2958 c->page = NULL; in ___slab_alloc()
2959 c->tid = next_tid(c->tid); in ___slab_alloc()
2976 VM_BUG_ON(!c->page->frozen); in ___slab_alloc()
2977 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
2978 c->tid = next_tid(c->tid); in ___slab_alloc()
2985 if (page != c->page) { in ___slab_alloc()
2989 freelist = c->freelist; in ___slab_alloc()
2990 c->page = NULL; in ___slab_alloc()
2991 c->freelist = NULL; in ___slab_alloc()
2992 c->tid = next_tid(c->tid); in ___slab_alloc()
2998 if (slub_percpu_partial(c)) { in ___slab_alloc()
3000 if (unlikely(c->page)) { in ___slab_alloc()
3004 if (unlikely(!slub_percpu_partial(c))) { in ___slab_alloc()
3010 page = c->page = slub_percpu_partial(c); in ___slab_alloc()
3011 slub_set_percpu_partial(c, page); in ___slab_alloc()
3025 c = slub_get_cpu_ptr(s->cpu_slab); in ___slab_alloc()
3066 if (unlikely(c->page)) { in ___slab_alloc()
3067 void *flush_freelist = c->freelist; in ___slab_alloc()
3068 struct page *flush_page = c->page; in ___slab_alloc()
3070 c->page = NULL; in ___slab_alloc()
3071 c->freelist = NULL; in ___slab_alloc()
3072 c->tid = next_tid(c->tid); in ___slab_alloc()
3082 c->page = page; in ___slab_alloc()
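___slab_alloc() (2900-3082) is the allocation slowpath, and the matches show its recurring pattern: every write to c happens under the local lock, and after taking the lock the code rechecks page == c->page because the task may have migrated or been preempted in between. An abridged sketch of that recheck-and-retry skeleton; the labels, locking calls, and helpers such as get_freelist() are recalled or paraphrased, and whole branches are elided into comments:

        static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                                   unsigned long addr, struct kmem_cache_cpu *c)
        {
                void *freelist;
                struct page *page;
                unsigned long flags;

        reread_page:
                page = READ_ONCE(c->page);              /* 2910 */
                if (!page)
                        goto new_slab;
                /* node-mismatch and pfmemalloc checks elided: they deactivate
                 * the cpu slab and fall through to new_slab */

                local_lock_irqsave(&s->cpu_slab->lock, flags);
                if (unlikely(page != c->page)) {        /* 2947: we raced */
                        local_unlock_irqrestore(&s->cpu_slab->lock, flags);
                        goto reread_page;
                }
                freelist = c->freelist;                 /* 2951 */
                if (!freelist)
                        freelist = get_freelist(s, page); /* pull in remote frees */
                if (!freelist) {
                        c->page = NULL;                 /* 2958 */
                        c->tid = next_tid(c->tid);      /* 2959 */
                        local_unlock_irqrestore(&s->cpu_slab->lock, flags);
                        goto new_slab;
                }

        load_freelist:
                /* 2976-2978: hand out the first object, cache the rest on c */
                VM_BUG_ON(!c->page->frozen);
                c->freelist = get_freepointer(s, freelist);
                c->tid = next_tid(c->tid);
                local_unlock_irqrestore(&s->cpu_slab->lock, flags);
                return freelist;

        new_slab:
                /* 2998-3011: if there is a percpu partial list, pivot to it
                 * under the lock (page = c->page = slub_percpu_partial(c),
                 * then pop it with slub_set_percpu_partial()), again bailing
                 * out if c->page was refilled meanwhile (3000). Otherwise
                 * allocate a fresh slab with irqs enabled and re-take the
                 * cpu pointer (3025). If c->page got refilled while we were
                 * allocating, flush that page first (3066-3072), then install
                 * the new one: c->page = page (3082) and load its freelist. */
                return NULL;    /* placeholder: real refill paths elided in this sketch */
        }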
3098 unsigned long addr, struct kmem_cache_cpu *c) in __slab_alloc() argument
3108 c = slub_get_cpu_ptr(s->cpu_slab); in __slab_alloc()
3111 p = ___slab_alloc(s, gfpflags, node, addr, c); in __slab_alloc()
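__slab_alloc() (3098-3111) is a thin wrapper that re-fetches the per-cpu pointer after disabling preemption, since the task may have migrated between the fastpath's raw_cpu_ptr() and this point. Reconstruction, with the #ifdef recalled:

        static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                                  unsigned long addr, struct kmem_cache_cpu *c)
        {
                void *p;

        #ifdef CONFIG_PREEMPT_COUNT
                /*
                 * We may have been preempted and rescheduled on a different
                 * cpu before disabling preemption; reload the per-cpu area.
                 */
                c = slub_get_cpu_ptr(s->cpu_slab);      /* 3108 */
        #endif

                p = ___slab_alloc(s, gfpflags, node, addr, c);  /* 3111 */
        #ifdef CONFIG_PREEMPT_COUNT
                slub_put_cpu_ptr(s->cpu_slab);
        #endif
                return p;
        }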
3144 struct kmem_cache_cpu *c; in slab_alloc_node() local
3171 c = raw_cpu_ptr(s->cpu_slab); in slab_alloc_node()
3172 tid = READ_ONCE(c->tid); in slab_alloc_node()
3191 object = c->freelist; in slab_alloc_node()
3192 page = c->page; in slab_alloc_node()
3202 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc_node()
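slab_alloc_node() (3144-3202) is the lockless fastpath behind kmem_cache_alloc(): it snapshots c->tid, reads freelist and page without any lock, and commits with a double-word cmpxchg that checks freelist and tid together, so any flush or slowpath that bumped tid makes the commit fail and the loop restart. An abridged sketch of the core; the redo loop, barrier, and cmpxchg are recalled, and the init/hook paths are elided:

        redo:
                c = raw_cpu_ptr(s->cpu_slab);           /* 3171 */
                tid = READ_ONCE(c->tid);                /* 3172 */

                barrier();      /* order the tid read vs. freelist/page reads */

                object = c->freelist;                   /* 3191 */
                page = c->page;                         /* 3192 */
                if (unlikely(!object || !page || !node_match(page, node))) {
                        /* empty freelist, no cpu slab, or wrong node: slowpath */
                        object = __slab_alloc(s, gfpflags, node, addr, c); /* 3202 */
                } else {
                        void *next_object = get_freepointer_safe(s, object);

                        /* commit: freelist and tid must both still match */
                        if (unlikely(!this_cpu_cmpxchg_double(
                                        s->cpu_slab->freelist, s->cpu_slab->tid,
                                        object, tid,
                                        next_object, next_tid(tid))))
                                goto redo;
                }

Because the cmpxchg is cpu-local and the tid encodes the cpu, a successful commit proves the whole read-snapshot-commit sequence ran against one cpu's state, without ever disabling interrupts.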
3443 struct kmem_cache_cpu *c; in do_slab_free() local
3456 c = raw_cpu_ptr(s->cpu_slab); in do_slab_free()
3457 tid = READ_ONCE(c->tid); in do_slab_free()
3462 if (likely(page == c->page)) { in do_slab_free()
3464 void **freelist = READ_ONCE(c->freelist); in do_slab_free()
3487 c = this_cpu_ptr(s->cpu_slab); in do_slab_free()
3488 if (unlikely(page != c->page)) { in do_slab_free()
3492 tid = c->tid; in do_slab_free()
3493 freelist = c->freelist; in do_slab_free()
3496 c->freelist = head; in do_slab_free()
3497 c->tid = next_tid(tid); in do_slab_free()
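do_slab_free() (3443-3497) mirrors the alloc fastpath: when the object belongs to the current cpu slab it is pushed onto c->freelist with the same cmpxchg_double scheme. The second block of matches (3487-3497) is the PREEMPT_RT variant, which takes the local lock instead, since the lockless commit is not usable there. Abridged sketch, with the locking calls and the set_freepointer() link recalled:

        redo:
                c = raw_cpu_ptr(s->cpu_slab);           /* 3456 */
                tid = READ_ONCE(c->tid);                /* 3457 */

                if (likely(page == c->page)) {          /* 3462: frees to the cpu slab */
        #ifndef CONFIG_PREEMPT_RT
                        void **freelist = READ_ONCE(c->freelist);      /* 3464 */

                        set_freepointer(s, tail_obj, freelist);

                        if (unlikely(!this_cpu_cmpxchg_double(
                                        s->cpu_slab->freelist, s->cpu_slab->tid,
                                        freelist, tid,
                                        head, next_tid(tid))))
                                goto redo;
        #else
                        /* PREEMPT_RT: push under the local lock (3487-3497) */
                        void **freelist;

                        local_lock(&s->cpu_slab->lock);
                        c = this_cpu_ptr(s->cpu_slab);  /* 3487 */
                        if (unlikely(page != c->page)) {        /* 3488: migrated */
                                local_unlock(&s->cpu_slab->lock);
                                goto redo;
                        }
                        tid = c->tid;                   /* 3492 */
                        freelist = c->freelist;         /* 3493 */

                        set_freepointer(s, tail_obj, freelist);
                        c->freelist = head;             /* 3496 */
                        c->tid = next_tid(tid);         /* 3497 */

                        local_unlock(&s->cpu_slab->lock);
        #endif
                } else
                        __slab_free(s, page, head, tail_obj, cnt, addr);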
3665 struct kmem_cache_cpu *c; in kmem_cache_alloc_bulk() local
3678 c = slub_get_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3689 object = c->freelist; in kmem_cache_alloc_bulk()
3698 c->tid = next_tid(c->tid); in kmem_cache_alloc_bulk()
3707 _RET_IP_, c); in kmem_cache_alloc_bulk()
3711 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3718 c->freelist = get_freepointer(s, object); in kmem_cache_alloc_bulk()
3722 c->tid = next_tid(c->tid); in kmem_cache_alloc_bulk()
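kmem_cache_alloc_bulk() (3665-3722) drains c->freelist directly under the local lock, dropping into ___slab_alloc() whenever the freelist runs dry. Note that it bumps tid both before releasing the lock for the slowpath (3698) and once at the end (3722), and re-fetches c afterwards (3711) because the slowpath may run with preemption and migration possible. Abridged sketch; the locking calls, kfence/memcg hooks, and error handling are recalled or elided:

        c = slub_get_cpu_ptr(s->cpu_slab);              /* 3678: pin this cpu */
        local_lock_irq(&s->cpu_slab->lock);

        for (i = 0; i < size; i++) {
                void *object = c->freelist;             /* 3689 */

                if (unlikely(!object)) {
                        /* 3698: invalidate the fastpath before unlocking */
                        c->tid = next_tid(c->tid);
                        local_unlock_irq(&s->cpu_slab->lock);

                        /* 3707: slowpath also refills c->freelist as a side effect */
                        p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
                                             _RET_IP_, c);
                        if (unlikely(!p[i]))
                                goto error;

                        c = this_cpu_ptr(s->cpu_slab);  /* 3711: may have moved */
                        local_lock_irq(&s->cpu_slab->lock);
                        continue;
                }
                c->freelist = get_freepointer(s, object);       /* 3718 */
                p[i] = object;
        }
        c->tid = next_tid(c->tid);                      /* 3722 */
        local_unlock_irq(&s->cpu_slab->lock);
        slub_put_cpu_ptr(s->cpu_slab);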
5260 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects() local
5265 page = READ_ONCE(c->page); in show_slab_objects()
5280 page = slub_percpu_partial_read_once(c); in show_slab_objects()
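Finally, show_slab_objects() (5260-5280) is the sysfs reporting side: it walks every cpu and reads c->page and the percpu partial list with READ_ONCE-style accessors, since it runs unlocked against a live allocator and only needs a tolerably racy snapshot. Abridged, with the counting logic elided:

        for_each_possible_cpu(cpu) {
                struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); /* 5260 */
                struct page *page;

                page = READ_ONCE(c->page);      /* 5265: racy but tolerated read */
                if (page) {
                        /* ... count objects on the cpu slab ... */
                }

                page = slub_percpu_partial_read_once(c);        /* 5280 */
                if (page) {
                        /* ... count pages on the percpu partial list ... */
                }
        }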