Lines matching refs: c (identifier cross-reference over the SLUB allocator, mm/slub.c). Each entry gives the source line number, the matching line, the enclosing function, and whether c is declared there as an argument or a local.
1613 struct kmem_cache_cpu *c, gfp_t flags) in get_partial_node() argument
1642 c->page = page; in get_partial_node()
1662 struct kmem_cache_cpu *c) in get_any_partial() argument
1704 object = get_partial_node(s, n, c, flags); in get_any_partial()
1726 struct kmem_cache_cpu *c) in get_partial() argument
1736 object = get_partial_node(s, get_node(s, searchnode), c, flags); in get_partial()
1740 return get_any_partial(s, flags, c); in get_partial()
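
The three functions above form SLUB's partial-slab acquisition path: get_partial() (line 1726) tries the allocation's local node first and only then falls back to get_any_partial() (line 1662), which walks the remaining NUMA nodes and calls get_partial_node() on each. A condensed sketch of get_partial_node()'s core, reconstructed around the matches; acquire_slab() and put_cpu_partial() are the real code's helpers, and the pfmemalloc and error handling are simplified away:

    /* Under n->list_lock, acquire slabs from the node's partial list.
     * The first acquired slab becomes this CPU's active slab (c->page);
     * any further ones are parked on the per-CPU partial list, stopping
     * once roughly s->cpu_partial / 2 objects are in hand. */
    static void *get_partial_node_sketch(struct kmem_cache *s,
                                         struct kmem_cache_node *n,
                                         struct kmem_cache_cpu *c, gfp_t flags)
    {
        struct page *page, *page2;
        void *object = NULL;
        int available = 0, objects;

        spin_lock(&n->list_lock);
        list_for_each_entry_safe(page, page2, &n->partial, lru) {
            void *t = acquire_slab(s, n, page, object == NULL, &objects);
            if (!t)
                break;

            available += objects;
            if (!object) {
                c->page = page;              /* first hit: the active slab */
                object = t;                  /* freelist handed to the caller */
            } else {
                put_cpu_partial(s, page, 0); /* extras: per-CPU partial list */
            }
            if (available > s->cpu_partial / 2)
                break;                       /* enough cached; stop draining */
        }
        spin_unlock(&n->list_lock);
        return object;
    }
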
1963 struct kmem_cache_cpu *c) in unfreeze_partials() argument
1969 while ((page = c->partial)) { in unfreeze_partials()
1973 c->partial = page->next; in unfreeze_partials()
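
unfreeze_partials() (line 1963) is the inverse operation: it drains the per-CPU partial list that get_partial_node() filled. The matches show the list walk; a sketch of its shape, with the cmpxchg-based unfreeze and the discard of fully free slabs elided:

    /* The per-CPU partial list is singly linked through page->next.
     * Each page is detached from c->partial before it is processed, so
     * the list head never points at a page being handed back. */
    static void unfreeze_partials_sketch(struct kmem_cache *s,
                                         struct kmem_cache_cpu *c)
    {
        struct page *page;

        while ((page = c->partial)) {
            c->partial = page->next;  /* detach the head first */
            /* ... unfreeze "page" back onto its node's partial list,
             * or free it entirely if no objects are in use ... */
        }
    }
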
2075 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2078 deactivate_slab(s, c->page, c->freelist); in flush_slab()
2080 c->tid = next_tid(c->tid); in flush_slab()
2081 c->page = NULL; in flush_slab()
2082 c->freelist = NULL; in flush_slab()
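
The flush_slab() matches cover nearly the whole function; only a statistics hit in the unshown lines (2076-2079) is assumed here. Reassembled:

    static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
    {
        /* assumed from the gap in the listing: stat(s, CPUSLAB_FLUSH); */
        deactivate_slab(s, c->page, c->freelist);  /* hand slab back to node */
        c->tid = next_tid(c->tid);  /* invalidate any in-flight fastpath tid */
        c->page = NULL;
        c->freelist = NULL;
    }

Bumping c->tid is what makes the flush safe against the lockless fastpaths: a slab_alloc_node() or slab_free() that snapshotted the old tid will fail its cmpxchg and retry.
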
2092 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab() local
2094 if (likely(c)) { in __flush_cpu_slab()
2095 if (c->page) in __flush_cpu_slab()
2096 flush_slab(s, c); in __flush_cpu_slab()
2098 unfreeze_partials(s, c); in __flush_cpu_slab()
2112 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab() local
2114 return c->page || c->partial; in has_cpu_slab()
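
__flush_cpu_slab() (line 2092) applies flush_slab() plus a partial-list drain to one CPU, and has_cpu_slab() (line 2112) is the predicate that tells the flush_all() IPI machinery whether a CPU has anything worth flushing. Reconstructed around the matches, with the unshown connective lines assumed:

    static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
    {
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

        if (likely(c)) {
            if (c->page)
                flush_slab(s, c);      /* drop the active slab */
            unfreeze_partials(s, c);   /* and the per-CPU partial list */
        }
    }

    static bool has_cpu_slab(int cpu, void *info)
    {
        struct kmem_cache *s = info;
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

        return c->page || c->partial;  /* any per-CPU state worth an IPI? */
    }
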
2204 struct kmem_cache_cpu *c = *pc; in new_slab_objects() local
2207 freelist = get_partial(s, flags, node, c); in new_slab_objects()
2214 c = raw_cpu_ptr(s->cpu_slab); in new_slab_objects()
2215 if (c->page) in new_slab_objects()
2216 flush_slab(s, c); in new_slab_objects()
2226 c->page = page; in new_slab_objects()
2227 *pc = c; in new_slab_objects()
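
new_slab_objects() (line 2204) chains the two refill sources together: first the partial lists via get_partial(), then a fresh slab from the page allocator. The re-read of the per-CPU pointer at line 2214 matters because the page allocation may have slept and the task may have been migrated; whatever slab landed on the new CPU in the meantime is flushed before the fresh page is installed. Condensed, with the new_slab() call and the freelist takeover between the shown lines assumed:

    static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
                                         int node, struct kmem_cache_cpu **pc)
    {
        struct kmem_cache_cpu *c = *pc;
        void *freelist;
        struct page *page;

        freelist = get_partial(s, flags, node, c);  /* cheap source first */
        if (freelist)
            return freelist;

        page = new_slab(s, flags, node);            /* may sleep and migrate us */
        if (page) {
            c = raw_cpu_ptr(s->cpu_slab);           /* re-read our CPU slot */
            if (c->page)
                flush_slab(s, c);                   /* it was refilled meanwhile */

            freelist = page->freelist;              /* assumed: take the whole  */
            page->freelist = NULL;                  /* freelist for this CPU    */
            c->page = page;
            *pc = c;
        }

        return freelist;
    }
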
2293 unsigned long addr, struct kmem_cache_cpu *c) in __slab_alloc() argument
2306 c = this_cpu_ptr(s->cpu_slab); in __slab_alloc()
2309 page = c->page; in __slab_alloc()
2322 deactivate_slab(s, page, c->freelist); in __slab_alloc()
2323 c->page = NULL; in __slab_alloc()
2324 c->freelist = NULL; in __slab_alloc()
2335 deactivate_slab(s, page, c->freelist); in __slab_alloc()
2336 c->page = NULL; in __slab_alloc()
2337 c->freelist = NULL; in __slab_alloc()
2342 freelist = c->freelist; in __slab_alloc()
2349 c->page = NULL; in __slab_alloc()
2362 VM_BUG_ON(!c->page->frozen); in __slab_alloc()
2363 c->freelist = get_freepointer(s, freelist); in __slab_alloc()
2364 c->tid = next_tid(c->tid); in __slab_alloc()
2370 if (c->partial) { in __slab_alloc()
2371 page = c->page = c->partial; in __slab_alloc()
2372 c->partial = page->next; in __slab_alloc()
2374 c->freelist = NULL; in __slab_alloc()
2378 freelist = new_slab_objects(s, gfpflags, node, &c); in __slab_alloc()
2386 page = c->page; in __slab_alloc()
2396 c->page = NULL; in __slab_alloc()
2397 c->freelist = NULL; in __slab_alloc()
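
The __slab_alloc() matches trace the whole slow-path decision ladder: re-read the per-CPU slot (line 2306), give up the current slab if it sits on the wrong node or has the wrong pfmemalloc status (lines 2322-2337), otherwise try c->freelist, which remote frees may have refilled (line 2342), then pop a slab off the per-CPU partial list (lines 2370-2374), and only as a last resort call new_slab_objects() (line 2378). A condensed sketch; interrupt disabling, stat counters and the debug-only tail (lines 2386-2397) are elided, and node_mismatch()/pfmemalloc_mismatch() are stand-in names for the real predicates:

    static void *__slab_alloc_sketch(struct kmem_cache *s, gfp_t gfpflags,
                                     int node, unsigned long addr,
                                     struct kmem_cache_cpu *c)
    {
        void *freelist;
        struct page *page;

        c = this_cpu_ptr(s->cpu_slab);   /* re-read: we may have migrated */
    redo:
        page = c->page;
        if (!page)
            goto new_slab;

        if (node_mismatch(page, node) || pfmemalloc_mismatch(page, gfpflags)) {
            deactivate_slab(s, page, c->freelist);  /* slab unusable here */
            c->page = NULL;
            c->freelist = NULL;
            goto new_slab;
        }

        freelist = c->freelist;          /* remote frees may have refilled it */
        if (!freelist)
            freelist = get_freelist(s, page);  /* else take the page freelist */
        if (!freelist) {
            c->page = NULL;              /* slab exhausted, drop it */
            goto new_slab;
        }

        /* load_freelist: hand out one object, keep the rest per-CPU */
        VM_BUG_ON(!c->page->frozen);     /* this CPU must still own the slab */
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
        return freelist;

    new_slab:
        if (c->partial) {                /* refill source 1: per-CPU partials */
            page = c->page = c->partial;
            c->partial = page->next;
            c->freelist = NULL;
            goto redo;                   /* retry the ladder with that slab */
        }
        /* refill source 2: node partial lists, then the page allocator */
        return new_slab_objects(s, gfpflags, node, &c);
    }
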
2416 struct kmem_cache_cpu *c; in slab_alloc_node() local
2437 c = this_cpu_ptr(s->cpu_slab); in slab_alloc_node()
2445 tid = c->tid; in slab_alloc_node()
2448 object = c->freelist; in slab_alloc_node()
2449 page = c->page; in slab_alloc_node()
2451 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc_node()
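
slab_alloc_node() (line 2416) is the lockless fastpath around that slow path: it snapshots c->tid, c->freelist and c->page without taking any lock, and commits with a this_cpu_cmpxchg_double() on the (freelist, tid) pair, so any preemption, migration or slow-path activity since the snapshot makes the commit fail and the sequence retry. Reconstructed shape, with the preemption guards, prefetching and hook calls elided:

    static void *slab_alloc_node_sketch(struct kmem_cache *s, gfp_t gfpflags,
                                        int node, unsigned long addr)
    {
        struct kmem_cache_cpu *c;
        struct page *page;
        unsigned long tid;
        void *object;

    redo:
        c = this_cpu_ptr(s->cpu_slab);
        tid = c->tid;                 /* snapshot the transaction id first */

        object = c->freelist;
        page = c->page;
        if (unlikely(!object || !node_match(page, node))) {
            /* empty per-CPU freelist or wrong node: take the slow path */
            object = __slab_alloc(s, gfpflags, node, addr, c);
        } else if (unlikely(!this_cpu_cmpxchg_double(
                        s->cpu_slab->freelist, s->cpu_slab->tid,
                        object, tid,
                        get_freepointer(s, object), next_tid(tid)))) {
            goto redo;                /* snapshot went stale: start over */
        }
        return object;
    }
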
2680 struct kmem_cache_cpu *c; in slab_free() local
2693 c = this_cpu_ptr(s->cpu_slab); in slab_free()
2695 tid = c->tid; in slab_free()
2698 if (likely(page == c->page)) { in slab_free()
2699 set_freepointer(s, object, c->freelist); in slab_free()
2703 c->freelist, tid, in slab_free()
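
slab_free() (line 2680) is the mirror image of that fastpath: the object can be pushed onto c->freelist only while its page is still this CPU's active slab, and the same (freelist, tid) cmpxchg detects races; anything else goes to the __slab_free() slow path. Reconstructed, with the free hooks and stat counters elided:

    static void slab_free_sketch(struct kmem_cache *s, struct page *page,
                                 void *x, unsigned long addr)
    {
        void **object = (void *)x;
        struct kmem_cache_cpu *c;
        unsigned long tid;

    redo:
        c = this_cpu_ptr(s->cpu_slab);
        tid = c->tid;

        if (likely(page == c->page)) {
            /* link the object in front of the current per-CPU freelist */
            set_freepointer(s, object, c->freelist);

            if (unlikely(!this_cpu_cmpxchg_double(
                            s->cpu_slab->freelist, s->cpu_slab->tid,
                            c->freelist, tid,
                            object, next_tid(tid))))
                goto redo;            /* raced: retry with a fresh snapshot */
        } else {
            __slab_free(s, page, x, addr);  /* not our active slab */
        }
    }
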
3707 struct kmem_cache *c; in __kmem_cache_alias() local
3719 c = cache_from_memcg_idx(s, i); in __kmem_cache_alias()
3720 if (!c) in __kmem_cache_alias()
3722 c->object_size = s->object_size; in __kmem_cache_alias()
3723 c->inuse = max_t(int, c->inuse, in __kmem_cache_alias()
4264 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects() local
4269 page = ACCESS_ONCE(c->page); in show_slab_objects()
4284 page = ACCESS_ONCE(c->partial); in show_slab_objects()
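
show_slab_objects() (line 4264) reads each CPU's c->page and c->partial with no lock held; ACCESS_ONCE() (the predecessor of READ_ONCE()) only guarantees a single non-torn load of each pointer, so the sysfs counts are a deliberately racy snapshot. Shape of the loop, with the per-node accounting between the two loads condensed:

    for_each_possible_cpu(cpu) {
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
        struct page *page;

        page = ACCESS_ONCE(c->page);      /* load once; may be stale */
        if (page) {
            /* ... add page->objects or page->inuse to the totals ... */
        }

        page = ACCESS_ONCE(c->partial);   /* same racy snapshot idea */
        if (page) {
            /* ... account the per-CPU partial pages ... */
        }
    }
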
5011 struct kmem_cache *c = cache_from_memcg_idx(s, i); in slab_attr_store() local
5012 if (c) in slab_attr_store()
5013 attribute->store(c, buf, len); in slab_attr_store()
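
slab_attr_store() (line 5011) and __kmem_cache_alias() (lines 3707-3723) share one memcg pattern: a change made to a root cache is replayed onto every per-memcg child cache reachable through cache_from_memcg_idx(). A condensed sketch of the propagation loop, assuming the real code's surrounding slab_mutex locking and is_root_cache() guard:

    /* Replay a sysfs attribute write onto all memcg child caches so they
     * stay consistent with the root cache; NULL slots mean the child
     * cache for that memcg has not been created yet. */
    for_each_memcg_cache_index(i) {
        struct kmem_cache *c = cache_from_memcg_idx(s, i);

        if (c)
            attribute->store(c, buf, len);
    }
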