Lines matching references to the identifier "c" (the per-CPU slab state, struct kmem_cache_cpu *) in the SLUB allocator, mm/slub.c. The leading number on each hit is the kernel source line; the trailing note names the enclosing function and whether c is an argument or a local variable there.
210 static inline void stat(struct kmem_cache_cpu *c, enum stat_item si) in stat() argument
213 c->stat[si]++; in stat()
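The two hits at 210 and 213 are the whole statistics helper. A sketch of its likely full form, assuming the CONFIG_SLUB_STATS guard that kernels of this era use (kernel context, not standalone code):

static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        /* c is this CPU's private state, so a plain increment suffices. */
        c->stat[si]++;
#endif
}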
1373 struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id()); in unfreeze_slab() local
1380 stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); in unfreeze_slab()
1382 stat(c, DEACTIVATE_FULL); in unfreeze_slab()
1389 stat(c, DEACTIVATE_EMPTY); in unfreeze_slab()
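Lines 1373-1389 sit inside unfreeze_slab(), which hands a no-longer-frozen cpu slab back to its node: a slab with remaining free objects goes onto the partial list (head or tail per the tail hint), a fully allocated one is just counted, and an empty one is discarded. A simplified, hedged sketch of that control flow, inferred from the DEACTIVATE_* counters above; the real function also handles debug full-list tracking and retains some empty slabs (kernel context, not standalone):

static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
        struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());

        __ClearPageSlubFrozen(page);
        if (page->inuse) {
                if (page->freelist) {
                        /* Free objects remain: back onto the partial list. */
                        add_partial(n, page, tail);
                        stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
                } else {
                        /* Fully allocated: nothing left to track here. */
                        stat(c, DEACTIVATE_FULL);
                }
                slab_unlock(page);
        } else {
                /* Completely empty: give the pages back. */
                stat(c, DEACTIVATE_EMPTY);
                slab_unlock(page);
                discard_slab(s, page);
        }
}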
1414 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in deactivate_slab() argument
1416 struct page *page = c->page; in deactivate_slab()
1420 stat(c, DEACTIVATE_REMOTE_FREES); in deactivate_slab()
1426 while (unlikely(c->freelist)) { in deactivate_slab()
1432 object = c->freelist; in deactivate_slab()
1433 c->freelist = c->freelist[c->offset]; in deactivate_slab()
1436 object[c->offset] = page->freelist; in deactivate_slab()
1440 c->page = NULL; in deactivate_slab()
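deactivate_slab() (1414-1440) drains the per-CPU freelist back into the slab page's own list before unfreezing; note the c->freelist[c->offset] pattern, where a free object's next pointer is stored inside the object itself at a word offset. Since that embedded-link freelist is the core trick, here is a minimal self-contained userspace demo of it; every name in it (obj_alloc, obj_free, OBJ_WORDS and so on) is illustrative, not kernel API:

#include <stdio.h>

#define OBJ_WORDS 4     /* each object is four pointer-sized words */
#define NOBJS 3

static void **freelist; /* head of the threaded free list */
static size_t offset;   /* word index where the link is stored */

static void obj_free(void **object)
{
        object[offset] = freelist;      /* thread old head through the object */
        freelist = object;
}

static void **obj_alloc(void)
{
        void **object = freelist;

        if (object)
                freelist = object[offset];      /* pop: follow embedded link */
        return object;
}

int main(void)
{
        static void *slab[NOBJS][OBJ_WORDS];    /* stand-in for a slab page */
        int i;

        for (i = 0; i < NOBJS; i++)
                obj_free(slab[i]);

        /* Objects come back LIFO: slab[2], slab[1], slab[0]. */
        for (i = 0; i < NOBJS; i++)
                printf("allocated %p\n", (void *)obj_alloc());
        return 0;
}

Freeing threads the old head through the object and allocation follows the link back out, so the free list costs no memory beyond the objects themselves.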
1444 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
1446 stat(c, CPUSLAB_FLUSH); in flush_slab()
1447 slab_lock(c->page); in flush_slab()
1448 deactivate_slab(s, c); in flush_slab()
1458 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); in __flush_cpu_slab() local
1460 if (likely(c && c->page)) in __flush_cpu_slab()
1461 flush_slab(s, c); in __flush_cpu_slab()
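flush_slab() and __flush_cpu_slab() (1444-1461) are thin wrappers: flushing a CPU's slab is just deactivation under the slab lock, skipped when the CPU has no slab. Their probable full form, reconstructed from the hits (kernel context):

static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
        stat(c, CPUSLAB_FLUSH);
        slab_lock(c->page);
        deactivate_slab(s, c);  /* drops the lock via unfreeze_slab() */
}

static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
        struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

        if (likely(c && c->page))
                flush_slab(s, c);
}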
1480 static inline int node_match(struct kmem_cache_cpu *c, int node) in node_match() argument
1483 if (node != -1 && c->node != node) in node_match()
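node_match() (1480-1483) gates the fast paths on NUMA placement; node == -1 means the caller does not care where the memory comes from. The single hit elides the CONFIG_NUMA guard that presumably surrounds it; a sketch of the complete helper:

static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
        if (node != -1 && c->node != node)
                return 0;
#endif
        return 1;
}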
1508 unsigned long addr, struct kmem_cache_cpu *c) in __slab_alloc() argument
1516 if (!c->page) in __slab_alloc()
1519 slab_lock(c->page); in __slab_alloc()
1520 if (unlikely(!node_match(c, node))) in __slab_alloc()
1523 stat(c, ALLOC_REFILL); in __slab_alloc()
1526 object = c->page->freelist; in __slab_alloc()
1529 if (unlikely(SLABDEBUG && PageSlubDebug(c->page))) in __slab_alloc()
1532 c->freelist = object[c->offset]; in __slab_alloc()
1533 c->page->inuse = c->page->objects; in __slab_alloc()
1534 c->page->freelist = NULL; in __slab_alloc()
1535 c->node = page_to_nid(c->page); in __slab_alloc()
1537 slab_unlock(c->page); in __slab_alloc()
1538 stat(c, ALLOC_SLOWPATH); in __slab_alloc()
1542 deactivate_slab(s, c); in __slab_alloc()
1547 c->page = new; in __slab_alloc()
1548 stat(c, ALLOC_FROM_PARTIAL); in __slab_alloc()
1561 c = get_cpu_slab(s, smp_processor_id()); in __slab_alloc()
1562 stat(c, ALLOC_SLAB); in __slab_alloc()
1563 if (c->page) in __slab_alloc()
1564 flush_slab(s, c); in __slab_alloc()
1567 c->page = new; in __slab_alloc()
1572 if (!alloc_debug_processing(s, c->page, object, addr)) in __slab_alloc()
1575 c->page->inuse++; in __slab_alloc()
1576 c->page->freelist = object[c->offset]; in __slab_alloc()
1577 c->node = -1; in __slab_alloc()
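Lines 1508-1577 are the allocation slow path, __slab_alloc(): refill the per-CPU freelist from the current page if possible, otherwise deactivate it and pull a new slab, first from the node's partial list and then from the page allocator. The debug branch at the bottom allocates one object at a time and sets c->node = -1, which is what disables the lockless fast paths (both slab_alloc() and slab_free() test c->node). A condensed sketch of the control flow; it omits details such as re-enabling interrupts around the page allocation (kernel context, not standalone):

static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                          unsigned long addr, struct kmem_cache_cpu *c)
{
        void **object;
        struct page *new;

        if (!c->page)
                goto new_slab;

        slab_lock(c->page);
        if (unlikely(!node_match(c, node)))
                goto another_slab;              /* wrong NUMA node */
        stat(c, ALLOC_REFILL);

load_freelist:
        object = c->page->freelist;
        if (unlikely(!object))
                goto another_slab;              /* page freelist exhausted */
        if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
                goto debug;

        /* Take the whole page freelist into the per-CPU freelist. */
        c->freelist = object[c->offset];
        c->page->inuse = c->page->objects;
        c->page->freelist = NULL;
        c->node = page_to_nid(c->page);
unlock_out:
        slab_unlock(c->page);
        stat(c, ALLOC_SLOWPATH);
        return object;

another_slab:
        deactivate_slab(s, c);

new_slab:
        new = get_partial(s, gfpflags, node);
        if (new) {
                c->page = new;
                stat(c, ALLOC_FROM_PARTIAL);
                goto load_freelist;
        }

        new = new_slab(s, gfpflags, node);      /* may sleep or fail */
        if (new) {
                c = get_cpu_slab(s, smp_processor_id());
                stat(c, ALLOC_SLAB);
                if (c->page)
                        flush_slab(s, c);
                slab_lock(new);
                __SetPageSlubFrozen(new);
                c->page = new;
                goto load_freelist;
        }
        return NULL;

debug:
        if (!alloc_debug_processing(s, c->page, object, addr))
                goto another_slab;
        /* Debug mode: one object at a time, fast path disabled. */
        c->page->inuse++;
        c->page->freelist = object[c->offset];
        c->node = -1;
        goto unlock_out;
}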
1595 struct kmem_cache_cpu *c; in slab_alloc() local
1605 c = get_cpu_slab(s, smp_processor_id()); in slab_alloc()
1606 objsize = c->objsize; in slab_alloc()
1607 if (unlikely(!c->freelist || !node_match(c, node))) in slab_alloc()
1609 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc()
1612 object = c->freelist; in slab_alloc()
1613 c->freelist = object[c->offset]; in slab_alloc()
1614 stat(c, ALLOC_FASTPATH); in slab_alloc()
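slab_alloc() (1595-1614) is the lockless fast path: with interrupts disabled, c is this CPU's private state, so popping the freelist head needs no atomics. A hedged sketch; the real function carries additional debug hooks that are omitted here (kernel context):

static __always_inline void *slab_alloc(struct kmem_cache *s,
                gfp_t gfpflags, int node, unsigned long addr)
{
        void **object;
        struct kmem_cache_cpu *c;
        unsigned long flags;
        unsigned int objsize;

        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
        objsize = c->objsize;
        if (unlikely(!c->freelist || !node_match(c, node)))
                object = __slab_alloc(s, gfpflags, node, addr, c);
        else {
                object = c->freelist;
                c->freelist = object[c->offset];        /* lockless pop */
                stat(c, ALLOC_FASTPATH);
        }
        local_irq_restore(flags);

        if (unlikely((gfpflags & __GFP_ZERO) && object))
                memset(object, 0, objsize);
        return object;
}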
1651 struct kmem_cache_cpu *c; in __slab_free() local
1653 c = get_cpu_slab(s, raw_smp_processor_id()); in __slab_free()
1654 stat(c, FREE_SLOWPATH); in __slab_free()
1666 stat(c, FREE_FROZEN); in __slab_free()
1679 stat(c, FREE_ADD_PARTIAL); in __slab_free()
1692 stat(c, FREE_REMOVE_PARTIAL); in __slab_free()
1695 stat(c, FREE_SLAB); in __slab_free()
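__slab_free() (1651-1695) is the free slow path, taken when the object does not belong to the current cpu slab. The FREE_* counters mark its branches: freeing into a frozen slab (some CPU still owns it, so only the page freelist is touched), giving a full slab its first free object (it joins the partial list), or emptying a slab entirely (it leaves the partial list and is discarded). A condensed sketch, with the debug-processing branch omitted (kernel context):

static void __slab_free(struct kmem_cache *s, struct page *page,
                        void *x, unsigned long addr, unsigned int offset)
{
        void *prior;
        void **object = (void *)x;
        struct kmem_cache_cpu *c;

        c = get_cpu_slab(s, raw_smp_processor_id());
        stat(c, FREE_SLOWPATH);
        slab_lock(page);

        prior = object[offset] = page->freelist;        /* push onto page list */
        page->freelist = object;
        page->inuse--;

        if (unlikely(PageSlubFrozen(page))) {
                /* Another CPU owns this slab; it will merge on deactivate. */
                stat(c, FREE_FROZEN);
                goto out_unlock;
        }

        if (unlikely(!page->inuse))
                goto slab_empty;

        if (unlikely(!prior)) {
                /* First free object: the slab just became partial. */
                add_partial(get_node(s, page_to_nid(page)), page, 1);
                stat(c, FREE_ADD_PARTIAL);
        }

out_unlock:
        slab_unlock(page);
        return;

slab_empty:
        if (prior) {
                /* Was on the partial list; take it off before discarding. */
                remove_partial(s, page);
                stat(c, FREE_REMOVE_PARTIAL);
        }
        slab_unlock(page);
        stat(c, FREE_SLAB);
        discard_slab(s, page);
}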
1720 struct kmem_cache_cpu *c; in slab_free() local
1724 c = get_cpu_slab(s, smp_processor_id()); in slab_free()
1725 debug_check_no_locks_freed(object, c->objsize); in slab_free()
1728 if (likely(page == c->page && c->node >= 0)) { in slab_free()
1729 object[c->offset] = c->freelist; in slab_free()
1730 c->freelist = object; in slab_free()
1731 stat(c, FREE_FASTPATH); in slab_free()
1733 __slab_free(s, page, x, addr, c->offset); in slab_free()
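slab_free() (1720-1733) mirrors the allocation fast path: if the object belongs to this CPU's slab and the cache is not in debug mode (c->node >= 0), push it onto the per-CPU freelist; otherwise fall through to __slab_free(). A hedged sketch (kernel context):

static __always_inline void slab_free(struct kmem_cache *s,
                        struct page *page, void *x, unsigned long addr)
{
        void **object = (void *)x;
        struct kmem_cache_cpu *c;
        unsigned long flags;

        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
        debug_check_no_locks_freed(object, c->objsize);

        if (likely(page == c->page && c->node >= 0)) {
                object[c->offset] = c->freelist;        /* lockless push */
                c->freelist = object;
                stat(c, FREE_FASTPATH);
        } else
                __slab_free(s, page, x, addr, c->offset);

        local_irq_restore(flags);
}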
1915 struct kmem_cache_cpu *c) in init_kmem_cache_cpu() argument
1917 c->page = NULL; in init_kmem_cache_cpu()
1918 c->freelist = NULL; in init_kmem_cache_cpu()
1919 c->node = 0; in init_kmem_cache_cpu()
1920 c->offset = s->offset / sizeof(void *); in init_kmem_cache_cpu()
1921 c->objsize = s->objsize; in init_kmem_cache_cpu()
1923 memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned)); in init_kmem_cache_cpu()
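init_kmem_cache_cpu() (1915-1923) touches every field of struct kmem_cache_cpu, so the structure can be read off this listing almost directly; note how the byte offset s->offset is converted to the word index used by the object[c->offset] links above. A hedged sketch of the structure (the real definition lives in include/linux/slub_def.h and may differ in detail):

struct kmem_cache_cpu {
        void **freelist;        /* head of the per-CPU lockless freelist */
        struct page *page;      /* slab page the freelist draws from */
        int node;               /* NUMA node of that page; -1 = debug mode */
        unsigned int offset;    /* freelist link offset, in words */
        unsigned int objsize;   /* object size without metadata */
#ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};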
1978 struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu); in alloc_kmem_cache_cpu() local
1980 if (c) in alloc_kmem_cache_cpu()
1982 (void *)c->freelist; in alloc_kmem_cache_cpu()
1985 c = kmalloc_node( in alloc_kmem_cache_cpu()
1988 if (!c) in alloc_kmem_cache_cpu()
1992 init_kmem_cache_cpu(s, c); in alloc_kmem_cache_cpu()
1993 return c; in alloc_kmem_cache_cpu()
1996 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) in free_kmem_cache_cpu() argument
1998 if (c < per_cpu(kmem_cache_cpu, cpu) || in free_kmem_cache_cpu()
1999 c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { in free_kmem_cache_cpu()
2000 kfree(c); in free_kmem_cache_cpu()
2003 c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu); in free_kmem_cache_cpu()
2004 per_cpu(kmem_cache_cpu_free, cpu) = c; in free_kmem_cache_cpu()
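alloc_kmem_cache_cpu() and free_kmem_cache_cpu() (1978-2004) keep a per-CPU recycling pool of kmem_cache_cpu structures, reusing the freelist field as the link. The pointer-range comparison at 1998-1999 is how a statically allocated entry (inside the per-CPU kmem_cache_cpu array) is told apart from a kmalloc'ed one, which is simply kfree'd. A minimal self-contained userspace demo of that discrimination pattern; all names are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 4

struct obj {
        struct obj *next;
};

static struct obj pool[POOL_SIZE];
static struct obj *pool_free;

static void obj_release(struct obj *o)
{
        if (o < pool || o >= pool + POOL_SIZE) {
                free(o);                /* dynamic: back to the heap */
                return;
        }
        o->next = pool_free;            /* static: recycle on a list */
        pool_free = o;
}

int main(void)
{
        struct obj *dynamic = malloc(sizeof(*dynamic));

        obj_release(&pool[1]);  /* recycled onto pool_free */
        obj_release(dynamic);   /* freed for real */
        printf("pool_free points at pool[1]: %d\n", pool_free == &pool[1]);
        return 0;
}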
2012 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); in free_kmem_cache_cpus() local
2014 if (c) { in free_kmem_cache_cpus()
2016 free_kmem_cache_cpu(c, cpu); in free_kmem_cache_cpus()
2026 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); in alloc_kmem_cache_cpus() local
2028 if (c) in alloc_kmem_cache_cpus()
2031 c = alloc_kmem_cache_cpu(s, cpu, flags); in alloc_kmem_cache_cpus()
2032 if (!c) { in alloc_kmem_cache_cpus()
2036 s->cpu_slab[cpu] = c; in alloc_kmem_cache_cpus()
3200 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); in slab_cpuup_callback() local
3205 free_kmem_cache_cpu(c, cpu); in slab_cpuup_callback()
3699 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); in show_slab_objects() local
3701 if (!c || c->node < 0) in show_slab_objects()
3704 if (c->page) { in show_slab_objects()
3706 x = c->page->objects; in show_slab_objects()
3708 x = c->page->inuse; in show_slab_objects()
3713 nodes[c->node] += x; in show_slab_objects()
3715 per_cpu[c->node]++; in show_slab_objects()
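The final hits (3699-3715) are in show_slab_objects(), the sysfs reporting path: for each CPU it reads either the capacity or the in-use count of the cpu slab and accumulates per-node totals, skipping CPUs with no slab or in debug mode (c->node < 0). A condensed sketch of that loop; the SO_* flag names match kernels of this era, but treat the surrounding details as assumptions (kernel context):

/* Inside show_slab_objects(): */
for_each_possible_cpu(cpu) {
        struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

        if (!c || c->node < 0)
                continue;       /* no cpu slab, or debug mode */

        if (c->page) {
                if (flags & SO_TOTAL)
                        x = c->page->objects;   /* slab capacity */
                else if (flags & SO_OBJECTS)
                        x = c->page->inuse;     /* allocated objects */
                else
                        x = 1;                  /* count the slab itself */

                total += x;
                nodes[c->node] += x;
        }
        per_cpu[c->node]++;
}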