Lines in mm/slab_common.c matching references to `s`, the struct kmem_cache pointer, as captured from a source cross-reference viewer. Each entry shows the original source line number, the matching code, the enclosing function, and whether `s` is a function argument or a local variable at that point.

84 unsigned int kmem_cache_size(struct kmem_cache *s)  in kmem_cache_size()  argument
86 return s->object_size; in kmem_cache_size()
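kmem_cache_size() reports s->object_size, the usable payload size the cache was created for; this can be smaller than s->size, the full per-object slot that also absorbs alignment padding and debug metadata (compare line 251 below, where create_cache() seeds both fields with the same value before the allocator-specific __kmem_cache_create() adjusts the slot size). A minimal, hypothetical caller:

    struct kmem_cache *c = kmem_cache_create("demo", 56, 0, 0, NULL);

    /* Usable object size, e.g. 56; may be larger if slab merging
     * aliased "demo" onto a slightly bigger existing cache. */
    pr_info("usable size: %u\n", kmem_cache_size(c));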
108 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) in __kmem_cache_free_bulk() argument
113 if (s) in __kmem_cache_free_bulk()
114 kmem_cache_free(s, p[i]); in __kmem_cache_free_bulk()
120 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, in __kmem_cache_alloc_bulk() argument
126 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
128 __kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
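The two bulk helpers are nearly complete in the fragments above. A hedged reconstruction of the generic fallback pair, assuming only what <linux/slab.h> provides; note how a mid-loop allocation failure unwinds the i objects already obtained:

    void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
    {
        size_t i;

        for (i = 0; i < nr; i++) {
            if (s)
                kmem_cache_free(s, p[i]);
            else
                kfree(p[i]);    /* kfree_bulk() path has no cache */
        }
    }

    int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                                void **p)
    {
        size_t i;

        for (i = 0; i < nr; i++) {
            void *x = p[i] = kmem_cache_alloc(s, flags);

            if (!x) {
                /* Roll back the i objects already allocated. */
                __kmem_cache_free_bulk(s, i, p);
                return 0;
            }
        }
        return i;    /* == nr on success */
    }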
166 int slab_unmergeable(struct kmem_cache *s) in slab_unmergeable() argument
168 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)) in slab_unmergeable()
171 if (s->ctor) in slab_unmergeable()
174 if (s->usersize) in slab_unmergeable()
180 if (s->refcount < 0) in slab_unmergeable()
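Assembled from these fragments, slab_unmergeable() is a series of early-outs: a cache stays separate when merging is disabled globally (slab_nomerge) or per-cache (SLAB_NEVER_MERGE), when it has a constructor, when it carries a usercopy whitelist, or when its refcount is negative (boot caches; see create_boot_cache() below). A hedged reconstruction:

    int slab_unmergeable(struct kmem_cache *s)
    {
        if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
            return 1;

        if (s->ctor)
            return 1;

        /* A usercopy whitelist must not be diluted by merging. */
        if (s->usersize)
            return 1;

        /* Boot caches are marked unmergeable with a negative refcount. */
        if (s->refcount < 0)
            return 1;

        return 0;
    }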
189 struct kmem_cache *s; in find_mergeable() local
205 list_for_each_entry_reverse(s, &slab_caches, list) { in find_mergeable()
206 if (slab_unmergeable(s)) in find_mergeable()
209 if (size > s->size) in find_mergeable()
212 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME)) in find_mergeable()
218 if ((s->size & ~(align - 1)) != s->size) in find_mergeable()
221 if (s->size - size >= sizeof(void *)) in find_mergeable()
225 (align > s->align || s->align % align)) in find_mergeable()
228 return s; in find_mergeable()
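find_mergeable() walks slab_caches in reverse looking for an existing cache the new request can alias instead of creating a fresh one. Reading the surviving conditions: the candidate must itself be mergeable, be at least as large as the requested size, agree exactly on the merge-relevant flag bits, already satisfy the requested alignment, and not waste a pointer's worth of space or more per object. A sketch of the loop under those assumptions:

    list_for_each_entry_reverse(s, &slab_caches, list) {
        if (slab_unmergeable(s))
            continue;

        if (size > s->size)    /* candidate slot too small */
            continue;

        /* Merge-relevant flags (SLAB_MERGE_SAME mask) must match exactly. */
        if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
            continue;

        /* The candidate's slot size must already be suitably aligned. */
        if ((s->size & ~(align - 1)) != s->size)
            continue;

        /* Don't waste a word or more per object. */
        if (s->size - size >= sizeof(void *))
            continue;

        /* SLAB only: the existing alignment must cover the requested one. */
        if (IS_ENABLED(CONFIG_SLAB) && align &&
            (align > s->align || s->align % align))
            continue;

        return s;
    }
    return NULL;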
239 struct kmem_cache *s; in create_cache() local
246 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); in create_cache()
247 if (!s) in create_cache()
250 s->name = name; in create_cache()
251 s->size = s->object_size = object_size; in create_cache()
252 s->align = align; in create_cache()
253 s->ctor = ctor; in create_cache()
254 s->useroffset = useroffset; in create_cache()
255 s->usersize = usersize; in create_cache()
257 err = __kmem_cache_create(s, flags); in create_cache()
261 s->refcount = 1; in create_cache()
262 list_add(&s->list, &slab_caches); in create_cache()
266 return s; in create_cache()
269 kmem_cache_free(kmem_cache, s); in create_cache()
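Two details worth noting in create_cache(): the descriptor for the new cache is itself allocated from kmem_cache, the boot-time cache of struct kmem_cache objects; and if the allocator-specific __kmem_cache_create() fails, the fragment at line 269 frees the descriptor back and the function returns ERR_PTR(err) rather than NULL, which is why the caller below tests IS_ERR(s).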
308 struct kmem_cache *s = NULL; in kmem_cache_create_usercopy() local
352 s = __kmem_cache_alias(name, size, align, flags, ctor); in kmem_cache_create_usercopy()
353 if (s) in kmem_cache_create_usercopy()
362 s = create_cache(cache_name, size, in kmem_cache_create_usercopy()
365 if (IS_ERR(s)) { in kmem_cache_create_usercopy()
366 err = PTR_ERR(s); in kmem_cache_create_usercopy()
387 return s; in kmem_cache_create_usercopy()
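As a usage sketch of the usercopy API (hypothetical struct and names, not from this file): a driver that copies only one field of its objects to or from userspace whitelists just that region, so hardened usercopy can reject anything that strays outside it:

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/stddef.h>

    /* Hypothetical object: only `payload` is ever copied to/from userspace. */
    struct session {
        spinlock_t lock;
        char payload[64];
    };

    static struct kmem_cache *session_cachep;

    static int __init session_cache_init(void)
    {
        session_cachep = kmem_cache_create_usercopy("session",
                sizeof(struct session), 0, SLAB_HWCACHE_ALIGN,
                offsetof(struct session, payload),      /* useroffset */
                sizeof_field(struct session, payload),  /* usersize */
                NULL /* no ctor */);
        return session_cachep ? 0 : -ENOMEM;
    }

Note also line 352: before creating anything, kmem_cache_create_usercopy() asks __kmem_cache_alias() (which relies on find_mergeable() above) whether an existing compatible cache can simply be reused; upstream only attempts this when no usercopy window is requested.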
428 struct kmem_cache *s, *s2; in slab_caches_to_rcu_destroy_workfn() local
448 list_for_each_entry_safe(s, s2, &to_destroy, list) { in slab_caches_to_rcu_destroy_workfn()
449 debugfs_slab_release(s); in slab_caches_to_rcu_destroy_workfn()
450 kfence_shutdown_cache(s); in slab_caches_to_rcu_destroy_workfn()
452 sysfs_slab_release(s); in slab_caches_to_rcu_destroy_workfn()
454 slab_kmem_cache_release(s); in slab_caches_to_rcu_destroy_workfn()
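For SLAB_TYPESAFE_BY_RCU caches, the final teardown is deferred to this workqueue function, which must let every in-flight RCU grace period finish before the backing memory may disappear. A hedged reconstruction around the surviving lines:

    static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
    {
        struct kmem_cache *s, *s2;
        LIST_HEAD(to_destroy);

        /* Detach the pending caches under slab_mutex, then work unlocked. */
        mutex_lock(&slab_mutex);
        list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
        mutex_unlock(&slab_mutex);

        if (list_empty(&to_destroy))
            return;

        rcu_barrier();    /* wait for all outstanding RCU callbacks */

        list_for_each_entry_safe(s, s2, &to_destroy, list) {
            debugfs_slab_release(s);
            kfence_shutdown_cache(s);
            sysfs_slab_release(s);
            /* without sysfs: slab_kmem_cache_release(s), as at line 454 */
        }
    }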
459 static int shutdown_cache(struct kmem_cache *s) in shutdown_cache() argument
462 kasan_cache_shutdown(s); in shutdown_cache()
464 if (__kmem_cache_shutdown(s) != 0) in shutdown_cache()
467 list_del(&s->list); in shutdown_cache()
469 if (s->flags & SLAB_TYPESAFE_BY_RCU) { in shutdown_cache()
471 sysfs_slab_unlink(s); in shutdown_cache()
473 list_add_tail(&s->list, &slab_caches_to_rcu_destroy); in shutdown_cache()
476 kfence_shutdown_cache(s); in shutdown_cache()
477 debugfs_slab_release(s); in shutdown_cache()
479 sysfs_slab_unlink(s); in shutdown_cache()
480 sysfs_slab_release(s); in shutdown_cache()
482 slab_kmem_cache_release(s); in shutdown_cache()
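shutdown_cache() ties the two paths together: if __kmem_cache_shutdown() cannot drain the cache the destroy fails, otherwise the cache is unlinked from slab_caches and either queued for the RCU worker above or released immediately. A sketch, assuming upstream's -EBUSY return and the slab_caches_to_rcu_destroy_work item:

    static int shutdown_cache(struct kmem_cache *s)
    {
        kasan_cache_shutdown(s);    /* drain KASAN's quarantine first */

        if (__kmem_cache_shutdown(s) != 0)
            return -EBUSY;          /* live objects remain */

        list_del(&s->list);

        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
            /* Defer the real release past an RCU grace period. */
            sysfs_slab_unlink(s);
            list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
            schedule_work(&slab_caches_to_rcu_destroy_work);
        } else {
            kfence_shutdown_cache(s);
            debugfs_slab_release(s);
            sysfs_slab_unlink(s);
            sysfs_slab_release(s);
            /* without sysfs: slab_kmem_cache_release(s), line 482 */
        }

        return 0;
    }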
489 void slab_kmem_cache_release(struct kmem_cache *s) in slab_kmem_cache_release() argument
491 __kmem_cache_release(s); in slab_kmem_cache_release()
492 kfree_const(s->name); in slab_kmem_cache_release()
493 kmem_cache_free(kmem_cache, s); in slab_kmem_cache_release()
496 void kmem_cache_destroy(struct kmem_cache *s) in kmem_cache_destroy() argument
500 if (unlikely(!s)) in kmem_cache_destroy()
508 s->refcount--; in kmem_cache_destroy()
509 if (s->refcount) in kmem_cache_destroy()
512 err = shutdown_cache(s); in kmem_cache_destroy()
515 s->name); in kmem_cache_destroy()
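Because merged caches share a single descriptor, kmem_cache_destroy() only drops a reference (line 508) and returns while s->refcount is still nonzero; shutdown_cache() runs for the last user only, and the printk whose tail is visible at line 515 reports a cache that could not be drained. The matching teardown for the creation sketch above (hypothetical names):

    static void __exit session_cache_exit(void)
    {
        /* Safe on NULL; really destroys only when the refcount hits zero. */
        kmem_cache_destroy(session_cachep);
    }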
556 void __init create_boot_cache(struct kmem_cache *s, const char *name, in create_boot_cache() argument
563 s->name = name; in create_boot_cache()
564 s->size = s->object_size = size; in create_boot_cache()
572 s->align = calculate_alignment(flags, align, size); in create_boot_cache()
574 s->useroffset = useroffset; in create_boot_cache()
575 s->usersize = usersize; in create_boot_cache()
577 err = __kmem_cache_create(s, flags); in create_boot_cache()
583 s->refcount = -1; /* Exempt from merging for now */ in create_boot_cache()
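create_boot_cache() is the early-boot variant: the caller supplies the descriptor, the alignment is computed by calculate_alignment() rather than taken verbatim, and s->refcount = -1 marks the cache unmergeable for now, which is precisely the refcount < 0 early-out in slab_unmergeable() above.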
590 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); in create_kmalloc_cache() local
592 if (!s) in create_kmalloc_cache()
595 create_boot_cache(s, name, size, flags, useroffset, usersize); in create_kmalloc_cache()
596 kasan_cache_create_kmalloc(s); in create_kmalloc_cache()
597 list_add(&s->list, &slab_caches); in create_kmalloc_cache()
598 s->refcount = 1; in create_kmalloc_cache()
599 return s; in create_kmalloc_cache()
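create_kmalloc_cache() is almost fully visible here; judging by the fragments, the only elided pieces are the failure handling and annotations. A reconstruction under that assumption (upstream panics on allocation failure this early in boot):

    static struct kmem_cache *__init create_kmalloc_cache(const char *name,
            unsigned int size, slab_flags_t flags,
            unsigned int useroffset, unsigned int usersize)
    {
        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

        if (!s)
            panic("Out of memory when creating slab %s\n", name);

        create_boot_cache(s, name, size, flags, useroffset, usersize);
        kasan_cache_create_kmalloc(s);
        list_add(&s->list, &slab_caches);
        s->refcount = 1;    /* re-enable merging, cf. create_boot_cache() */
        return s;
    }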
652 struct kmem_cache *s = NULL; in kmalloc_slab() local
665 trace_android_vh_kmalloc_slab(index, flags, &s); in kmalloc_slab()
666 if (s) in kmalloc_slab()
667 return s; in kmalloc_slab()
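kmalloc_slab() maps a request size to one of the fixed kmalloc caches. The trace_android_vh_kmalloc_slab() call at line 665 is an Android common-kernel vendor hook: a vendor module may substitute its own cache for a given bucket before the default table lookup. A sketch of the lookup's shape, assuming upstream's size_index table for sizes up to 192 bytes and fls() beyond:

    if (size <= 192) {
        if (!size)
            return ZERO_SIZE_PTR;    /* kmalloc(0) sentinel */
        index = size_index[size_index_elem(size)];
    } else {
        if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
            return NULL;
        index = fls(size - 1);       /* next power-of-two bucket */
    }

    trace_android_vh_kmalloc_slab(index, flags, &s);
    if (s)
        return s;    /* the vendor hook overrode this bucket */

    return kmalloc_caches[kmalloc_type(flags)][index];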
817 struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i]; in create_kmalloc_caches() local
819 if (s) { in create_kmalloc_caches()
971 static void cache_show(struct kmem_cache *s, struct seq_file *m) in cache_show() argument
976 get_slabinfo(s, &sinfo); in cache_show()
979 s->name, sinfo.active_objs, sinfo.num_objs, s->size, in cache_show()
986 slabinfo_show_stats(m, s); in cache_show()
992 struct kmem_cache *s = list_entry(p, struct kmem_cache, list); in slab_show() local
996 cache_show(s, m); in slab_show()
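cache_show() and slab_show() back /proc/slabinfo: judging by line 979, each row begins with the cache name, the active and total object counts, and the slot size s->size, after which slabinfo_show_stats() appends tunables and statistics.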
1002 struct kmem_cache *s, *s2; in dump_unreclaimable_slab() local
1020 list_for_each_entry_safe(s, s2, &slab_caches, list) { in dump_unreclaimable_slab()
1021 if (s->flags & SLAB_RECLAIM_ACCOUNT) in dump_unreclaimable_slab()
1024 get_slabinfo(s, &sinfo); in dump_unreclaimable_slab()
1027 pr_info("%-17s %10luKB %10luKB\n", s->name, in dump_unreclaimable_slab()
1028 (sinfo.active_objs * s->size) / 1024, in dump_unreclaimable_slab()
1029 (sinfo.num_objs * s->size) / 1024); in dump_unreclaimable_slab()
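dump_unreclaimable_slab() is the OOM-report helper: it skips caches flagged SLAB_RECLAIM_ACCOUNT (line 1021), since their memory is reclaimable, and prints used versus total bytes per cache. The arithmetic is object count times slot size, scaled to KB: a cache with s->size = 256 holding 1000 active objects out of 1200 allocated would print 250KB used and 300KB total.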
1226 int should_failslab(struct kmem_cache *s, gfp_t gfpflags) in should_failslab() argument
1228 if (__should_failslab(s, gfpflags)) in should_failslab()
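should_failslab() is the fault-injection gate consulted on the allocation fast path; it is kept as a separate function so that error injection (for example a BPF error-injection override) can force allocation failures. Its body is all but determined by the two surviving lines:

    int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
    {
        if (__should_failslab(s, gfpflags))
            return -ENOMEM;    /* injected failure */
        return 0;
    }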