Lines Matching +full:page +full:- +full:size
(The matches below are from the Linux kernel's mm/slab.h; each hit is shown with its source line number, and non-matching lines are elided.)
1 /* SPDX-License-Identifier: GPL-2.0 */
21 unsigned int object_size; /* The original size of the object */
22 unsigned int size; /* The aligned/padded/added on size */
26 unsigned int usersize; /* Usercopy region size */
44 #include <linux/fault-inject.h>
61 PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */
80 unsigned int size;
88 /* Find the kmalloc slab corresponding to a certain size */
97 struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
101 unsigned int size, slab_flags_t flags,
105 struct kmem_cache *find_mergeable(unsigned size, unsigned align,
109 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
116 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
207 return (s->flags & SLAB_RECLAIM_ACCOUNT) ? /* in cache_vmstat_idx() */
234 return s->flags & flags; /* in kmem_cache_debug_flags() */
240 static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
243 * page->mem_cgroup and page->obj_cgroups are sharing the same
245 * that the page is a slab page (e.g. page_cgroup_ino()), let's
249 ((unsigned long)page->obj_cgroups & ~0x1UL);
252 static inline bool page_has_obj_cgroups(struct page *page)
254 return ((unsigned long)page->obj_cgroups & 0x1UL);
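
The two helpers above rely on a low-bit tagging trick: page->mem_cgroup and page->obj_cgroups share the same word, so the lowest bit of the stored pointer records which of the two it currently holds. A minimal sketch of the idiom (the encode/decode names are illustrative, not from slab.h):

    /* Illustration only: kmalloc()ed vectors are word-aligned, so bit 0 is free. */
    #define OBJCGS_TAG 0x1UL

    static unsigned long encode_objcgs(struct obj_cgroup **vec)
    {
            return (unsigned long)vec | OBJCGS_TAG;  /* mark "this is a vector" */
    }

    static struct obj_cgroup **decode_objcgs(unsigned long word)
    {
            if (!(word & OBJCGS_TAG))
                    return NULL;                     /* field holds mem_cgroup instead */
            return (struct obj_cgroup **)(word & ~OBJCGS_TAG);
    }
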
257 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
260 static inline void memcg_free_page_obj_cgroups(struct page *page)
262 kfree(page_obj_cgroups(page));
263 page->obj_cgroups = NULL;
272 return s->size + sizeof(struct obj_cgroup *); /* in obj_full_size() */
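
obj_full_size() is the per-object amount charged to the cgroup: the object footprint plus one slot of the per-page obj_cgroup vector. On a 64-bit kernel, for example, a cache with s->size == 192 charges 192 + 8 = 200 bytes per allocated object.
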
287 if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)) /* in memcg_slab_pre_alloc_hook() */
319 gfp_t flags, size_t size, /* in memcg_slab_post_alloc_hook() */
322 struct page *page;
329 for (i = 0; i < size; i++) {
331 page = virt_to_head_page(p[i]);
333 if (!page_has_obj_cgroups(page) &&
334 memcg_alloc_page_obj_cgroups(page, s, flags)) {
339 off = obj_to_index(s, page, p[i]);
341 page_obj_cgroups(page)[off] = objcg;
342 mod_objcg_state(objcg, page_pgdat(page),
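
The off = obj_to_index(s, page, p[i]) step maps an object's address to its slot number inside the slab, so the charge can be recorded in the matching vector entry. Arithmetically it is an offset division; a hedged sketch of the equivalent computation (upstream avoids the runtime divide with a precomputed reciprocal):

    /* Conceptual equivalent of obj_to_index(). */
    static unsigned int obj_index_sketch(const struct kmem_cache *s,
                                         struct page *page, void *obj)
    {
            return ((char *)obj - (char *)page_address(page)) / s->size;
    }
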
356 struct page *page; /* in memcg_slab_free_hook() */
367 page = virt_to_head_page(p[i]);
368 if (!page_has_obj_cgroups(page))
372 s = page->slab_cache;
376 off = obj_to_index(s, page, p[i]);
377 objcg = page_obj_cgroups(page)[off];
381 page_obj_cgroups(page)[off] = NULL;
383 mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
384 -obj_full_size(s));
390 static inline bool page_has_obj_cgroups(struct page *page)
400 static inline int memcg_alloc_page_obj_cgroups(struct page *page,
406 static inline void memcg_free_page_obj_cgroups(struct page *page)
419 gfp_t flags, size_t size, /* in memcg_slab_post_alloc_hook() */
432 struct page *page; /* in virt_to_cache() */
434 page = virt_to_head_page(obj);
435 if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
438 return page->slab_cache;
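
virt_to_cache() is how free paths that receive only an object pointer recover the owning cache: take the compound head page and read page->slab_cache, warning if the page is not a slab page at all. A hedged usage sketch:

    /* Sketch: a kfree()-style path that must find the cache itself. */
    static void free_by_pointer(void *obj)
    {
            struct kmem_cache *s = virt_to_cache(obj);

            if (!s)                 /* not a slab page; WARN_ONCE already fired */
                    return;
            kmem_cache_free(s, obj);
    }
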
441 static __always_inline void account_slab_page(struct page *page, int order,
444 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
448 static __always_inline void unaccount_slab_page(struct page *page, int order,
452 memcg_free_page_obj_cgroups(page);
454 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
455 -(PAGE_SIZE << order));
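
Both helpers account slab memory in bytes rather than pages: for an order-1 slab on 4 KiB pages, account_slab_page() adds PAGE_SIZE << 1 = 8192 bytes to the node's reclaimable or unreclaimable slab counter (whichever cache_vmstat_idx(s) selects), and unaccount_slab_page() subtracts the same amount after releasing the obj_cgroup vector.
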
469 __func__, s->name, cachep->name)) /* in cache_from_obj() */
477 return s->object_size; /* in slab_ksize() */
485 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
486 return s->object_size;
488 if (s->flags & SLAB_KASAN)
489 return s->object_size;
495 if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
496 return s->inuse;
500 return s->size;
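
slab_ksize() reports how many bytes of an object the caller may legitimately use: with red-zoning, poisoning, or KASAN the padding past the object is reserved for debugging, so only object_size is safe; caches that keep a freelist pointer or user-tracking data there are capped at s->inuse; otherwise the full allocated s->size is usable. A hedged illustration of the common non-debug case:

    /* Assuming no debug flags: the bucket's full size is usable. */
    void *p = kmalloc(100, GFP_KERNEL); /* served from the kmalloc-128 cache */
    size_t usable = ksize(p);           /* typically 128, i.e. s->size */
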
506 size_t size, gfp_t flags) /* in slab_pre_alloc_hook() */
518 if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
526 gfp_t flags, size_t size, void **p) /* in slab_post_alloc_hook() */
531 for (i = 0; i < size; i++) {
534 kmemleak_alloc_recursive(p[i], s->object_size, 1,
535 s->flags, flags);
538 memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
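
These two hooks bracket every slab allocation: slab_pre_alloc_hook() handles the pre-allocation checks and the memcg pre-charge (it may return NULL to refuse the allocation), and slab_post_alloc_hook() then initializes the objects, informs kmemleak, and commits the memcg charge. A hedged sketch of the call order, assuming the signatures visible above (actually_allocate() is a hypothetical placeholder, not a kernel function):

    /* Sketch of how an allocator wraps one object allocation with the hooks. */
    static void *alloc_one_sketch(struct kmem_cache *s, gfp_t flags)
    {
            struct obj_cgroup *objcg = NULL;
            void *obj;

            s = slab_pre_alloc_hook(s, &objcg, 1, flags);
            if (!s)
                    return NULL;
            obj = actually_allocate(s, flags);  /* hypothetical placeholder */
            slab_post_alloc_hook(s, objcg, flags, 1, &obj);
            return obj;
    }
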
556 unsigned int colour_next; /* Per-node cache coloring */
577 return s->node[node]; /* in get_node() */
621 if (c->ctor) /* in slab_want_init_on_alloc() */
623 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
633 return !(c->ctor || /* in slab_want_init_on_free() */
634 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
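
These predicates gate the init_on_alloc/init_on_free hardening: a cache with a constructor, or one marked SLAB_TYPESAFE_BY_RCU or SLAB_POISON, must not be unconditionally wiped, because zeroing would destroy the ctor-initialized state or contents that RCU readers may still be traversing. The init_on_alloc path, for instance, skips caches with a ctor so it does not clobber what the constructor just set up.
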