Searched refs:objects (Results 1 – 7 of 7) sorted by relevance
/mm/
slub.c
    625   bitmap_zero(obj_map, slab->objects);   in __fill_map()
    711   if (object < base || object >= base + slab->objects * s->size ||   in check_valid_pointer()
    779   for_each_object(p, s, slab_address(slab), slab->objects) {   in get_each_object_track()
    880   slab, slab->objects, slab->inuse, slab->freelist,   in print_slab_info()
    1289  if (slab->objects > maxobj) {   in check_slab()
    1291  slab->objects, maxobj);   in check_slab()
    1294  if (slab->inuse > slab->objects) {   in check_slab()
    1296  slab->inuse, slab->objects);   in check_slab()
    1316  while (fp && nr <= slab->objects) {   in on_freelist()
    1327  slab->inuse = slab->objects;   in on_freelist()
    [all …]
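The slub.c hits above are SLUB's consistency checks: check_valid_pointer() only accepts a pointer that falls inside the slab's object area and starts an object slot, and on_freelist() walks at most slab->objects freelist entries. Below is a minimal userspace-style sketch of that bounds test; base, nr_objects and obj_size stand in for slab_address(slab), slab->objects and s->size and are illustrative names, not the kernel's.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Sketch of the bounds check visible in check_valid_pointer(): a valid
 * object pointer must lie inside [base, base + nr_objects * obj_size)
 * and must sit exactly on an object boundary.
 */
static bool object_pointer_valid(const void *base, unsigned int nr_objects,
                                 size_t obj_size, const void *object)
{
        uintptr_t start = (uintptr_t)base;
        uintptr_t end   = start + (uintptr_t)nr_objects * obj_size;
        uintptr_t p     = (uintptr_t)object;

        if (p < start || p >= end)
                return false;                   /* outside the object area */

        return (p - start) % obj_size == 0;     /* must start an object slot */
}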
slab.h
    80    unsigned objects:15;   member
    508   size_t objects, gfp_t flags)   in memcg_slab_pre_alloc_hook() argument
    534   if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))   in memcg_slab_pre_alloc_hook()
    580   void **p, int objects)   in memcg_slab_free_hook() argument
    592   for (i = 0; i < objects; i++) {   in memcg_slab_free_hook()
    634   size_t objects, gfp_t flags)   in memcg_slab_pre_alloc_hook() argument
    647   void **p, int objects)   in memcg_slab_free_hook() argument
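The slab.h hit at line 80 shows that the per-slab object count is a 15-bit bitfield, so one slab can describe at most (1 << 15) - 1 = 32767 objects. Here is a small standalone sketch of that kind of packed counter word; only the objects:15 field comes from the search hit, the companion fields are assumptions added to illustrate the packing.

#include <stdio.h>

/* Illustrative packing; only objects:15 is taken from the slab.h hit above. */
struct slab_counters_sketch {
        unsigned inuse:16;      /* assumed companion field */
        unsigned objects:15;    /* at most (1 << 15) - 1 = 32767 objects */
        unsigned frozen:1;      /* assumed companion field */
};

int main(void)
{
        struct slab_counters_sketch c = { .objects = 32767 };

        printf("max objects per slab: %u, counters word: %zu bytes\n",
               (unsigned)c.objects, sizeof(c));
        return 0;
}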
Kconfig
    221   such as the number of pages per zspage and the number of objects
    248   well in all environments. It organizes cache hot objects in
    255   instead of managing queues of cached objects (SLAB approach).
    256   Per cpu caching is realized using slabs of objects instead
    257   of queues of objects. SLUB can use memory efficiently
    289   overwrite objects from merged caches (and more easily control
    292   can usually only damage objects in the same cache. To disable
    333   Per cpu partial caches accelerate objects allocation and freeing
    347   vulnerable memory objects on the heap for the purpose of exploiting
    351   that effectively diverges the memory objects allocated for different
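The Kconfig help text above talks about slab caches of objects and how keeping a subsystem's objects in their own cache limits what a corruption in one cache can damage. As a point of reference, here is a hedged sketch of a subsystem creating its own kmem_cache; struct foo and the cache name are made up for illustration.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Illustrative object type; the name and layout are assumptions. */
struct foo {
        int id;
        char name[32];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
        /* A dedicated cache groups all struct foo objects together. */
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                       0, SLAB_HWCACHE_ALIGN, NULL);
        if (!foo_cachep)
                return -ENOMEM;
        return 0;
}

static void foo_cache_exit(void)
{
        kmem_cache_destroy(foo_cachep);
}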
Kconfig.debug
    244   difference being that the orphan objects are not freed but
    251   of finding leaks due to the slab objects poisoning.
    265   of metadata objects to track such callbacks. After kmemleak is
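These Kconfig.debug lines come from the kmemleak help text: allocations whose references disappear without being freed are reported as orphan objects. A brief hedged sketch of what that looks like from a caller's side; the function name is illustrative, and kmemleak_not_leak() is the annotation for allocations that are deliberately kept.

#include <linux/errno.h>
#include <linux/kmemleak.h>
#include <linux/slab.h>

static int kmemleak_example(void)
{
        void *leaked, *annotated;

        /* Reference is dropped without kfree(): a later kmemleak scan
         * would report this allocation as an orphan object (deliberate
         * here, purely for illustration). */
        leaked = kmalloc(128, GFP_KERNEL);
        if (!leaked)
                return -ENOMEM;

        /* Allocations that are intentionally never freed, or whose only
         * reference lives where the scanner cannot see it, can be
         * annotated so they are not reported. */
        annotated = kmalloc(256, GFP_KERNEL);
        if (annotated)
                kmemleak_not_leak(annotated);

        return 0;
}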
memcontrol.c
    2949  unsigned int objects = objs_per_slab(s, slab);   in memcg_alloc_slab_cgroups() local
    2954  vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,   in memcg_alloc_slab_cgroups()
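The memcontrol.c hit allocates one obj_cgroup pointer per object in the slab, zeroed and placed on a given NUMA node. A minimal sketch of that allocation pattern, with nr_objects standing in for objs_per_slab(s, slab) and the helper name being illustrative:

#include <linux/slab.h>

/* One pointer slot per object; kcalloc_node() zeroes the array, so
 * unset slots read back as NULL. */
static void **alloc_per_object_vec(unsigned int nr_objects, gfp_t gfp, int node)
{
        return kcalloc_node(nr_objects, sizeof(void *), gfp, node);
}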
/mm/kfence/
kfence_test.c
    539   void *objects[] = {   in test_free_bulk() local
    547   kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);   in test_free_bulk()
    746   void *objects[100];   in test_memcache_alloc_bulk() local
    747   int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),   in test_memcache_alloc_bulk()
    748   objects);   in test_memcache_alloc_bulk()
    751   for (i = 0; i < ARRAY_SIZE(objects); i++) {   in test_memcache_alloc_bulk()
    752   if (is_kfence_address(objects[i])) {   in test_memcache_alloc_bulk()
    757   kmem_cache_free_bulk(test_cache, num, objects);   in test_memcache_alloc_bulk()
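The kfence_test.c hits exercise the bulk slab API: kmem_cache_alloc_bulk() fills an array with up to the requested number of objects and returns how many it actually obtained, and kmem_cache_free_bulk() returns them in one call. A minimal sketch of that round trip; the cache pointer is assumed to come from an earlier kmem_cache_create().

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int bulk_roundtrip(struct kmem_cache *cache)
{
        void *objects[16];
        int num;

        /* Returns the number of objects actually allocated (0 on failure). */
        num = kmem_cache_alloc_bulk(cache, GFP_KERNEL,
                                    ARRAY_SIZE(objects), objects);
        if (!num)
                return -ENOMEM;

        /* ... use objects[0 .. num - 1] ... */

        kmem_cache_free_bulk(cache, num, objects);
        return 0;
}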
core.c
    467   slab->objects = 1;   in kfence_guarded_alloc()
    754   DEFINE_SEQ_ATTRIBUTE(objects);
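DEFINE_SEQ_ATTRIBUTE(objects) at core.c line 754 builds file operations (objects_fops) from a seq_operations table named objects_seq_ops, which backs KFENCE's debugfs objects file. A hedged sketch of that pattern with a trivial single-entry iterator; the debugfs wiring and iterator contents are illustrative, not KFENCE's actual implementation.

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* Trivial iterator: emits one placeholder line and stops. */
static void *objects_start(struct seq_file *seq, loff_t *pos)
{
        return *pos == 0 ? SEQ_START_TOKEN : NULL;
}

static void *objects_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void objects_stop(struct seq_file *seq, void *v)
{
}

static int objects_show(struct seq_file *seq, void *v)
{
        seq_puts(seq, "placeholder object listing\n");
        return 0;
}

static const struct seq_operations objects_seq_ops = {
        .start = objects_start,
        .next  = objects_next,
        .stop  = objects_stop,
        .show  = objects_show,
};

/* Generates objects_open() and objects_fops from objects_seq_ops. */
DEFINE_SEQ_ATTRIBUTE(objects);

static void objects_debugfs_init(struct dentry *parent)
{
        /* Exposes the iterator as a read-only debugfs file named "objects". */
        debugfs_create_file("objects", 0400, parent, NULL, &objects_fops);
}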