/mm/ |
D | slab.c |
      257  #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)  argument
      258  #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)  argument
      272  #define STATS_INC_ACTIVE(x) ((x)->num_active++)  argument
      273  #define STATS_DEC_ACTIVE(x) ((x)->num_active--)  argument
      274  #define STATS_INC_ALLOCED(x) ((x)->num_allocations++)  argument
      275  #define STATS_INC_GROWN(x) ((x)->grown++)  argument
      276  #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))  argument
      277  #define STATS_SET_HIGH(x) \  argument
      279  if ((x)->num_active > (x)->high_mark) \
      280  (x)->high_mark = (x)->num_active; \
      [all …]
|
D | page-writeback.c |
      311  unsigned long x = 0;  in highmem_dirtyable_memory()  local
      331  x += nr_pages;  in highmem_dirtyable_memory()
      344  if ((long)x < 0)  in highmem_dirtyable_memory()
      345  x = 0;  in highmem_dirtyable_memory()
      353  return min(x, total);  in highmem_dirtyable_memory()
      367  unsigned long x;  in global_dirtyable_memory()  local
      369  x = global_zone_page_state(NR_FREE_PAGES);  in global_dirtyable_memory()
      375  x -= min(x, totalreserve_pages);  in global_dirtyable_memory()
      377  x += global_node_page_state(NR_INACTIVE_FILE);  in global_dirtyable_memory()
      378  x += global_node_page_state(NR_ACTIVE_FILE);  in global_dirtyable_memory()
      [all …]
|
D | slub.c |
       321  struct kmem_cache_order_objects x = {  in oo_make()  local
       325  return x;  in oo_make()
       328  static inline unsigned int oo_order(struct kmem_cache_order_objects x)  in oo_order()  argument
       330  return x.x >> OO_SHIFT;  in oo_order()
       333  static inline unsigned int oo_objects(struct kmem_cache_order_objects x)  in oo_objects()  argument
       335  return x.x & OO_MASK;  in oo_objects()
      1553  static __always_inline void kfree_hook(void *x)  in kfree_hook()  argument
      1555  kmemleak_free(x);  in kfree_hook()
      1556  kasan_kfree_large(x);  in kfree_hook()
      1560  void *x, bool init)  in slab_free_hook()  argument
      [all …]
|
D | vmstat.c |
      321  long x;  in __mod_zone_page_state()  local
      324  x = delta + __this_cpu_read(*p);  in __mod_zone_page_state()
      328  if (unlikely(abs(x) > t)) {  in __mod_zone_page_state()
      329  zone_page_state_add(x, zone, item);  in __mod_zone_page_state()
      330  x = 0;  in __mod_zone_page_state()
      332  __this_cpu_write(*p, x);  in __mod_zone_page_state()
      341  long x;  in __mod_node_page_state()  local
      349  x = delta + __this_cpu_read(*p);  in __mod_node_page_state()
      353  if (unlikely(abs(x) > t)) {  in __mod_node_page_state()
      354  node_page_state_add(x, pgdat, item);  in __mod_node_page_state()
      [all …]
|
D | memcontrol.c |
      214  #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))  argument
      776  long x, threshold = MEMCG_CHARGE_BATCH;  in __mod_memcg_state()  local
      784  x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);  in __mod_memcg_state()
      785  if (unlikely(abs(x) > threshold)) {  in __mod_memcg_state()
      792  __this_cpu_add(memcg->vmstats_local->stat[idx], x);  in __mod_memcg_state()
      794  atomic_long_add(x, &mi->vmstats[idx]);  in __mod_memcg_state()
      795  x = 0;  in __mod_memcg_state()
      797  __this_cpu_write(memcg->vmstats_percpu->stat[idx], x);  in __mod_memcg_state()
      816  long x, threshold = MEMCG_CHARGE_BATCH;  in __mod_memcg_lruvec_state()  local
      830  x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);  in __mod_memcg_lruvec_state()
      [all …]
|
D | slab.h |
      501  static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)  in cache_from_obj()  argument
      509  cachep = virt_to_cache(x);  in cache_from_obj()
      513  print_tracking(cachep, x);  in cache_from_obj()
      655  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
|
D | vmalloc.c |
        46  bool is_vmalloc_addr(const void *x)  in is_vmalloc_addr()  argument
        48  unsigned long addr = (unsigned long)x;  in is_vmalloc_addr()
       332  int is_vmalloc_or_module_addr(const void *x)  in is_vmalloc_or_module_addr()  argument
       340  unsigned long addr = (unsigned long)x;  in is_vmalloc_or_module_addr()
       344  return is_vmalloc_addr(x);  in is_vmalloc_or_module_addr()
      1481  #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */  argument
      1482  #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */  argument
|
D | swap_state.c |
      61  #define INC_CACHE_INFO(x) data_race(swap_cache_info.x++)  argument
      62  #define ADD_CACHE_INFO(x, nr) data_race(swap_cache_info.x += (nr))  argument
|
D | util.c |
      41  void kfree_const(const void *x)  in kfree_const()  argument
      43  if (!is_kernel_rodata((unsigned long)x))  in kfree_const()
      44  kfree(x);  in kfree_const()
|
D | readahead.c |
      118  gfp_t readahead_gfp_mask(struct address_space *x)  in readahead_gfp_mask()  argument
      120  gfp_t mask = mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;  in readahead_gfp_mask()
|
D | kmemleak.c |
      240  #define kmemleak_warn(x...) do { \  argument
      241  pr_warn(x); \
      251  #define kmemleak_stop(x...) do { \  argument
      252  kmemleak_warn(x); \
|
D | ksm.c |
      46  #define NUMA(x) (x)  argument
      47  #define DO_NUMA(x) do { (x); } while (0)  argument
      49  #define NUMA(x) (0)  argument
      50  #define DO_NUMA(x) do { } while (0)  argument
|
D | hugetlb_cgroup.c |
      26  #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))  argument
|
D | slab_common.c |
      126  void *x = p[i] = kmem_cache_alloc(s, flags);  in __kmem_cache_alloc_bulk()  local
      127  if (!x) {  in __kmem_cache_alloc_bulk()
|
D | oom_kill.c |
      524  #define K(x) ((x) << (PAGE_SHIFT-10))  argument
|
D | backing-dev.c |
      72  #define K(x) ((x) << (PAGE_SHIFT - 10))  in bdi_debug_stats_show()  argument
|
D | Kconfig |
      689  tristate "Low (Up to 2x) density storage for compressed pages"
      698  tristate "Up to 3x density storage for compressed pages"
|
D | percpu.c |
      571  static inline bool pcpu_region_overlap(int a, int b, int x, int y)  in pcpu_region_overlap()  argument
      573  return (a < y) && (x < b);  in pcpu_region_overlap()
|
D | page_alloc.c |
      5666  #define K(x) ((x) << (PAGE_SHIFT-10))  argument
|
/mm/kasan/ |
D | generic.c |
      317  __memset((void *)addr, 0x##byte, size); \
|