/mm/ — hits for the identifier "x" ("argument"/"local" give the identifier's role at each line; "[all …]" marks result lists truncated by the search tool)

slab.c:
  284: #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)          (x: macro argument)
  299: #define STATS_INC_ACTIVE(x)   ((x)->num_active++)
  300: #define STATS_DEC_ACTIVE(x)   ((x)->num_active--)
  301: #define STATS_INC_ALLOCED(x)  ((x)->num_allocations++)
  302: #define STATS_INC_GROWN(x)    ((x)->grown++)
  303: #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
  304: #define STATS_SET_HIGH(x) \
  306:     if ((x)->num_active > (x)->high_mark) \
  307:         (x)->high_mark = (x)->num_active; \
  309: #define STATS_INC_ERR(x) ((x)->errors++)
  [all …]

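Apart from OFF_SLAB, these are slab.c's statistics macros, and almost all of them just bump a counter on the cache they are handed. The only one with any logic is STATS_SET_HIGH, which tracks a high-water mark. A minimal userspace sketch of that logic; the struct is a stand-in for the real kmem_cache statistics fields, and the do/while(0) wrapper (whose lines are elided above because they do not contain "x") is assumed from the usual kernel macro convention:

    #include <stdio.h>

    /* Stand-in for the statistics fields the slab.c macros touch. */
    struct cache_stats {
        unsigned long num_active;
        unsigned long high_mark;
    };

    #define STATS_INC_ACTIVE(x) ((x)->num_active++)
    #define STATS_DEC_ACTIVE(x) ((x)->num_active--)
    #define STATS_SET_HIGH(x)                       \
        do {                                        \
            if ((x)->num_active > (x)->high_mark)   \
                (x)->high_mark = (x)->num_active;   \
        } while (0)

    int main(void)
    {
        struct cache_stats s = { 0, 0 };

        STATS_INC_ACTIVE(&s);
        STATS_SET_HIGH(&s);      /* high_mark follows num_active upward */
        STATS_DEC_ACTIVE(&s);    /* ...but never back down */
        printf("active=%lu high=%lu\n", s.num_active, s.high_mark);
        return 0;
    }
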
page-writeback.c:
  293: unsigned long x = 0;                        (local in highmem_dirtyable_memory)
  298:     x += zone_dirtyable_memory(z);
  309:     if ((long)x < 0)
  310:         x = 0;
  318:     return min(x, total);
  332: unsigned long x;                            (local in global_dirtyable_memory)
  334: x = global_page_state(NR_FREE_PAGES);
  335: x -= min(x, dirty_balance_reserve);
  337: x += global_page_state(NR_INACTIVE_FILE);
  338: x += global_page_state(NR_ACTIVE_FILE);
  [all …]

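Both functions accumulate page counts in an unsigned long x and catch underflow with a signed cast: if the subtractions push the true value below zero, the unsigned arithmetic wraps to a huge number, and (long)x < 0 detects that. A minimal sketch of the clamp pattern, with made-up per-zone contributions (the trick assumes the true magnitude fits in a signed long):

    #include <stdio.h>

    int main(void)
    {
        unsigned long x = 0;
        long contrib[] = { 100, 200, -400 };   /* hypothetical zone values */

        for (int i = 0; i < 3; i++)
            x += contrib[i];                   /* unsigned: may wrap past 0 */

        if ((long)x < 0)                       /* wrapped, i.e. underflowed */
            x = 0;

        printf("dirtyable pages: %lu\n", x);   /* prints 0, not ~2^64 */
        return 0;
    }
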
slub.c:
  314:  struct kmem_cache_order_objects x = {     (local in oo_make)
  318:      return x;
  321:  static inline int oo_order(struct kmem_cache_order_objects x)     (x: argument)
  323:      return x.x >> OO_SHIFT;
  326:  static inline int oo_objects(struct kmem_cache_order_objects x)   (x: argument)
  328:      return x.x & OO_MASK;
  1318: static inline void kfree_hook(const void *x)                      (x: argument)
  1320:     kmemleak_free(x);
  1321:     kasan_kfree_large(x);
  1354: static inline void *slab_free_hook(struct kmem_cache *s, void *x) (x: argument)
  [all …]

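The oo_* helpers reveal that struct kmem_cache_order_objects is a single word carrying two values: the page order above OO_SHIFT and the object count below it. A standalone sketch of the same packing; OO_SHIFT == 16 and the unsigned int field match what mainline slub.c uses, but treat both as assumptions here:

    #include <stdio.h>

    #define OO_SHIFT 16
    #define OO_MASK  ((1 << OO_SHIFT) - 1)

    struct kmem_cache_order_objects {
        unsigned int x;   /* order in the high bits, object count in the low */
    };

    static struct kmem_cache_order_objects oo_make(int order, unsigned int objects)
    {
        struct kmem_cache_order_objects x = {
            .x = (order << OO_SHIFT) + objects,
        };
        return x;
    }

    static int oo_order(struct kmem_cache_order_objects x)
    {
        return x.x >> OO_SHIFT;
    }

    static int oo_objects(struct kmem_cache_order_objects x)
    {
        return x.x & OO_MASK;
    }

    int main(void)
    {
        struct kmem_cache_order_objects oo = oo_make(3, 170);
        printf("order=%d objects=%d\n", oo_order(oo), oo_objects(oo));
        return 0;
    }
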
slab.h:
  296: static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)   (x: argument)
  311:     page = virt_to_head_page(x);
  374: void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

util.c:
  32: void kfree_const(const void *x)              (x: argument)
  34:     if (!is_kernel_rodata((unsigned long)x))
  35:         kfree(x);

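kfree_const() exists so a caller can hold a pointer that is either a compile-time constant in .rodata or a heap copy, and free it unconditionally: only the heap case reaches kfree(). A userspace analogue, with a static array standing in for the kernel's rodata bounds check:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Static array standing in for the kernel's .rodata section. */
    static const char builtin_name[] = "default";

    /* Rough analogue of is_kernel_rodata(): is p inside the constant region? */
    static int is_const_region(const void *p)
    {
        const char *c = p;
        return c >= builtin_name && c < builtin_name + sizeof(builtin_name);
    }

    /* Analogue of kfree_const(): free only what was heap-allocated. */
    static void free_const(const void *x)
    {
        if (!is_const_region(x))
            free((void *)x);
    }

    int main(void)
    {
        const char *a = builtin_name;       /* constant: must not be freed */
        const char *b = strdup("runtime");  /* heap copy: must be freed    */

        free_const(a);   /* no-op */
        free_const(b);   /* actually frees */
        puts("ok");
        return 0;
    }
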
vmstat.c:
  226: long x;                                     (local in __mod_zone_page_state)
  229: x = delta + __this_cpu_read(*p);
  233: if (unlikely(x > t || x < -t)) {
  234:     zone_page_state_add(x, zone, item);
  235:     x = 0;
  237: __this_cpu_write(*p, x);

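These lines are the heart of the per-CPU counter batching: each CPU accumulates updates in a private delta and folds them into the shared zone counter only when the delta crosses a threshold, keeping the common case off the contended cache line at the cost of some staleness. A single-threaded sketch of the same logic (the threshold value is invented; the kernel computes one per zone):

    #include <stdio.h>

    static long global_count;            /* stand-in for the zone counter   */
    static long cpu_delta;               /* stand-in for the per-CPU delta  */
    static const long threshold = 32;    /* hypothetical per-CPU threshold  */

    static void mod_state(long delta)
    {
        long x = delta + cpu_delta;

        if (x > threshold || x < -threshold) {
            global_count += x;           /* fold the whole batch in at once */
            x = 0;
        }
        cpu_delta = x;                   /* keep the remainder local        */
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            mod_state(1);
        printf("global=%ld pending=%ld\n", global_count, cpu_delta);
        return 0;
    }
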
vmalloc.c:
  216: int is_vmalloc_or_module_addr(const void *x)              (x: argument)
  224:     unsigned long addr = (unsigned long)x;
  228:     return is_vmalloc_addr(x);
  754: #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))  /* can't use min() */
  755: #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))  /* can't use max() */

swap_state.c:
  44: #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)   (x: macro argument)

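INC_CACHE_INFO's argument is not a value but a field name, pasted straight into the member access, so one macro covers every counter in swap_cache_info. A compilable sketch; the field names are modeled on swap_state.c's swap_cache_info but should be treated as illustrative:

    #include <stdio.h>

    static struct {
        unsigned long add_total;
        unsigned long find_success;
    } swap_cache_info;

    /* The argument becomes the struct member being incremented. */
    #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)

    int main(void)
    {
        INC_CACHE_INFO(add_total);
        INC_CACHE_INFO(find_success);
        printf("adds=%lu hits=%lu\n",
               swap_cache_info.add_total, swap_cache_info.find_success);
        return 0;
    }
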
hugetlb_cgroup.c:
  30: #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))   (x: macro argument)

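MEMFILE_PRIVATE packs two small integers into the one private value a control file can carry: an index in the high bits and an attribute in the low 16. A sketch of the round trip; the unpacking macro names below are assumptions, not necessarily what hugetlb_cgroup.c calls them:

    #include <stdio.h>

    #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
    #define MEMFILE_IDX(val)        ((val) >> 16)      /* assumed name */
    #define MEMFILE_ATTR(val)       ((val) & 0xffff)   /* assumed name */

    int main(void)
    {
        int packed = MEMFILE_PRIVATE(2, 5);
        printf("idx=%d attr=%d\n", MEMFILE_IDX(packed), MEMFILE_ATTR(packed));
        return 0;
    }
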
ksm.c:
  45: #define NUMA(x) (x)                           (x: macro argument)
  46: #define DO_NUMA(x) do { (x); } while (0)
  48: #define NUMA(x) (0)
  49: #define DO_NUMA(x) do { } while (0)

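The two NUMA()/DO_NUMA() pairs are the compiled-in and compiled-out variants of one interface: with CONFIG_NUMA the macros pass their expression through, without it they collapse to 0 or to nothing, so the rest of ksm.c needs no #ifdef at each use site. A toy version you can toggle:

    #include <stdio.h>

    #define CONFIG_NUMA 1   /* comment out to get the stubbed variants */

    #ifdef CONFIG_NUMA
    #define NUMA(x)    (x)
    #define DO_NUMA(x) do { (x); } while (0)
    #else
    #define NUMA(x)    (0)
    #define DO_NUMA(x) do { } while (0)
    #endif

    int main(void)
    {
        int nid = 1;
        printf("node: %d\n", NUMA(nid));   /* always 0 when stubbed out */
        DO_NUMA(nid = 2);                  /* a no-op when stubbed out  */
        printf("after: %d\n", nid);
        return 0;
    }
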
oom_kill.c:
  503: #define K(x) ((x) << (PAGE_SHIFT-10))        (x: macro argument)

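K(x) converts a page count to kB for printing: a page is 1 << PAGE_SHIFT bytes, and dividing by 1024 is a right shift by 10, so the net shift is PAGE_SHIFT - 10. With 4 KiB pages (PAGE_SHIFT == 12) that is x << 2, i.e. 4 kB per page:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumes 4 KiB pages */
    #define K(x) ((x) << (PAGE_SHIFT - 10))

    int main(void)
    {
        unsigned long pages = 300;
        printf("%lu pages = %lu kB\n", pages, K(pages));   /* 1200 kB */
        return 0;
    }
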
kmemleak.c:
  279: #define kmemleak_warn(x...) do { \           (x: macro argument)
  280:     pr_warning(x); \
  290: #define kmemleak_stop(x...) do { \
  291:     kmemleak_warn(x); \

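kmemleak_warn(x...) uses GCC's named-variadic-macro syntax: x names the whole argument list, which is why kmemleak_stop() can forward it wholesale to kmemleak_warn(). A sketch of the pattern; the flag and "disable" bodies are simplified stand-ins for what kmemleak.c actually does after warning:

    #include <stdio.h>

    static int warning_fired;
    static int stopped;

    /* GNU extension: "x..." names the variadic argument list. */
    #define my_warn(x...) do {    \
        fprintf(stderr, x);       \
        warning_fired = 1;        \
    } while (0)

    #define my_stop(x...) do {    \
        my_warn(x);               \
        stopped = 1;              \
    } while (0)

    int main(void)
    {
        my_stop("bad pointer %p\n", (void *)0);
        printf("warned=%d stopped=%d\n", warning_fired, stopped);
        return 0;
    }
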
backing-dev.c:
  73: #define K(x) ((x) << (PAGE_SHIFT - 10))       (x: macro argument; defined locally in bdi_debug_stats_show)

slab_common.c:
  121: void *x = p[i] = kmem_cache_alloc(s, flags);   (local in __kmem_cache_alloc_bulk)
  122: if (!x) {

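The two lines are the failure check inside __kmem_cache_alloc_bulk()'s allocation loop: each object lands in p[i], and a NULL forces the whole batch to be unwound so the caller sees all-or-nothing semantics. A sketch of that shape, with malloc standing in for kmem_cache_alloc:

    #include <stdlib.h>
    #include <stddef.h>

    /* Returns the number of objects allocated (== size) or 0 on failure. */
    static int alloc_bulk(size_t size, void **p, size_t obj_size)
    {
        for (size_t i = 0; i < size; i++) {
            void *x = p[i] = malloc(obj_size);
            if (!x) {
                while (i--)        /* unwind the partial batch */
                    free(p[i]);
                return 0;
            }
        }
        return (int)size;
    }
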
mempolicy.c:
  939:  static struct page *new_node_page(struct page *page, unsigned long node, int **x)   (x: argument)
  1094: static struct page *new_page(struct page *page, unsigned long start, int **x)       (x: argument)
  1129: static struct page *new_page(struct page *page, unsigned long start, int **x)       (x: argument)

memcontrol.c:
  236:  #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))   (x: macro argument)
  1247: #define K(x) ((x) << (PAGE_SHIFT-10))                 (x: macro argument)

memory-failure.c:
  1524: static struct page *new_page(struct page *p, unsigned long private, int **x)   (x: argument)

hugetlb.c:
  4352: #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))   (x: macro argument)

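hugetlb.c derives ALIGN_DOWN from the kernel's round-up helper: __ALIGN_KERNEL(x, a) rounds up by adding a - 1 and masking, so subtracting a - 1 first turns it into a round-down (a must be a power of two). Worked through:

    #include <stdio.h>

    /* Round-up helpers modeled on the kernel's const.h, reproduced for the demo. */
    #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
    #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)

    #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))

    int main(void)
    {
        unsigned long addr = 0x12345;
        /* 0x12345 - 0xfff = 0x11346; round up to 0x1000 -> 0x12000 */
        printf("0x%lx -> 0x%lx\n", addr, ALIGN_DOWN(addr, 0x1000UL));
        return 0;
    }
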
page_alloc.c:
  3769: #define K(x) ((x) << (PAGE_SHIFT-10))       (x: macro argument)