/mm/ |
D | slab.c |
    284  #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)  argument
    298  #define STATS_INC_ACTIVE(x) ((x)->num_active++)  argument
    299  #define STATS_DEC_ACTIVE(x) ((x)->num_active--)  argument
    300  #define STATS_INC_ALLOCED(x) ((x)->num_allocations++)  argument
    301  #define STATS_INC_GROWN(x) ((x)->grown++)  argument
    302  #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))  argument
    303  #define STATS_SET_HIGH(x) \  argument
    308  #define STATS_INC_ERR(x) ((x)->errors++)  argument
    309  #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)  argument
    310  #define STATS_INC_NODEFREES(x) ((x)->node_frees++)  argument
    [all …]
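
The slab.c hits above are the SLAB per-cache statistics macros: each takes a cache pointer x and either tests a flag or bumps a counter field. A minimal userspace sketch of the same pattern, with the struct layout and the CFLGS_OFF_SLAB value assumed for illustration; only the macro bodies mirror the listing:

    /* Userspace sketch of the slab.c counter macros listed above.
     * The struct layout and the CFLGS_OFF_SLAB value are assumptions. */
    #include <stdio.h>

    #define CFLGS_OFF_SLAB 0x80000000UL  /* hypothetical flag value */

    struct fake_cache {
        unsigned long flags;
        unsigned long num_active;
        unsigned long num_allocations;
    };

    #define OFF_SLAB(x)          ((x)->flags & CFLGS_OFF_SLAB)
    #define STATS_INC_ACTIVE(x)  ((x)->num_active++)
    #define STATS_DEC_ACTIVE(x)  ((x)->num_active--)
    #define STATS_INC_ALLOCED(x) ((x)->num_allocations++)

    int main(void)
    {
        struct fake_cache c = { .flags = CFLGS_OFF_SLAB };

        STATS_INC_ACTIVE(&c);   /* one object handed out */
        STATS_INC_ALLOCED(&c);  /* lifetime allocation count */
        STATS_DEC_ACTIVE(&c);   /* object returned */

        printf("off-slab=%d active=%lu alloced=%lu\n",
               OFF_SLAB(&c) != 0, c.num_active, c.num_allocations);
        return 0;
    }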
|
D | page-writeback.c |
    200  unsigned long x = 0;  in highmem_dirtyable_memory()  local
    239  unsigned long x;  in global_dirtyable_memory()  local
    580  long x;  in pos_ratio_polynom()  local
    682  long x;  in bdi_position_ratio()  local
    969  unsigned long x;  in bdi_update_dirty_ratelimit()  local
|
D | slub.c |
    310   struct kmem_cache_order_objects x = {  in oo_make()  local
    317   static inline int oo_order(struct kmem_cache_order_objects x)  in oo_order()
    322   static inline int oo_objects(struct kmem_cache_order_objects x)  in oo_objects()
    1268  static inline void kfree_hook(const void *x)  in kfree_hook()
    1290  static inline void slab_free_hook(struct kmem_cache *s, void *x)  in slab_free_hook()
    2152  unsigned long x = 0;  in count_partial()  local
    2553  void *x, unsigned long addr)  in __slab_free()
    2677  struct page *page, void *x, unsigned long addr)  in slab_free()
    2715  void kmem_cache_free(struct kmem_cache *s, void *x)  in kmem_cache_free()
    3404  void kfree(const void *x)  in kfree()
    [all …]
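
The oo_make()/oo_order()/oo_objects() helpers suggest that struct kmem_cache_order_objects packs a page order and an object count into the single word x. A sketch of that packing idiom; the 16-bit shift and mask here are assumptions, not taken from the listing:

    /* Sketch of packing a page order and an object count into one word. */
    #include <stdio.h>

    #define OO_SHIFT 16
    #define OO_MASK  ((1UL << OO_SHIFT) - 1)

    struct order_objects {
        unsigned long x;
    };

    static inline struct order_objects oo_make(int order, unsigned int objects)
    {
        struct order_objects v = {
            ((unsigned long)order << OO_SHIFT) | objects
        };
        return v;
    }

    static inline int oo_order(struct order_objects v)
    {
        return v.x >> OO_SHIFT;
    }

    static inline unsigned int oo_objects(struct order_objects v)
    {
        return v.x & OO_MASK;
    }

    int main(void)
    {
        struct order_objects oo = oo_make(3, 64); /* order-3 page, 64 objects */

        printf("order=%d objects=%u\n", oo_order(oo), oo_objects(oo));
        return 0;
    }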
|
D | ksm.c |
    45  #define NUMA(x) (x)  argument
    46  #define DO_NUMA(x) do { (x); } while (0)  argument
    48  #define NUMA(x) (0)  argument
    49  #define DO_NUMA(x) do { } while (0)  argument
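
The two NUMA()/DO_NUMA() pairs in ksm.c are the usual compile-time switch: one pair passes through and evaluates its argument, the other collapses to a constant or a no-op. A sketch of the pattern, assuming a CONFIG_NUMA guard selects between them:

    /* Conditional no-op macro pattern from the ksm.c entries above.
     * The CONFIG_NUMA guard is an assumption based on the macro names. */
    #include <stdio.h>

    #ifdef CONFIG_NUMA
    #define NUMA(x)    (x)
    #define DO_NUMA(x) do { (x); } while (0)
    #else
    #define NUMA(x)    (0)
    #define DO_NUMA(x) do { } while (0)
    #endif

    int main(void)
    {
        int nid = 2;

        /* Reads the node id when NUMA is enabled, constant 0 otherwise. */
        printf("node=%d\n", NUMA(nid));

        /* Side effect happens only in the NUMA build. */
        DO_NUMA(nid++);
        printf("nid after DO_NUMA=%d\n", nid);
        return 0;
    }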
|
D | swap_state.c | 50 #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) argument
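
INC_CACHE_INFO() takes a struct field name rather than a value, so INC_CACHE_INFO(add_total) expands to swap_cache_info.add_total++. A sketch, with the field names assumed for illustration:

    /* Field-name-as-macro-argument trick from swap_state.c. */
    #include <stdio.h>

    static struct {
        unsigned long add_total;
        unsigned long find_success;
    } swap_cache_info;

    #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)

    int main(void)
    {
        INC_CACHE_INFO(add_total);    /* swap_cache_info.add_total++ */
        INC_CACHE_INFO(find_success); /* swap_cache_info.find_success++ */

        printf("adds=%lu hits=%lu\n",
               swap_cache_info.add_total, swap_cache_info.find_success);
        return 0;
    }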
|
D | hugetlb_cgroup.c | 29 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) argument
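
MEMFILE_PRIVATE() packs an index and an attribute into one integer, index in the high 16 bits and attribute in the low 16. A sketch with matching unpack macros written to fit; only MEMFILE_PRIVATE itself is taken from the listing:

    /* Pack/unpack sketch for the MEMFILE_PRIVATE() encoding. */
    #include <stdio.h>

    #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
    #define MEMFILE_IDX(val)        (((val) >> 16) & 0xffff)  /* assumed unpack */
    #define MEMFILE_ATTR(val)       ((val) & 0xffff)          /* assumed unpack */

    int main(void)
    {
        int packed = MEMFILE_PRIVATE(2, 7); /* index 2, attribute 7 */

        printf("packed=0x%x idx=%d attr=%d\n",
               packed, MEMFILE_IDX(packed), MEMFILE_ATTR(packed));
        return 0;
    }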
|
D | oom_kill.c | 424 #define K(x) ((x) << (PAGE_SHIFT-10)) argument
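
K() recurs in several of these files (oom_kill.c here, and again in backing-dev.c, memcontrol.c and page_alloc.c): it converts a page count to KiB, since a page is 2^PAGE_SHIFT bytes and a KiB is 2^10 bytes. A sketch with PAGE_SHIFT hard-coded to 12 (4 KiB pages) for illustration:

    /* Pages-to-KiB conversion behind the K() macro. */
    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumed: 4 KiB pages */
    #define K(x) ((x) << (PAGE_SHIFT - 10))

    int main(void)
    {
        unsigned long nr_pages = 256;

        /* 256 pages * 4 KiB/page = 1024 KiB */
        printf("%lu pages = %lu KiB\n", nr_pages, K(nr_pages));
        return 0;
    }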
|
D | vmalloc.c |
    209  int is_vmalloc_or_module_addr(const void *x)  in is_vmalloc_or_module_addr()
    744  #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */  argument
    745  #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */  argument
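
VMAP_MIN()/VMAP_MAX() are plain ternary min/max macros; the in-tree comment only notes that min()/max() can't be used here. A sketch of their use, with the usual caveat that each argument may be evaluated twice, so side effects in the arguments are unsafe:

    /* Ternary min/max macros from vmalloc.c; values below are arbitrary. */
    #include <stdio.h>

    #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))
    #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))

    int main(void)
    {
        unsigned long start = 0x1000, end = 0x9000, addr = 0xc000;

        /* Clamp addr into [start, end]: result is 0x9000 here. */
        unsigned long clamped = VMAP_MAX(start, VMAP_MIN(addr, end));

        printf("clamped=%#lx\n", clamped);
        return 0;
    }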
|
D | mempolicy.c |
    984   static struct page *new_node_page(struct page *page, unsigned long node, int **x)  in new_node_page()
    1143  static struct page *new_page(struct page *page, unsigned long start, int **x)  in new_page()
    1178  static struct page *new_page(struct page *page, unsigned long start, int **x)  in new_page()
|
D | kmemleak.c |
    270  #define kmemleak_warn(x...) do { \  argument
    281  #define kmemleak_stop(x...) do { \  argument
|
D | slab.h | 287 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) in cache_from_obj()
|
D | backing-dev.c | 91 #define K(x) ((x) << (PAGE_SHIFT - 10)) in bdi_debug_stats_show() argument
|
D | memory-failure.c | 1475 static struct page *new_page(struct page *p, unsigned long private, int **x) in new_page()
|
D | memcontrol.c |
    477   #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))  argument
    1617  #define K(x) ((x) << (PAGE_SHIFT-10))  argument
    2467  long x = per_cpu(memcg->stat->count[i], cpu);  in mem_cgroup_drain_pcp_counter()  local
    2473  unsigned long x = per_cpu(memcg->stat->events[i], cpu);  in mem_cgroup_drain_pcp_counter()  local
|
D | vmstat.c | 221 long x; in __mod_zone_page_state() local
|
D | page_alloc.c | 3187 #define K(x) ((x) << (PAGE_SHIFT-10)) argument
|