
Searched refs:x (Results 1 – 19 of 19) sorted by relevance

/mm/
slab.c
256 #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB) argument
257 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) argument
271 #define STATS_INC_ACTIVE(x) ((x)->num_active++) argument
272 #define STATS_DEC_ACTIVE(x) ((x)->num_active--) argument
273 #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) argument
274 #define STATS_INC_GROWN(x) ((x)->grown++) argument
275 #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) argument
276 #define STATS_SET_HIGH(x) \ argument
278 if ((x)->num_active > (x)->high_mark) \
279 (x)->high_mark = (x)->num_active; \
[all …]
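The STATS_* helpers above are classic single-statement macros. STATS_SET_HIGH() spans several continuation lines (its opening do { falls on a non-matching line and is elided here) so that it behaves as one statement under an unbraced if/else. A minimal user-space sketch of the idiom, with an illustrative struct in place of the kernel's:

    #include <stdio.h>

    struct stats { unsigned long num_active, high_mark; };

    /* do { ... } while (0) makes the macro expand to exactly one
     * statement, so an unbraced if/else around it still parses. */
    #define STATS_SET_HIGH(x)                          \
        do {                                           \
            if ((x)->num_active > (x)->high_mark)      \
                (x)->high_mark = (x)->num_active;      \
        } while (0)

    int main(void)
    {
        struct stats s = { .num_active = 5, .high_mark = 3 };
        if (s.num_active)          /* unbraced: still well-formed */
            STATS_SET_HIGH(&s);
        printf("high_mark = %lu\n", s.high_mark);   /* prints 5 */
        return 0;
    }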
page-writeback.c
308 unsigned long x = 0; in highmem_dirtyable_memory() local
328 x += nr_pages; in highmem_dirtyable_memory()
341 if ((long)x < 0) in highmem_dirtyable_memory()
342 x = 0; in highmem_dirtyable_memory()
350 return min(x, total); in highmem_dirtyable_memory()
364 unsigned long x; in global_dirtyable_memory() local
366 x = global_zone_page_state(NR_FREE_PAGES); in global_dirtyable_memory()
372 x -= min(x, totalreserve_pages); in global_dirtyable_memory()
374 x += global_node_page_state(NR_INACTIVE_FILE); in global_dirtyable_memory()
375 x += global_node_page_state(NR_ACTIVE_FILE); in global_dirtyable_memory()
[all …]
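Both dirtyable-memory functions above defend unsigned arithmetic: x -= min(x, totalreserve_pages) clamps the subtraction at zero instead of wrapping, and the (long)x < 0 cast catches a sum that has already overflowed. A stand-alone illustration of the clamping idiom:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long x = 100, reserve = 250;

        /* Subtracting min(x, reserve) rather than reserve itself
         * clamps at 0 instead of wrapping to a huge unsigned value. */
        x -= MIN(x, reserve);
        printf("clamped: %lu\n", x);   /* 0, not 2^64 - 150 */
        return 0;
    }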
slub.c
332 struct kmem_cache_order_objects x = { in oo_make() local
336 return x; in oo_make()
339 static inline unsigned int oo_order(struct kmem_cache_order_objects x) in oo_order() argument
341 return x.x >> OO_SHIFT; in oo_order()
344 static inline unsigned int oo_objects(struct kmem_cache_order_objects x) in oo_objects() argument
346 return x.x & OO_MASK; in oo_objects()
1396 static __always_inline void kfree_hook(void *x) in kfree_hook() argument
1398 kmemleak_free(x); in kfree_hook()
1399 kasan_kfree_large(x, _RET_IP_); in kfree_hook()
1402 static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x) in slab_free_hook() argument
[all …]
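kmem_cache_order_objects is a single word that packs the slab's page order into the high bits and the objects-per-slab count into the low bits; oo_order() and oo_objects() shift and mask them back out. A compilable sketch of the same encoding, assuming an OO_SHIFT of 16 as in slub.c of this vintage:

    #include <stdio.h>

    #define OO_SHIFT 16
    #define OO_MASK  ((1u << OO_SHIFT) - 1)

    struct kmem_cache_order_objects { unsigned int x; };

    static struct kmem_cache_order_objects oo_make(unsigned int order,
                                                   unsigned int objects)
    {
        /* high bits: page order; low 16 bits: objects per slab */
        struct kmem_cache_order_objects x = {
            .x = (order << OO_SHIFT) + objects
        };
        return x;
    }

    static unsigned int oo_order(struct kmem_cache_order_objects x)
    {
        return x.x >> OO_SHIFT;
    }

    static unsigned int oo_objects(struct kmem_cache_order_objects x)
    {
        return x.x & OO_MASK;
    }

    int main(void)
    {
        struct kmem_cache_order_objects oo = oo_make(3, 42);
        printf("order=%u objects=%u\n", oo_order(oo), oo_objects(oo));
        return 0;
    }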
vmstat.c
321 long x; in __mod_zone_page_state() local
324 x = delta + __this_cpu_read(*p); in __mod_zone_page_state()
328 if (unlikely(x > t || x < -t)) { in __mod_zone_page_state()
329 zone_page_state_add(x, zone, item); in __mod_zone_page_state()
330 x = 0; in __mod_zone_page_state()
332 __this_cpu_write(*p, x); in __mod_zone_page_state()
341 long x; in __mod_node_page_state() local
344 x = delta + __this_cpu_read(*p); in __mod_node_page_state()
348 if (unlikely(x > t || x < -t)) { in __mod_node_page_state()
349 node_page_state_add(x, pgdat, item); in __mod_node_page_state()
[all …]
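__mod_zone_page_state() and __mod_node_page_state() keep small deltas in a per-CPU diff and fold them into the global counter only once the magnitude crosses a threshold, trading momentary exactness for far fewer writes to shared cachelines. A single-threaded model of the scheme, with a plain array standing in for real per-CPU storage:

    #include <stdio.h>
    #include <stdlib.h>

    #define NCPU      4
    #define THRESHOLD 32

    static long global_count;
    static long percpu_diff[NCPU];

    static void mod_state(int cpu, long delta)
    {
        long x = delta + percpu_diff[cpu];

        if (labs(x) > THRESHOLD) {   /* kernel spells it x > t || x < -t */
            global_count += x;       /* fold into the shared counter */
            x = 0;
        }
        percpu_diff[cpu] = x;        /* small residue stays CPU-local */
    }

    int main(void)
    {
        for (int i = 0; i < 200; i++)
            mod_state(i % NCPU, 1);

        long residue = 0;
        for (int c = 0; c < NCPU; c++)
            residue += percpu_diff[c];
        printf("global=%ld residue=%ld total=%ld\n",
               global_count, residue, global_count + residue);
        return 0;
    }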
memcontrol.c
228 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) argument
691 long x; in __mod_memcg_state() local
696 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); in __mod_memcg_state()
697 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { in __mod_memcg_state()
704 __this_cpu_add(memcg->vmstats_local->stat[idx], x); in __mod_memcg_state()
706 atomic_long_add(x, &mi->vmstats[idx]); in __mod_memcg_state()
707 x = 0; in __mod_memcg_state()
709 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); in __mod_memcg_state()
739 long x; in __mod_lruvec_state() local
756 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); in __mod_lruvec_state()
[all …]
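__mod_memcg_state() uses the same threshold-and-fold batching shown for vmstat.c above. MEMFILE_PRIVATE(), meanwhile, packs a resource type and an attribute index into the one private value a cgroup control file carries; memcontrol.c pairs it with MEMFILE_TYPE()/MEMFILE_ATTR() extractors. A self-contained round trip:

    #include <stdio.h>

    /* Pack two small integers into one int; the extractors mirror
     * memcontrol.c's MEMFILE_TYPE() and MEMFILE_ATTR(). */
    #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
    #define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
    #define MEMFILE_ATTR(val)       ((val) & 0xffff)

    int main(void)
    {
        int packed = MEMFILE_PRIVATE(2, 7);
        printf("type=%d attr=%d\n",
               MEMFILE_TYPE(packed), MEMFILE_ATTR(packed));   /* 2, 7 */
        return 0;
    }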
swap_state.c
61 #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) argument
62 #define ADD_CACHE_INFO(x, nr) do { swap_cache_info.x += (nr); } while (0) argument
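Here x is not a value but a field name: the preprocessor substitutes it textually, so INC_CACHE_INFO(add_total) expands to swap_cache_info.add_total++. A compilable demonstration (the field names below are illustrative):

    #include <stdio.h>

    struct cache_info { unsigned long add_total, del_total; };
    static struct cache_info swap_cache_info;

    /* The argument is pasted in as a struct *field name*. */
    #define INC_CACHE_INFO(x)     do { swap_cache_info.x++; } while (0)
    #define ADD_CACHE_INFO(x, nr) do { swap_cache_info.x += (nr); } while (0)

    int main(void)
    {
        INC_CACHE_INFO(add_total);
        ADD_CACHE_INFO(del_total, 3);
        printf("add=%lu del=%lu\n",
               swap_cache_info.add_total, swap_cache_info.del_total);
        return 0;
    }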
slab.h
504 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) in cache_from_obj() argument
520 cachep = virt_to_cache(x); in cache_from_obj()
659 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
util.c
37 void kfree_const(const void *x) in kfree_const() argument
39 if (!is_kernel_rodata((unsigned long)x)) in kfree_const()
40 kfree(x); in kfree_const()
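kfree_const() is the release half of util.c's kstrdup_const() convention: a string still pointing into the kernel's rodata section was never duplicated and must not be freed. A rough user-space analogue, with a static pool standing in for the address-range check done by is_kernel_rodata():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const char literal_pool[] = "default-name";

    /* Plays the role of is_kernel_rodata(): an address-range test. */
    static int is_rodata(const void *x)
    {
        return (const char *)x >= literal_pool &&
               (const char *)x <  literal_pool + sizeof(literal_pool);
    }

    static const char *strdup_const(const char *s)
    {
        return is_rodata(s) ? s : strdup(s);   /* skip the copy */
    }

    static void free_const(const void *x)
    {
        if (!is_rodata(x))
            free((void *)x);
    }

    int main(void)
    {
        const char *a = strdup_const(literal_pool);  /* no copy made */
        const char *b = strdup_const("heap copy");   /* duplicated */
        printf("a copied: %s\n", a == literal_pool ? "no" : "yes");
        free_const(a);   /* no-op */
        free_const(b);   /* real free */
        return 0;
    }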
vmalloc.c
251 int is_vmalloc_or_module_addr(const void *x) in is_vmalloc_or_module_addr() argument
259 unsigned long addr = (unsigned long)x; in is_vmalloc_or_module_addr()
263 return is_vmalloc_addr(x); in is_vmalloc_or_module_addr()
1390 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ argument
1391 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ argument
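The "can't use min()" comment marks a real trade-off: the kernel's min() both type-checks its operands and expands to a statement expression with typeof temporaries, so it evaluates each argument once but is not usable in every context, while the bare ternary above works anywhere at the cost of double evaluation. A sketch of that hazard, with SAFE_MIN as an illustrative stand-in for the kernel form:

    #include <stdio.h>

    /* Plain ternary: evaluates each argument twice. */
    #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))

    /* GNU statement expression, roughly the kernel min() shape:
     * each argument evaluated exactly once. */
    #define SAFE_MIN(x, y) ({ __typeof__(x) _x = (x);  \
                              __typeof__(y) _y = (y);  \
                              _x < _y ? _x : _y; })

    int main(void)
    {
        int i = 0, j = 10;
        int a = VMAP_MIN(i++, j);  /* i++ runs twice: i ends up 2 */
        int b = SAFE_MIN(++i, j);  /* ++i runs once: i ends up 3 */
        printf("a=%d b=%d i=%d\n", a, b, i);
        return 0;
    }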
hugetlb_cgroup.c
30 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) argument
kmemleak.c
239 #define kmemleak_warn(x...) do { \ argument
240 pr_warn(x); \
250 #define kmemleak_stop(x...) do { \ argument
251 kmemleak_warn(x); \
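kmemleak_warn() and kmemleak_stop() use the GNU named-variadic form x..., which binds the entire argument list so the macro can forward it verbatim, here ultimately to pr_warn(). A user-space equivalent with hypothetical my_warn/my_stop names:

    #include <stdio.h>

    static int warn_count;
    static int stopped;

    /* x... captures the whole printf-style argument list. */
    #define my_warn(x...) do {      \
            fprintf(stderr, x);     \
            warn_count++;           \
        } while (0)

    /* Layered forwarding, as kmemleak_stop() -> kmemleak_warn(). */
    #define my_stop(x...) do {      \
            my_warn(x);             \
            stopped = 1;            \
        } while (0)

    int main(void)
    {
        my_stop("%s: leak at %p\n", "demo", (void *)0x1234);
        printf("warnings=%d stopped=%d\n", warn_count, stopped);
        return 0;
    }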
ksm.c
46 #define NUMA(x) (x) argument
47 #define DO_NUMA(x) do { (x); } while (0) argument
49 #define NUMA(x) (0) argument
50 #define DO_NUMA(x) do { } while (0) argument
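The two pairs of definitions are selected by CONFIG_NUMA: with it, NUMA(x) passes the expression through and DO_NUMA(x) executes it; without it, they collapse to a constant 0 and a no-op, so call sites compile unchanged either way. A toggleable sketch (build with or without -DUSE_NUMA, a stand-in for the kernel config option):

    #include <stdio.h>

    #ifdef USE_NUMA
    #define NUMA(x)    (x)
    #define DO_NUMA(x) do { (x); } while (0)
    #else
    #define NUMA(x)    (0)                 /* constant-folds away */
    #define DO_NUMA(x) do { } while (0)    /* x never evaluated */
    #endif

    static int page_node = 3;

    int main(void)
    {
        int nid = NUMA(page_node);       /* 3 with USE_NUMA, else 0 */
        DO_NUMA(page_node = nid + 1);    /* no-op without USE_NUMA */
        printf("nid=%d node=%d\n", nid, page_node);
        return 0;
    }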
oom_kill.c
482 #define K(x) ((x) << (PAGE_SHIFT-10)) argument
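K() converts a page count to kibibytes: PAGE_SHIFT is log2 of the page size and 10 is log2 of 1024, so shifting by the difference multiplies by pages-per-KiB. The same macro recurs in backing-dev.c and page_alloc.c below. Worked for the common 4 KiB page (PAGE_SHIFT = 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assumes 4 KiB pages */
    #define K(x) ((x) << (PAGE_SHIFT - 10))    /* pages -> KiB */

    int main(void)
    {
        /* 300 pages << 2 = 1200 KiB, i.e. 300 * 4 KiB */
        printf("%lu KiB\n", K(300UL));
        return 0;
    }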
backing-dev.c
74 #define K(x) ((x) << (PAGE_SHIFT - 10)) in bdi_debug_stats_show() argument
slab_common.c
122 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk() local
123 if (!x) { in __kmem_cache_alloc_bulk()
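__kmem_cache_alloc_bulk() gives bulk allocation all-or-nothing semantics: if any single allocation fails, everything allocated so far is released and the call reports failure. A user-space model, with malloc() standing in for kmem_cache_alloc():

    #include <stdio.h>
    #include <stdlib.h>

    static size_t alloc_bulk(size_t objsize, size_t nr, void **p)
    {
        for (size_t i = 0; i < nr; i++) {
            void *x = p[i] = malloc(objsize);
            if (!x) {
                while (i--)      /* roll back partial progress */
                    free(p[i]);
                return 0;
            }
        }
        return nr;               /* all or nothing */
    }

    int main(void)
    {
        void *objs[8];
        size_t got = alloc_bulk(64, 8, objs);
        printf("allocated %zu objects\n", got);
        while (got--)
            free(objs[got]);
        return 0;
    }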
Kconfig
551 tristate "Low (Up to 2x) density storage for compressed pages"
560 tristate "Up to 3x density storage for compressed pages"
percpu.c
588 static inline bool pcpu_region_overlap(int a, int b, int x, int y) in pcpu_region_overlap() argument
590 return (a < y) && (x < b); in pcpu_region_overlap()
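pcpu_region_overlap() is the standard interval test: two half-open regions [a, b) and [x, y) intersect exactly when each one starts before the other ends, so adjacent regions sharing only an endpoint do not count. A self-contained check:

    #include <stdio.h>

    /* Overlap test for half-open intervals [a, b) and [x, y). */
    static int region_overlap(int a, int b, int x, int y)
    {
        return (a < y) && (x < b);
    }

    int main(void)
    {
        printf("%d\n", region_overlap(0, 10, 5, 15));  /* 1: overlaps */
        printf("%d\n", region_overlap(0, 10, 10, 20)); /* 0: adjacent */
        return 0;
    }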
page_alloc.c
5213 #define K(x) ((x) << (PAGE_SHIFT-10)) argument
/mm/kasan/
generic.c
317 __memset((void *)addr, 0x##byte, size); \
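The 0x##byte paste glues a hex prefix onto the macro argument, so an invocation with f1 produces the integer constant 0xf1; kasan/generic.c uses this to stamp out one shadow-poisoning function per byte value. An illustrative version, with the DEFINE_FILL/fill_* names being this sketch's own invention:

    #include <stdio.h>
    #include <string.h>

    /* DEFINE_FILL(f1) defines fill_f1(), which memsets with 0xf1:
     * ## pastes tokens, so 0x##f1 becomes the constant 0xf1. */
    #define DEFINE_FILL(byte)                              \
        static void fill_##byte(void *addr, size_t size)   \
        {                                                  \
            memset(addr, 0x##byte, size);                  \
        }

    DEFINE_FILL(f1)
    DEFINE_FILL(f8)

    int main(void)
    {
        unsigned char buf[4];
        fill_f1(buf, sizeof(buf));
        printf("0x%02x\n", buf[0]);   /* 0xf1 */
        fill_f8(buf, sizeof(buf));
        printf("0x%02x\n", buf[0]);   /* 0xf8 */
        return 0;
    }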