/mm/ |
D | cma_debug.c |
     24  static int cma_debugfs_get(void *data, u64 *val)  in cma_debugfs_get()  argument
     28  *val = *p;  in cma_debugfs_get()
     34  static int cma_used_get(void *data, u64 *val)  in cma_used_get()  argument
     43  *val = (u64)used << cma->order_per_bit;  in cma_used_get()
     49  static int cma_maxchunk_get(void *data, u64 *val)  in cma_maxchunk_get()  argument
     65  *val = (u64)maxchunk << cma->order_per_bit;  in cma_maxchunk_get()
    122  static int cma_free_write(void *data, u64 val)  in cma_free_write()  argument
    124  int pages = val;  in cma_free_write()
    154  static int cma_alloc_write(void *data, u64 val)  in cma_alloc_write()  argument
    156  int pages = val;  in cma_alloc_write()
|
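The cma_debug.c hits above (and the hwpoison-inject.c ones just below) all have the debugfs simple-attribute shape: a read handler that fills `u64 *val`, a write handler that receives `u64 val`, and a helper macro that glues them into file operations. A minimal sketch of that shape, assuming a hypothetical `demo_counter` and a made-up `val_demo` directory (illustrative only, not cma_debug.c's actual code):

    #include <linux/debugfs.h>
    #include <linux/module.h>

    static u64 demo_counter;    /* hypothetical value to expose */

    static int demo_get(void *data, u64 *val)
    {
        *val = *(u64 *)data;    /* report the current value */
        return 0;
    }

    static int demo_set(void *data, u64 val)
    {
        *(u64 *)data = val;     /* accept a value written from userspace */
        return 0;
    }

    /* Builds the file_operations; "%llu\n" is the text format. */
    DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

    static struct dentry *demo_dir;

    static int __init demo_init(void)
    {
        demo_dir = debugfs_create_dir("val_demo", NULL);
        debugfs_create_file("counter", 0644, demo_dir,
                            &demo_counter, &demo_fops);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        debugfs_remove_recursive(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
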
D | hwpoison-inject.c |
     14  static int hwpoison_inject(void *data, u64 val)  in hwpoison_inject()  argument
     16  unsigned long pfn = val;  in hwpoison_inject()
     62  static int hwpoison_unpoison(void *data, u64 val)  in hwpoison_unpoison()  argument
     67  return unpoison_memory(val);  in hwpoison_unpoison()
|
D | memcontrol.c |
    228  #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))  argument
    229  #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)  argument
    230  #define MEMFILE_ATTR(val) ((val) & 0xffff)  argument
    689  void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)  in __mod_memcg_state()  argument
    696  x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);  in __mod_memcg_state()
    734  int val)  in __mod_lruvec_state()  argument
    742  __mod_node_page_state(pgdat, idx, val);  in __mod_lruvec_state()
    751  __mod_memcg_state(memcg, idx, val);  in __mod_lruvec_state()
    754  __this_cpu_add(pn->lruvec_stat_local->count[idx], val);  in __mod_lruvec_state()
    756  x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);  in __mod_lruvec_state()
    [all …]
|
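The MEMFILE_* macros at memcontrol.c lines 228-230 (mirrored by hugetlb_cgroup.c below) pack a 16-bit type or index and a 16-bit attribute into the single `val` that a cgroup file handler receives. A runnable userspace check of that arithmetic (demo values only; written here with explicit parentheses):

    #include <assert.h>
    #include <stdio.h>

    /* Same packing scheme as the memcontrol.c/hugetlb_cgroup.c macros:
     * 16-bit type/index in the high half, 16-bit attribute in the low half. */
    #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
    #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
    #define MEMFILE_ATTR(val)       ((val) & 0xffff)

    int main(void)
    {
        /* 3 and 7 are arbitrary demo values, not real memcg constants. */
        unsigned long packed = MEMFILE_PRIVATE(3, 7);

        assert(MEMFILE_TYPE(packed) == 3);
        assert(MEMFILE_ATTR(packed) == 7);
        printf("packed=%#lx type=%lu attr=%lu\n",
               packed, MEMFILE_TYPE(packed), MEMFILE_ATTR(packed));
        return 0;
    }
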
D | hugetlb_cgroup.c |
     30  #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))  argument
     31  #define MEMFILE_IDX(val) (((val) >> 16) & 0xffff)  argument
     32  #define MEMFILE_ATTR(val) ((val) & 0xffff)  argument
|
D | swap_slots.c |
    315  entry.val = 0;  in get_swap_page()
    341  pentry->val = 0;  in get_swap_page()
    349  if (entry.val)  in get_swap_page()
    357  entry.val = 0;  in get_swap_page()
|
D | swapfile.c |
   1109  if (!entry.val)  in __swap_info_get()
   1122  pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);  in __swap_info_get()
   1125  pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);  in __swap_info_get()
   1128  pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);  in __swap_info_get()
   1145  pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);  in _swap_info_get()
   1254  if (!entry.val)  in get_swap_device()
   1269  pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);  in get_swap_device()
   1333  unsigned char val;  in put_swap_page()  local
   1345  val = map[i];  in put_swap_page()
   1346  VM_BUG_ON(!(val & SWAP_HAS_CACHE));  in put_swap_page()
   [all …]
|
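Nearly all of the swap hits in this listing go through `swp_entry_t`, a one-member struct whose `val` word encodes a swap type plus a page offset, with `val == 0` doubling as the "no entry" sentinel (see swap_slots.c above). A userspace sketch of the idea; the 24-bit split is a demo assumption, not the kernel's real layout:

    #include <assert.h>
    #include <stdio.h>

    /* One word carrying two fields, like the kernel's swp_entry_t. */
    typedef struct { unsigned long val; } swp_entry_t;

    #define DEMO_TYPE_SHIFT 24  /* demo split; the kernel derives the real one */

    static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
    {
        return (swp_entry_t){ .val = (type << DEMO_TYPE_SHIFT) | offset };
    }

    static unsigned long swp_type(swp_entry_t e)
    {
        return e.val >> DEMO_TYPE_SHIFT;
    }

    static unsigned long swp_offset(swp_entry_t e)
    {
        return e.val & ((1UL << DEMO_TYPE_SHIFT) - 1);
    }

    int main(void)
    {
        swp_entry_t e = swp_entry(1, 4096);

        assert(swp_type(e) == 1 && swp_offset(e) == 4096);

        /* val == 0 is the "no swap entry" sentinel, as in swap_slots.c */
        e.val = 0;
        if (!e.val)
            printf("empty entry\n");
        return 0;
    }
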
D | zswap.c |
    666  static int __zswap_param_set(const char *val, const struct kernel_param *kp,  in __zswap_param_set()  argument
    670  char *s = strstrip((char *)val);  in __zswap_param_set()
    761  static int zswap_compressor_param_set(const char *val,  in zswap_compressor_param_set()  argument
    764  return __zswap_param_set(val, kp, zswap_zpool_type, NULL);  in zswap_compressor_param_set()
    767  static int zswap_zpool_param_set(const char *val,  in zswap_zpool_param_set()  argument
    770  return __zswap_param_set(val, kp, NULL, zswap_compressor);  in zswap_zpool_param_set()
    773  static int zswap_enabled_param_set(const char *val,  in zswap_enabled_param_set()  argument
    785  return param_set_bool(val, kp);  in zswap_enabled_param_set()
|
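zswap.c's `*_param_set` handlers (and shuffle.c's `shuffle_store` below) are module-parameter `set` callbacks: they inspect or validate the incoming string `val` and then delegate to a stock parser such as `param_set_bool()`. A minimal sketch of that wiring with a hypothetical `enabled` parameter (not zswap's actual code):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static bool demo_enabled;

    /* Validate before letting the generic bool parser store the value,
     * the same shape as zswap_enabled_param_set(). */
    static int demo_enabled_set(const char *val, const struct kernel_param *kp)
    {
        pr_info("demo: request to set enabled=%s\n", val);
        /* extra checks would go here; then delegate to the stock parser */
        return param_set_bool(val, kp);
    }

    static const struct kernel_param_ops demo_enabled_ops = {
        .set = demo_enabled_set,
        .get = param_get_bool,
    };

    module_param_cb(enabled, &demo_enabled_ops, &demo_enabled, 0644);
    MODULE_LICENSE("GPL");
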
D | shuffle.c |
     42  static __meminit int shuffle_store(const char *val,  in shuffle_store()  argument
     45  int rc = param_set_bool(val, kp);  in shuffle_store()
|
D | page_alloc.c |
   5147  void si_meminfo(struct sysinfo *val)  in si_meminfo()  argument
   5149  val->totalram = totalram_pages();  in si_meminfo()
   5150  val->sharedram = global_node_page_state(NR_SHMEM);  in si_meminfo()
   5151  val->freeram = global_zone_page_state(NR_FREE_PAGES);  in si_meminfo()
   5152  val->bufferram = nr_blockdev_pages();  in si_meminfo()
   5153  val->totalhigh = totalhigh_pages();  in si_meminfo()
   5154  val->freehigh = nr_free_highpages();  in si_meminfo()
   5155  val->mem_unit = PAGE_SIZE;  in si_meminfo()
   5161  void si_meminfo_node(struct sysinfo *val, int nid)  in si_meminfo_node()  argument
   5171  val->totalram = managed_pages;  in si_meminfo_node()
   [all …]
|
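`si_meminfo()` fills the same `struct sysinfo` that the sysinfo(2) syscall hands to userspace, with all counts expressed in units of `mem_unit` bytes (PAGE_SIZE when filled as above). A runnable userspace consumer of that structure:

    #include <stdio.h>
    #include <sys/sysinfo.h>

    int main(void)
    {
        struct sysinfo si;

        if (sysinfo(&si) != 0) {
            perror("sysinfo");
            return 1;
        }
        /* counts are in mem_unit-byte units; convert to MiB */
        printf("total RAM : %llu MiB\n",
               (unsigned long long)si.totalram * si.mem_unit >> 20);
        printf("free RAM  : %llu MiB\n",
               (unsigned long long)si.freeram * si.mem_unit >> 20);
        printf("buffers   : %llu MiB\n",
               (unsigned long long)si.bufferram * si.mem_unit >> 20);
        return 0;
    }
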
D | vmstat.c |
   1764  long val;  in vmstat_refresh()  local
   1784  val = atomic_long_read(&vm_zone_stat[i]);  in vmstat_refresh()
   1785  if (val < 0) {  in vmstat_refresh()
   1787  __func__, vmstat_text[i], val);  in vmstat_refresh()
   1793  val = atomic_long_read(&vm_numa_stat[i]);  in vmstat_refresh()
   1794  if (val < 0) {  in vmstat_refresh()
   1796  __func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);  in vmstat_refresh()
|
D | memory.c |
    178  static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)  in add_mm_counter_fast()  argument
    183  task->rss_stat.count[member] += val;  in add_mm_counter_fast()
    185  add_mm_counter(mm, member, val);  in add_mm_counter_fast()
    711  return entry.val;  in copy_one_pte()
    842  entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,  in copy_pte_range()
    844  if (entry.val)  in copy_pte_range()
    856  if (entry.val) {  in copy_pte_range()
   2817  set_page_private(page, entry.val);  in do_swap_page()
   2869  page_private(page) != entry.val)) && swapcache)  in do_swap_page()
   3397  static int fault_around_bytes_get(void *data, u64 *val)  in fault_around_bytes_get()  argument
   [all …]
|
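`add_mm_counter_fast()` (memory.c line 178) is a split-counter optimization: RSS deltas accumulate in a cheap per-task cache and are only folded into the shared per-mm counter later. A userspace sketch of the same batching idea; the threshold and names are made up for the demo:

    #include <stdatomic.h>
    #include <stdio.h>

    #define SYNC_THRESH 64  /* made-up batching threshold */

    static atomic_long shared_rss;          /* plays the per-mm counter */
    static _Thread_local long cached_rss;   /* plays task->rss_stat */

    static void add_rss_fast(long val)
    {
        cached_rss += val;  /* no atomic op on the fast path */
        if (cached_rss >= SYNC_THRESH || cached_rss <= -SYNC_THRESH) {
            atomic_fetch_add(&shared_rss, cached_rss);
            cached_rss = 0;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 1000; i++)
            add_rss_fast(1);

        atomic_fetch_add(&shared_rss, cached_rss);  /* final flush */
        cached_rss = 0;
        printf("rss pages: %ld\n", atomic_load(&shared_rss));
        return 0;
    }
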
D | frontswap.c |
    248  swp_entry_t entry = { .val = page_private(page), };  in __frontswap_store()
    297  swp_entry_t entry = { .val = page_private(page), };  in __frontswap_load()
|
D | swap_state.c |
    135  set_page_private(page + i, entry.val + i);  in add_to_swap_cache()
    197  if (!entry.val)  in add_to_swap()
    246  swp_entry_t entry = { .val = page_private(page) };  in delete_from_swap_cache()
|
D | slub.c |
    710  static void init_object(struct kmem_cache *s, void *object, u8 val)  in init_object()  argument
    715  memset(p - s->red_left_pad, val, s->red_left_pad);  in init_object()
    723  memset(p + s->object_size, val, s->inuse - s->object_size);  in init_object()
    855  void *object, u8 val)  in check_object()  argument
    862  object - s->red_left_pad, val, s->red_left_pad))  in check_object()
    866  endobject, val, s->inuse - s->object_size))  in check_object()
    877  if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&  in check_object()
    889  if (!s->offset && val == SLUB_RED_ACTIVE)  in check_object()
   1358  void *object, u8 val) { return 1; }  in check_object()  argument
|
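`init_object()` and `check_object()` implement SLUB's red zoning: the pad bytes around each object are memset to a marker byte (the `val` argument, e.g. SLUB_RED_ACTIVE) and verified later to catch overflows. A userspace sketch of the technique; the marker value and 16-byte pads are demo choices, not SLUB's real layout:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define RED_ACTIVE 0xcc  /* demo marker byte */
    #define PAD 16
    #define OBJ 32

    static void *alloc_redzoned(void)
    {
        unsigned char *raw = malloc(PAD + OBJ + PAD);

        memset(raw, RED_ACTIVE, PAD);              /* left red zone */
        memset(raw + PAD + OBJ, RED_ACTIVE, PAD);  /* right red zone */
        return raw + PAD;
    }

    static int check_redzone(void *object)
    {
        unsigned char *p = (unsigned char *)object - PAD;

        for (int i = 0; i < PAD; i++)
            if (p[i] != RED_ACTIVE || p[PAD + OBJ + i] != RED_ACTIVE)
                return 0;  /* corruption next to the object */
        return 1;
    }

    int main(void)
    {
        char *obj = alloc_redzoned();

        obj[OBJ] = 'X';  /* deliberate one-byte overflow */
        printf("red zone intact: %s\n", check_redzone(obj) ? "yes" : "no");
        free(obj - PAD);
        return 0;
    }
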
D | page_io.c |
    109  entry.val = page_private(page);  in swap_slot_free_notify()
|
D | huge_memory.c |
   2523  swp_entry_t entry = { .val = page_private(head) };  in __split_huge_page()
   2812  swp_entry_t entry = { .val = page_private(head) };  in split_huge_page_to_list()
   2976  static int split_huge_pages_set(void *data, u64 val)  in split_huge_pages_set()  argument
   2983  if (val != 1)  in split_huge_pages_set()
|
D | util.c |
    661  entry.val = page_private(page);  in page_mapping()
|
D | slab.c |
   1440  static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)  in poison_obj()  argument
   1445  memset(addr, val, size);  in poison_obj()
   2326  unsigned int idx, freelist_idx_t val)  in set_free_obj()  argument
   2328  ((freelist_idx_t *)(page->freelist))[idx] = val;  in set_free_obj()
|
D | shmem.c |
   1357  if (!swap.val)  in shmem_writepage()
   1563  entry.val = page_private(oldpage);  in shmem_replace_page()
   1583  set_page_private(newpage, entry.val);  in shmem_replace_page()
   1662  if (!PageSwapCache(page) || page_private(page) != swap.val ||  in shmem_swapin_page()
|
D | mmap.c |
   2522  unsigned long val;  in cmdline_parse_stack_guard_gap()  local
   2525  val = simple_strtoul(p, &endptr, 10);  in cmdline_parse_stack_guard_gap()
   2527  stack_guard_gap = val << PAGE_SHIFT;  in cmdline_parse_stack_guard_gap()
|
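`cmdline_parse_stack_guard_gap()` parses a decimal page count from the kernel command line and shifts it by PAGE_SHIFT to get bytes. The same arithmetic in a runnable userspace form (PAGE_SHIFT 12 assumes 4 KiB pages):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12  /* 4 KiB pages assumed for the demo */

    int main(int argc, char **argv)
    {
        const char *arg = argc > 1 ? argv[1] : "256";  /* pages */
        char *endptr;
        unsigned long val = strtoul(arg, &endptr, 10);

        if (endptr == arg || *endptr != '\0') {
            fprintf(stderr, "not a number: %s\n", arg);
            return 1;
        }
        printf("stack guard gap: %lu pages = %lu bytes\n",
               val, val << PAGE_SHIFT);
        return 0;
    }
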
D | zsmalloc.c |
    481  static inline void mod_zspage_inuse(struct zspage *zspage, int val)  in mod_zspage_inuse()  argument
    483  zspage->inuse += val;  in mod_zspage_inuse()
|
D | rmap.c |
   1580  swp_entry_t entry = { .val = page_private(subpage) };  in try_to_unmap_one()
|
D | vmscan.c |
    946  swp_entry_t swap = { .val = page_private(page) };  in __remove_mapping()
|