/mm/
ptdump.c
    32  pgd_t val = READ_ONCE(*pgd);  in ptdump_pgd_entry()  local
    36  if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))  in ptdump_pgd_entry()
    41  st->effective_prot(st, 0, pgd_val(val));  in ptdump_pgd_entry()
    43  if (pgd_leaf(val))  in ptdump_pgd_entry()
    44  st->note_page(st, addr, 0, pgd_val(val));  in ptdump_pgd_entry()
    53  p4d_t val = READ_ONCE(*p4d);  in ptdump_p4d_entry()  local
    57  if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))  in ptdump_p4d_entry()
    62  st->effective_prot(st, 1, p4d_val(val));  in ptdump_p4d_entry()
    64  if (p4d_leaf(val))  in ptdump_p4d_entry()
    65  st->note_page(st, addr, 1, p4d_val(val));  in ptdump_p4d_entry()
    [all …]

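The walkers above share one idiom: the entry is snapshotted exactly once with READ_ONCE() into the local val, and every later test (KASAN shadow page, leaf entry) runs against that one consistent copy rather than re-reading a live, concurrently-updated entry. A minimal userspace analogue of the snapshot idiom, using C11 atomics; the names and the "leaf" bit layout are illustrative, not the kernel's:

    #include <stdio.h>
    #include <stdatomic.h>

    static _Atomic unsigned long entry = 0x1234001;  /* bit 0 plays the "leaf" flag here */

    int main(void)
    {
            /* one snapshot, as READ_ONCE() does in ptdump_pgd_entry() */
            unsigned long val = atomic_load_explicit(&entry, memory_order_relaxed);

            if (val & 0x1)                  /* leaf test against the snapshot */
                    printf("leaf entry: %#lx\n", val);
            return 0;
    }
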
cma_debug.c
    24   static int cma_debugfs_get(void *data, u64 *val)  in cma_debugfs_get()  argument
    28   *val = *p;  in cma_debugfs_get()
    34   static int cma_used_get(void *data, u64 *val)  in cma_used_get()  argument
    43   *val = (u64)used << cma->order_per_bit;  in cma_used_get()
    49   static int cma_maxchunk_get(void *data, u64 *val)  in cma_maxchunk_get()  argument
    65   *val = (u64)maxchunk << cma->order_per_bit;  in cma_maxchunk_get()
    122  static int cma_free_write(void *data, u64 val)  in cma_free_write()  argument
    124  int pages = val;  in cma_free_write()
    154  static int cma_alloc_write(void *data, u64 val)  in cma_alloc_write()  argument
    156  int pages = val;  in cma_alloc_write()

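These cma_*_get()/_write() helpers have the classic debugfs simple-attribute shape: a get callback that fills *val, a set callback that receives val, and macro-generated fops tying them to a file. A minimal sketch of that pattern for a hypothetical module (the demo_* names are invented; this builds against a kernel tree and is not the actual CMA code):

    #include <linux/module.h>
    #include <linux/debugfs.h>

    static u64 demo_value;

    static int demo_get(void *data, u64 *val)
    {
            *val = *(u64 *)data;    /* copy the backing variable out to userspace */
            return 0;
    }

    static int demo_set(void *data, u64 val)
    {
            *(u64 *)data = val;     /* store the value written by userspace */
            return 0;
    }

    DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

    static struct dentry *demo_dir;

    static int __init demo_init(void)
    {
            demo_dir = debugfs_create_dir("demo", NULL);
            debugfs_create_file_unsafe("value", 0644, demo_dir,
                                       &demo_value, &demo_fops);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            debugfs_remove_recursive(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

After loading, /sys/kernel/debug/demo/value reads and writes demo_value as a decimal u64, exactly as the CMA files expose their counters.
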
hwpoison-inject.c
    14  static int hwpoison_inject(void *data, u64 val)  in hwpoison_inject()  argument
    16  unsigned long pfn = val;  in hwpoison_inject()
    54  static int hwpoison_unpoison(void *data, u64 val)  in hwpoison_unpoison()  argument
    59  return unpoison_memory(val);  in hwpoison_unpoison()

hugetlb_cgroup.c
    26   #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))  argument
    27   #define MEMFILE_IDX(val) (((val) >> 16) & 0xffff)  argument
    28   #define MEMFILE_ATTR(val) ((val) & 0xffff)  argument
    459  u64 val;  in hugetlb_cgroup_read_u64_max()  local
    476  val = (u64)page_counter_read(counter);  in hugetlb_cgroup_read_u64_max()
    477  seq_printf(seq, "%llu\n", val * PAGE_SIZE);  in hugetlb_cgroup_read_u64_max()
    483  val = (u64)counter->max;  in hugetlb_cgroup_read_u64_max()
    484  if (val == limit)  in hugetlb_cgroup_read_u64_max()
    487  seq_printf(seq, "%llu\n", val * PAGE_SIZE);  in hugetlb_cgroup_read_u64_max()

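The MEMFILE_* macros above pack two small integers (an index and a file attribute) into the single private value a cgroup control file carries, 16 bits each. The scheme is plain bit packing and can be exercised as ordinary, runnable C:

    #include <stdio.h>
    #include <assert.h>

    /* Same packing scheme as the MEMFILE_* macros in the listing:
     * index in the high bits, attribute in the low 16 bits. */
    #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
    #define MEMFILE_IDX(val)        (((val) >> 16) & 0xffff)
    #define MEMFILE_ATTR(val)       ((val) & 0xffff)

    int main(void)
    {
            int packed = MEMFILE_PRIVATE(3, 0x2a);  /* idx 3, attr 0x2a */

            assert(MEMFILE_IDX(packed) == 3);
            assert(MEMFILE_ATTR(packed) == 0x2a);
            printf("packed=%#x idx=%d attr=%#x\n",
                   packed, MEMFILE_IDX(packed), MEMFILE_ATTR(packed));
            return 0;
    }
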
page_pinner.c
    452  static int pp_threshold_set(void *data, unsigned long long val)  in pp_threshold_set()  argument
    456  threshold_usec = (s64)val;  in pp_threshold_set()
    466  static int pp_threshold_get(void *data, unsigned long long *val)  in pp_threshold_get()  argument
    468  *val = (unsigned long long)threshold_usec;  in pp_threshold_get()
    475  static int failure_tracking_set(void *data, u64 val)  in failure_tracking_set()  argument
    479  on = (bool)val;  in failure_tracking_set()
    487  static int failure_tracking_get(void *data, u64 *val)  in failure_tracking_get()  argument
    489  *val = static_branch_unlikely(&failure_tracking);  in failure_tracking_get()

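failure_tracking_set()/get() above toggle a static branch, so the tracking check on the hot path costs a patched-in no-op while disabled. A sketch of that jump-label pattern with hypothetical demo_* names (kernel-tree code, shown for shape only, not the page_pinner implementation):

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(demo_tracking);

    /* the debugfs "set" side: flip the branch according to the written bool */
    static void demo_tracking_set(bool on)
    {
            if (on)
                    static_branch_enable(&demo_tracking);
            else
                    static_branch_disable(&demo_tracking);
    }

    /* the hot path: compiles to a no-op jump until the key is enabled */
    static bool demo_should_track(void)
    {
            return static_branch_unlikely(&demo_tracking);
    }
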
memcontrol.c
    214  #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))  argument
    215  #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)  argument
    216  #define MEMFILE_ATTR(val) ((val) & 0xffff)  argument
    774  void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)  in __mod_memcg_state()  argument
    784  x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);  in __mod_memcg_state()
    812  int val)  in __mod_memcg_lruvec_state()  argument
    822  __mod_memcg_state(memcg, idx, val);  in __mod_memcg_lruvec_state()
    825  __this_cpu_add(pn->lruvec_stat_local->count[idx], val);  in __mod_memcg_lruvec_state()
    830  x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);  in __mod_memcg_lruvec_state()
    853  int val)  in __mod_lruvec_state()  argument
    [all …]

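The `x = val + __this_cpu_read(...)` lines above are the batched per-cpu counter pattern: a delta accumulates in the current CPU's slot and is folded into the shared total only once it grows past a threshold, keeping atomics off the hot path. A single-threaded, runnable model of that logic (the array and threshold stand in for the kernel's percpu machinery and charge batch size):

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS        4
    #define STAT_THRESHOLD 8     /* stand-in for the kernel's batch size */

    static long total;               /* shared counter (atomic_long_t in the kernel) */
    static long percpu[NR_CPUS];     /* per-cpu deltas not yet folded into total */

    static void mod_state(int cpu, int val)
    {
            long x = percpu[cpu] + val;

            if (labs(x) > STAT_THRESHOLD) {
                    total += x;      /* fold the whole batch into the shared total */
                    x = 0;
            }
            percpu[cpu] = x;         /* keep the remainder batched on this cpu */
    }

    int main(void)
    {
            long left = 0;

            for (int i = 0; i < 100; i++)
                    mod_state(i % NR_CPUS, 1);
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    left += percpu[cpu];
            printf("total=%ld, still batched per-cpu=%ld\n", total, left);
            return 0;
    }

The shared total may lag by up to NR_CPUS * STAT_THRESHOLD, which is the accepted trade-off for cheap updates.
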
swap_slots.c
    341  entry.val = 0;  in get_swap_page()
    371  cache->slots[cache->cur++].val = 0;  in get_swap_page()
    378  if (entry.val)  in get_swap_page()
    386  entry.val = 0;  in get_swap_page()

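The .val touched here is the whole swp_entry_t: a single unsigned long encoding both the swap device (type) and the slot within it (offset), with val == 0 reserved as the "no entry" sentinel, which is why get_swap_page() clears it on its failure paths. A simplified, runnable model of the encoding (the shift is illustrative; the kernel derives the real field widths from the architecture's page-table layout):

    #include <stdio.h>

    /* toy swp_entry_t: high bits = type, low bits = offset */
    typedef struct { unsigned long val; } swp_entry_t;

    #define SWP_TYPE_SHIFT 58   /* hypothetical split, not the kernel's */

    static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
    {
            return (swp_entry_t){ .val = (type << SWP_TYPE_SHIFT) | offset };
    }

    static unsigned long swp_type(swp_entry_t e)
    {
            return e.val >> SWP_TYPE_SHIFT;
    }

    static unsigned long swp_offset(swp_entry_t e)
    {
            return e.val & ((1UL << SWP_TYPE_SHIFT) - 1);
    }

    int main(void)
    {
            swp_entry_t e = swp_entry(2, 12345);

            /* e.val == 0 is the "no swap entry" sentinel seen in the listing */
            printf("val=%#lx type=%lu offset=%lu empty=%d\n",
                   e.val, swp_type(e), swp_offset(e), e.val == 0);
            return 0;
    }
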
swapfile.c
    1187  if (!entry.val)  in __swap_info_get()
    1200  pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);  in __swap_info_get()
    1203  pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);  in __swap_info_get()
    1206  pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);  in __swap_info_get()
    1223  pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);  in _swap_info_get()
    1334  if (!entry.val)  in get_swap_device()
    1349  pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);  in get_swap_device()
    1414  unsigned char val;  in put_swap_page()  local
    1426  val = map[i];  in put_swap_page()
    1427  VM_BUG_ON(!(val & SWAP_HAS_CACHE));  in put_swap_page()
    [all …]

zswap.c
    697  static int __zswap_param_set(const char *val, const struct kernel_param *kp,  in __zswap_param_set()  argument
    701  char *s = strstrip((char *)val);  in __zswap_param_set()
    792  static int zswap_compressor_param_set(const char *val,  in zswap_compressor_param_set()  argument
    795  return __zswap_param_set(val, kp, zswap_zpool_type, NULL);  in zswap_compressor_param_set()
    798  static int zswap_zpool_param_set(const char *val,  in zswap_zpool_param_set()  argument
    801  return __zswap_param_set(val, kp, NULL, zswap_compressor);  in zswap_zpool_param_set()
    804  static int zswap_enabled_param_set(const char *val,  in zswap_enabled_param_set()  argument
    816  return param_set_bool(val, kp);  in zswap_enabled_param_set()

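The zswap setters above are custom kernel_param callbacks: a private .set hook validates or acts on the written string, then delegates to a stock helper such as param_set_bool() for the actual parse and store. A minimal sketch of that module_param_cb() pattern with invented demo names (kernel-tree code, not zswap's actual callback):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static bool demo_enabled;

    static int demo_enabled_set(const char *val, const struct kernel_param *kp)
    {
            /* ...validation or setup work would go here... */
            return param_set_bool(val, kp);  /* parse "y"/"n"/"1"/"0" into the bool */
    }

    static const struct kernel_param_ops demo_enabled_ops = {
            .set = demo_enabled_set,
            .get = param_get_bool,
    };

    module_param_cb(enabled, &demo_enabled_ops, &demo_enabled, 0644);
    MODULE_LICENSE("GPL");

Writing to /sys/module/<name>/parameters/enabled then runs demo_enabled_set() before the value lands in demo_enabled.
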
page_ext.c
    368  void *val;  in __invalidate_page_ext()  local
    373  val = (void *)ms->page_ext + PAGE_EXT_INVALID;  in __invalidate_page_ext()
    374  WRITE_ONCE(ms->page_ext, val);  in __invalidate_page_ext()

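__invalidate_page_ext() above marks ms->page_ext stale by adding PAGE_EXT_INVALID (bit 0) to the pointer rather than clearing it: because the pointer is aligned, the low bit is free to act as a flag, and readers test it before dereferencing while the address stays recoverable. A runnable model of low-bit pointer tagging (the flag value mirrors the kernel's; the helpers are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_EXT_INVALID 0x1UL

    static void *invalidate(void *p)
    {
            return (void *)((uintptr_t)p | PAGE_EXT_INVALID);  /* tag, keep address */
    }

    static int is_valid(void *p)
    {
            return !((uintptr_t)p & PAGE_EXT_INVALID);
    }

    int main(void)
    {
            long backing = 42;
            void *p = &backing;

            p = invalidate(p);
            printf("valid=%d addr=%p\n", is_valid(p),
                   (void *)((uintptr_t)p & ~PAGE_EXT_INVALID));  /* strip tag back off */
            return 0;
    }
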
shuffle.c
    20  static __meminit int shuffle_store(const char *val,  in shuffle_store()  argument
    23  int rc = param_set_bool(val, kp);  in shuffle_store()

debug_vm_pgtable.c
    65   unsigned long val = idx, *ptr = &val;  in pte_basic_tests()  local
    151  unsigned long val = idx, *ptr = &val;  in pmd_basic_tests()  local
    296  unsigned long val = idx, *ptr = &val;  in pud_basic_tests()  local

page_alloc.c
    5600  void si_meminfo(struct sysinfo *val)  in si_meminfo()  argument
    5602  val->totalram = totalram_pages();  in si_meminfo()
    5603  val->sharedram = global_node_page_state(NR_SHMEM);  in si_meminfo()
    5604  val->freeram = global_zone_page_state(NR_FREE_PAGES);  in si_meminfo()
    5605  val->bufferram = nr_blockdev_pages();  in si_meminfo()
    5606  val->totalhigh = totalhigh_pages();  in si_meminfo()
    5607  val->freehigh = nr_free_highpages();  in si_meminfo()
    5608  val->mem_unit = PAGE_SIZE;  in si_meminfo()
    5614  void si_meminfo_node(struct sysinfo *val, int nid)  in si_meminfo_node()  argument
    5624  val->totalram = managed_pages;  in si_meminfo_node()
    [all …]

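si_meminfo() above is the kernel half of the sysinfo(2) syscall: it fills struct sysinfo with page counts, and mem_unit (PAGE_SIZE) gives the unit those counts are expressed in. The userspace half is directly runnable:

    #include <stdio.h>
    #include <sys/sysinfo.h>

    int main(void)
    {
            struct sysinfo si;

            if (sysinfo(&si) != 0) {
                    perror("sysinfo");
                    return 1;
            }
            /* sizes come back in units of mem_unit, so multiply before scaling */
            printf("total RAM: %llu MiB, free: %llu MiB\n",
                   (unsigned long long)si.totalram * si.mem_unit >> 20,
                   (unsigned long long)si.freeram  * si.mem_unit >> 20);
            return 0;
    }
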
vmstat.c
    1821  long val;  in vmstat_refresh()  local
    1841  val = atomic_long_read(&vm_zone_stat[i]);  in vmstat_refresh()
    1842  if (val < 0) {  in vmstat_refresh()
    1844  __func__, zone_stat_name(i), val);  in vmstat_refresh()
    1850  val = atomic_long_read(&vm_numa_stat[i]);  in vmstat_refresh()
    1851  if (val < 0) {  in vmstat_refresh()
    1853  __func__, numa_stat_name(i), val);  in vmstat_refresh()

frontswap.c
    248  swp_entry_t entry = { .val = page_private(page), };  in __frontswap_store()
    297  swp_entry_t entry = { .val = page_private(page), };  in __frontswap_load()

memory.c
    209   static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)  in add_mm_counter_fast()  argument
    214   task->rss_stat.count[member] += val;  in add_mm_counter_fast()
    216   add_mm_counter(mm, member, val);  in add_mm_counter_fast()
    758   return entry.val;  in copy_nonpresent_pte()
    1029  entry.val = copy_nonpresent_pte(dst_mm, src_mm,  in copy_pte_range()
    1033  if (entry.val)  in copy_pte_range()
    1067  if (entry.val) {  in copy_pte_range()
    1072  entry.val = 0;  in copy_pte_range()
    3693  set_page_private(page, entry.val);  in do_swap_page()
    3776  page_private(page) != entry.val)) && swapcache)  in do_swap_page()
    [all …]

swap_state.c
    160  set_page_private(page + i, entry.val + i);  in add_to_swap_cache()
    226  if (!entry.val)  in add_to_swap()
    275  swp_entry_t entry = { .val = page_private(page) };  in delete_from_swap_cache()

slub.c
    789   static void init_object(struct kmem_cache *s, void *object, u8 val)  in init_object()  argument
    794   memset(p - s->red_left_pad, val, s->red_left_pad);  in init_object()
    802   memset(p + s->object_size, val, s->inuse - s->object_size);  in init_object()
    934   void *object, u8 val)  in check_object()  argument
    941   object - s->red_left_pad, val, s->red_left_pad))  in check_object()
    945   endobject, val, s->inuse - s->object_size))  in check_object()
    956   if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&  in check_object()
    968   if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)  in check_object()
    1511  void *object, u8 val) { return 1; }  in check_object()  argument
    4613  u8 val = test_bit(__obj_to_index(s, addr, p), map) ?  in validate_slab()  local
    [all …]

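init_object()/check_object() above implement red-zoning: the bytes on both sides of the object are filled with a known value (val is the active or inactive red-zone byte) and re-verified later, so any mismatch exposes an out-of-bounds write. A runnable toy version with made-up sizes and byte values:

    #include <stdio.h>
    #include <string.h>

    #define REDZONE_BYTE 0xbb   /* illustrative; SLUB uses its own markers */
    #define RED_LEFT_PAD 8
    #define OBJ_SIZE     16

    static unsigned char slot[RED_LEFT_PAD + OBJ_SIZE + RED_LEFT_PAD];
    static unsigned char *object = slot + RED_LEFT_PAD;

    static void init_object(void)
    {
            memset(slot, REDZONE_BYTE, RED_LEFT_PAD);               /* left zone */
            memset(object + OBJ_SIZE, REDZONE_BYTE, RED_LEFT_PAD);  /* right zone */
    }

    static int check_object(void)
    {
            for (int i = 0; i < RED_LEFT_PAD; i++)
                    if (slot[i] != REDZONE_BYTE ||
                        object[OBJ_SIZE + i] != REDZONE_BYTE)
                            return 0;   /* a zone byte changed: corruption */
            return 1;
    }

    int main(void)
    {
            init_object();
            object[OBJ_SIZE] = 0;       /* simulate a one-byte overflow */
            printf("object intact: %s\n", check_object() ? "yes" : "no");
            return 0;
    }
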
huge_memory.c
    2475  swp_entry_t entry = { .val = page_private(head) };  in __split_huge_page()
    2524  swp_entry_t entry = { .val = page_private(head) };  in __split_huge_page()
    2917  static int split_huge_pages_set(void *data, u64 val)  in split_huge_pages_set()  argument
    2924  if (val != 1)  in split_huge_pages_set()

slab.c
    1447  static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)  in poison_obj()  argument
    1452  memset(addr, val, size);  in poison_obj()
    2323  unsigned int idx, freelist_idx_t val)  in set_free_obj()  argument
    2325  ((freelist_idx_t *)(page->freelist))[idx] = val;  in set_free_obj()

util.c
    805  entry.val = page_private(page);  in page_mapping()

shmem.c
    1436  if (!swap.val)  in shmem_writepage()
    1647  entry.val = page_private(oldpage);  in shmem_replace_page()
    1667  set_page_private(newpage, entry.val);  in shmem_replace_page()
    1745  if (!PageSwapCache(page) || page_private(page) != swap.val ||  in shmem_swapin_page()

oom_kill.c
    774  trace_mark_victim(tsk, cred->uid.val);  in mark_oom_victim()

/mm/damon/
reclaim.c
    366  static int enabled_store(const char *val,  in enabled_store()  argument
    369  int rc = param_set_bool(val, kp);  in enabled_store()

/mm/kfence/
core.c
    53  static int param_set_sample_interval(const char *val, const struct kernel_param *kp)  in param_set_sample_interval()  argument
    56  int ret = kstrtoul(val, 0, &num);  in param_set_sample_interval()