Occurrences of the identifier "mask" under mm/ (line numbers are per file; "[all …]" marks a truncated match list):

mm/vmalloc.c
    102  unsigned int max_page_shift, pgtbl_mod_mask *mask)  in vmap_pte_range() (argument)
    109  pte = pte_alloc_kernel_track(pmd, addr, mask);  in vmap_pte_range()
    130  *mask |= PGTBL_PTE_MODIFIED;  in vmap_pte_range()
    161  unsigned int max_page_shift, pgtbl_mod_mask *mask)  in vmap_pmd_range() (argument)
    166  pmd = pmd_alloc_track(&init_mm, pud, addr, mask);  in vmap_pmd_range()
    174  *mask |= PGTBL_PMD_MODIFIED;  in vmap_pmd_range()
    178  if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))  in vmap_pmd_range()
    211  unsigned int max_page_shift, pgtbl_mod_mask *mask)  in vmap_pud_range() (argument)
    216  pud = pud_alloc_track(&init_mm, p4d, addr, mask);  in vmap_pud_range()
    224  *mask |= PGTBL_PUD_MODIFIED;  in vmap_pud_range()
    [all …]

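Note: vmap_pte_range()/vmap_pmd_range()/vmap_pud_range() thread one pgtbl_mod_mask pointer down the page-table walk and OR in a PGTBL_*_MODIFIED bit at each level they change, so the caller can sync only the levels that were actually touched. A minimal userspace sketch of the same accumulate-through-the-walk pattern (all names are hypothetical stand-ins, not the kernel API):

    #include <stdio.h>

    /* Hypothetical stand-ins for PGTBL_*_MODIFIED. */
    #define MOD_PTE (1u << 0)
    #define MOD_PMD (1u << 1)
    #define MOD_PUD (1u << 2)

    /* Each level ORs its bit into the caller's mask when it modifies something. */
    static void touch_pte(unsigned int *mask) { *mask |= MOD_PTE; }
    static void touch_pmd(unsigned int *mask) { *mask |= MOD_PMD; touch_pte(mask); }
    static void touch_pud(unsigned int *mask) { *mask |= MOD_PUD; touch_pmd(mask); }

    int main(void)
    {
        unsigned int mask = 0;      /* nothing modified yet */
        touch_pud(&mask);           /* walk one branch of the "table" */
        if (mask & MOD_PTE)         /* caller reacts only to what changed */
            printf("PTE level modified, mask=%#x\n", mask);
        return 0;
    }
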
mm/mempolicy.c
    1351  static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,  in get_bitmap() (argument)
    1358  ret = compat_get_bitmap(mask,  in get_bitmap()
    1362  ret = copy_from_user(mask, nmask,  in get_bitmap()
    1369  mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;  in get_bitmap()
    1411  static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,  in copy_nodes_to_user() (argument)
    1424  if (clear_user((char __user *)mask + nbytes, copy - nbytes))  in copy_nodes_to_user()
    1431  return compat_put_bitmap((compat_ulong_t __user *)mask,  in copy_nodes_to_user()
    1434  return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;  in copy_nodes_to_user()
    1967  bool init_nodemask_of_mempolicy(nodemask_t *mask)  in init_nodemask_of_mempolicy() (argument)
    1971  if (!(mask && current->mempolicy))  in init_nodemask_of_mempolicy()
    [all …]

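Note: line 1369 clears the bits above maxnode in the last word of the copied-in bitmap, so stray high bits from user space cannot leak into the nodemask. A standalone sketch of that trailing-bit trim (helper name hypothetical):

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Clear the bits past `nbits` in the last word of a bitmap, as
     * get_bitmap() does for the word holding bit maxnode-1. */
    static void trim_bitmap(unsigned long *map, unsigned long nbits)
    {
        unsigned long nlongs = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;

        if (nbits % BITS_PER_LONG)   /* guard: a full-word shift would be UB */
            map[nlongs - 1] &= (1UL << (nbits % BITS_PER_LONG)) - 1;
    }

    int main(void)
    {
        unsigned long map[1] = { ~0UL }; /* all bits set, including garbage */
        trim_bitmap(map, 10);            /* keep only bits 0..9 */
        printf("%#lx\n", map[0]);        /* prints 0x3ff */
        return 0;
    }
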
mm/pgalloc-track.h
    46  #define pte_alloc_kernel_track(pmd, address, mask) \  (argument)
    48  (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\

mm/cma.c
    438  unsigned long mask, offset;  in cma_alloc() (local)
    469  mask = cma_bitmap_aligned_mask(cma, align);  in cma_alloc()
    482  bitmap_maxno, start, bitmap_count, mask,  in cma_alloc()
    557  start = bitmap_no + mask + 1;  in cma_alloc()

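Note: cma_alloc() converts the requested alignment into a bitmap mask and, when an attempt fails, restarts the scan at bitmap_no + mask + 1, i.e. just past the current aligned block. A sketch of that retry arithmetic (values illustrative; the real bitmap search helper re-aligns the start upward):

    #include <stdio.h>

    int main(void)
    {
        unsigned long align_order = 3;               /* want 8-slot alignment */
        unsigned long mask = (1UL << align_order) - 1;
        unsigned long bitmap_no = 21;                /* failed candidate */

        /* Next candidate strictly past the current aligned block, as
         * cma_alloc() computes after a failed attempt. */
        unsigned long next = bitmap_no + mask + 1;
        printf("retry from %lu\n", next);            /* prints 29 */
        return 0;
    }
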
mm/swap_state.c
    620  unsigned long mask;  in swap_cluster_readahead() (local)
    627  mask = swapin_nr_pages(offset) - 1;  in swap_cluster_readahead()
    628  if (!mask)  in swap_cluster_readahead()
    633  start_offset = offset & ~mask;  in swap_cluster_readahead()
    634  end_offset = offset | mask;  in swap_cluster_readahead()

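Note: swapin_nr_pages() returns a power of two, so mask = n - 1 and lines 633-634 select the aligned n-slot window containing the faulting offset. A sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_pages = 8;            /* power of two, per swapin_nr_pages() */
        unsigned long mask = nr_pages - 1;     /* 0b111 */
        unsigned long offset = 53;             /* faulting swap slot */

        /* The aligned window of nr_pages slots that contains offset. */
        unsigned long start = offset & ~mask;  /* 48 */
        unsigned long end   = offset | mask;   /* 55 */
        printf("readahead %lu..%lu\n", start, end);
        return 0;
    }
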
mm/mremap.c
    456  unsigned long next, extent, mask, size;  in get_extent() (local)
    461  mask = PMD_MASK;  in get_extent()
    466  mask = PUD_MASK;  in get_extent()
    474  next = (old_addr + size) & mask;  in get_extent()
    479  next = (new_addr + size) & mask;  in get_extent()

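Note: get_extent() uses PMD_MASK/PUD_MASK to clamp each copy step at the next page-table boundary, since one step must stay within a single PMD or PUD. A sketch of the clamp (constants are the usual x86-64 values, shown for illustration only):

    #include <stdio.h>

    int main(void)
    {
        unsigned long size = 1UL << 21;        /* PMD_SIZE on x86-64 */
        unsigned long mask = ~(size - 1);      /* PMD_MASK */
        unsigned long addr = 0x200000UL + 0x3000UL;
        unsigned long end  = addr + 0x500000UL;

        /* Next PMD boundary after addr; the copy step must not cross it. */
        unsigned long next = (addr + size) & mask;
        unsigned long extent = next - addr;
        if (extent > end - addr)               /* nor past the request */
            extent = end - addr;
        printf("copy %#lx bytes, up to %#lx\n", extent, next);
        return 0;
    }
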
mm/readahead.c
    120  gfp_t mask = mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;  in readahead_gfp_mask() (local)
    122  trace_android_rvh_set_readahead_gfp_mask(&mask);  in readahead_gfp_mask()
    123  return mask;  in readahead_gfp_mask()

mm/memory.c
    2585  pgtbl_mod_mask *mask)  in apply_to_pte_range() (argument)
    2593  pte_alloc_kernel_track(pmd, addr, mask) :  in apply_to_pte_range()
    2616  *mask |= PGTBL_PTE_MODIFIED;  in apply_to_pte_range()
    2628  pgtbl_mod_mask *mask)  in apply_to_pmd_range() (argument)
    2637  pmd = pmd_alloc_track(mm, pud, addr, mask);  in apply_to_pmd_range()
    2655  fn, data, create, mask);  in apply_to_pmd_range()
    2666  pgtbl_mod_mask *mask)  in apply_to_pud_range() (argument)
    2673  pud = pud_alloc_track(mm, p4d, addr, mask);  in apply_to_pud_range()
    2691  fn, data, create, mask);  in apply_to_pud_range()
    2702  pgtbl_mod_mask *mask)  in apply_to_p4d_range() (argument)
    [all …]

mm/percpu.c
    2862  static struct cpumask mask __initdata;  in pcpu_build_alloc_info()
    2875  cpumask_clear(&mask);  in pcpu_build_alloc_info()
    2897  cpumask_copy(&mask, cpu_possible_mask);  in pcpu_build_alloc_info()
    2900  for (group = 0; !cpumask_empty(&mask); group++) {  in pcpu_build_alloc_info()
    2902  cpu = cpumask_first(&mask);  in pcpu_build_alloc_info()
    2905  cpumask_clear_cpu(cpu, &mask);  in pcpu_build_alloc_info()
    2907  for_each_cpu(tcpu, &mask) {  in pcpu_build_alloc_info()
    2913  cpumask_clear_cpu(tcpu, &mask);  in pcpu_build_alloc_info()

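Note: pcpu_build_alloc_info() partitions cpu_possible_mask into groups by repeatedly taking the first remaining CPU and then clearing every CPU that belongs with it. A userspace sketch of the same peel-off-a-group loop over a plain bitmask (the grouping rule here, same value of bit/4, is a hypothetical stand-in for the kernel's distance test):

    #include <stdio.h>

    int main(void)
    {
        unsigned int mask = 0xB7;                  /* CPUs 0,1,2,4,5,7 */
        int group = 0;

        while (mask) {
            int cpu = __builtin_ctz(mask);         /* cpumask_first() analogue */
            mask &= mask - 1;                      /* cpumask_clear_cpu() */
            printf("group %d: cpu %d", group, cpu);
            for (int tcpu = cpu + 1; tcpu < 32; tcpu++) {
                if ((mask & (1u << tcpu)) && tcpu / 4 == cpu / 4) {
                    mask &= ~(1u << tcpu);         /* claim it for this group */
                    printf(" cpu %d", tcpu);
                }
            }
            printf("\n");
            group++;
        }
        return 0;
    }
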
mm/oom_kill.c
    98  const nodemask_t *mask = oc->nodemask;  in oom_cpuset_eligible() (local)
    105  if (mask) {  in oom_cpuset_eligible()
    112  ret = mempolicy_in_oom_domain(tsk, mask);  in oom_cpuset_eligible()

mm/memory-failure.c
    816  unsigned long mask;  (member)
    1439  if ((p->flags & ps->mask) == ps->res)  in identify_page_state()
    1444  if (!ps->mask)  in identify_page_state()
    1446  if ((page_flags & ps->mask) == ps->res)  in identify_page_state()

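Note: identify_page_state() walks a table where each entry matches when (flags & ps->mask) == ps->res, with a zero mask acting as the terminating catch-all. A sketch of that table-driven matching (flag values and names invented for illustration):

    #include <stdio.h>

    #define PG_DIRTY (1ul << 0)
    #define PG_LRU   (1ul << 1)

    struct state { unsigned long mask, res; const char *name; };

    static const struct state table[] = {
        { PG_DIRTY | PG_LRU, PG_DIRTY | PG_LRU, "dirty lru" },
        { PG_DIRTY | PG_LRU, PG_LRU,            "clean lru" },
        { 0,                 0,                 "unknown"   }, /* catch-all */
    };

    int main(void)
    {
        unsigned long flags = PG_LRU;
        for (const struct state *ps = table; ; ps++)
            if (!ps->mask || (flags & ps->mask) == ps->res) {
                printf("%s\n", ps->name);   /* prints "clean lru" */
                break;
            }
        return 0;
    }
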
mm/page_alloc.c
    553  unsigned long mask)  in __get_pfnblock_flags_mask() (argument)
    569  return (word >> bitidx) & mask;  in __get_pfnblock_flags_mask()
    581  unsigned long pfn, unsigned long mask)  in get_pfnblock_flags_mask() (argument)
    583  return __get_pfnblock_flags_mask(page, pfn, mask);  in get_pfnblock_flags_mask()
    619  unsigned long mask)  in set_pfnblock_flags_mask() (argument)
    635  mask <<= bitidx;  in set_pfnblock_flags_mask()
    640  old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);  in set_pfnblock_flags_mask()
    7988  unsigned long start, end, mask;  in node_map_pfn_alignment() (local)
    8004  mask = ~((1 << __ffs(start)) - 1);  in node_map_pfn_alignment()
    8005  while (mask && last_end <= (start & (mask << 1)))  in node_map_pfn_alignment()
    [all …]

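Note: set_pfnblock_flags_mask() (line 640) updates a few bits inside a shared word with a cmpxchg() retry loop, preserving the neighbouring fields. A userspace sketch of the same lock-free masked update using C11 atomics (function name hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Replace the bits selected by `mask` with `flags`, retrying until no
     * other writer raced with us. */
    static void set_bits(_Atomic unsigned long *word,
                         unsigned long flags, unsigned long mask)
    {
        unsigned long old = atomic_load(word);

        /* (old & ~mask) keeps the other fields, | flags installs ours. */
        while (!atomic_compare_exchange_weak(word, &old, (old & ~mask) | flags))
            ;   /* `old` was reloaded by the failed CAS; just retry */
    }

    int main(void)
    {
        _Atomic unsigned long word = 0xF0F0;
        set_bits(&word, 0x5, 0xF);             /* replace the low nibble */
        printf("%#lx\n", atomic_load(&word));  /* prints 0xf0f5 */
        return 0;
    }
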
mm/compaction.c
    3107  const struct cpumask *mask;  in kcompactd_cpu_online() (local)
    3109  mask = cpumask_of_node(pgdat->node_id);  in kcompactd_cpu_online()
    3111  if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)  in kcompactd_cpu_online()
    3113  set_cpus_allowed_ptr(pgdat->kcompactd, mask);  in kcompactd_cpu_online()

mm/internal.h
    554  static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,  in node_reclaim() (argument)

mm/hugetlb.c
    1253  #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \  (argument)
    1254  for (nr_nodes = nodes_weight(*mask); \
    1256  ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
    1259  #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \  (argument)
    1260  for (nr_nodes = nodes_weight(*mask); \
    1262  ((node = hstate_next_node_to_free(hs, mask)) || 1); \
    3724  h->mask = ~(huge_page_size(h) - 1);  in hugetlb_add_hstate()

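Note: the for_each_node_mask_to_alloc()/..._to_free() macros at lines 1253/1259 round-robin over the nodes set in a mask, visiting each allowed node once per pass so huge-page allocations interleave across nodes. A sketch of that wrap-around iteration (next_node() here is a hypothetical stand-in for the kernel's next_node_in()):

    #include <stdio.h>

    /* Next set bit after `prev`, wrapping around a 32-bit mask. */
    static int next_node(int prev, unsigned int mask)
    {
        for (int i = 1; i <= 32; i++) {
            int node = (prev + i) % 32;
            if (mask & (1u << node))
                return node;
        }
        return -1;   /* empty mask */
    }

    int main(void)
    {
        unsigned int mask = 0x15;      /* nodes 0, 2, 4 */
        int node = 4;                  /* last node we allocated from */

        /* One pass: visit each allowed node once, starting after `node`. */
        for (int left = __builtin_popcount(mask); left > 0; left--) {
            node = next_node(node, mask);
            printf("allocate on node %d\n", node);  /* 0, then 2, then 4 */
        }
        return 0;
    }
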
mm/vmscan.c
    3585  static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,  in get_next_vma() (argument)
    3589  unsigned long end = (start | ~mask) + 1;  in get_next_vma()
    3591  VM_WARN_ON_ONCE(mask & size);  in get_next_vma()
    3592  VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));  in get_next_vma()

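Note: line 3589 computes one past the end of the aligned region containing start: OR-ing in ~mask saturates the offset bits and the +1 rolls over to the next boundary (the two warnings check that mask and size are consistent). A sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned long size = 1UL << 21;      /* walk in PMD-sized chunks */
        unsigned long mask = ~(size - 1);    /* get_next_vma()-style mask */
        unsigned long start = 0x7f0000203000UL;

        /* One past the last byte of the aligned region containing start. */
        unsigned long end = (start | ~mask) + 1;
        printf("chunk ends at %#lx\n", end); /* prints 0x7f0000400000 */
        return 0;
    }
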
mm/slab.c
    956  const struct cpumask *mask = cpumask_of_node(node);  in cpuup_canceled() (local)
    978  if (!cpumask_empty(mask)) {  in cpuup_canceled()

mm/memcontrol.c
    1845  static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)  in mem_cgroup_oom() (argument)
    1878  current->memcg_oom_gfp_mask = mask;  in mem_cgroup_oom()
    1892  if (mem_cgroup_out_of_memory(memcg, mask, order))  in mem_cgroup_oom()