/mm/ |
D | cma.c |
    415   unsigned long mask, offset;                    in cma_alloc() local
    432   mask = cma_bitmap_aligned_mask(cma, align);    in cma_alloc()
    443   bitmap_maxno, start, bitmap_count, mask,       in cma_alloc()
    474   start = bitmap_no + mask + 1;                  in cma_alloc()
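The cma_alloc() lines above search a per-area bitmap for a free run whose start is aligned: the mask holds the low alignment bits (in bitmap granularity), and after a failed attempt the search resumes at bitmap_no + mask + 1, which re-aligns to the next candidate slot. A minimal user-space sketch of that arithmetic, assuming the mask is simply (1 << align_bits) - 1; align_bits is a made-up stand-in for what cma_bitmap_aligned_mask() derives from the requested alignment and the area's order_per_bit:

    #include <stdio.h>

    /* Illustrative only: low-bits alignment mask, as used to keep candidate
     * start positions in the CMA bitmap aligned. */
    static unsigned long aligned_mask(unsigned int align_bits)
    {
            return (1UL << align_bits) - 1;        /* e.g. align_bits=3 -> mask=0b111 */
    }

    static unsigned long align_up(unsigned long idx, unsigned long mask)
    {
            return (idx + mask) & ~mask;           /* round up to a multiple of mask+1 */
    }

    int main(void)
    {
            unsigned long mask = aligned_mask(3);  /* runs must start on an 8-bit boundary */
            unsigned long bitmap_no = 21;          /* pretend the attempt at this bit failed */

            /* Next attempt starts past the failed position, then gets re-aligned. */
            unsigned long next = align_up(bitmap_no + mask + 1, mask);

            printf("mask=%lu, retry search at bit %lu\n", mask, next);
            return 0;
    }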
|
D | swap_state.c |
    549   unsigned long mask;                  in swap_cluster_readahead() local
    556   mask = swapin_nr_pages(offset) - 1;  in swap_cluster_readahead()
    557   if (!mask)                           in swap_cluster_readahead()
    569   start_offset = offset & ~mask;       in swap_cluster_readahead()
    570   end_offset = offset | mask;          in swap_cluster_readahead()
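In swap_cluster_readahead() the window is a power-of-two number of swap slots, so mask is that size minus one: offset & ~mask and offset | mask give the first and last slots of the aligned window containing offset. A small runnable sketch of the window arithmetic, with the window size hard-coded instead of coming from swapin_nr_pages():

    #include <stdio.h>

    int main(void)
    {
            unsigned long offset = 1234567;   /* arbitrary swap slot */
            unsigned long nr = 8;             /* power-of-two window size (stand-in for swapin_nr_pages()) */
            unsigned long mask = nr - 1;      /* low bits select the position within the window */

            if (!mask) {
                    /* window of a single slot: nothing extra to read ahead */
                    printf("no readahead\n");
                    return 0;
            }

            unsigned long start_offset = offset & ~mask;  /* round down to the window start */
            unsigned long end_offset   = offset | mask;   /* fill the low bits: last slot of the window */

            printf("window [%lu, %lu] contains %lu\n", start_offset, end_offset, offset);
            return 0;
    }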
|
D | mempolicy.c |
    1384  static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,  in copy_nodes_to_user() argument
    1393  if (clear_user((char __user *)mask + nbytes, copy - nbytes))                      in copy_nodes_to_user()
    1397  return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;                in copy_nodes_to_user()
    1973  bool init_nodemask_of_mempolicy(nodemask_t *mask)                                 in init_nodemask_of_mempolicy() argument
    1978  if (!(mask && current->mempolicy))                                                 in init_nodemask_of_mempolicy()
    1989  init_nodemask_of_node(mask, nid);                                                  in init_nodemask_of_mempolicy()
    1995  *mask = mempolicy->v.nodes;                                                        in init_nodemask_of_mempolicy()
    2018  const nodemask_t *mask)                                                            in mempolicy_nodemask_intersects() argument
    2023  if (!mask)                                                                         in mempolicy_nodemask_intersects()
    2041  ret = nodes_intersects(mempolicy->v.nodes, *mask);                                 in mempolicy_nodemask_intersects()
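copy_nodes_to_user() deals with a user buffer that can be wider than the kernel's nodemask: the bytes beyond the real mask are zeroed with clear_user() and only the meaningful bytes are copied out. A user-space sketch of that "copy the head, zero the tail" pattern, with made-up sizes (NBYTES standing in for the bytes backed by nr_node_ids, COPY for the caller-supplied buffer width):

    #include <stdio.h>
    #include <string.h>

    #define NBYTES 16   /* bytes actually backed by the kernel nodemask (assumed) */
    #define COPY   64   /* bytes the caller's buffer expects to be written (assumed) */

    static int copy_nodes_to_buf(unsigned char *dst, const unsigned char *nodes)
    {
            if (COPY > NBYTES)
                    memset(dst + NBYTES, 0, COPY - NBYTES);  /* clear_user() equivalent */
            memcpy(dst, nodes, NBYTES);                      /* copy_to_user() equivalent */
            return 0;
    }

    int main(void)
    {
            unsigned char nodes[NBYTES] = { 0x0f };          /* nodes 0-3 set */
            unsigned char buf[COPY];

            copy_nodes_to_buf(buf, nodes);
            printf("first byte 0x%02x, tail byte 0x%02x\n", buf[0], buf[COPY - 1]);
            return 0;
    }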
|
D | oom_kill.c |
    92    const nodemask_t *mask = oc->nodemask;            in oom_cpuset_eligible() local
    99    if (mask) {                                       in oom_cpuset_eligible()
    106   ret = mempolicy_nodemask_intersects(tsk, mask);   in oom_cpuset_eligible()
|
D | memory-failure.c |
    836   unsigned long mask;                       member
    1072  if ((p->flags & ps->mask) == ps->res)     in identify_page_state()
    1077  if (!ps->mask)                            in identify_page_state()
    1079  if ((page_flags & ps->mask) == ps->res)   in identify_page_state()
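identify_page_state() walks a table in which each entry carries a mask and an expected value (res): a poisoned page matches the first entry whose masked flag bits equal res, and an entry with mask == 0 terminates the table as a catch-all. A minimal sketch of that mask/result matching, with invented flag bits and state names:

    #include <stdio.h>

    /* Invented flag bits standing in for page flags. */
    #define F_DIRTY   0x1UL
    #define F_LRU     0x2UL
    #define F_SLAB    0x4UL

    struct page_state {
            unsigned long mask;   /* which bits to look at */
            unsigned long res;    /* what those bits must equal to match */
            const char *msg;
    };

    static const struct page_state error_states[] = {
            { F_SLAB,          F_SLAB,          "slab page"      },
            { F_LRU | F_DIRTY, F_LRU | F_DIRTY, "dirty LRU page" },
            { F_LRU,           F_LRU,           "clean LRU page" },
            { 0,               0,               "unknown page"   },  /* mask==0: catch-all */
    };

    static const char *identify(unsigned long page_flags)
    {
            const struct page_state *ps;

            for (ps = error_states; ps->mask; ps++)
                    if ((page_flags & ps->mask) == ps->res)
                            return ps->msg;
            return ps->msg;        /* fell through to the terminating entry */
    }

    int main(void)
    {
            printf("%s\n", identify(F_LRU | F_DIRTY));   /* -> dirty LRU page */
            printf("%s\n", identify(0));                 /* -> unknown page */
            return 0;
    }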
|
D | page_alloc.c |
    495   unsigned long mask)                                                     in __get_pfnblock_flags_mask() argument
    508   return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;                   in __get_pfnblock_flags_mask()
    513   unsigned long mask)                                                     in get_pfnblock_flags_mask() argument
    515   return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);          in get_pfnblock_flags_mask()
    534   unsigned long mask)                                                     in set_pfnblock_flags_mask() argument
    551   mask <<= (BITS_PER_LONG - bitidx - 1);                                  in set_pfnblock_flags_mask()
    556   old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); in set_pfnblock_flags_mask()
    7198  unsigned long start, end, mask;                                         in node_map_pfn_alignment() local
    7214  mask = ~((1 << __ffs(start)) - 1);                                      in node_map_pfn_alignment()
    7215  while (mask && last_end <= (start & (mask << 1)))                       in node_map_pfn_alignment()
    [all …]
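The pfnblock helpers pack a few flag bits per pageblock into an array of unsigned long words: reads shift the containing word and apply the mask, while writes move the mask and the new flags into position and install the word with cmpxchg() so a racing update retries instead of being lost. A simplified, runnable sketch of the same read-modify-write pattern using C11 atomics; the field position and width are made up, and the bit layout is a plain shift rather than the kernel's (BITS_PER_LONG - bitidx - 1) arrangement:

    #include <stdatomic.h>
    #include <stdio.h>

    /* One word holding a hypothetical 3-bit field, updated with a
     * compare-and-swap retry loop in the spirit of set_pfnblock_flags_mask(). */
    static _Atomic unsigned long bitmap_word;

    static unsigned long get_field(unsigned long word, unsigned int shift, unsigned long mask)
    {
            return (word >> shift) & mask;            /* read: shift down, then mask */
    }

    static void set_field(unsigned int shift, unsigned long mask, unsigned long flags)
    {
            unsigned long old, new;

            mask <<= shift;                           /* move the mask to the field's position */
            flags <<= shift;

            old = atomic_load(&bitmap_word);
            do {
                    new = (old & ~mask) | flags;      /* clear the field, then set the new bits */
            } while (!atomic_compare_exchange_weak(&bitmap_word, &old, new));
    }

    int main(void)
    {
            set_field(8, 0x7, 5);                     /* store value 5 in bits 8..10 */
            printf("field = %lu\n", get_field(atomic_load(&bitmap_word), 8, 0x7));
            return 0;
    }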
|
D | compaction.c |
    2703  const struct cpumask *mask;                            in kcompactd_cpu_online() local
    2705  mask = cpumask_of_node(pgdat->node_id);                in kcompactd_cpu_online()
    2707  if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)  in kcompactd_cpu_online()
    2709  set_cpus_allowed_ptr(pgdat->kcompactd, mask);          in kcompactd_cpu_online()
|
D | hugetlb.c |
    1061  #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \   argument
    1062  for (nr_nodes = nodes_weight(*mask); \
    1064  ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
    1067  #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \    argument
    1068  for (nr_nodes = nodes_weight(*mask); \
    1070  ((node = hstate_next_node_to_free(hs, mask)) || 1); \
    3056  h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);                   in hugetlb_add_hstate()
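hugetlb_add_hstate() derives the hstate's mask straight from the huge page order: ~((1ULL << (order + PAGE_SHIFT)) - 1) keeps only the bits above the huge page size, so addr & mask rounds an address down to its huge page boundary. A quick sketch assuming a 4 KiB base page and a 2 MiB huge page (order 9):

    #include <stdio.h>

    #define PAGE_SHIFT 12ULL                 /* assuming 4 KiB base pages */

    int main(void)
    {
            unsigned int order = 9;          /* 2^9 base pages = 2 MiB huge page */
            unsigned long long mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);

            unsigned long long addr = 0x7f1234567890ULL;

            printf("mask           = 0x%llx\n", mask);
            printf("huge page base = 0x%llx\n", addr & mask);    /* rounded down to 2 MiB */
            printf("offset inside  = 0x%llx\n", addr & ~mask);
            return 0;
    }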
|
D | internal.h |
    504   static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,   in node_reclaim() argument
|
D | vmscan.c |
    4102  const struct cpumask *mask;                            in kswapd_cpu_online() local
    4104  mask = cpumask_of_node(pgdat->node_id);                in kswapd_cpu_online()
    4106  if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)  in kswapd_cpu_online()
    4108  set_cpus_allowed_ptr(pgdat->kswapd, mask);             in kswapd_cpu_online()
|
D | memory.c |
    3587  unsigned long address = vmf->address, nr_pages, mask;   in do_fault_around() local
    3594  mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;         in do_fault_around()
    3596  vmf->address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
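do_fault_around() builds its mask from the fault-around window size: ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK rounds the faulting address down to a window boundary, and the result is clamped so it never falls before the VMA start. A runnable sketch of that clamp, assuming a 4 KiB page size and a 16-page window; the addresses are arbitrary:

    #include <stdio.h>

    #define PAGE_SHIFT 12UL                       /* assuming 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned long max_ul(unsigned long a, unsigned long b)
    {
            return a > b ? a : b;
    }

    int main(void)
    {
            unsigned long nr_pages = 16;          /* fault-around window in pages (assumed) */
            unsigned long address  = 0x403a123UL; /* faulting address */
            unsigned long vm_start = 0x4000000UL; /* VMA start, the lower clamp */

            unsigned long mask  = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
            unsigned long start = max_ul(address & mask, vm_start);

            printf("fault-around window starts at 0x%lx\n", start);
            return 0;
    }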
|
D | memcontrol.c |
    1896  static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)  in mem_cgroup_oom() argument
    1929  current->memcg_oom_gfp_mask = mask;                                                      in mem_cgroup_oom()
    1943  if (mem_cgroup_out_of_memory(memcg, mask, order))                                        in mem_cgroup_oom()
|
D | slab.c |
    945   const struct cpumask *mask = cpumask_of_node(node);   in cpuup_canceled() local
    967   if (!cpumask_empty(mask)) {                            in cpuup_canceled()
|