
Searched refs:mpol (Results 1 – 6 of 6) sorted by relevance

/kernel/linux/linux-5.10/include/linux/
mempolicy.h
131 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
151 struct mempolicy **mpol, nodemask_t **nodemask);
159 struct mempolicy *mpol = get_task_policy(current); in policy_nodemask_current() local
161 return policy_nodemask(gfp, mpol); in policy_nodemask_current()
179 extern int mpol_parse_str(char *str, struct mempolicy **mpol);
214 struct mempolicy *mpol) in mpol_shared_policy_init() argument
255 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
257 *mpol = NULL; in huge_node()
278 static inline int mpol_parse_str(char *str, struct mempolicy **mpol) in mpol_parse_str() argument
shmem_fs.h
42 struct mempolicy *mpol; /* default memory policy for mappings */ member
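
The kernel-internal declarations above back the userspace memory-policy syscalls. A minimal userspace sketch (illustrative, not from these results; assumes a NUMA-enabled kernel with nodes 0 and 1 online) that installs a task policy with set_mempolicy(2):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define MPOL_INTERLEAVE 3   /* from <linux/mempolicy.h> */

int main(void)
{
        /* interleave this task's allocations across nodes 0 and 1 */
        unsigned long nodemask = (1UL << 0) | (1UL << 1);

        if (syscall(SYS_set_mempolicy, MPOL_INTERLEAVE, &nodemask,
                    8 * sizeof(nodemask)) != 0) {
                perror("set_mempolicy");
                return 1;
        }
        puts("task policy: interleave across nodes 0-1");
        return 0;
}

The policy installed this way is what get_task_policy(current), shown in the policy_nodemask_current() snippet above, later retrieves inside the kernel.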
/kernel/linux/linux-5.10/Documentation/filesystems/
tmpfs.rst
94  mpol=default              use the process allocation policy
96  mpol=prefer:Node          prefers to allocate memory from the given Node
97  mpol=bind:NodeList        allocates memory only from nodes in NodeList
98  mpol=interleave           prefers to allocate from each node in turn
99  mpol=interleave:NodeList  allocates from each node of NodeList in turn
100 mpol=local                prefers to allocate memory from the local node
105 largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15
127 For example, mpol=bind=static:NodeList, is the equivalent of an
130 Note that trying to mount a tmpfs with an mpol option will fail if the
135 online, then it is advisable to omit the mpol option from automatic
[all …]
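
A minimal sketch of mounting tmpfs with one of these options via mount(2); the mount point /mnt/tmp and the node list are assumptions for illustration. It needs CAP_SYS_ADMIN, and fails when the requested nodes are not online, as the documentation above warns:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* equivalent of: mount -t tmpfs -o size=64m,mpol=interleave:0-1 tmpfs /mnt/tmp */
        if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
                  "size=64m,mpol=interleave:0-1") != 0) {
                perror("mount");
                return 1;
        }
        puts("tmpfs mounted, allocations interleaved across nodes 0-1");
        return 0;
}

The option string is parsed by mpol_parse_str() (mempolicy.h line 179 above) and stored in the superblock's mpol field (shmem_fs.h line 42), which shmem_get_sbmpol() later hands out per mapping.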
/kernel/linux/linux-5.10/mm/
shmem.c
113 struct mempolicy *mpol; member
1471 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) in shmem_show_mpol() argument
1475 if (!mpol || mpol->mode == MPOL_DEFAULT) in shmem_show_mpol()
1478 mpol_to_str(buffer, sizeof(buffer), mpol); in shmem_show_mpol()
1485 struct mempolicy *mpol = NULL; in shmem_get_sbmpol() local
1486 if (sbinfo->mpol) { in shmem_get_sbmpol()
1488 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1489 mpol_get(mpol); in shmem_get_sbmpol()
1492 return mpol; in shmem_get_sbmpol()
1495 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) in shmem_show_mpol() argument
[all …]
mempolicy.c
2015 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
2019 *mpol = get_vma_policy(vma, addr); in huge_node()
2022 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { in huge_node()
2023 nid = interleave_nid(*mpol, vma, addr, in huge_node()
2026 nid = policy_node(gfp_flags, *mpol, numa_node_id()); in huge_node()
2027 if ((*mpol)->mode == MPOL_BIND) in huge_node()
2028 *nodemask = &(*mpol)->v.nodes; in huge_node()
2655 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) in mpol_shared_policy_init() argument
2662 if (mpol) { in mpol_shared_policy_init()
2670 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); in mpol_shared_policy_init()
[all …]
hugetlb.c
1142 struct mempolicy *mpol; in dequeue_huge_page_vma() local
1161 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_huge_page_vma()
1168 mpol_cond_put(mpol); in dequeue_huge_page_vma()
1972 struct mempolicy *mpol; in alloc_buddy_huge_page_with_mpol() local
1977 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); in alloc_buddy_huge_page_with_mpol()
1979 mpol_cond_put(mpol); in alloc_buddy_huge_page_with_mpol()
2007 struct mempolicy *mpol; in alloc_huge_page_vma() local
2014 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in alloc_huge_page_vma()
2016 mpol_cond_put(mpol); in alloc_huge_page_vma()
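
The three hugetlb.c call sites above share the pattern that the huge_node() excerpt in mempolicy.c implements: resolve the VMA's policy into a node id and optional nodemask, allocate, then drop the policy reference with mpol_cond_put(). From userspace this path is exercised by binding a hugetlb mapping with mbind(2); a sketch under the assumptions that 2 MiB huge pages are available and node 0 is online:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#define MPOL_BIND 2          /* from <linux/mempolicy.h> */
#define HUGE_SZ (2UL << 20)  /* one 2 MiB huge page */

int main(void)
{
        void *p = mmap(NULL, HUGE_SZ, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        unsigned long nodemask = 1UL << 0; /* node 0 only */
        if (syscall(SYS_mbind, p, HUGE_SZ, MPOL_BIND, &nodemask,
                    8 * sizeof(nodemask), 0) != 0) {
                perror("mbind");
                return 1;
        }

        /* first touch faults the huge page in under the bound policy,
         * driving the kernel through dequeue_huge_page_vma() -> huge_node() */
        memset(p, 1, HUGE_SZ);
        munmap(p, HUGE_SZ);
        return 0;
}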