// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly
 *                restrict the allocation to the specified memory nodes.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
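
/*
 * Illustrative userspace sketch (not part of this file): the policies
 * above are installed through the set_mempolicy(2) and mbind(2) syscalls
 * defined further down.  A minimal sketch, assuming a machine with nodes
 * 0 and 1 online:
 *
 *	unsigned long both = 0x3;	// nodemask: nodes 0 and 1
 *	unsigned long node1 = 0x2;	// nodemask: node 1 only
 *	// Interleave this process' future allocations across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &both, 64);
 *	// Bind one anonymous mapping to node 1, reporting misplaced pages.
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, len, MPOL_BIND, &node1, 64, MPOL_MF_STRICT);
 */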

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
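
/*
 * Worked example (hypothetical topology): if node 2 is offline and, among
 * the online nodes, node 1 has the smallest node_distance(2, n), then
 * numa_map_to_online_node(2) returns 1.  An online node, or NUMA_NO_NODE,
 * is returned unchanged.
 */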

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
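
/*
 * Worked example (illustrative): with *rel = {2,3,5} (weight 3), the
 * user's mask *orig is first folded modulo 3 by nodes_fold(), then
 * nodes_onto() maps relative bit 0 onto node 2, bit 1 onto node 3 and
 * bit 2 onto node 5.  So a relative mask of {0,2} comes out as {2,5}.
 */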

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol || pol->mode == MPOL_LOCAL)
		return;
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		vm_write_begin(vma);
		mpol_rebind_policy(vma->vm_policy, new);
		vm_write_end(vma);
	}
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified
 *        and an existing page was already on a node that does not follow
 *        the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		ret = 2;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  We still
			 * need to migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced pages and no
		 * need to further check other vmas.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page, but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
	     !hugetlb_pmd_shared(pte))) {
		if (isolate_hugetlb(page, qp->pagelist) &&
			(flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate the page, but allow migrating
			 * pages which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	vm_write_begin(vma);
	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	/*
	 * The speculative page fault handler accesses this field without
	 * holding the mmap_sem.
	 */
	WRITE_ONCE(vma->vm_policy, new);
	vm_write_end(vma);
	mpol_put(old);

	return 0;
 err_out:
	vm_write_end(vma);
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx,
				 vma_get_anon_name(vma));
		if (prev) {
			vma = prev;
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, because lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	mmap_read_unlock(mm);

	lru_cache_enable();
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct vm_area_struct *vma;
	unsigned long address;

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	} else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
					 HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
}
#else

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	return -EIO;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	int ret;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		lru_cache_disable();
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			mmap_write_lock(mm);
			err = mpol_set_nodemask(new, nmask, scratch);
			if (err)
				mmap_write_unlock(mm);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	ret = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	if (ret < 0) {
		err = ret;
		goto up_out;
	}

	err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
			err = -EIO;
	} else {
up_out:
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
	}

	mmap_write_unlock(mm);
mpol_out:
	mpol_put(new);
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_enable();
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long t;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/*
	 * When the user specified more nodes than supported, just check
	 * if the non supported part is all zero.
	 *
	 * If maxnode has more longs than MAX_NUMNODES, check
	 * the bits in that area first. And then go through to
	 * check the remaining bits which are equal to or bigger than
	 * MAX_NUMNODES. Otherwise, just check bits [MAX_NUMNODES, maxnode).
	 */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
		unsigned long valid_mask = endmask;

		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		if (get_user(t, nmask + nlongs - 1))
			return -EFAULT;
		if (t & valid_mask)
			return -EINVAL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
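
/*
 * Worked example (illustrative, 64-bit): for maxnode = 65, the decrement
 * above leaves 64 bits to validate, so nlongs = BITS_TO_LONGS(64) = 1
 * and, since 64 % BITS_PER_LONG == 0, endmask = ~0UL: every bit of the
 * single copied long is accepted, subject to the MAX_NUMNODES checks
 * above.
 */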

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

static long kernel_mbind(unsigned long start, unsigned long len,
			 unsigned long mode, const unsigned long __user *nmask,
			 unsigned long maxnode, unsigned int flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	start = untagged_addr(start);
	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned int, flags)
{
	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
				 unsigned long maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	return kernel_set_mempolicy(mode, nmask, maxnode);
}

static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
				const unsigned long __user *old_nodes,
				const unsigned long __user *new_nodes)
{
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified process.
	 * Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	task_nodes = cpuset_mems_allowed(current);
	nodes_and(*new, *new, task_nodes);
	if (nodes_empty(*new))
		goto out_put;

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;

}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}
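
/*
 * Illustrative userspace sketch (not part of this file): move the calling
 * process' pages (pid 0 means the caller, see kernel_migrate_pages()
 * above) from node 0 to node 1:
 *
 *	unsigned long from = 0x1, to = 0x2;
 *	long ret = migrate_pages(0, 64, &from, &to);
 *	// ret < 0 is a -errno; ret > 0 counts pages that could not be moved.
 */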
1608 
1609 
1610 /* Retrieve NUMA policy */
kernel_get_mempolicy(int __user * policy,unsigned long __user * nmask,unsigned long maxnode,unsigned long addr,unsigned long flags)1611 static int kernel_get_mempolicy(int __user *policy,
1612 				unsigned long __user *nmask,
1613 				unsigned long maxnode,
1614 				unsigned long addr,
1615 				unsigned long flags)
1616 {
1617 	int err;
1618 	int pval;
1619 	nodemask_t nodes;
1620 
1621 	if (nmask != NULL && maxnode < nr_node_ids)
1622 		return -EINVAL;
1623 
1624 	addr = untagged_addr(addr);
1625 
1626 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1627 
1628 	if (err)
1629 		return err;
1630 
1631 	if (policy && put_user(pval, policy))
1632 		return -EFAULT;
1633 
1634 	if (nmask)
1635 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1636 
1637 	return err;
1638 }
1639 
SYSCALL_DEFINE5(get_mempolicy,int __user *,policy,unsigned long __user *,nmask,unsigned long,maxnode,unsigned long,addr,unsigned long,flags)1640 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1641 		unsigned long __user *, nmask, unsigned long, maxnode,
1642 		unsigned long, addr, unsigned long, flags)
1643 {
1644 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1645 }
1646 
1647 #ifdef CONFIG_COMPAT
1648 
COMPAT_SYSCALL_DEFINE5(get_mempolicy,int __user *,policy,compat_ulong_t __user *,nmask,compat_ulong_t,maxnode,compat_ulong_t,addr,compat_ulong_t,flags)1649 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1650 		       compat_ulong_t __user *, nmask,
1651 		       compat_ulong_t, maxnode,
1652 		       compat_ulong_t, addr, compat_ulong_t, flags)
1653 {
1654 	long err;
1655 	unsigned long __user *nm = NULL;
1656 	unsigned long nr_bits, alloc_size;
1657 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1658 
1659 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1660 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1661 
1662 	if (nmask)
1663 		nm = compat_alloc_user_space(alloc_size);
1664 
1665 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1666 
1667 	if (!err && nmask) {
1668 		unsigned long copy_size;
1669 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1670 		err = copy_from_user(bm, nm, copy_size);
1671 		/* ensure entire bitmap is zeroed */
1672 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1673 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1674 	}
1675 
1676 	return err;
1677 }
1678 
COMPAT_SYSCALL_DEFINE3(set_mempolicy,int,mode,compat_ulong_t __user *,nmask,compat_ulong_t,maxnode)1679 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1680 		       compat_ulong_t, maxnode)
1681 {
1682 	unsigned long __user *nm = NULL;
1683 	unsigned long nr_bits, alloc_size;
1684 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1685 
1686 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1687 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1688 
1689 	if (nmask) {
1690 		if (compat_get_bitmap(bm, nmask, nr_bits))
1691 			return -EFAULT;
1692 		nm = compat_alloc_user_space(alloc_size);
1693 		if (copy_to_user(nm, bm, alloc_size))
1694 			return -EFAULT;
1695 	}
1696 
1697 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
1698 }
1699 
COMPAT_SYSCALL_DEFINE6(mbind,compat_ulong_t,start,compat_ulong_t,len,compat_ulong_t,mode,compat_ulong_t __user *,nmask,compat_ulong_t,maxnode,compat_ulong_t,flags)1700 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1701 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1702 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
1703 {
1704 	unsigned long __user *nm = NULL;
1705 	unsigned long nr_bits, alloc_size;
1706 	nodemask_t bm;
1707 
1708 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1709 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1710 
1711 	if (nmask) {
1712 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1713 			return -EFAULT;
1714 		nm = compat_alloc_user_space(alloc_size);
1715 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1716 			return -EFAULT;
1717 	}
1718 
1719 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1720 }
1721 
COMPAT_SYSCALL_DEFINE4(migrate_pages,compat_pid_t,pid,compat_ulong_t,maxnode,const compat_ulong_t __user *,old_nodes,const compat_ulong_t __user *,new_nodes)1722 COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1723 		       compat_ulong_t, maxnode,
1724 		       const compat_ulong_t __user *, old_nodes,
1725 		       const compat_ulong_t __user *, new_nodes)
1726 {
1727 	unsigned long __user *old = NULL;
1728 	unsigned long __user *new = NULL;
1729 	nodemask_t tmp_mask;
1730 	unsigned long nr_bits;
1731 	unsigned long size;
1732 
1733 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1734 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1735 	if (old_nodes) {
1736 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1737 			return -EFAULT;
1738 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1739 		if (new_nodes)
1740 			new = old + size / sizeof(unsigned long);
1741 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1742 			return -EFAULT;
1743 	}
1744 	if (new_nodes) {
1745 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1746 			return -EFAULT;
1747 		if (new == NULL)
1748 			new = compat_alloc_user_space(size);
1749 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1750 			return -EFAULT;
1751 	}
1752 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1753 }
1754 
1755 #endif /* CONFIG_COMPAT */
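/*
 * Userspace view (illustrative sketch, not part of the original file):
 * a 32-bit task on a 64-bit kernel reaches the compat entry points above
 * through the ordinary syscall wrappers, e.g. libnuma's <numaif.h>.
 * Note that maxnode counts one more than the number of mask bits,
 * matching the "nr_bits = maxnode - 1" logic above:
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	if (set_mempolicy(MPOL_INTERLEAVE, &mask,
 *			  sizeof(mask) * 8 + 1) == -1)
 *		perror("set_mempolicy");
 */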
1756 
1757 bool vma_migratable(struct vm_area_struct *vma)
1758 {
1759 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1760 		return false;
1761 
1762 	/*
1763 	 * DAX device mappings require predictable access latency, so avoid
1764 	 * incurring periodic faults.
1765 	 */
1766 	if (vma_is_dax(vma))
1767 		return false;
1768 
1769 	if (is_vm_hugetlb_page(vma) &&
1770 		!hugepage_migration_supported(hstate_vma(vma)))
1771 		return false;
1772 
1773 	/*
1774 	 * Migration allocates pages in the highest zone. If we cannot
1775 	 * do so then migration (at least from node to node) is not
1776 	 * possible.
1777 	 */
1778 	if (vma->vm_file &&
1779 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1780 			< policy_zone)
1781 		return false;
1782 	return true;
1783 }
1784 
1785 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1786 						unsigned long addr)
1787 {
1788 	struct mempolicy *pol;
1789 
1790 	if (!vma)
1791 		return NULL;
1792 
1793 	if (vma->vm_ops && vma->vm_ops->get_policy)
1794 		return vma->vm_ops->get_policy(vma, addr);
1795 
1796 	/*
1797 	 * This could be called without holding the mmap_lock in the
1798 	 * speculative page fault handler's path.
1799 	 */
1800 	pol = READ_ONCE(vma->vm_policy);
1801 	if (pol) {
1802 		/*
1803 		 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1804 		 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1805 		 * count on these policies which will be dropped by
1806 		 * mpol_cond_put() later
1807 		 */
1808 		if (mpol_needs_cond_ref(pol))
1809 			mpol_get(pol);
1810 	}
1811 
1812 	return pol;
1813 }
1814 
1815 /*
1816  * get_vma_policy(@vma, @addr)
1817  * @vma: virtual memory area whose policy is sought
1818  * @addr: address in @vma for shared policy lookup
1819  *
1820  * Returns effective policy for a VMA at specified address.
1821  * Falls back to current->mempolicy or system default policy, as necessary.
1822  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1823  * count--added by the get_policy() vm_op, as appropriate--to protect against
1824  * freeing by another task.  It is the caller's responsibility to free the
1825  * extra reference for shared policies.
1826  */
1827 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1828 						unsigned long addr)
1829 {
1830 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1831 
1832 	if (!pol)
1833 		pol = get_task_policy(current);
1834 
1835 	return pol;
1836 }
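/*
 * Caller-pattern sketch (illustrative; example_policy_mode is a
 * hypothetical helper, not part of the original file): a reference that
 * get_vma_policy() may have taken on a shared policy must be dropped
 * with mpol_cond_put() once the caller is done with the policy.
 */
static inline unsigned short example_policy_mode(struct vm_area_struct *vma,
						 unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(vma, addr);
	unsigned short mode = pol->mode;

	mpol_cond_put(pol);	/* drops the ref only if MPOL_F_SHARED */
	return mode;
}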
1837 
1838 bool vma_policy_mof(struct vm_area_struct *vma)
1839 {
1840 	struct mempolicy *pol;
1841 
1842 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1843 		bool ret = false;
1844 
1845 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1846 		if (pol && (pol->flags & MPOL_F_MOF))
1847 			ret = true;
1848 		mpol_cond_put(pol);
1849 
1850 		return ret;
1851 	}
1852 
1853 	pol = vma->vm_policy;
1854 	if (!pol)
1855 		pol = get_task_policy(current);
1856 
1857 	return pol->flags & MPOL_F_MOF;
1858 }
1859 
1860 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1861 {
1862 	enum zone_type dynamic_policy_zone = policy_zone;
1863 
1864 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1865 
1866 	/*
1867 	 * If policy->v.nodes has movable memory only,
1868 	 * we apply the policy only when gfp_zone(gfp) is ZONE_MOVABLE.
1869 	 *
1870 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1871 	 * so if the following test fails, it implies
1872 	 * policy->v.nodes has movable memory only.
1873 	 */
1874 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1875 		dynamic_policy_zone = ZONE_MOVABLE;
1876 
1877 	return zone >= dynamic_policy_zone;
1878 }
1879 
1880 /*
1881  * Return a nodemask representing a mempolicy for filtering nodes for
1882  * page allocation
1883  */
1884 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1885 {
1886 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1887 	if (unlikely(policy->mode == MPOL_BIND) &&
1888 			apply_policy_zone(policy, gfp_zone(gfp)) &&
1889 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1890 		return &policy->v.nodes;
1891 
1892 	return NULL;
1893 }
1894 
1895 /* Return the node id preferred by the given mempolicy, or the given id */
1896 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1897 {
1898 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1899 		nd = policy->v.preferred_node;
1900 	else {
1901 		/*
1902 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1903 		 * because we might easily break the expectation to stay on the
1904 		 * requested node and not break the policy.
1905 		 */
1906 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1907 	}
1908 
1909 	return nd;
1910 }
1911 
1912 /* Do dynamic interleaving for a process */
1913 static unsigned interleave_nodes(struct mempolicy *policy)
1914 {
1915 	unsigned next;
1916 	struct task_struct *me = current;
1917 
1918 	next = next_node_in(me->il_prev, policy->v.nodes);
1919 	if (next < MAX_NUMNODES)
1920 		me->il_prev = next;
1921 	return next;
1922 }
1923 
1924 /*
1925  * Depending on the memory policy provide a node from which to allocate the
1926  * next slab entry.
1927  */
1928 unsigned int mempolicy_slab_node(void)
1929 {
1930 	struct mempolicy *policy;
1931 	int node = numa_mem_id();
1932 
1933 	if (in_interrupt())
1934 		return node;
1935 
1936 	policy = current->mempolicy;
1937 	if (!policy || policy->flags & MPOL_F_LOCAL)
1938 		return node;
1939 
1940 	switch (policy->mode) {
1941 	case MPOL_PREFERRED:
1942 		/*
1943 		 * handled MPOL_F_LOCAL above
1944 		 */
1945 		return policy->v.preferred_node;
1946 
1947 	case MPOL_INTERLEAVE:
1948 		return interleave_nodes(policy);
1949 
1950 	case MPOL_BIND: {
1951 		struct zoneref *z;
1952 
1953 		/*
1954 		 * Follow bind policy behavior and start allocation at the
1955 		 * first node.
1956 		 */
1957 		struct zonelist *zonelist;
1958 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1959 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1960 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1961 							&policy->v.nodes);
1962 		return z->zone ? zone_to_nid(z->zone) : node;
1963 	}
1964 
1965 	default:
1966 		BUG();
1967 	}
1968 }
1969 
1970 /*
1971  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1972  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1973  * number of present nodes.
1974  */
1975 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1976 {
1977 	unsigned nnodes = nodes_weight(pol->v.nodes);
1978 	unsigned target;
1979 	int i;
1980 	int nid;
1981 
1982 	if (!nnodes)
1983 		return numa_node_id();
1984 	target = (unsigned int)n % nnodes;
1985 	nid = first_node(pol->v.nodes);
1986 	for (i = 0; i < target; i++)
1987 		nid = next_node(nid, pol->v.nodes);
1988 	return nid;
1989 }
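/*
 * Worked example (illustrative): with pol->v.nodes = {0,2,5} and n = 10,
 * nnodes = 3 and target = 10 % 3 = 1, so the walk starts at node 0 and
 * returns node 2, the second node of the mask.
 */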
1990 
1991 /* Determine a node number for interleave */
1992 static inline unsigned interleave_nid(struct mempolicy *pol,
1993 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1994 {
1995 	if (vma) {
1996 		unsigned long off;
1997 
1998 		/*
1999 		 * for small pages, there is no difference between
2000 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
2001 		 * for huge pages, since vm_pgoff is in units of small
2002 		 * pages, we need to shift off the always 0 bits to get
2003 		 * a useful offset.
2004 		 */
2005 		BUG_ON(shift < PAGE_SHIFT);
2006 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
2007 		off += (addr - vma->vm_start) >> shift;
2008 		return offset_il_node(pol, off);
2009 	} else
2010 		return interleave_nodes(pol);
2011 }
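/*
 * Worked example (illustrative): for a 2 MiB huge page with
 * PAGE_SHIFT == 12, shift == 21, so off becomes
 * (vm_pgoff >> 9) + ((addr - vma->vm_start) >> 21), i.e. the mapping
 * offset in whole huge pages, which offset_il_node() maps to a node.
 */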
2012 
2013 #ifdef CONFIG_HUGETLBFS
2014 /*
2015  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2016  * @vma: virtual memory area whose policy is sought
2017  * @addr: address in @vma for shared policy lookup and interleave policy
2018  * @gfp_flags: for requested zone
2019  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2020  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2021  *
2022  * Returns a nid suitable for a huge page allocation and a pointer
2023  * to the struct mempolicy for conditional unref after allocation.
2024  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
2025  * @nodemask for filtering the zonelist.
2026  *
2027  * Must be protected by read_mems_allowed_begin()
2028  */
2029 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2030 				struct mempolicy **mpol, nodemask_t **nodemask)
2031 {
2032 	int nid;
2033 
2034 	*mpol = get_vma_policy(vma, addr);
2035 	*nodemask = NULL;	/* assume !MPOL_BIND */
2036 
2037 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
2038 		nid = interleave_nid(*mpol, vma, addr,
2039 					huge_page_shift(hstate_vma(vma)));
2040 	} else {
2041 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2042 		if ((*mpol)->mode == MPOL_BIND)
2043 			*nodemask = &(*mpol)->v.nodes;
2044 	}
2045 	return nid;
2046 }
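/*
 * Usage sketch (illustrative; modeled on the hugetlb fault path --
 * example_alloc_huge and the dequeue helper are assumptions here):
 * retry under the mems_allowed cookie and drop the conditional ref.
 */
static struct page *example_alloc_huge(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	unsigned int cookie;
	int nid;

	do {
		cookie = read_mems_allowed_begin();
		nid = huge_node(vma, addr, htlb_alloc_mask(h),
				&mpol, &nodemask);
		page = dequeue_huge_page_nodemask(h, htlb_alloc_mask(h),
						  nid, nodemask);
		mpol_cond_put(mpol);
	} while (!page && read_mems_allowed_retry(cookie));

	return page;
}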
2047 
2048 /*
2049  * init_nodemask_of_mempolicy
2050  *
2051  * If the current task's mempolicy is "default" [NULL], return 'false'
2052  * to indicate default policy.  Otherwise, extract the policy nodemask
2053  * for 'bind' or 'interleave' policy into the argument nodemask, or
2054  * initialize the argument nodemask to contain the single node for
2055  * 'preferred' or 'local' policy and return 'true' to indicate presence
2056  * of non-default mempolicy.
2057  *
2058  * We don't bother with reference counting the mempolicy [mpol_get/put]
2059  * because the current task is examining its own mempolicy and a task's
2060  * mempolicy is only ever changed by the task itself.
2061  *
2062  * N.B., it is the caller's responsibility to free a returned nodemask.
2063  */
2064 bool init_nodemask_of_mempolicy(nodemask_t *mask)
2065 {
2066 	struct mempolicy *mempolicy;
2067 	int nid;
2068 
2069 	if (!(mask && current->mempolicy))
2070 		return false;
2071 
2072 	task_lock(current);
2073 	mempolicy = current->mempolicy;
2074 	switch (mempolicy->mode) {
2075 	case MPOL_PREFERRED:
2076 		if (mempolicy->flags & MPOL_F_LOCAL)
2077 			nid = numa_node_id();
2078 		else
2079 			nid = mempolicy->v.preferred_node;
2080 		init_nodemask_of_node(mask, nid);
2081 		break;
2082 
2083 	case MPOL_BIND:
2084 	case MPOL_INTERLEAVE:
2085 		*mask = mempolicy->v.nodes;
2086 		break;
2087 
2088 	default:
2089 		BUG();
2090 	}
2091 	task_unlock(current);
2092 
2093 	return true;
2094 }
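/*
 * Usage sketch (illustrative; modeled on hugetlb's nr_hugepages handling,
 * example_policy_nodes is a hypothetical helper): fall back to all memory
 * nodes when the task policy is default.
 */
static nodemask_t *example_policy_nodes(void)
{
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL);

	if (nodes_allowed && !init_nodemask_of_mempolicy(nodes_allowed)) {
		NODEMASK_FREE(nodes_allowed);
		nodes_allowed = &node_states[N_MEMORY];
	}
	return nodes_allowed;	/* NODEMASK_FREE() unless it is &node_states[] */
}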
2095 #endif
2096 
2097 /*
2098  * mempolicy_nodemask_intersects
2099  *
2100  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2101  * policy.  Otherwise, check for intersection between mask and the policy
2102  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
2103  * policy, always return true since it may allocate elsewhere on fallback.
2104  *
2105  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2106  */
2107 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2108 					const nodemask_t *mask)
2109 {
2110 	struct mempolicy *mempolicy;
2111 	bool ret = true;
2112 
2113 	if (!mask)
2114 		return ret;
2115 	task_lock(tsk);
2116 	mempolicy = tsk->mempolicy;
2117 	if (!mempolicy)
2118 		goto out;
2119 
2120 	switch (mempolicy->mode) {
2121 	case MPOL_PREFERRED:
2122 		/*
2123 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
2124 		 * to allocate from; the task may fall back to other nodes when oom.
2125 		 * Thus, it's possible for tsk to have allocated memory from
2126 		 * nodes in mask.
2127 		 */
2128 		break;
2129 	case MPOL_BIND:
2130 	case MPOL_INTERLEAVE:
2131 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
2132 		break;
2133 	default:
2134 		BUG();
2135 	}
2136 out:
2137 	task_unlock(tsk);
2138 	return ret;
2139 }
2140 
2141 /* Allocate a page in interleaved policy.
2142    Own path because it needs to do special accounting. */
2143 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2144 					unsigned nid)
2145 {
2146 	struct page *page;
2147 
2148 	page = __alloc_pages(gfp, order, nid);
2149 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2150 	if (!static_branch_likely(&vm_numa_stat_key))
2151 		return page;
2152 	if (page && page_to_nid(page) == nid) {
2153 		preempt_disable();
2154 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2155 		preempt_enable();
2156 	}
2157 	return page;
2158 }
2159 
2160 /**
2161  * 	alloc_pages_vma	- Allocate a page for a VMA.
2162  *
2163  * 	@gfp:
2164  *      %GFP_USER    user allocation.
2165  *      %GFP_KERNEL  kernel allocations,
2166  *      %GFP_HIGHMEM highmem/user allocations,
2167  *      %GFP_FS      allocation should not call back into a file system.
2168  *      %GFP_ATOMIC  don't sleep.
2169  *
2170  *	@order: Order of the GFP allocation.
2171  * 	@vma:  Pointer to VMA or NULL if not available.
2172  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2173  *	@node: Which node to prefer for allocation (modulo policy).
2174  *	@hugepage: for hugepages try only the preferred node if possible
2175  *
2176  * 	This function allocates a page from the kernel page pool and applies
2177  *	a NUMA policy associated with the VMA or the current process.
2178  *	When VMA is not NULL caller must read-lock the mmap_lock of the
2179  *	mm_struct of the VMA to prevent it from going away. Should be used for
2180  *	all allocations for pages that will be mapped into user space. Returns
2181  *	NULL when no page can be allocated.
2182  */
2183 struct page *
2184 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2185 		unsigned long addr, int node, bool hugepage)
2186 {
2187 	struct mempolicy *pol;
2188 	struct page *page;
2189 	int preferred_nid;
2190 	nodemask_t *nmask;
2191 
2192 	pol = get_vma_policy(vma, addr);
2193 
2194 	if (pol->mode == MPOL_INTERLEAVE) {
2195 		unsigned nid;
2196 
2197 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2198 		mpol_cond_put(pol);
2199 		page = alloc_page_interleave(gfp, order, nid);
2200 		goto out;
2201 	}
2202 
2203 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2204 		int hpage_node = node;
2205 
2206 		/*
2207 		 * For hugepage allocation and non-interleave policy which
2208 		 * allows the current node (or other explicitly preferred
2209 		 * node) we only try to allocate from the current/preferred
2210 		 * node and don't fall back to other nodes, as the cost of
2211 		 * remote accesses would likely offset THP benefits.
2212 		 *
2213 		 * If the policy is interleave, or does not allow the current
2214 		 * node in its nodemask, we allocate the standard way.
2215 		 */
2216 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2217 			hpage_node = pol->v.preferred_node;
2218 
2219 		nmask = policy_nodemask(gfp, pol);
2220 		if (!nmask || node_isset(hpage_node, *nmask)) {
2221 			mpol_cond_put(pol);
2222 			/*
2223 			 * First, try to allocate THP only on local node, but
2224 			 * don't reclaim unnecessarily, just compact.
2225 			 */
2226 			page = __alloc_pages_node(hpage_node,
2227 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2228 
2229 			/*
2230 			 * If hugepage allocations are configured to always
2231 			 * use synchronous compaction, or the vma has been
2232 			 * madvised to prefer hugepage backing, retry allowing
2233 			 * remote memory with both reclaim and compact as well.
2234 			 */
2235 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2236 				page = __alloc_pages_nodemask(gfp, order,
2237 							hpage_node, nmask);
2238 
2239 			goto out;
2240 		}
2241 	}
2242 
2243 	nmask = policy_nodemask(gfp, pol);
2244 	preferred_nid = policy_node(gfp, pol, node);
2245 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2246 	mpol_cond_put(pol);
2247 out:
2248 	return page;
2249 }
2250 EXPORT_SYMBOL(alloc_pages_vma);
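/*
 * Usage sketch (illustrative; example_anon_page is a hypothetical helper):
 * an anonymous-fault style allocation of one movable user page, with the
 * mmap lock held for read as required above.
 */
static struct page *example_anon_page(struct vm_area_struct *vma,
				      unsigned long addr)
{
	/* order 0, prefer the local node, not a THP allocation */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id(), false);
}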
2251 
2252 /**
2253  * 	alloc_pages_current - Allocate pages.
2254  *
2255  *	@gfp:
2256  *		%GFP_USER   user allocation,
2257  *      	%GFP_KERNEL kernel allocation,
2258  *      	%GFP_HIGHMEM highmem allocation,
2259  *      	%GFP_FS     don't call back into a file system.
2260  *      	%GFP_ATOMIC don't sleep.
2261  *	@order: Power of two of allocation size in pages. 0 is a single page.
2262  *
2263  *	Allocate a page from the kernel page pool.  When not in
2264  *	interrupt context, apply the current process' NUMA policy.
2265  *	Returns NULL when no page can be allocated.
2266  */
2267 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2268 {
2269 	struct mempolicy *pol = &default_policy;
2270 	struct page *page;
2271 
2272 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2273 		pol = get_task_policy(current);
2274 
2275 	/*
2276 	 * No reference counting needed for current->mempolicy
2277 	 * nor system default_policy
2278 	 */
2279 	if (pol->mode == MPOL_INTERLEAVE)
2280 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2281 	else
2282 		page = __alloc_pages_nodemask(gfp, order,
2283 				policy_node(gfp, pol, numa_node_id()),
2284 				policy_nodemask(gfp, pol));
2285 
2286 	return page;
2287 }
2288 EXPORT_SYMBOL(alloc_pages_current);
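/*
 * Note (illustrative): on CONFIG_NUMA kernels the alloc_pages() macro in
 * <linux/gfp.h> resolves to alloc_pages_current(), so a plain
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 * already honours the calling task's mempolicy for an order-2 allocation.
 */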
2289 
2290 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2291 {
2292 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2293 
2294 	if (IS_ERR(pol))
2295 		return PTR_ERR(pol);
2296 	dst->vm_policy = pol;
2297 	return 0;
2298 }
2299 
2300 /*
2301  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2302  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2303  * with the mems_allowed returned by cpuset_mems_allowed().  This
2304  * keeps mempolicies cpuset relative after its cpuset moves.  See
2305  * further kernel/cpuset.c update_nodemask().
2306  *
2307  * current's mempolicy may be rebound by another task (the task that changes
2308  * the cpuset's mems), so we needn't do rebind work for the current task.
2309  */
2310 
2311 /* Slow path of a mempolicy duplicate */
2312 struct mempolicy *__mpol_dup(struct mempolicy *old)
2313 {
2314 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2315 
2316 	if (!new)
2317 		return ERR_PTR(-ENOMEM);
2318 
2319 	/* task's mempolicy is protected by alloc_lock */
2320 	if (old == current->mempolicy) {
2321 		task_lock(current);
2322 		*new = *old;
2323 		task_unlock(current);
2324 	} else
2325 		*new = *old;
2326 
2327 	if (current_cpuset_is_being_rebound()) {
2328 		nodemask_t mems = cpuset_mems_allowed(current);
2329 		mpol_rebind_policy(new, &mems);
2330 	}
2331 	atomic_set(&new->refcnt, 1);
2332 	return new;
2333 }
2334 
2335 /* Slow path of a mempolicy comparison */
2336 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2337 {
2338 	if (!a || !b)
2339 		return false;
2340 	if (a->mode != b->mode)
2341 		return false;
2342 	if (a->flags != b->flags)
2343 		return false;
2344 	if (mpol_store_user_nodemask(a))
2345 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2346 			return false;
2347 
2348 	switch (a->mode) {
2349 	case MPOL_BIND:
2350 	case MPOL_INTERLEAVE:
2351 		return !!nodes_equal(a->v.nodes, b->v.nodes);
2352 	case MPOL_PREFERRED:
2353 		/* a's ->flags is the same as b's */
2354 		if (a->flags & MPOL_F_LOCAL)
2355 			return true;
2356 		return a->v.preferred_node == b->v.preferred_node;
2357 	default:
2358 		BUG();
2359 		return false;
2360 	}
2361 }
2362 
2363 /*
2364  * Shared memory backing store policy support.
2365  *
2366  * Remember policies even when nobody has shared memory mapped.
2367  * The policies are kept in Red-Black tree linked from the inode.
2368  * They are protected by the sp->lock rwlock, which should be held
2369  * for any accesses to the tree.
2370  */
2371 
2372 /*
2373  * lookup first element intersecting start-end.  Caller holds sp->lock for
2374  * reading or for writing
2375  */
2376 static struct sp_node *
2377 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2378 {
2379 	struct rb_node *n = sp->root.rb_node;
2380 
2381 	while (n) {
2382 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2383 
2384 		if (start >= p->end)
2385 			n = n->rb_right;
2386 		else if (end <= p->start)
2387 			n = n->rb_left;
2388 		else
2389 			break;
2390 	}
2391 	if (!n)
2392 		return NULL;
2393 	for (;;) {
2394 		struct sp_node *w = NULL;
2395 		struct rb_node *prev = rb_prev(n);
2396 		if (!prev)
2397 			break;
2398 		w = rb_entry(prev, struct sp_node, nd);
2399 		if (w->end <= start)
2400 			break;
2401 		n = prev;
2402 	}
2403 	return rb_entry(n, struct sp_node, nd);
2404 }
2405 
2406 /*
2407  * Insert a new shared policy into the list.  Caller holds sp->lock for
2408  * writing.
2409  */
2410 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2411 {
2412 	struct rb_node **p = &sp->root.rb_node;
2413 	struct rb_node *parent = NULL;
2414 	struct sp_node *nd;
2415 
2416 	while (*p) {
2417 		parent = *p;
2418 		nd = rb_entry(parent, struct sp_node, nd);
2419 		if (new->start < nd->start)
2420 			p = &(*p)->rb_left;
2421 		else if (new->end > nd->end)
2422 			p = &(*p)->rb_right;
2423 		else
2424 			BUG();
2425 	}
2426 	rb_link_node(&new->nd, parent, p);
2427 	rb_insert_color(&new->nd, &sp->root);
2428 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2429 		 new->policy ? new->policy->mode : 0);
2430 }
2431 
2432 /* Find shared policy intersecting idx */
2433 struct mempolicy *
2434 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2435 {
2436 	struct mempolicy *pol = NULL;
2437 	struct sp_node *sn;
2438 
2439 	if (!sp->root.rb_node)
2440 		return NULL;
2441 	read_lock(&sp->lock);
2442 	sn = sp_lookup(sp, idx, idx+1);
2443 	if (sn) {
2444 		mpol_get(sn->policy);
2445 		pol = sn->policy;
2446 	}
2447 	read_unlock(&sp->lock);
2448 	return pol;
2449 }
2450 
2451 static void sp_free(struct sp_node *n)
2452 {
2453 	mpol_put(n->policy);
2454 	kmem_cache_free(sn_cache, n);
2455 }
2456 
2457 /**
2458  * mpol_misplaced - check whether current page node is valid in policy
2459  *
2460  * @page: page to be checked
2461  * @vma: vm area where page mapped
2462  * @addr: virtual address where page mapped
2463  *
2464  * Lookup current policy node id for vma,addr and "compare to" page's
2465  * node id.
2466  *
2467  * Returns:
2468  *	-1	- not misplaced, page is in the right node
2469  *	node	- node id where the page should be
2470  *
2471  * Policy determination "mimics" alloc_page_vma().
2472  * Called from fault path where we know the vma and faulting address.
2473  */
2474 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2475 {
2476 	struct mempolicy *pol;
2477 	struct zoneref *z;
2478 	int curnid = page_to_nid(page);
2479 	unsigned long pgoff;
2480 	int thiscpu = raw_smp_processor_id();
2481 	int thisnid = cpu_to_node(thiscpu);
2482 	int polnid = NUMA_NO_NODE;
2483 	int ret = -1;
2484 
2485 	pol = get_vma_policy(vma, addr);
2486 	if (!(pol->flags & MPOL_F_MOF))
2487 		goto out;
2488 
2489 	switch (pol->mode) {
2490 	case MPOL_INTERLEAVE:
2491 		pgoff = vma->vm_pgoff;
2492 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2493 		polnid = offset_il_node(pol, pgoff);
2494 		break;
2495 
2496 	case MPOL_PREFERRED:
2497 		if (pol->flags & MPOL_F_LOCAL)
2498 			polnid = numa_node_id();
2499 		else
2500 			polnid = pol->v.preferred_node;
2501 		break;
2502 
2503 	case MPOL_BIND:
2504 
2505 		/*
2506 		 * MPOL_BIND allows binding to multiple nodes.
2507 		 * Use the current page if it is in the policy nodemask,
2508 		 * else select the nearest allowed node, if any.
2509 		 * If no allowed nodes, use current [!misplaced].
2510 		 */
2511 		if (node_isset(curnid, pol->v.nodes))
2512 			goto out;
2513 		z = first_zones_zonelist(
2514 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2515 				gfp_zone(GFP_HIGHUSER),
2516 				&pol->v.nodes);
2517 		polnid = zone_to_nid(z->zone);
2518 		break;
2519 
2520 	default:
2521 		BUG();
2522 	}
2523 
2524 	/* Migrate the page towards the node whose CPU is referencing it */
2525 	if (pol->flags & MPOL_F_MORON) {
2526 		polnid = thisnid;
2527 
2528 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2529 			goto out;
2530 	}
2531 
2532 	if (curnid != polnid)
2533 		ret = polnid;
2534 out:
2535 	mpol_cond_put(pol);
2536 
2537 	return ret;
2538 }
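/*
 * Usage sketch (illustrative; modeled on the NUMA hinting fault path,
 * see do_numa_page() in mm/memory.c; requires CONFIG_NUMA_BALANCING for
 * migrate_misplaced_page()):
 */
static void example_numa_hint_fault(struct page *page,
		struct vm_area_struct *vma, unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid == NUMA_NO_NODE)
		return;		/* not misplaced, leave the page where it is */

	/* nonzero return means the page was moved to target_nid */
	migrate_misplaced_page(page, vma, target_nid);
}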
2539 
2540 /*
2541  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2542  * dropped after task->mempolicy is set to NULL so that any allocation done as
2543  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2544  * policy.
2545  */
2546 void mpol_put_task_policy(struct task_struct *task)
2547 {
2548 	struct mempolicy *pol;
2549 
2550 	task_lock(task);
2551 	pol = task->mempolicy;
2552 	task->mempolicy = NULL;
2553 	task_unlock(task);
2554 	mpol_put(pol);
2555 }
2556 
2557 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2558 {
2559 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2560 	rb_erase(&n->nd, &sp->root);
2561 	sp_free(n);
2562 }
2563 
2564 static void sp_node_init(struct sp_node *node, unsigned long start,
2565 			unsigned long end, struct mempolicy *pol)
2566 {
2567 	node->start = start;
2568 	node->end = end;
2569 	node->policy = pol;
2570 }
2571 
2572 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2573 				struct mempolicy *pol)
2574 {
2575 	struct sp_node *n;
2576 	struct mempolicy *newpol;
2577 
2578 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2579 	if (!n)
2580 		return NULL;
2581 
2582 	newpol = mpol_dup(pol);
2583 	if (IS_ERR(newpol)) {
2584 		kmem_cache_free(sn_cache, n);
2585 		return NULL;
2586 	}
2587 	newpol->flags |= MPOL_F_SHARED;
2588 	sp_node_init(n, start, end, newpol);
2589 
2590 	return n;
2591 }
2592 
2593 /* Replace a policy range. */
2594 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2595 				 unsigned long end, struct sp_node *new)
2596 {
2597 	struct sp_node *n;
2598 	struct sp_node *n_new = NULL;
2599 	struct mempolicy *mpol_new = NULL;
2600 	int ret = 0;
2601 
2602 restart:
2603 	write_lock(&sp->lock);
2604 	n = sp_lookup(sp, start, end);
2605 	/* Take care of old policies in the same range. */
2606 	while (n && n->start < end) {
2607 		struct rb_node *next = rb_next(&n->nd);
2608 		if (n->start >= start) {
2609 			if (n->end <= end)
2610 				sp_delete(sp, n);
2611 			else
2612 				n->start = end;
2613 		} else {
2614 			/* Old policy spanning whole new range. */
2615 			if (n->end > end) {
2616 				if (!n_new)
2617 					goto alloc_new;
2618 
2619 				*mpol_new = *n->policy;
2620 				atomic_set(&mpol_new->refcnt, 1);
2621 				sp_node_init(n_new, end, n->end, mpol_new);
2622 				n->end = start;
2623 				sp_insert(sp, n_new);
2624 				n_new = NULL;
2625 				mpol_new = NULL;
2626 				break;
2627 			} else
2628 				n->end = start;
2629 		}
2630 		if (!next)
2631 			break;
2632 		n = rb_entry(next, struct sp_node, nd);
2633 	}
2634 	if (new)
2635 		sp_insert(sp, new);
2636 	write_unlock(&sp->lock);
2637 	ret = 0;
2638 
2639 err_out:
2640 	if (mpol_new)
2641 		mpol_put(mpol_new);
2642 	if (n_new)
2643 		kmem_cache_free(sn_cache, n_new);
2644 
2645 	return ret;
2646 
2647 alloc_new:
2648 	write_unlock(&sp->lock);
2649 	ret = -ENOMEM;
2650 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2651 	if (!n_new)
2652 		goto err_out;
2653 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2654 	if (!mpol_new)
2655 		goto err_out;
2656 	atomic_set(&mpol_new->refcnt, 1);
2657 	goto restart;
2658 }
2659 
2660 /**
2661  * mpol_shared_policy_init - initialize shared policy for inode
2662  * @sp: pointer to inode shared policy
2663  * @mpol:  struct mempolicy to install
2664  *
2665  * Install non-NULL @mpol in inode's shared policy rb-tree.
2666  * On entry, the current task has a reference on a non-NULL @mpol.
2667  * This must be released on exit.
2668  * This is called during get_inode() calls, so we can use GFP_KERNEL.
2669  */
2670 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2671 {
2672 	int ret;
2673 
2674 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2675 	rwlock_init(&sp->lock);
2676 
2677 	if (mpol) {
2678 		struct vm_area_struct pvma;
2679 		struct mempolicy *new;
2680 		NODEMASK_SCRATCH(scratch);
2681 
2682 		if (!scratch)
2683 			goto put_mpol;
2684 		/* contextualize the tmpfs mount point mempolicy */
2685 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2686 		if (IS_ERR(new))
2687 			goto free_scratch; /* no valid nodemask intersection */
2688 
2689 		task_lock(current);
2690 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2691 		task_unlock(current);
2692 		if (ret)
2693 			goto put_new;
2694 
2695 		/* Create pseudo-vma that contains just the policy */
2696 		vma_init(&pvma, NULL);
2697 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2698 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2699 
2700 put_new:
2701 		mpol_put(new);			/* drop initial ref */
2702 free_scratch:
2703 		NODEMASK_SCRATCH_FREE(scratch);
2704 put_mpol:
2705 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2706 	}
2707 }
2708 
2709 int mpol_set_shared_policy(struct shared_policy *info,
2710 			struct vm_area_struct *vma, struct mempolicy *npol)
2711 {
2712 	int err;
2713 	struct sp_node *new = NULL;
2714 	unsigned long sz = vma_pages(vma);
2715 
2716 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2717 		 vma->vm_pgoff,
2718 		 sz, npol ? npol->mode : -1,
2719 		 npol ? npol->flags : -1,
2720 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2721 
2722 	if (npol) {
2723 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2724 		if (!new)
2725 			return -ENOMEM;
2726 	}
2727 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2728 	if (err && new)
2729 		sp_free(new);
2730 	return err;
2731 }
2732 
2733 /* Free a backing policy store on inode delete. */
2734 void mpol_free_shared_policy(struct shared_policy *p)
2735 {
2736 	struct sp_node *n;
2737 	struct rb_node *next;
2738 
2739 	if (!p->root.rb_node)
2740 		return;
2741 	write_lock(&p->lock);
2742 	next = rb_first(&p->root);
2743 	while (next) {
2744 		n = rb_entry(next, struct sp_node, nd);
2745 		next = rb_next(&n->nd);
2746 		sp_delete(p, n);
2747 	}
2748 	write_unlock(&p->lock);
2749 }
2750 
2751 #ifdef CONFIG_NUMA_BALANCING
2752 static int __initdata numabalancing_override;
2753 
2754 static void __init check_numabalancing_enable(void)
2755 {
2756 	bool numabalancing_default = false;
2757 
2758 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2759 		numabalancing_default = true;
2760 
2761 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2762 	if (numabalancing_override)
2763 		set_numabalancing_state(numabalancing_override == 1);
2764 
2765 	if (num_online_nodes() > 1 && !numabalancing_override) {
2766 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2767 			numabalancing_default ? "Enabling" : "Disabling");
2768 		set_numabalancing_state(numabalancing_default);
2769 	}
2770 }
2771 
2772 static int __init setup_numabalancing(char *str)
2773 {
2774 	int ret = 0;
2775 	if (!str)
2776 		goto out;
2777 
2778 	if (!strcmp(str, "enable")) {
2779 		numabalancing_override = 1;
2780 		ret = 1;
2781 	} else if (!strcmp(str, "disable")) {
2782 		numabalancing_override = -1;
2783 		ret = 1;
2784 	}
2785 out:
2786 	if (!ret)
2787 		pr_warn("Unable to parse numa_balancing=\n");
2788 
2789 	return ret;
2790 }
2791 __setup("numa_balancing=", setup_numabalancing);
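/*
 * Example (from the parsing above): booting with "numa_balancing=disable"
 * sets numabalancing_override to -1, which check_numabalancing_enable()
 * applies ahead of the CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default.
 */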
2792 #else
2793 static inline void __init check_numabalancing_enable(void)
2794 {
2795 }
2796 #endif /* CONFIG_NUMA_BALANCING */
2797 
2798 /* assumes fs == KERNEL_DS */
2799 void __init numa_policy_init(void)
2800 {
2801 	nodemask_t interleave_nodes;
2802 	unsigned long largest = 0;
2803 	int nid, prefer = 0;
2804 
2805 	policy_cache = kmem_cache_create("numa_policy",
2806 					 sizeof(struct mempolicy),
2807 					 0, SLAB_PANIC, NULL);
2808 
2809 	sn_cache = kmem_cache_create("shared_policy_node",
2810 				     sizeof(struct sp_node),
2811 				     0, SLAB_PANIC, NULL);
2812 
2813 	for_each_node(nid) {
2814 		preferred_node_policy[nid] = (struct mempolicy) {
2815 			.refcnt = ATOMIC_INIT(1),
2816 			.mode = MPOL_PREFERRED,
2817 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2818 			.v = { .preferred_node = nid, },
2819 		};
2820 	}
2821 
2822 	/*
2823 	 * Set interleaving policy for system init. Interleaving is only
2824 	 * enabled across suitably sized nodes (default is >= 16MB), or
2825 	 * fall back to the largest node if they're all smaller.
2826 	 */
2827 	nodes_clear(interleave_nodes);
2828 	for_each_node_state(nid, N_MEMORY) {
2829 		unsigned long total_pages = node_present_pages(nid);
2830 
2831 		/* Preserve the largest node */
2832 		if (largest < total_pages) {
2833 			largest = total_pages;
2834 			prefer = nid;
2835 		}
2836 
2837 		/* Interleave this node? */
2838 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2839 			node_set(nid, interleave_nodes);
2840 	}
2841 
2842 	/* All too small, use the largest */
2843 	if (unlikely(nodes_empty(interleave_nodes)))
2844 		node_set(prefer, interleave_nodes);
2845 
2846 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2847 		pr_err("%s: interleaving failed\n", __func__);
2848 
2849 	check_numabalancing_enable();
2850 }
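/*
 * Worked example for the interleave threshold above: with 4 KiB pages,
 * (total_pages << PAGE_SHIFT) >= (16 << 20) admits a node into the boot
 * interleave set only if it has at least 4096 present pages (16 MB).
 */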
2851 
2852 /* Reset policy of current process to default */
2853 void numa_default_policy(void)
2854 {
2855 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2856 }
2857 
2858 /*
2859  * Parse and format mempolicy from/to strings
2860  */
2861 
2862 /*
2863  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
2864  */
2865 static const char * const policy_modes[] =
2866 {
2867 	[MPOL_DEFAULT]    = "default",
2868 	[MPOL_PREFERRED]  = "prefer",
2869 	[MPOL_BIND]       = "bind",
2870 	[MPOL_INTERLEAVE] = "interleave",
2871 	[MPOL_LOCAL]      = "local",
2872 };
2873 
2874 
2875 #ifdef CONFIG_TMPFS
2876 /**
2877  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2878  * @str:  string containing mempolicy to parse
2879  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2880  *
2881  * Format of input:
2882  *	<mode>[=<flags>][:<nodelist>]
2883  *
2884  * On success, returns 0, else 1
2885  */
2886 int mpol_parse_str(char *str, struct mempolicy **mpol)
2887 {
2888 	struct mempolicy *new = NULL;
2889 	unsigned short mode_flags;
2890 	nodemask_t nodes;
2891 	char *nodelist = strchr(str, ':');
2892 	char *flags = strchr(str, '=');
2893 	int err = 1, mode;
2894 
2895 	if (flags)
2896 		*flags++ = '\0';	/* terminate mode string */
2897 
2898 	if (nodelist) {
2899 		/* NUL-terminate mode or flags string */
2900 		*nodelist++ = '\0';
2901 		if (nodelist_parse(nodelist, nodes))
2902 			goto out;
2903 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2904 			goto out;
2905 	} else
2906 		nodes_clear(nodes);
2907 
2908 	mode = match_string(policy_modes, MPOL_MAX, str);
2909 	if (mode < 0)
2910 		goto out;
2911 
2912 	switch (mode) {
2913 	case MPOL_PREFERRED:
2914 		/*
2915 		 * Insist on a nodelist of one node only, although later
2916 		 * we use first_node(nodes) to grab a single node, so here
2917 		 * nodelist (or nodes) cannot be empty.
2918 		 */
2919 		if (nodelist) {
2920 			char *rest = nodelist;
2921 			while (isdigit(*rest))
2922 				rest++;
2923 			if (*rest)
2924 				goto out;
2925 			if (nodes_empty(nodes))
2926 				goto out;
2927 		}
2928 		break;
2929 	case MPOL_INTERLEAVE:
2930 		/*
2931 		 * Default to online nodes with memory if no nodelist
2932 		 */
2933 		if (!nodelist)
2934 			nodes = node_states[N_MEMORY];
2935 		break;
2936 	case MPOL_LOCAL:
2937 		/*
2938 		 * Don't allow a nodelist;  mpol_new() checks flags
2939 		 */
2940 		if (nodelist)
2941 			goto out;
2942 		mode = MPOL_PREFERRED;
2943 		break;
2944 	case MPOL_DEFAULT:
2945 		/*
2946 		 * Insist on an empty nodelist
2947 		 */
2948 		if (!nodelist)
2949 			err = 0;
2950 		goto out;
2951 	case MPOL_BIND:
2952 		/*
2953 		 * Insist on a nodelist
2954 		 */
2955 		if (!nodelist)
2956 			goto out;
2957 	}
2958 
2959 	mode_flags = 0;
2960 	if (flags) {
2961 		/*
2962 		 * Currently, we only support two mutually exclusive
2963 		 * mode flags.
2964 		 */
2965 		if (!strcmp(flags, "static"))
2966 			mode_flags |= MPOL_F_STATIC_NODES;
2967 		else if (!strcmp(flags, "relative"))
2968 			mode_flags |= MPOL_F_RELATIVE_NODES;
2969 		else
2970 			goto out;
2971 	}
2972 
2973 	new = mpol_new(mode, mode_flags, &nodes);
2974 	if (IS_ERR(new))
2975 		goto out;
2976 
2977 	/*
2978 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2979 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2980 	 */
2981 	if (mode != MPOL_PREFERRED)
2982 		new->v.nodes = nodes;
2983 	else if (nodelist)
2984 		new->v.preferred_node = first_node(nodes);
2985 	else
2986 		new->flags |= MPOL_F_LOCAL;
2987 
2988 	/*
2989 	 * Save nodes for contextualization: this will be used to "clone"
2990 	 * the mempolicy in a specific context [cpuset] at a later time.
2991 	 */
2992 	new->w.user_nodemask = nodes;
2993 
2994 	err = 0;
2995 
2996 out:
2997 	/* Restore string for error message */
2998 	if (nodelist)
2999 		*--nodelist = ':';
3000 	if (flags)
3001 		*--flags = '=';
3002 	if (!err)
3003 		*mpol = new;
3004 	return err;
3005 }
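/*
 * Example inputs accepted by mpol_parse_str() (illustrative, derived from
 * the parsing above):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED | MPOL_F_STATIC_NODES, node 1
 *	"bind=relative:0,2"	MPOL_BIND | MPOL_F_RELATIVE_NODES
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL, no nodelist
 *	"default"		MPOL_DEFAULT; a nodelist is rejected
 */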
3006 #endif /* CONFIG_TMPFS */
3007 
3008 /**
3009  * mpol_to_str - format a mempolicy structure for printing
3010  * @buffer:  to contain formatted mempolicy string
3011  * @maxlen:  length of @buffer
3012  * @pol:  pointer to mempolicy to be formatted
3013  *
3014  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3015  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3016  * longest flag, "relative", and to display at least a few node ids.
3017  */
3018 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3019 {
3020 	char *p = buffer;
3021 	nodemask_t nodes = NODE_MASK_NONE;
3022 	unsigned short mode = MPOL_DEFAULT;
3023 	unsigned short flags = 0;
3024 
3025 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3026 		mode = pol->mode;
3027 		flags = pol->flags;
3028 	}
3029 
3030 	switch (mode) {
3031 	case MPOL_DEFAULT:
3032 		break;
3033 	case MPOL_PREFERRED:
3034 		if (flags & MPOL_F_LOCAL)
3035 			mode = MPOL_LOCAL;
3036 		else
3037 			node_set(pol->v.preferred_node, nodes);
3038 		break;
3039 	case MPOL_BIND:
3040 	case MPOL_INTERLEAVE:
3041 		nodes = pol->v.nodes;
3042 		break;
3043 	default:
3044 		WARN_ON_ONCE(1);
3045 		snprintf(p, maxlen, "unknown");
3046 		return;
3047 	}
3048 
3049 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3050 
3051 	if (flags & MPOL_MODE_FLAGS) {
3052 		p += snprintf(p, buffer + maxlen - p, "=");
3053 
3054 		/*
3055 		 * Currently, the only defined flags are mutually exclusive
3056 		 */
3057 		if (flags & MPOL_F_STATIC_NODES)
3058 			p += snprintf(p, buffer + maxlen - p, "static");
3059 		else if (flags & MPOL_F_RELATIVE_NODES)
3060 			p += snprintf(p, buffer + maxlen - p, "relative");
3061 	}
3062 
3063 	if (!nodes_empty(nodes))
3064 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3065 			       nodemask_pr_args(&nodes));
3066 }
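/*
 * Example outputs (illustrative): the strings parsed above round-trip,
 * e.g. MPOL_INTERLEAVE over nodes 0-3 formats as "interleave:0-3" and
 * MPOL_PREFERRED | MPOL_F_STATIC_NODES on node 1 as "prefer=static:1".
 */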
3067