1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple NUMA memory policy for the Linux kernel.
4  *
5  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy an process counter
20  *                is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind would truly restrict
26  *                the allocation to memory nodes instead
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case NUMA_NO_NODE here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * default        Allocate on the local node first, or when on a VMA
35  *                use the process policy. This is what Linux always did
36  *		  in a NUMA aware kernel and still does by, ahem, default.
37  *
38  * The process policy is applied for most non interrupt memory allocations
39  * in that process' context. Interrupts ignore the policies and always
40  * try to allocate on the local CPU. The VMA policy is only applied for memory
41  * allocations for a VMA in the VM.
42  *
43  * Currently there are a few corner cases in swapping where the policy
44  * is not applied, but the majority should be handled. When process policy
45  * is used it is not remembered over swap outs/swap ins.
46  *
47  * Only the highest zone in the zone hierarchy gets policied. Allocations
48  * requesting a lower zone just use default policy. This implies that
49  * on systems with highmem, kernel lowmem allocations don't get policied.
50  * Same with GFP_DMA allocations.
51  *
52  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53  * all users and remembered even when nobody has memory mapped.
54  */
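
/*
 * Illustrative sketch (node numbers and buffer sizes are made up for the
 * example): userspace would typically select the policies described above
 * through the set_mempolicy() and mbind() syscalls, e.g. via the <numaif.h>
 * wrappers shipped with libnuma.
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	// interleave this task's future allocations across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// or prefer node 0, with normal fallback when it is full
 *	unsigned long node0 = 1UL << 0;
 *	set_mempolicy(MPOL_PREFERRED, &node0, 8 * sizeof(node0));
 */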
55 
56 /* Notebook:
57    fix mmap readahead to honour policy and enable policy for any page cache
58    object
59    statistics for bigpages
60    global policy for page cache? currently it uses process policy. Requires
61    first item above.
62    handle mremap for shared memory (currently ignored for the policy)
63    grows down?
64    make bind policy root only? It can trigger oom much faster and the
65    kernel is not always grateful with that.
66 */
67 
68 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69 
70 #include <linux/mempolicy.h>
71 #include <linux/pagewalk.h>
72 #include <linux/highmem.h>
73 #include <linux/hugetlb.h>
74 #include <linux/kernel.h>
75 #include <linux/sched.h>
76 #include <linux/sched/mm.h>
77 #include <linux/sched/numa_balancing.h>
78 #include <linux/sched/task.h>
79 #include <linux/nodemask.h>
80 #include <linux/cpuset.h>
81 #include <linux/slab.h>
82 #include <linux/string.h>
83 #include <linux/export.h>
84 #include <linux/nsproxy.h>
85 #include <linux/interrupt.h>
86 #include <linux/init.h>
87 #include <linux/compat.h>
88 #include <linux/ptrace.h>
89 #include <linux/swap.h>
90 #include <linux/seq_file.h>
91 #include <linux/proc_fs.h>
92 #include <linux/migrate.h>
93 #include <linux/ksm.h>
94 #include <linux/rmap.h>
95 #include <linux/security.h>
96 #include <linux/syscalls.h>
97 #include <linux/ctype.h>
98 #include <linux/mm_inline.h>
99 #include <linux/mmu_notifier.h>
100 #include <linux/printk.h>
101 #include <linux/swapops.h>
102 
103 #include <asm/tlbflush.h>
104 #include <linux/uaccess.h>
105 
106 #include "internal.h"
107 
108 /* Internal flags */
109 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
110 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111 
112 static struct kmem_cache *policy_cache;
113 static struct kmem_cache *sn_cache;
114 
115 /* Highest zone. A specific allocation for a zone below that is not
116    policied. */
117 enum zone_type policy_zone = 0;
118 
119 /*
120  * run-time system-wide default policy => local allocation
121  */
122 static struct mempolicy default_policy = {
123 	.refcnt = ATOMIC_INIT(1), /* never free it */
124 	.mode = MPOL_PREFERRED,
125 	.flags = MPOL_F_LOCAL,
126 };
127 
128 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
129 
130 struct mempolicy *get_task_policy(struct task_struct *p)
131 {
132 	struct mempolicy *pol = p->mempolicy;
133 	int node;
134 
135 	if (pol)
136 		return pol;
137 
138 	node = numa_node_id();
139 	if (node != NUMA_NO_NODE) {
140 		pol = &preferred_node_policy[node];
141 		/* preferred_node_policy is not initialised early in boot */
142 		if (pol->mode)
143 			return pol;
144 	}
145 
146 	return &default_policy;
147 }
148 
149 static const struct mempolicy_operations {
150 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
152 } mpol_ops[MPOL_MAX];
153 
154 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155 {
156 	return pol->flags & MPOL_MODE_FLAGS;
157 }
158 
159 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
160 				   const nodemask_t *rel)
161 {
162 	nodemask_t tmp;
163 	nodes_fold(tmp, *orig, nodes_weight(*rel));
164 	nodes_onto(*ret, tmp, *rel);
165 }
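
/*
 * Worked example for the helper above (illustrative): with a user-supplied
 * relative mask *orig = {0,2} and a currently allowed set *rel = {4,5,6,7}
 * (weight 4), nodes_fold() leaves {0,2} untouched (both bits are already
 * below 4) and nodes_onto() maps them onto the 0th and 2nd set bits of
 * *rel, giving *ret = {4,6}.  Relative policies thus follow the cpuset
 * around rather than naming absolute node ids.
 */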
166 
167 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
168 {
169 	if (nodes_empty(*nodes))
170 		return -EINVAL;
171 	pol->v.nodes = *nodes;
172 	return 0;
173 }
174 
175 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
176 {
177 	if (!nodes)
178 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
179 	else if (nodes_empty(*nodes))
180 		return -EINVAL;			/*  no allowed nodes */
181 	else
182 		pol->v.preferred_node = first_node(*nodes);
183 	return 0;
184 }
185 
186 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
187 {
188 	if (nodes_empty(*nodes))
189 		return -EINVAL;
190 	pol->v.nodes = *nodes;
191 	return 0;
192 }
193 
194 /*
195  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
196  * any, for the new policy.  mpol_new() has already validated the nodes
197  * parameter with respect to the policy mode and flags.  But, we need to
198  * handle an empty nodemask with MPOL_PREFERRED here.
199  *
200  * Must be called holding task's alloc_lock to protect task's mems_allowed
201  * and mempolicy.  May also be called holding the mmap_semaphore for write.
202  */
203 static int mpol_set_nodemask(struct mempolicy *pol,
204 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
205 {
206 	int ret;
207 
208 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
209 	if (pol == NULL)
210 		return 0;
211 	/* Check N_MEMORY */
212 	nodes_and(nsc->mask1,
213 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
214 
215 	VM_BUG_ON(!nodes);
216 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
217 		nodes = NULL;	/* explicit local allocation */
218 	else {
219 		if (pol->flags & MPOL_F_RELATIVE_NODES)
220 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
221 		else
222 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
223 
224 		if (mpol_store_user_nodemask(pol))
225 			pol->w.user_nodemask = *nodes;
226 		else
227 			pol->w.cpuset_mems_allowed =
228 						cpuset_current_mems_allowed;
229 	}
230 
231 	if (nodes)
232 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
233 	else
234 		ret = mpol_ops[pol->mode].create(pol, NULL);
235 	return ret;
236 }
237 
238 /*
239  * This function just creates a new policy, does some checks and simple
240  * initialization. You must invoke mpol_set_nodemask() to set nodes.
241  */
242 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243 				  nodemask_t *nodes)
244 {
245 	struct mempolicy *policy;
246 
247 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
248 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
249 
250 	if (mode == MPOL_DEFAULT) {
251 		if (nodes && !nodes_empty(*nodes))
252 			return ERR_PTR(-EINVAL);
253 		return NULL;
254 	}
255 	VM_BUG_ON(!nodes);
256 
257 	/*
258 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
259 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
260 	 * All other modes require a valid pointer to a non-empty nodemask.
261 	 */
262 	if (mode == MPOL_PREFERRED) {
263 		if (nodes_empty(*nodes)) {
264 			if (((flags & MPOL_F_STATIC_NODES) ||
265 			     (flags & MPOL_F_RELATIVE_NODES)))
266 				return ERR_PTR(-EINVAL);
267 		}
268 	} else if (mode == MPOL_LOCAL) {
269 		if (!nodes_empty(*nodes) ||
270 		    (flags & MPOL_F_STATIC_NODES) ||
271 		    (flags & MPOL_F_RELATIVE_NODES))
272 			return ERR_PTR(-EINVAL);
273 		mode = MPOL_PREFERRED;
274 	} else if (nodes_empty(*nodes))
275 		return ERR_PTR(-EINVAL);
276 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
277 	if (!policy)
278 		return ERR_PTR(-ENOMEM);
279 	atomic_set(&policy->refcnt, 1);
280 	policy->mode = mode;
281 	policy->flags = flags;
282 
283 	return policy;
284 }
285 
286 /* Slow path of a mpol destructor. */
287 void __mpol_put(struct mempolicy *p)
288 {
289 	if (!atomic_dec_and_test(&p->refcnt))
290 		return;
291 	kmem_cache_free(policy_cache, p);
292 }
293 
294 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
295 {
296 }
297 
298 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
299 {
300 	nodemask_t tmp;
301 
302 	if (pol->flags & MPOL_F_STATIC_NODES)
303 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
304 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
305 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
306 	else {
307 		nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
308 								*nodes);
309 		pol->w.cpuset_mems_allowed = *nodes;
310 	}
311 
312 	if (nodes_empty(tmp))
313 		tmp = *nodes;
314 
315 	pol->v.nodes = tmp;
316 }
317 
318 static void mpol_rebind_preferred(struct mempolicy *pol,
319 						const nodemask_t *nodes)
320 {
321 	nodemask_t tmp;
322 
323 	if (pol->flags & MPOL_F_STATIC_NODES) {
324 		int node = first_node(pol->w.user_nodemask);
325 
326 		if (node_isset(node, *nodes)) {
327 			pol->v.preferred_node = node;
328 			pol->flags &= ~MPOL_F_LOCAL;
329 		} else
330 			pol->flags |= MPOL_F_LOCAL;
331 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
332 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
333 		pol->v.preferred_node = first_node(tmp);
334 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
335 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
336 						   pol->w.cpuset_mems_allowed,
337 						   *nodes);
338 		pol->w.cpuset_mems_allowed = *nodes;
339 	}
340 }
341 
342 /*
343  * mpol_rebind_policy - Migrate a policy to a different set of nodes
344  *
345  * Per-vma policies are protected by mmap_sem. Allocations using per-task
346  * policies are protected by task->mems_allowed_seq to prevent a premature
347  * OOM/allocation failure due to parallel nodemask modification.
348  */
349 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
350 {
351 	if (!pol || pol->mode == MPOL_LOCAL)
352 		return;
353 	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
354 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
355 		return;
356 
357 	mpol_ops[pol->mode].rebind(pol, newmask);
358 }
359 
360 /*
361  * Wrapper for mpol_rebind_policy() that just requires task
362  * pointer, and updates task mempolicy.
363  *
364  * Called with task's alloc_lock held.
365  */
366 
367 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
368 {
369 	mpol_rebind_policy(tsk->mempolicy, new);
370 }
371 
372 /*
373  * Rebind each vma in mm to new nodemask.
374  *
375  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
376  */
377 
378 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
379 {
380 	struct vm_area_struct *vma;
381 
382 	down_write(&mm->mmap_sem);
383 	for (vma = mm->mmap; vma; vma = vma->vm_next)
384 		mpol_rebind_policy(vma->vm_policy, new);
385 	up_write(&mm->mmap_sem);
386 }
387 
388 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
389 	[MPOL_DEFAULT] = {
390 		.rebind = mpol_rebind_default,
391 	},
392 	[MPOL_INTERLEAVE] = {
393 		.create = mpol_new_interleave,
394 		.rebind = mpol_rebind_nodemask,
395 	},
396 	[MPOL_PREFERRED] = {
397 		.create = mpol_new_preferred,
398 		.rebind = mpol_rebind_preferred,
399 	},
400 	[MPOL_BIND] = {
401 		.create = mpol_new_bind,
402 		.rebind = mpol_rebind_nodemask,
403 	},
404 };
405 
406 static int migrate_page_add(struct page *page, struct list_head *pagelist,
407 				unsigned long flags);
408 
409 struct queue_pages {
410 	struct list_head *pagelist;
411 	unsigned long flags;
412 	nodemask_t *nmask;
413 	struct vm_area_struct *prev;
414 };
415 
416 /*
417  * Check if the page's nid is in qp->nmask.
418  *
419  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
420  * in the invert of qp->nmask.
421  */
422 static inline bool queue_pages_required(struct page *page,
423 					struct queue_pages *qp)
424 {
425 	int nid = page_to_nid(page);
426 	unsigned long flags = qp->flags;
427 
428 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
429 }
430 
431 /*
432  * queue_pages_pmd() has four possible return values:
433  * 0 - pages are placed on the right node or queued successfully.
434  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
435  *     specified.
436  * 2 - THP was split.
437  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
438  *        existing page was already on a node that does not follow the
439  *        policy.
440  */
441 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
442 				unsigned long end, struct mm_walk *walk)
443 {
444 	int ret = 0;
445 	struct page *page;
446 	struct queue_pages *qp = walk->private;
447 	unsigned long flags;
448 
449 	if (unlikely(is_pmd_migration_entry(*pmd))) {
450 		ret = -EIO;
451 		goto unlock;
452 	}
453 	page = pmd_page(*pmd);
454 	if (is_huge_zero_page(page)) {
455 		spin_unlock(ptl);
456 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
457 		ret = 2;
458 		goto out;
459 	}
460 	if (!queue_pages_required(page, qp))
461 		goto unlock;
462 
463 	flags = qp->flags;
464 	/* go to thp migration */
465 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
466 		if (!vma_migratable(walk->vma) ||
467 		    migrate_page_add(page, qp->pagelist, flags)) {
468 			ret = 1;
469 			goto unlock;
470 		}
471 	} else
472 		ret = -EIO;
473 unlock:
474 	spin_unlock(ptl);
475 out:
476 	return ret;
477 }
478 
479 /*
480  * Scan through pages checking if pages follow certain conditions,
481  * and move them to the pagelist if they do.
482  *
483  * queue_pages_pte_range() has three possible return values:
484  * 0 - pages are placed on the right node or queued successfully.
485  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
486  *     specified.
487  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
488  *        on a node that does not follow the policy.
489  */
490 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
491 			unsigned long end, struct mm_walk *walk)
492 {
493 	struct vm_area_struct *vma = walk->vma;
494 	struct page *page;
495 	struct queue_pages *qp = walk->private;
496 	unsigned long flags = qp->flags;
497 	int ret;
498 	bool has_unmovable = false;
499 	pte_t *pte, *mapped_pte;
500 	spinlock_t *ptl;
501 
502 	ptl = pmd_trans_huge_lock(pmd, vma);
503 	if (ptl) {
504 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
505 		if (ret != 2)
506 			return ret;
507 	}
508 	/* THP was split, fall through to pte walk */
509 
510 	if (pmd_trans_unstable(pmd))
511 		return 0;
512 
513 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
514 	for (; addr != end; pte++, addr += PAGE_SIZE) {
515 		if (!pte_present(*pte))
516 			continue;
517 		page = vm_normal_page(vma, addr, *pte);
518 		if (!page)
519 			continue;
520 		/*
521 		 * vm_normal_page() filters out zero pages, but there might
522 		 * still be PageReserved pages to skip, perhaps in a VDSO.
523 		 */
524 		if (PageReserved(page))
525 			continue;
526 		if (!queue_pages_required(page, qp))
527 			continue;
528 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
529 			/* MPOL_MF_STRICT must be specified if we get here */
530 			if (!vma_migratable(vma)) {
531 				has_unmovable = true;
532 				break;
533 			}
534 
535 			/*
536 			 * Do not abort immediately since there may be
537 			 * temporarily off-LRU pages in the range.  We still
538 			 * need to migrate the other LRU pages.
539 			 */
540 			if (migrate_page_add(page, qp->pagelist, flags))
541 				has_unmovable = true;
542 		} else
543 			break;
544 	}
545 	pte_unmap_unlock(mapped_pte, ptl);
546 	cond_resched();
547 
548 	if (has_unmovable)
549 		return 1;
550 
551 	return addr != end ? -EIO : 0;
552 }
553 
554 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
555 			       unsigned long addr, unsigned long end,
556 			       struct mm_walk *walk)
557 {
558 #ifdef CONFIG_HUGETLB_PAGE
559 	struct queue_pages *qp = walk->private;
560 	unsigned long flags = qp->flags;
561 	struct page *page;
562 	spinlock_t *ptl;
563 	pte_t entry;
564 
565 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
566 	entry = huge_ptep_get(pte);
567 	if (!pte_present(entry))
568 		goto unlock;
569 	page = pte_page(entry);
570 	if (!queue_pages_required(page, qp))
571 		goto unlock;
572 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
573 	if (flags & (MPOL_MF_MOVE_ALL) ||
574 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
575 	     !hugetlb_pmd_shared(pte)))
576 		isolate_huge_page(page, qp->pagelist);
577 unlock:
578 	spin_unlock(ptl);
579 #else
580 	BUG();
581 #endif
582 	return 0;
583 }
584 
585 #ifdef CONFIG_NUMA_BALANCING
586 /*
587  * This is used to mark a range of virtual addresses to be inaccessible.
588  * These are later cleared by a NUMA hinting fault. Depending on these
589  * faults, pages may be migrated for better NUMA placement.
590  *
591  * This is assuming that NUMA faults are handled using PROT_NONE. If
592  * an architecture makes a different choice, it will need further
593  * changes to the core.
594  */
595 unsigned long change_prot_numa(struct vm_area_struct *vma,
596 			unsigned long addr, unsigned long end)
597 {
598 	int nr_updated;
599 
600 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
601 	if (nr_updated)
602 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
603 
604 	return nr_updated;
605 }
606 #else
607 static unsigned long change_prot_numa(struct vm_area_struct *vma,
608 			unsigned long addr, unsigned long end)
609 {
610 	return 0;
611 }
612 #endif /* CONFIG_NUMA_BALANCING */
613 
614 static int queue_pages_test_walk(unsigned long start, unsigned long end,
615 				struct mm_walk *walk)
616 {
617 	struct vm_area_struct *vma = walk->vma;
618 	struct queue_pages *qp = walk->private;
619 	unsigned long endvma = vma->vm_end;
620 	unsigned long flags = qp->flags;
621 
622 	/*
623 	 * Need to check MPOL_MF_STRICT to return -EIO if possible,
624 	 * regardless of vma_migratable
625 	 */
626 	if (!vma_migratable(vma) &&
627 	    !(flags & MPOL_MF_STRICT))
628 		return 1;
629 
630 	if (endvma > end)
631 		endvma = end;
632 	if (vma->vm_start > start)
633 		start = vma->vm_start;
634 
635 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
636 		if (!vma->vm_next && vma->vm_end < end)
637 			return -EFAULT;
638 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
639 			return -EFAULT;
640 	}
641 
642 	qp->prev = vma;
643 
644 	if (flags & MPOL_MF_LAZY) {
645 		/* Similar to task_numa_work, skip inaccessible VMAs */
646 		if (!is_vm_hugetlb_page(vma) &&
647 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
648 			!(vma->vm_flags & VM_MIXEDMAP))
649 			change_prot_numa(vma, start, endvma);
650 		return 1;
651 	}
652 
653 	/* queue pages from current vma */
654 	if (flags & MPOL_MF_VALID)
655 		return 0;
656 	return 1;
657 }
658 
659 static const struct mm_walk_ops queue_pages_walk_ops = {
660 	.hugetlb_entry		= queue_pages_hugetlb,
661 	.pmd_entry		= queue_pages_pte_range,
662 	.test_walk		= queue_pages_test_walk,
663 };
664 
665 /*
666  * Walk through page tables and collect pages to be migrated.
667  *
668  * If pages found in a given range are on a set of nodes (determined by
669  * @nodes and @flags), they are isolated and queued to the pagelist which is
670  * passed via @private.
671  *
672  * queue_pages_range() has three possible return values:
673  * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
674  *     specified.
675  * 0 - queue pages successfully or no misplaced page.
676  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
677  *         memory range specified by nodemask and maxnode points outside
678  *         your accessible address space (-EFAULT)
679  */
680 static int
681 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
682 		nodemask_t *nodes, unsigned long flags,
683 		struct list_head *pagelist)
684 {
685 	struct queue_pages qp = {
686 		.pagelist = pagelist,
687 		.flags = flags,
688 		.nmask = nodes,
689 		.prev = NULL,
690 	};
691 
692 	return walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
693 }
694 
695 /*
696  * Apply policy to a single VMA
697  * This must be called with the mmap_sem held for writing.
698  */
699 static int vma_replace_policy(struct vm_area_struct *vma,
700 						struct mempolicy *pol)
701 {
702 	int err;
703 	struct mempolicy *old;
704 	struct mempolicy *new;
705 
706 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
707 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
708 		 vma->vm_ops, vma->vm_file,
709 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
710 
711 	new = mpol_dup(pol);
712 	if (IS_ERR(new))
713 		return PTR_ERR(new);
714 
715 	if (vma->vm_ops && vma->vm_ops->set_policy) {
716 		err = vma->vm_ops->set_policy(vma, new);
717 		if (err)
718 			goto err_out;
719 	}
720 
721 	old = vma->vm_policy;
722 	vma->vm_policy = new; /* protected by mmap_sem */
723 	mpol_put(old);
724 
725 	return 0;
726  err_out:
727 	mpol_put(new);
728 	return err;
729 }
730 
731 /* Step 2: apply policy to a range and do splits. */
732 static int mbind_range(struct mm_struct *mm, unsigned long start,
733 		       unsigned long end, struct mempolicy *new_pol)
734 {
735 	struct vm_area_struct *prev;
736 	struct vm_area_struct *vma;
737 	int err = 0;
738 	pgoff_t pgoff;
739 	unsigned long vmstart;
740 	unsigned long vmend;
741 
742 	vma = find_vma(mm, start);
743 	if (!vma || vma->vm_start > start)
744 		return -EFAULT;
745 
746 	prev = vma->vm_prev;
747 	if (start > vma->vm_start)
748 		prev = vma;
749 
750 	for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {
751 		vmstart = max(start, vma->vm_start);
752 		vmend   = min(end, vma->vm_end);
753 
754 		if (mpol_equal(vma_policy(vma), new_pol))
755 			continue;
756 
757 		pgoff = vma->vm_pgoff +
758 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
759 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
760 				 vma->anon_vma, vma->vm_file, pgoff,
761 				 new_pol, vma->vm_userfaultfd_ctx,
762 				 vma_get_anon_name(vma));
763 		if (prev) {
764 			vma = prev;
765 			goto replace;
766 		}
767 		if (vma->vm_start != vmstart) {
768 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
769 			if (err)
770 				goto out;
771 		}
772 		if (vma->vm_end != vmend) {
773 			err = split_vma(vma->vm_mm, vma, vmend, 0);
774 			if (err)
775 				goto out;
776 		}
777  replace:
778 		err = vma_replace_policy(vma, new_pol);
779 		if (err)
780 			goto out;
781 	}
782 
783  out:
784 	return err;
785 }
786 
787 /* Set the process memory policy */
788 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
789 			     nodemask_t *nodes)
790 {
791 	struct mempolicy *new, *old;
792 	NODEMASK_SCRATCH(scratch);
793 	int ret;
794 
795 	if (!scratch)
796 		return -ENOMEM;
797 
798 	new = mpol_new(mode, flags, nodes);
799 	if (IS_ERR(new)) {
800 		ret = PTR_ERR(new);
801 		goto out;
802 	}
803 
804 	task_lock(current);
805 	ret = mpol_set_nodemask(new, nodes, scratch);
806 	if (ret) {
807 		task_unlock(current);
808 		mpol_put(new);
809 		goto out;
810 	}
811 	old = current->mempolicy;
812 	current->mempolicy = new;
813 	if (new && new->mode == MPOL_INTERLEAVE)
814 		current->il_prev = MAX_NUMNODES-1;
815 	task_unlock(current);
816 	mpol_put(old);
817 	ret = 0;
818 out:
819 	NODEMASK_SCRATCH_FREE(scratch);
820 	return ret;
821 }
822 
823 /*
824  * Return nodemask for policy for get_mempolicy() query
825  *
826  * Called with task's alloc_lock held
827  */
828 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
829 {
830 	nodes_clear(*nodes);
831 	if (p == &default_policy)
832 		return;
833 
834 	switch (p->mode) {
835 	case MPOL_BIND:
836 		/* Fall through */
837 	case MPOL_INTERLEAVE:
838 		*nodes = p->v.nodes;
839 		break;
840 	case MPOL_PREFERRED:
841 		if (!(p->flags & MPOL_F_LOCAL))
842 			node_set(p->v.preferred_node, *nodes);
843 		/* else return empty node mask for local allocation */
844 		break;
845 	default:
846 		BUG();
847 	}
848 }
849 
850 static int lookup_node(struct mm_struct *mm, unsigned long addr)
851 {
852 	struct page *p;
853 	int err;
854 
855 	int locked = 1;
856 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
857 	if (err >= 0) {
858 		err = page_to_nid(p);
859 		put_page(p);
860 	}
861 	if (locked)
862 		up_read(&mm->mmap_sem);
863 	return err;
864 }
865 
866 /* Retrieve NUMA policy */
867 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
868 			     unsigned long addr, unsigned long flags)
869 {
870 	int err;
871 	struct mm_struct *mm = current->mm;
872 	struct vm_area_struct *vma = NULL;
873 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
874 
875 	if (flags &
876 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
877 		return -EINVAL;
878 
879 	if (flags & MPOL_F_MEMS_ALLOWED) {
880 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
881 			return -EINVAL;
882 		*policy = 0;	/* just so it's initialized */
883 		task_lock(current);
884 		*nmask  = cpuset_current_mems_allowed;
885 		task_unlock(current);
886 		return 0;
887 	}
888 
889 	if (flags & MPOL_F_ADDR) {
890 		/*
891 		 * Do NOT fall back to task policy if the
892 		 * vma/shared policy at addr is NULL.  We
893 		 * want to return MPOL_DEFAULT in this case.
894 		 */
895 		down_read(&mm->mmap_sem);
896 		vma = find_vma_intersection(mm, addr, addr+1);
897 		if (!vma) {
898 			up_read(&mm->mmap_sem);
899 			return -EFAULT;
900 		}
901 		if (vma->vm_ops && vma->vm_ops->get_policy)
902 			pol = vma->vm_ops->get_policy(vma, addr);
903 		else
904 			pol = vma->vm_policy;
905 	} else if (addr)
906 		return -EINVAL;
907 
908 	if (!pol)
909 		pol = &default_policy;	/* indicates default behavior */
910 
911 	if (flags & MPOL_F_NODE) {
912 		if (flags & MPOL_F_ADDR) {
913 			/*
914 			 * Take a refcount on the mpol, lookup_node()
915 			 * will drop the mmap_sem, so after calling
916 			 * lookup_node() only "pol" remains valid, "vma"
917 			 * is stale.
918 			 */
919 			pol_refcount = pol;
920 			vma = NULL;
921 			mpol_get(pol);
922 			err = lookup_node(mm, addr);
923 			if (err < 0)
924 				goto out;
925 			*policy = err;
926 		} else if (pol == current->mempolicy &&
927 				pol->mode == MPOL_INTERLEAVE) {
928 			*policy = next_node_in(current->il_prev, pol->v.nodes);
929 		} else {
930 			err = -EINVAL;
931 			goto out;
932 		}
933 	} else {
934 		*policy = pol == &default_policy ? MPOL_DEFAULT :
935 						pol->mode;
936 		/*
937 		 * Internal mempolicy flags must be masked off before exposing
938 		 * the policy to userspace.
939 		 */
940 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
941 	}
942 
943 	err = 0;
944 	if (nmask) {
945 		if (mpol_store_user_nodemask(pol)) {
946 			*nmask = pol->w.user_nodemask;
947 		} else {
948 			task_lock(current);
949 			get_policy_nodemask(pol, nmask);
950 			task_unlock(current);
951 		}
952 	}
953 
954  out:
955 	mpol_cond_put(pol);
956 	if (vma)
957 		up_read(&mm->mmap_sem);
958 	if (pol_refcount)
959 		mpol_put(pol_refcount);
960 	return err;
961 }
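
/*
 * Illustrative userspace view of the flag handling above (the buffer size
 * and the mapped address "addr" are assumptions for the example):
 *
 *	int node;
 *	// which node currently backs the page at addr?
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 *	// which nodes is the calling task allowed to allocate from?
 *	unsigned long mask[16];				// room for 1024 node bits
 *	get_mempolicy(NULL, mask, 8 * sizeof(mask), NULL, MPOL_F_MEMS_ALLOWED);
 */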
962 
963 #ifdef CONFIG_MIGRATION
964 /*
965  * page migration, thp tail pages can be passed.
966  */
967 static int migrate_page_add(struct page *page, struct list_head *pagelist,
968 				unsigned long flags)
969 {
970 	struct page *head = compound_head(page);
971 	/*
972 	 * Avoid migrating a page that is shared with others.
973 	 */
974 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
975 		if (!isolate_lru_page(head)) {
976 			list_add_tail(&head->lru, pagelist);
977 			mod_node_page_state(page_pgdat(head),
978 				NR_ISOLATED_ANON + page_is_file_cache(head),
979 				hpage_nr_pages(head));
980 		} else if (flags & MPOL_MF_STRICT) {
981 			/*
982 			 * Non-movable page may reach here.  And, there may be
983 			 * temporary off LRU pages or non-LRU movable pages.
984 			 * Treat them as unmovable pages since they can't be
985 			 * isolated, so they can't be moved at the moment.  It
986 			 * should return -EIO for this case too.
987 			 */
988 			return -EIO;
989 		}
990 	}
991 
992 	return 0;
993 }
994 
995 /* page allocation callback for NUMA node migration */
996 struct page *alloc_new_node_page(struct page *page, unsigned long node)
997 {
998 	if (PageHuge(page))
999 		return alloc_huge_page_node(page_hstate(compound_head(page)),
1000 					node);
1001 	else if (PageTransHuge(page)) {
1002 		struct page *thp;
1003 
1004 		thp = alloc_pages_node(node,
1005 			(GFP_TRANSHUGE | __GFP_THISNODE),
1006 			HPAGE_PMD_ORDER);
1007 		if (!thp)
1008 			return NULL;
1009 		prep_transhuge_page(thp);
1010 		return thp;
1011 	} else
1012 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
1013 						    __GFP_THISNODE, 0);
1014 }
1015 
1016 /*
1017  * Migrate pages from one node to a target node.
1018  * Returns error or the number of pages not migrated.
1019  */
1020 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1021 			   int flags)
1022 {
1023 	nodemask_t nmask;
1024 	LIST_HEAD(pagelist);
1025 	int err = 0;
1026 
1027 	nodes_clear(nmask);
1028 	node_set(source, nmask);
1029 
1030 	/*
1031 	 * This does not "check" the range but isolates all pages that
1032 	 * need migration.  Between passing in the full user address
1033 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1034 	 */
1035 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1036 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1037 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1038 
1039 	if (!list_empty(&pagelist)) {
1040 		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
1041 					MIGRATE_SYNC, MR_SYSCALL);
1042 		if (err)
1043 			putback_movable_pages(&pagelist);
1044 	}
1045 
1046 	return err;
1047 }
1048 
1049 /*
1050  * Move pages between the two nodesets so as to preserve the physical
1051  * layout as much as possible.
1052  *
1053  * Returns the number of pages that could not be moved.
1054  */
1055 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1056 		     const nodemask_t *to, int flags)
1057 {
1058 	int busy = 0;
1059 	int err;
1060 	nodemask_t tmp;
1061 
1062 	err = migrate_prep();
1063 	if (err)
1064 		return err;
1065 
1066 	down_read(&mm->mmap_sem);
1067 
1068 	/*
1069 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1070 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1071 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1072 	 * The pair of nodemasks 'to' and 'from' define the map.
1073 	 *
1074 	 * If no pair of bits is found that way, fallback to picking some
1075 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1076 	 * 'source' and 'dest' bits are the same, this represents a node
1077 	 * that will be migrating to itself, so no pages need move.
1078 	 *
1079 	 * If no bits are left in 'tmp', or if all remaining bits left
1080 	 * in 'tmp' correspond to the same bit in 'to', return false
1081 	 * (nothing left to migrate).
1082 	 *
1083 	 * This lets us pick a pair of nodes to migrate between, such that
1084 	 * if possible the dest node is not already occupied by some other
1085 	 * source node, minimizing the risk of overloading the memory on a
1086 	 * node that would happen if we migrated incoming memory to a node
1087 	 * before migrating outgoing memory source that same node.
1088 	 *
1089 	 * A single scan of tmp is sufficient.  As we go, we remember the
1090 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1091 	 * that not only moved, but what's better, moved to an empty slot
1092 	 * (d is not set in tmp), then we break out then, with that pair.
1093 	 * Otherwise when we finish scanning from_tmp, we at least have the
1094 	 * most recent <s, d> pair that moved.  If we get all the way through
1095 	 * the scan of tmp without finding any node that moved, much less
1096 	 * moved to an empty node, then there is nothing left worth migrating.
1097 	 */
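
	/*
	 * Worked example (illustrative): with *from = {0,1} and *to = {1,2},
	 * the first scan maps s=0 to d=1, but node 1 is still a pending
	 * source in tmp, so the scan keeps going and settles on the pair
	 * <1,2> (node 2 is not a remaining source).  Node 1 is drained into
	 * node 2 first; the next pass then moves node 0 onto the now-emptier
	 * node 1.
	 */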
1098 
1099 	tmp = *from;
1100 	while (!nodes_empty(tmp)) {
1101 		int s,d;
1102 		int source = NUMA_NO_NODE;
1103 		int dest = 0;
1104 
1105 		for_each_node_mask(s, tmp) {
1106 
1107 			/*
1108 			 * do_migrate_pages() tries to maintain the relative
1109 			 * node relationship of the pages established between
1110 			 * threads and memory areas.
1111                          *
1112 			 * However if the number of source nodes is not equal to
1113 			 * the number of destination nodes we can not preserve
1114 			 * this node relative relationship.  In that case, skip
1115 			 * copying memory from a node that is in the destination
1116 			 * mask.
1117 			 *
1118 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1119 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1120 			 */
1121 
1122 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1123 						(node_isset(s, *to)))
1124 				continue;
1125 
1126 			d = node_remap(s, *from, *to);
1127 			if (s == d)
1128 				continue;
1129 
1130 			source = s;	/* Node moved. Memorize */
1131 			dest = d;
1132 
1133 			/* dest not in remaining from nodes? */
1134 			if (!node_isset(dest, tmp))
1135 				break;
1136 		}
1137 		if (source == NUMA_NO_NODE)
1138 			break;
1139 
1140 		node_clear(source, tmp);
1141 		err = migrate_to_node(mm, source, dest, flags);
1142 		if (err > 0)
1143 			busy += err;
1144 		if (err < 0)
1145 			break;
1146 	}
1147 	up_read(&mm->mmap_sem);
1148 	if (err < 0)
1149 		return err;
1150 	return busy;
1151 
1152 }
1153 
1154 /*
1155  * Allocate a new page for page migration based on vma policy.
1156  * Start by assuming the page is mapped by the same vma as contains @start.
1157  * Search forward from there, if not.  N.B., this assumes that the
1158  * list of pages handed to migrate_pages()--which is how we get here--
1159  * is in virtual address order.
1160  */
1161 static struct page *new_page(struct page *page, unsigned long start)
1162 {
1163 	struct vm_area_struct *vma;
1164 	unsigned long address;
1165 
1166 	vma = find_vma(current->mm, start);
1167 	while (vma) {
1168 		address = page_address_in_vma(page, vma);
1169 		if (address != -EFAULT)
1170 			break;
1171 		vma = vma->vm_next;
1172 	}
1173 
1174 	if (PageHuge(page)) {
1175 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1176 				vma, address);
1177 	} else if (PageTransHuge(page)) {
1178 		struct page *thp;
1179 
1180 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1181 					 HPAGE_PMD_ORDER);
1182 		if (!thp)
1183 			return NULL;
1184 		prep_transhuge_page(thp);
1185 		return thp;
1186 	}
1187 	/*
1188 	 * if !vma, alloc_page_vma() will use task or system default policy
1189 	 */
1190 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1191 			vma, address);
1192 }
1193 #else
1194 
1195 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1196 				unsigned long flags)
1197 {
1198 	return -EIO;
1199 }
1200 
1201 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1202 		     const nodemask_t *to, int flags)
1203 {
1204 	return -ENOSYS;
1205 }
1206 
1207 static struct page *new_page(struct page *page, unsigned long start)
1208 {
1209 	return NULL;
1210 }
1211 #endif
1212 
1213 static long do_mbind(unsigned long start, unsigned long len,
1214 		     unsigned short mode, unsigned short mode_flags,
1215 		     nodemask_t *nmask, unsigned long flags)
1216 {
1217 	struct mm_struct *mm = current->mm;
1218 	struct mempolicy *new;
1219 	unsigned long end;
1220 	int err;
1221 	int ret;
1222 	LIST_HEAD(pagelist);
1223 
1224 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1225 		return -EINVAL;
1226 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1227 		return -EPERM;
1228 
1229 	if (start & ~PAGE_MASK)
1230 		return -EINVAL;
1231 
1232 	if (mode == MPOL_DEFAULT)
1233 		flags &= ~MPOL_MF_STRICT;
1234 
1235 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1236 	end = start + len;
1237 
1238 	if (end < start)
1239 		return -EINVAL;
1240 	if (end == start)
1241 		return 0;
1242 
1243 	new = mpol_new(mode, mode_flags, nmask);
1244 	if (IS_ERR(new))
1245 		return PTR_ERR(new);
1246 
1247 	if (flags & MPOL_MF_LAZY)
1248 		new->flags |= MPOL_F_MOF;
1249 
1250 	/*
1251 	 * If we are using the default policy then operation
1252 	 * on discontinuous address spaces is okay after all
1253 	 */
1254 	if (!new)
1255 		flags |= MPOL_MF_DISCONTIG_OK;
1256 
1257 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1258 		 start, start + len, mode, mode_flags,
1259 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1260 
1261 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1262 
1263 		err = migrate_prep();
1264 		if (err)
1265 			goto mpol_out;
1266 	}
1267 	{
1268 		NODEMASK_SCRATCH(scratch);
1269 		if (scratch) {
1270 			down_write(&mm->mmap_sem);
1271 			task_lock(current);
1272 			err = mpol_set_nodemask(new, nmask, scratch);
1273 			task_unlock(current);
1274 			if (err)
1275 				up_write(&mm->mmap_sem);
1276 		} else
1277 			err = -ENOMEM;
1278 		NODEMASK_SCRATCH_FREE(scratch);
1279 	}
1280 	if (err)
1281 		goto mpol_out;
1282 
1283 	ret = queue_pages_range(mm, start, end, nmask,
1284 			  flags | MPOL_MF_INVERT, &pagelist);
1285 
1286 	if (ret < 0) {
1287 		err = ret;
1288 		goto up_out;
1289 	}
1290 
1291 	err = mbind_range(mm, start, end, new);
1292 
1293 	if (!err) {
1294 		int nr_failed = 0;
1295 
1296 		if (!list_empty(&pagelist)) {
1297 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1298 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1299 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1300 			if (nr_failed)
1301 				putback_movable_pages(&pagelist);
1302 		}
1303 
1304 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1305 			err = -EIO;
1306 	} else {
1307 up_out:
1308 		if (!list_empty(&pagelist))
1309 			putback_movable_pages(&pagelist);
1310 	}
1311 
1312 	up_write(&mm->mmap_sem);
1313 mpol_out:
1314 	mpol_put(new);
1315 	return err;
1316 }
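
/*
 * Illustrative userspace counterpart of do_mbind(), reached via the mbind()
 * syscall below ("buf" and "len" are assumed to describe an existing
 * mapping):
 *
 *	unsigned long nodes = 1UL << 1;			// node 1 only
 *	if (mbind(buf, len, MPOL_BIND, &nodes, 8 * sizeof(nodes),
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");	// e.g. EIO when some pages could not be moved
 */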
1317 
1318 /*
1319  * User space interface with variable sized bitmaps for nodelists.
1320  */
1321 
1322 /* Copy a node mask from user space. */
1323 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1324 		     unsigned long maxnode)
1325 {
1326 	unsigned long k;
1327 	unsigned long t;
1328 	unsigned long nlongs;
1329 	unsigned long endmask;
1330 
1331 	--maxnode;
1332 	nodes_clear(*nodes);
1333 	if (maxnode == 0 || !nmask)
1334 		return 0;
1335 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1336 		return -EINVAL;
1337 
1338 	nlongs = BITS_TO_LONGS(maxnode);
1339 	if ((maxnode % BITS_PER_LONG) == 0)
1340 		endmask = ~0UL;
1341 	else
1342 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1343 
1344 	/*
1345 	 * When the user specified more nodes than supported, just check
1346 	 * that the unsupported part is all zero.
1347 	 *
1348 	 * If maxnode covers more longs than MAX_NUMNODES, check
1349 	 * the bits in that area first, and then go through to
1350 	 * check the remaining bits, which are equal to or bigger than MAX_NUMNODES.
1351 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1352 	 */
1353 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1354 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1355 			if (get_user(t, nmask + k))
1356 				return -EFAULT;
1357 			if (k == nlongs - 1) {
1358 				if (t & endmask)
1359 					return -EINVAL;
1360 			} else if (t)
1361 				return -EINVAL;
1362 		}
1363 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1364 		endmask = ~0UL;
1365 	}
1366 
1367 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1368 		unsigned long valid_mask = endmask;
1369 
1370 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1371 		if (get_user(t, nmask + nlongs - 1))
1372 			return -EFAULT;
1373 		if (t & valid_mask)
1374 			return -EINVAL;
1375 	}
1376 
1377 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1378 		return -EFAULT;
1379 	nodes_addr(*nodes)[nlongs-1] &= endmask;
1380 	return 0;
1381 }
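
/*
 * Worked example for get_nodes() (illustrative): a caller passing
 * maxnode = 9 describes an 8-bit mask.  After the --maxnode above,
 * nlongs = BITS_TO_LONGS(8) = 1 and endmask = (1UL << 8) - 1 = 0xff, so a
 * single long is copied from userspace and any bits above node 7 are
 * cleared by the final "&= endmask".
 */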
1382 
1383 /* Copy a kernel node mask to user space */
1384 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1385 			      nodemask_t *nodes)
1386 {
1387 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1388 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1389 
1390 	if (copy > nbytes) {
1391 		if (copy > PAGE_SIZE)
1392 			return -EINVAL;
1393 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1394 			return -EFAULT;
1395 		copy = nbytes;
1396 	}
1397 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1398 }
1399 
1400 static long kernel_mbind(unsigned long start, unsigned long len,
1401 			 unsigned long mode, const unsigned long __user *nmask,
1402 			 unsigned long maxnode, unsigned int flags)
1403 {
1404 	nodemask_t nodes;
1405 	int err;
1406 	unsigned short mode_flags;
1407 
1408 	start = untagged_addr(start);
1409 	mode_flags = mode & MPOL_MODE_FLAGS;
1410 	mode &= ~MPOL_MODE_FLAGS;
1411 	if (mode >= MPOL_MAX)
1412 		return -EINVAL;
1413 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1414 	    (mode_flags & MPOL_F_RELATIVE_NODES))
1415 		return -EINVAL;
1416 	err = get_nodes(&nodes, nmask, maxnode);
1417 	if (err)
1418 		return err;
1419 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1420 }
1421 
1422 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1423 		unsigned long, mode, const unsigned long __user *, nmask,
1424 		unsigned long, maxnode, unsigned int, flags)
1425 {
1426 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1427 }
1428 
1429 /* Set the process memory policy */
1430 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1431 				 unsigned long maxnode)
1432 {
1433 	int err;
1434 	nodemask_t nodes;
1435 	unsigned short flags;
1436 
1437 	flags = mode & MPOL_MODE_FLAGS;
1438 	mode &= ~MPOL_MODE_FLAGS;
1439 	if ((unsigned int)mode >= MPOL_MAX)
1440 		return -EINVAL;
1441 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1442 		return -EINVAL;
1443 	err = get_nodes(&nodes, nmask, maxnode);
1444 	if (err)
1445 		return err;
1446 	return do_set_mempolicy(mode, flags, &nodes);
1447 }
1448 
1449 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1450 		unsigned long, maxnode)
1451 {
1452 	return kernel_set_mempolicy(mode, nmask, maxnode);
1453 }
1454 
1455 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1456 				const unsigned long __user *old_nodes,
1457 				const unsigned long __user *new_nodes)
1458 {
1459 	struct mm_struct *mm = NULL;
1460 	struct task_struct *task;
1461 	nodemask_t task_nodes;
1462 	int err;
1463 	nodemask_t *old;
1464 	nodemask_t *new;
1465 	NODEMASK_SCRATCH(scratch);
1466 
1467 	if (!scratch)
1468 		return -ENOMEM;
1469 
1470 	old = &scratch->mask1;
1471 	new = &scratch->mask2;
1472 
1473 	err = get_nodes(old, old_nodes, maxnode);
1474 	if (err)
1475 		goto out;
1476 
1477 	err = get_nodes(new, new_nodes, maxnode);
1478 	if (err)
1479 		goto out;
1480 
1481 	/* Find the mm_struct */
1482 	rcu_read_lock();
1483 	task = pid ? find_task_by_vpid(pid) : current;
1484 	if (!task) {
1485 		rcu_read_unlock();
1486 		err = -ESRCH;
1487 		goto out;
1488 	}
1489 	get_task_struct(task);
1490 
1491 	err = -EINVAL;
1492 
1493 	/*
1494 	 * Check if this process has the right to modify the specified process.
1495 	 * Use the regular "ptrace_may_access()" checks.
1496 	 */
1497 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1498 		rcu_read_unlock();
1499 		err = -EPERM;
1500 		goto out_put;
1501 	}
1502 	rcu_read_unlock();
1503 
1504 	task_nodes = cpuset_mems_allowed(task);
1505 	/* Is the user allowed to access the target nodes? */
1506 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1507 		err = -EPERM;
1508 		goto out_put;
1509 	}
1510 
1511 	task_nodes = cpuset_mems_allowed(current);
1512 	nodes_and(*new, *new, task_nodes);
1513 	if (nodes_empty(*new))
1514 		goto out_put;
1515 
1516 	err = security_task_movememory(task);
1517 	if (err)
1518 		goto out_put;
1519 
1520 	mm = get_task_mm(task);
1521 	put_task_struct(task);
1522 
1523 	if (!mm) {
1524 		err = -EINVAL;
1525 		goto out;
1526 	}
1527 
1528 	err = do_migrate_pages(mm, old, new,
1529 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1530 
1531 	mmput(mm);
1532 out:
1533 	NODEMASK_SCRATCH_FREE(scratch);
1534 
1535 	return err;
1536 
1537 out_put:
1538 	put_task_struct(task);
1539 	goto out;
1540 
1541 }
1542 
1543 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1544 		const unsigned long __user *, old_nodes,
1545 		const unsigned long __user *, new_nodes)
1546 {
1547 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1548 }
1549 
1550 
1551 /* Retrieve NUMA policy */
1552 static int kernel_get_mempolicy(int __user *policy,
1553 				unsigned long __user *nmask,
1554 				unsigned long maxnode,
1555 				unsigned long addr,
1556 				unsigned long flags)
1557 {
1558 	int err;
1559 	int pval;
1560 	nodemask_t nodes;
1561 
1562 	addr = untagged_addr(addr);
1563 
1564 	if (nmask != NULL && maxnode < nr_node_ids)
1565 		return -EINVAL;
1566 
1567 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1568 
1569 	if (err)
1570 		return err;
1571 
1572 	if (policy && put_user(pval, policy))
1573 		return -EFAULT;
1574 
1575 	if (nmask)
1576 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1577 
1578 	return err;
1579 }
1580 
1581 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1582 		unsigned long __user *, nmask, unsigned long, maxnode,
1583 		unsigned long, addr, unsigned long, flags)
1584 {
1585 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1586 }
1587 
1588 #ifdef CONFIG_COMPAT
1589 
1590 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1591 		       compat_ulong_t __user *, nmask,
1592 		       compat_ulong_t, maxnode,
1593 		       compat_ulong_t, addr, compat_ulong_t, flags)
1594 {
1595 	long err;
1596 	unsigned long __user *nm = NULL;
1597 	unsigned long nr_bits, alloc_size;
1598 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1599 
1600 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1601 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1602 
1603 	if (nmask)
1604 		nm = compat_alloc_user_space(alloc_size);
1605 
1606 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1607 
1608 	if (!err && nmask) {
1609 		unsigned long copy_size;
1610 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1611 		err = copy_from_user(bm, nm, copy_size);
1612 		/* ensure entire bitmap is zeroed */
1613 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1614 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1615 	}
1616 
1617 	return err;
1618 }
1619 
1620 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1621 		       compat_ulong_t, maxnode)
1622 {
1623 	unsigned long __user *nm = NULL;
1624 	unsigned long nr_bits, alloc_size;
1625 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1626 
1627 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1628 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1629 
1630 	if (nmask) {
1631 		if (compat_get_bitmap(bm, nmask, nr_bits))
1632 			return -EFAULT;
1633 		nm = compat_alloc_user_space(alloc_size);
1634 		if (copy_to_user(nm, bm, alloc_size))
1635 			return -EFAULT;
1636 	}
1637 
1638 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
1639 }
1640 
1641 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1642 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1643 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
1644 {
1645 	unsigned long __user *nm = NULL;
1646 	unsigned long nr_bits, alloc_size;
1647 	nodemask_t bm;
1648 
1649 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1650 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1651 
1652 	if (nmask) {
1653 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1654 			return -EFAULT;
1655 		nm = compat_alloc_user_space(alloc_size);
1656 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1657 			return -EFAULT;
1658 	}
1659 
1660 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1661 }
1662 
1663 COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1664 		       compat_ulong_t, maxnode,
1665 		       const compat_ulong_t __user *, old_nodes,
1666 		       const compat_ulong_t __user *, new_nodes)
1667 {
1668 	unsigned long __user *old = NULL;
1669 	unsigned long __user *new = NULL;
1670 	nodemask_t tmp_mask;
1671 	unsigned long nr_bits;
1672 	unsigned long size;
1673 
1674 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1675 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1676 	if (old_nodes) {
1677 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1678 			return -EFAULT;
1679 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1680 		if (new_nodes)
1681 			new = old + size / sizeof(unsigned long);
1682 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1683 			return -EFAULT;
1684 	}
1685 	if (new_nodes) {
1686 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1687 			return -EFAULT;
1688 		if (new == NULL)
1689 			new = compat_alloc_user_space(size);
1690 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1691 			return -EFAULT;
1692 	}
1693 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1694 }
1695 
1696 #endif /* CONFIG_COMPAT */
1697 
1698 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1699 						unsigned long addr)
1700 {
1701 	struct mempolicy *pol = NULL;
1702 
1703 	if (vma) {
1704 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1705 			pol = vma->vm_ops->get_policy(vma, addr);
1706 		} else if (vma->vm_policy) {
1707 			pol = vma->vm_policy;
1708 
1709 			/*
1710 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1711 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1712 			 * count on these policies which will be dropped by
1713 			 * mpol_cond_put() later
1714 			 */
1715 			if (mpol_needs_cond_ref(pol))
1716 				mpol_get(pol);
1717 		}
1718 	}
1719 
1720 	return pol;
1721 }
1722 
1723 /*
1724  * get_vma_policy(@vma, @addr)
1725  * @vma: virtual memory area whose policy is sought
1726  * @addr: address in @vma for shared policy lookup
1727  *
1728  * Returns effective policy for a VMA at specified address.
1729  * Falls back to current->mempolicy or system default policy, as necessary.
1730  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1731  * count--added by the get_policy() vm_op, as appropriate--to protect against
1732  * freeing by another task.  It is the caller's responsibility to free the
1733  * extra reference for shared policies.
1734  */
1735 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1736 						unsigned long addr)
1737 {
1738 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1739 
1740 	if (!pol)
1741 		pol = get_task_policy(current);
1742 
1743 	return pol;
1744 }
1745 
1746 bool vma_policy_mof(struct vm_area_struct *vma)
1747 {
1748 	struct mempolicy *pol;
1749 
1750 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1751 		bool ret = false;
1752 
1753 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1754 		if (pol && (pol->flags & MPOL_F_MOF))
1755 			ret = true;
1756 		mpol_cond_put(pol);
1757 
1758 		return ret;
1759 	}
1760 
1761 	pol = vma->vm_policy;
1762 	if (!pol)
1763 		pol = get_task_policy(current);
1764 
1765 	return pol->flags & MPOL_F_MOF;
1766 }
1767 
1768 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1769 {
1770 	enum zone_type dynamic_policy_zone = policy_zone;
1771 
1772 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1773 
1774 	/*
1775 	 * If policy->v.nodes contains only movable memory, we apply the
1776 	 * policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1777 	 *
1778 	 * policy->v.nodes is already intersected with node_states[N_MEMORY],
1779 	 * so if the following test fails, it implies that policy->v.nodes
1780 	 * contains only movable memory.
1781 	 */
1782 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1783 		dynamic_policy_zone = ZONE_MOVABLE;
1784 
1785 	return zone >= dynamic_policy_zone;
1786 }
1787 
1788 /*
1789  * Return a nodemask representing a mempolicy for filtering nodes for
1790  * page allocation
1791  */
1792 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1793 {
1794 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1795 	if (unlikely(policy->mode == MPOL_BIND) &&
1796 			apply_policy_zone(policy, gfp_zone(gfp)) &&
1797 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1798 		return &policy->v.nodes;
1799 
1800 	return NULL;
1801 }
1802 
1803 /* Return the node id preferred by the given mempolicy, or the given id */
1804 static int policy_node(gfp_t gfp, struct mempolicy *policy,
1805 								int nd)
1806 {
1807 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1808 		nd = policy->v.preferred_node;
1809 	else {
1810 		/*
1811 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1812 		 * because we might easily break the expectation to stay on the
1813 		 * requested node and not break the policy.
1814 		 */
1815 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1816 	}
1817 
1818 	return nd;
1819 }
1820 
1821 /* Do dynamic interleaving for a process */
1822 static unsigned interleave_nodes(struct mempolicy *policy)
1823 {
1824 	unsigned next;
1825 	struct task_struct *me = current;
1826 
1827 	next = next_node_in(me->il_prev, policy->v.nodes);
1828 	if (next < MAX_NUMNODES)
1829 		me->il_prev = next;
1830 	return next;
1831 }
1832 
1833 /*
1834  * Depending on the memory policy provide a node from which to allocate the
1835  * next slab entry.
1836  */
1837 unsigned int mempolicy_slab_node(void)
1838 {
1839 	struct mempolicy *policy;
1840 	int node = numa_mem_id();
1841 
1842 	if (in_interrupt())
1843 		return node;
1844 
1845 	policy = current->mempolicy;
1846 	if (!policy || policy->flags & MPOL_F_LOCAL)
1847 		return node;
1848 
1849 	switch (policy->mode) {
1850 	case MPOL_PREFERRED:
1851 		/*
1852 		 * handled MPOL_F_LOCAL above
1853 		 */
1854 		return policy->v.preferred_node;
1855 
1856 	case MPOL_INTERLEAVE:
1857 		return interleave_nodes(policy);
1858 
1859 	case MPOL_BIND: {
1860 		struct zoneref *z;
1861 
1862 		/*
1863 		 * Follow bind policy behavior and start allocation at the
1864 		 * first node.
1865 		 */
1866 		struct zonelist *zonelist;
1867 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1868 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1869 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1870 							&policy->v.nodes);
1871 		return z->zone ? zone_to_nid(z->zone) : node;
1872 	}
1873 
1874 	default:
1875 		BUG();
1876 	}
1877 }
1878 
1879 /*
1880  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1881  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1882  * number of present nodes.
1883  */
1884 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1885 {
1886 	unsigned nnodes = nodes_weight(pol->v.nodes);
1887 	unsigned target;
1888 	int i;
1889 	int nid;
1890 
1891 	if (!nnodes)
1892 		return numa_node_id();
1893 	target = (unsigned int)n % nnodes;
1894 	nid = first_node(pol->v.nodes);
1895 	for (i = 0; i < target; i++)
1896 		nid = next_node(nid, pol->v.nodes);
1897 	return nid;
1898 }
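/*
 * Worked example (illustrative): with pol->v.nodes = {0,2,5} and n = 4,
 * nnodes = 3 and target = 4 % 3 = 1, so the walk starts at node 0, takes
 * one next_node() step and returns node 2.
 */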
1899 
1900 /* Determine a node number for interleave */
1901 static inline unsigned interleave_nid(struct mempolicy *pol,
1902 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1903 {
1904 	if (vma) {
1905 		unsigned long off;
1906 
1907 		/*
1908 		 * for small pages, there is no difference between
1909 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1910 		 * for huge pages, since vm_pgoff is in units of small
1911 		 * pages, we need to shift off the always 0 bits to get
1912 		 * a useful offset.
1913 		 */
1914 		BUG_ON(shift < PAGE_SHIFT);
1915 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1916 		off += (addr - vma->vm_start) >> shift;
1917 		return offset_il_node(pol, off);
1918 	} else
1919 		return interleave_nodes(pol);
1920 }
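/*
 * Worked example (illustrative, assuming 4KB base pages and 2MB huge pages,
 * i.e. PAGE_SHIFT = 12 and shift = 21): off becomes vma->vm_pgoff >> 9 plus
 * (addr - vma->vm_start) >> 21, so the interleave unit is the huge page
 * rather than the base page.
 */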
1921 
1922 #ifdef CONFIG_HUGETLBFS
1923 /*
1924  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1925  * @vma: virtual memory area whose policy is sought
1926  * @addr: address in @vma for shared policy lookup and interleave policy
1927  * @gfp_flags: for requested zone
1928  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1929  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1930  *
1931  * Returns a nid suitable for a huge page allocation and a pointer
1932  * to the struct mempolicy for conditional unref after allocation.
1933  * If the effective policy is MPOL_BIND, returns a pointer to the mempolicy's
1934  * @nodemask for filtering the zonelist.
1935  *
1936  * Must be protected by read_mems_allowed_begin()
1937  */
1938 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1939 				struct mempolicy **mpol, nodemask_t **nodemask)
1940 {
1941 	int nid;
1942 
1943 	*mpol = get_vma_policy(vma, addr);
1944 	*nodemask = NULL;	/* assume !MPOL_BIND */
1945 
1946 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1947 		nid = interleave_nid(*mpol, vma, addr,
1948 					huge_page_shift(hstate_vma(vma)));
1949 	} else {
1950 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
1951 		if ((*mpol)->mode == MPOL_BIND)
1952 			*nodemask = &(*mpol)->v.nodes;
1953 	}
1954 	return nid;
1955 }
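/*
 * Caller sketch (loosely modelled on the hugetlb fault path, simplified;
 * try_hugepage_alloc() is a hypothetical placeholder for the real dequeue or
 * allocation step): the cpuset seqcount guards the lookup and mpol_cond_put()
 * drops the conditional reference taken via get_vma_policy():
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	unsigned int cookie;
 *	int nid;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *		page = try_hugepage_alloc(nid, nodemask);
 *		mpol_cond_put(mpol);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */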
1956 
1957 /*
1958  * init_nodemask_of_mempolicy
1959  *
1960  * If the current task's mempolicy is "default" [NULL], return 'false'
1961  * to indicate default policy.  Otherwise, extract the policy nodemask
1962  * for 'bind' or 'interleave' policy into the argument nodemask, or
1963  * initialize the argument nodemask to contain the single node for
1964  * 'preferred' or 'local' policy and return 'true' to indicate presence
1965  * of non-default mempolicy.
1966  *
1967  * We don't bother with reference counting the mempolicy [mpol_get/put]
1968  * because the current task is examining its own mempolicy and a task's
1969  * mempolicy is only ever changed by the task itself.
1970  *
1971  * N.B., it is the caller's responsibility to free a returned nodemask.
1972  */
1973 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1974 {
1975 	struct mempolicy *mempolicy;
1976 	int nid;
1977 
1978 	if (!(mask && current->mempolicy))
1979 		return false;
1980 
1981 	task_lock(current);
1982 	mempolicy = current->mempolicy;
1983 	switch (mempolicy->mode) {
1984 	case MPOL_PREFERRED:
1985 		if (mempolicy->flags & MPOL_F_LOCAL)
1986 			nid = numa_node_id();
1987 		else
1988 			nid = mempolicy->v.preferred_node;
1989 		init_nodemask_of_node(mask, nid);
1990 		break;
1991 
1992 	case MPOL_BIND:
1993 		/* Fall through */
1994 	case MPOL_INTERLEAVE:
1995 		*mask = mempolicy->v.nodes;
1996 		break;
1997 
1998 	default:
1999 		BUG();
2000 	}
2001 	task_unlock(current);
2002 
2003 	return true;
2004 }
2005 #endif
2006 
2007 /*
2008  * mempolicy_nodemask_intersects
2009  *
2010  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2011  * policy.  Otherwise, check for intersection between mask and the policy
2012  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
2013  * policy, always return true since it may allocate elsewhere on fallback.
2014  *
2015  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2016  */
2017 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2018 					const nodemask_t *mask)
2019 {
2020 	struct mempolicy *mempolicy;
2021 	bool ret = true;
2022 
2023 	if (!mask)
2024 		return ret;
2025 	task_lock(tsk);
2026 	mempolicy = tsk->mempolicy;
2027 	if (!mempolicy)
2028 		goto out;
2029 
2030 	switch (mempolicy->mode) {
2031 	case MPOL_PREFERRED:
2032 		/*
2033 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
2034 		 * to allocate from; they may fall back to other nodes under OOM.
2035 		 * Thus, it's possible for tsk to have allocated memory from
2036 		 * nodes in mask.
2037 		 */
2038 		break;
2039 	case MPOL_BIND:
2040 	case MPOL_INTERLEAVE:
2041 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
2042 		break;
2043 	default:
2044 		BUG();
2045 	}
2046 out:
2047 	task_unlock(tsk);
2048 	return ret;
2049 }
2050 
2051 /* Allocate a page in interleaved policy.
2052    Own path because it needs to do special accounting. */
2053 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2054 					unsigned nid)
2055 {
2056 	struct page *page;
2057 
2058 	page = __alloc_pages(gfp, order, nid);
2059 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2060 	if (!static_branch_likely(&vm_numa_stat_key))
2061 		return page;
2062 	if (page && page_to_nid(page) == nid) {
2063 		preempt_disable();
2064 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2065 		preempt_enable();
2066 	}
2067 	return page;
2068 }
2069 
2070 /**
2071  * 	alloc_pages_vma	- Allocate a page for a VMA.
2072  *
2073  * 	@gfp:
2074  *      %GFP_USER    user allocation.
2075  *      %GFP_KERNEL  kernel allocations,
2076  *      %GFP_HIGHMEM highmem/user allocations,
2077  *      %GFP_FS      allocation should not call back into a file system.
2078  *      %GFP_ATOMIC  don't sleep.
2079  *
2080  *	@order: Order of the GFP allocation.
2081  * 	@vma:  Pointer to VMA or NULL if not available.
2082  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2083  *	@node: Which node to prefer for allocation (modulo policy).
2084  *	@hugepage: for hugepages try only the preferred node if possible
2085  *
2086  * 	This function allocates a page from the kernel page pool and applies
2087  *	a NUMA policy associated with the VMA or the current process.
2088  *	When @vma is not NULL, the caller must hold down_read on the mmap_sem of the
2089  *	mm_struct of the VMA to prevent it from going away. Should be used for
2090  *	all allocations for pages that will be mapped into user space. Returns
2091  *	NULL when no page can be allocated.
2092  */
2093 struct page *
2094 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2095 		unsigned long addr, int node, bool hugepage)
2096 {
2097 	struct mempolicy *pol;
2098 	struct page *page;
2099 	int preferred_nid;
2100 	nodemask_t *nmask;
2101 
2102 	pol = get_vma_policy(vma, addr);
2103 
2104 	if (pol->mode == MPOL_INTERLEAVE) {
2105 		unsigned nid;
2106 
2107 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2108 		mpol_cond_put(pol);
2109 		page = alloc_page_interleave(gfp, order, nid);
2110 		goto out;
2111 	}
2112 
2113 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2114 		int hpage_node = node;
2115 
2116 		/*
2117 		 * For hugepage allocation and non-interleave policy which
2118 		 * allows the current node (or other explicitly preferred
2119 		 * node) we only try to allocate from the current/preferred
2120 		 * node and don't fall back to other nodes, as the cost of
2121 		 * remote accesses would likely offset THP benefits.
2122 		 *
2123 		 * If the policy is interleave, or does not allow the current
2124 		 * node in its nodemask, we allocate the standard way.
2125 		 */
2126 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2127 			hpage_node = pol->v.preferred_node;
2128 
2129 		nmask = policy_nodemask(gfp, pol);
2130 		if (!nmask || node_isset(hpage_node, *nmask)) {
2131 			mpol_cond_put(pol);
2132 			page = __alloc_pages_node(hpage_node,
2133 						gfp | __GFP_THISNODE, order);
2134 
2135 			/*
2136 			 * If hugepage allocations are configured to always use
2137 			 * synchronous compaction or the vma has been madvised
2138 			 * to prefer hugepage backing, retry allowing remote
2139 			 * memory as well.
2140 			 */
2141 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2142 				page = __alloc_pages_nodemask(gfp | __GFP_NORETRY,
2143 							order, hpage_node,
2144 							nmask);
2145 
2146 			goto out;
2147 		}
2148 	}
2149 
2150 	nmask = policy_nodemask(gfp, pol);
2151 	preferred_nid = policy_node(gfp, pol, node);
2152 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2153 	mpol_cond_put(pol);
2154 out:
2155 	return page;
2156 }
2157 EXPORT_SYMBOL(alloc_pages_vma);
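/*
 * Usage sketch: most callers go through the alloc_page_vma() wrapper in
 * <linux/gfp.h>, an order-0, non-hugepage call preferring the local node,
 * roughly equivalent to:
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);
 */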
2158 
2159 /**
2160  * 	alloc_pages_current - Allocate pages.
2161  *
2162  *	@gfp:
2163  *		%GFP_USER   user allocation,
2164  *      	%GFP_KERNEL kernel allocation,
2165  *      	%GFP_HIGHMEM highmem allocation,
2166  *      	%GFP_FS     don't call back into a file system.
2167  *      	%GFP_ATOMIC don't sleep.
2168  *	@order: Power of two of allocation size in pages. 0 is a single page.
2169  *
2170  *	Allocate a page from the kernel page pool.  When not in
2171  *	interrupt context, the current process' NUMA policy is applied.
2172  *	Returns NULL when no page can be allocated.
2173  */
2174 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2175 {
2176 	struct mempolicy *pol = &default_policy;
2177 	struct page *page;
2178 
2179 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2180 		pol = get_task_policy(current);
2181 
2182 	/*
2183 	 * No reference counting needed for current->mempolicy
2184 	 * nor system default_policy
2185 	 */
2186 	if (pol->mode == MPOL_INTERLEAVE)
2187 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2188 	else
2189 		page = __alloc_pages_nodemask(gfp, order,
2190 				policy_node(gfp, pol, numa_node_id()),
2191 				policy_nodemask(gfp, pol));
2192 
2193 	return page;
2194 }
2195 EXPORT_SYMBOL(alloc_pages_current);
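/*
 * Usage sketch: on NUMA kernels the generic alloc_pages()/alloc_page()
 * helpers in <linux/gfp.h> resolve to this function, so e.g.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 * allocates a single page under the calling process' policy (or the system
 * default policy when called from interrupt context).
 */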
2196 
2197 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2198 {
2199 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2200 
2201 	if (IS_ERR(pol))
2202 		return PTR_ERR(pol);
2203 	dst->vm_policy = pol;
2204 	return 0;
2205 }
2206 
2207 /*
2208  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2209  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2210  * with the mems_allowed returned by cpuset_mems_allowed().  This
2211  * keeps mempolicies cpuset-relative after their cpuset moves.  See
2212  * also kernel/cpuset.c update_nodemask().
2213  *
2214  * current's mempolicy may be rebound by another task (the task that changes
2215  * the cpuset's mems), so we needn't do the rebind work for the current task.
2216  */
2217 
2218 /* Slow path of a mempolicy duplicate */
2219 struct mempolicy *__mpol_dup(struct mempolicy *old)
2220 {
2221 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2222 
2223 	if (!new)
2224 		return ERR_PTR(-ENOMEM);
2225 
2226 	/* task's mempolicy is protected by alloc_lock */
2227 	if (old == current->mempolicy) {
2228 		task_lock(current);
2229 		*new = *old;
2230 		task_unlock(current);
2231 	} else
2232 		*new = *old;
2233 
2234 	if (current_cpuset_is_being_rebound()) {
2235 		nodemask_t mems = cpuset_mems_allowed(current);
2236 		mpol_rebind_policy(new, &mems);
2237 	}
2238 	atomic_set(&new->refcnt, 1);
2239 	return new;
2240 }
2241 
2242 /* Slow path of a mempolicy comparison */
2243 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2244 {
2245 	if (!a || !b)
2246 		return false;
2247 	if (a->mode != b->mode)
2248 		return false;
2249 	if (a->flags != b->flags)
2250 		return false;
2251 	if (mpol_store_user_nodemask(a))
2252 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2253 			return false;
2254 
2255 	switch (a->mode) {
2256 	case MPOL_BIND:
2257 		/* Fall through */
2258 	case MPOL_INTERLEAVE:
2259 		return !!nodes_equal(a->v.nodes, b->v.nodes);
2260 	case MPOL_PREFERRED:
2261 		/* a's ->flags is the same as b's */
2262 		if (a->flags & MPOL_F_LOCAL)
2263 			return true;
2264 		return a->v.preferred_node == b->v.preferred_node;
2265 	default:
2266 		BUG();
2267 		return false;
2268 	}
2269 }
2270 
2271 /*
2272  * Shared memory backing store policy support.
2273  *
2274  * Remember policies even when nobody has shared memory mapped.
2275  * The policies are kept in Red-Black tree linked from the inode.
2276  * They are protected by the sp->lock rwlock, which should be held
2277  * for any accesses to the tree.
2278  */
2279 
2280 /*
2281  * lookup first element intersecting start-end.  Caller holds sp->lock for
2282  * reading or for writing
2283  */
2284 static struct sp_node *
2285 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2286 {
2287 	struct rb_node *n = sp->root.rb_node;
2288 
2289 	while (n) {
2290 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2291 
2292 		if (start >= p->end)
2293 			n = n->rb_right;
2294 		else if (end <= p->start)
2295 			n = n->rb_left;
2296 		else
2297 			break;
2298 	}
2299 	if (!n)
2300 		return NULL;
2301 	for (;;) {
2302 		struct sp_node *w = NULL;
2303 		struct rb_node *prev = rb_prev(n);
2304 		if (!prev)
2305 			break;
2306 		w = rb_entry(prev, struct sp_node, nd);
2307 		if (w->end <= start)
2308 			break;
2309 		n = prev;
2310 	}
2311 	return rb_entry(n, struct sp_node, nd);
2312 }
2313 
2314 /*
2315  * Insert a new shared policy into the list.  Caller holds sp->lock for
2316  * writing.
2317  */
2318 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2319 {
2320 	struct rb_node **p = &sp->root.rb_node;
2321 	struct rb_node *parent = NULL;
2322 	struct sp_node *nd;
2323 
2324 	while (*p) {
2325 		parent = *p;
2326 		nd = rb_entry(parent, struct sp_node, nd);
2327 		if (new->start < nd->start)
2328 			p = &(*p)->rb_left;
2329 		else if (new->end > nd->end)
2330 			p = &(*p)->rb_right;
2331 		else
2332 			BUG();
2333 	}
2334 	rb_link_node(&new->nd, parent, p);
2335 	rb_insert_color(&new->nd, &sp->root);
2336 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2337 		 new->policy ? new->policy->mode : 0);
2338 }
2339 
2340 /* Find shared policy intersecting idx */
2341 struct mempolicy *
2342 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2343 {
2344 	struct mempolicy *pol = NULL;
2345 	struct sp_node *sn;
2346 
2347 	if (!sp->root.rb_node)
2348 		return NULL;
2349 	read_lock(&sp->lock);
2350 	sn = sp_lookup(sp, idx, idx+1);
2351 	if (sn) {
2352 		mpol_get(sn->policy);
2353 		pol = sn->policy;
2354 	}
2355 	read_unlock(&sp->lock);
2356 	return pol;
2357 }
2358 
2359 static void sp_free(struct sp_node *n)
2360 {
2361 	mpol_put(n->policy);
2362 	kmem_cache_free(sn_cache, n);
2363 }
2364 
2365 /**
2366  * mpol_misplaced - check whether current page node is valid in policy
2367  *
2368  * @page: page to be checked
2369  * @vma: vm area where page mapped
2370  * @addr: virtual address where page mapped
2371  *
2372  * Look up the current policy node id for (vma, addr) and compare it to the
2373  * page's node id.
2374  *
2375  * Returns:
2376  *	-1	- not misplaced, page is in the right node
2377  *	node	- node id where the page should be
2378  *
2379  * Policy determination "mimics" alloc_page_vma().
2380  * Called from fault path where we know the vma and faulting address.
2381  */
2382 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2383 {
2384 	struct mempolicy *pol;
2385 	struct zoneref *z;
2386 	int curnid = page_to_nid(page);
2387 	unsigned long pgoff;
2388 	int thiscpu = raw_smp_processor_id();
2389 	int thisnid = cpu_to_node(thiscpu);
2390 	int polnid = NUMA_NO_NODE;
2391 	int ret = -1;
2392 
2393 	pol = get_vma_policy(vma, addr);
2394 	if (!(pol->flags & MPOL_F_MOF))
2395 		goto out;
2396 
2397 	switch (pol->mode) {
2398 	case MPOL_INTERLEAVE:
2399 		pgoff = vma->vm_pgoff;
2400 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2401 		polnid = offset_il_node(pol, pgoff);
2402 		break;
2403 
2404 	case MPOL_PREFERRED:
2405 		if (pol->flags & MPOL_F_LOCAL)
2406 			polnid = numa_node_id();
2407 		else
2408 			polnid = pol->v.preferred_node;
2409 		break;
2410 
2411 	case MPOL_BIND:
2412 
2413 		/*
2414 		 * allows binding to multiple nodes.
2415 		 * use current page if in policy nodemask,
2416 		 * else select nearest allowed node, if any.
2417 		 * If no allowed nodes, use current [!misplaced].
2418 		 */
2419 		if (node_isset(curnid, pol->v.nodes))
2420 			goto out;
2421 		z = first_zones_zonelist(
2422 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2423 				gfp_zone(GFP_HIGHUSER),
2424 				&pol->v.nodes);
2425 		polnid = zone_to_nid(z->zone);
2426 		break;
2427 
2428 	default:
2429 		BUG();
2430 	}
2431 
2432 	/* Migrate the page towards the node whose CPU is referencing it */
2433 	if (pol->flags & MPOL_F_MORON) {
2434 		polnid = thisnid;
2435 
2436 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2437 			goto out;
2438 	}
2439 
2440 	if (curnid != polnid)
2441 		ret = polnid;
2442 out:
2443 	mpol_cond_put(pol);
2444 
2445 	return ret;
2446 }
2447 
2448 /*
2449  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2450  * dropped after task->mempolicy is set to NULL so that any allocation done as
2451  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2452  * policy.
2453  */
2454 void mpol_put_task_policy(struct task_struct *task)
2455 {
2456 	struct mempolicy *pol;
2457 
2458 	task_lock(task);
2459 	pol = task->mempolicy;
2460 	task->mempolicy = NULL;
2461 	task_unlock(task);
2462 	mpol_put(pol);
2463 }
2464 
2465 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2466 {
2467 	pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2468 	rb_erase(&n->nd, &sp->root);
2469 	sp_free(n);
2470 }
2471 
2472 static void sp_node_init(struct sp_node *node, unsigned long start,
2473 			unsigned long end, struct mempolicy *pol)
2474 {
2475 	node->start = start;
2476 	node->end = end;
2477 	node->policy = pol;
2478 }
2479 
2480 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2481 				struct mempolicy *pol)
2482 {
2483 	struct sp_node *n;
2484 	struct mempolicy *newpol;
2485 
2486 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2487 	if (!n)
2488 		return NULL;
2489 
2490 	newpol = mpol_dup(pol);
2491 	if (IS_ERR(newpol)) {
2492 		kmem_cache_free(sn_cache, n);
2493 		return NULL;
2494 	}
2495 	newpol->flags |= MPOL_F_SHARED;
2496 	sp_node_init(n, start, end, newpol);
2497 
2498 	return n;
2499 }
2500 
2501 /* Replace a policy range. */
2502 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2503 				 unsigned long end, struct sp_node *new)
2504 {
2505 	struct sp_node *n;
2506 	struct sp_node *n_new = NULL;
2507 	struct mempolicy *mpol_new = NULL;
2508 	int ret = 0;
2509 
2510 restart:
2511 	write_lock(&sp->lock);
2512 	n = sp_lookup(sp, start, end);
2513 	/* Take care of old policies in the same range. */
2514 	while (n && n->start < end) {
2515 		struct rb_node *next = rb_next(&n->nd);
2516 		if (n->start >= start) {
2517 			if (n->end <= end)
2518 				sp_delete(sp, n);
2519 			else
2520 				n->start = end;
2521 		} else {
2522 			/* Old policy spanning whole new range. */
2523 			if (n->end > end) {
2524 				if (!n_new)
2525 					goto alloc_new;
2526 
2527 				*mpol_new = *n->policy;
2528 				atomic_set(&mpol_new->refcnt, 1);
2529 				sp_node_init(n_new, end, n->end, mpol_new);
2530 				n->end = start;
2531 				sp_insert(sp, n_new);
2532 				n_new = NULL;
2533 				mpol_new = NULL;
2534 				break;
2535 			} else
2536 				n->end = start;
2537 		}
2538 		if (!next)
2539 			break;
2540 		n = rb_entry(next, struct sp_node, nd);
2541 	}
2542 	if (new)
2543 		sp_insert(sp, new);
2544 	write_unlock(&sp->lock);
2545 	ret = 0;
2546 
2547 err_out:
2548 	if (mpol_new)
2549 		mpol_put(mpol_new);
2550 	if (n_new)
2551 		kmem_cache_free(sn_cache, n_new);
2552 
2553 	return ret;
2554 
2555 alloc_new:
2556 	write_unlock(&sp->lock);
2557 	ret = -ENOMEM;
2558 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2559 	if (!n_new)
2560 		goto err_out;
2561 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2562 	if (!mpol_new)
2563 		goto err_out;
2564 	atomic_set(&mpol_new->refcnt, 1);
2565 	goto restart;
2566 }
2567 
2568 /**
2569  * mpol_shared_policy_init - initialize shared policy for inode
2570  * @sp: pointer to inode shared policy
2571  * @mpol:  struct mempolicy to install
2572  *
2573  * Install non-NULL @mpol in inode's shared policy rb-tree.
2574  * On entry, the current task has a reference on a non-NULL @mpol.
2575  * This must be released on exit.
2576  * This is called at inode instantiation time (get_inode()) and we can use GFP_KERNEL.
2577  */
2578 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2579 {
2580 	int ret;
2581 
2582 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2583 	rwlock_init(&sp->lock);
2584 
2585 	if (mpol) {
2586 		struct vm_area_struct pvma;
2587 		struct mempolicy *new;
2588 		NODEMASK_SCRATCH(scratch);
2589 
2590 		if (!scratch)
2591 			goto put_mpol;
2592 		/* contextualize the tmpfs mount point mempolicy */
2593 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2594 		if (IS_ERR(new))
2595 			goto free_scratch; /* no valid nodemask intersection */
2596 
2597 		task_lock(current);
2598 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2599 		task_unlock(current);
2600 		if (ret)
2601 			goto put_new;
2602 
2603 		/* Create pseudo-vma that contains just the policy */
2604 		vma_init(&pvma, NULL);
2605 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2606 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2607 
2608 put_new:
2609 		mpol_put(new);			/* drop initial ref */
2610 free_scratch:
2611 		NODEMASK_SCRATCH_FREE(scratch);
2612 put_mpol:
2613 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2614 	}
2615 }
2616 
2617 int mpol_set_shared_policy(struct shared_policy *info,
2618 			struct vm_area_struct *vma, struct mempolicy *npol)
2619 {
2620 	int err;
2621 	struct sp_node *new = NULL;
2622 	unsigned long sz = vma_pages(vma);
2623 
2624 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2625 		 vma->vm_pgoff,
2626 		 sz, npol ? npol->mode : -1,
2627 		 npol ? npol->flags : -1,
2628 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2629 
2630 	if (npol) {
2631 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2632 		if (!new)
2633 			return -ENOMEM;
2634 	}
2635 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2636 	if (err && new)
2637 		sp_free(new);
2638 	return err;
2639 }
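/*
 * Caller sketch (modelled on tmpfs' vm_ops->set_policy hook, simplified):
 * shmem wires mbind() on a mapped file into the shared policy tree roughly
 * like this:
 *
 *	static int shmem_set_policy(struct vm_area_struct *vma,
 *				    struct mempolicy *mpol)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *
 *		return mpol_set_shared_policy(&SHMEM_I(inode)->policy,
 *					      vma, mpol);
 *	}
 */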
2640 
2641 /* Free a backing policy store on inode delete. */
2642 void mpol_free_shared_policy(struct shared_policy *p)
2643 {
2644 	struct sp_node *n;
2645 	struct rb_node *next;
2646 
2647 	if (!p->root.rb_node)
2648 		return;
2649 	write_lock(&p->lock);
2650 	next = rb_first(&p->root);
2651 	while (next) {
2652 		n = rb_entry(next, struct sp_node, nd);
2653 		next = rb_next(&n->nd);
2654 		sp_delete(p, n);
2655 	}
2656 	write_unlock(&p->lock);
2657 }
2658 
2659 #ifdef CONFIG_NUMA_BALANCING
2660 static int __initdata numabalancing_override;
2661 
2662 static void __init check_numabalancing_enable(void)
2663 {
2664 	bool numabalancing_default = false;
2665 
2666 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2667 		numabalancing_default = true;
2668 
2669 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2670 	if (numabalancing_override)
2671 		set_numabalancing_state(numabalancing_override == 1);
2672 
2673 	if (num_online_nodes() > 1 && !numabalancing_override) {
2674 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2675 			numabalancing_default ? "Enabling" : "Disabling");
2676 		set_numabalancing_state(numabalancing_default);
2677 	}
2678 }
2679 
2680 static int __init setup_numabalancing(char *str)
2681 {
2682 	int ret = 0;
2683 	if (!str)
2684 		goto out;
2685 
2686 	if (!strcmp(str, "enable")) {
2687 		numabalancing_override = 1;
2688 		ret = 1;
2689 	} else if (!strcmp(str, "disable")) {
2690 		numabalancing_override = -1;
2691 		ret = 1;
2692 	}
2693 out:
2694 	if (!ret)
2695 		pr_warn("Unable to parse numa_balancing=\n");
2696 
2697 	return ret;
2698 }
2699 __setup("numa_balancing=", setup_numabalancing);
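/*
 * Usage sketch: the early parameter registered above is supplied on the
 * kernel command line, e.g.
 *
 *	numa_balancing=enable
 *	numa_balancing=disable
 *
 * while the same state can be toggled at run time through the
 * kernel.numa_balancing sysctl mentioned in check_numabalancing_enable().
 */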
2700 #else
2701 static inline void __init check_numabalancing_enable(void)
2702 {
2703 }
2704 #endif /* CONFIG_NUMA_BALANCING */
2705 
2706 /* assumes fs == KERNEL_DS */
2707 void __init numa_policy_init(void)
2708 {
2709 	nodemask_t interleave_nodes;
2710 	unsigned long largest = 0;
2711 	int nid, prefer = 0;
2712 
2713 	policy_cache = kmem_cache_create("numa_policy",
2714 					 sizeof(struct mempolicy),
2715 					 0, SLAB_PANIC, NULL);
2716 
2717 	sn_cache = kmem_cache_create("shared_policy_node",
2718 				     sizeof(struct sp_node),
2719 				     0, SLAB_PANIC, NULL);
2720 
2721 	for_each_node(nid) {
2722 		preferred_node_policy[nid] = (struct mempolicy) {
2723 			.refcnt = ATOMIC_INIT(1),
2724 			.mode = MPOL_PREFERRED,
2725 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2726 			.v = { .preferred_node = nid, },
2727 		};
2728 	}
2729 
2730 	/*
2731 	 * Set interleaving policy for system init. Interleaving is only
2732 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2733 	 * back to the largest node if they're all smaller.
2734 	 */
2735 	nodes_clear(interleave_nodes);
2736 	for_each_node_state(nid, N_MEMORY) {
2737 		unsigned long total_pages = node_present_pages(nid);
2738 
2739 		/* Preserve the largest node */
2740 		if (largest < total_pages) {
2741 			largest = total_pages;
2742 			prefer = nid;
2743 		}
2744 
2745 		/* Interleave this node? */
2746 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2747 			node_set(nid, interleave_nodes);
2748 	}
2749 
2750 	/* All too small, use the largest */
2751 	if (unlikely(nodes_empty(interleave_nodes)))
2752 		node_set(prefer, interleave_nodes);
2753 
2754 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2755 		pr_err("%s: interleaving failed\n", __func__);
2756 
2757 	check_numabalancing_enable();
2758 }
2759 
2760 /* Reset policy of current process to default */
2761 void numa_default_policy(void)
2762 {
2763 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2764 }
2765 
2766 /*
2767  * Parse and format mempolicy from/to strings
2768  */
2769 
2770 /*
2771  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2772  */
2773 static const char * const policy_modes[] =
2774 {
2775 	[MPOL_DEFAULT]    = "default",
2776 	[MPOL_PREFERRED]  = "prefer",
2777 	[MPOL_BIND]       = "bind",
2778 	[MPOL_INTERLEAVE] = "interleave",
2779 	[MPOL_LOCAL]      = "local",
2780 };
2781 
2782 
2783 #ifdef CONFIG_TMPFS
2784 /**
2785  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2786  * @str:  string containing mempolicy to parse
2787  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2788  *
2789  * Format of input:
2790  *	<mode>[=<flags>][:<nodelist>]
2791  *
2792  * On success, returns 0, else 1
2793  */
2794 int mpol_parse_str(char *str, struct mempolicy **mpol)
2795 {
2796 	struct mempolicy *new = NULL;
2797 	unsigned short mode_flags;
2798 	nodemask_t nodes;
2799 	char *nodelist = strchr(str, ':');
2800 	char *flags = strchr(str, '=');
2801 	int err = 1, mode;
2802 
2803 	if (flags)
2804 		*flags++ = '\0';	/* terminate mode string */
2805 
2806 	if (nodelist) {
2807 		/* NUL-terminate mode or flags string */
2808 		*nodelist++ = '\0';
2809 		if (nodelist_parse(nodelist, nodes))
2810 			goto out;
2811 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2812 			goto out;
2813 	} else
2814 		nodes_clear(nodes);
2815 
2816 	mode = match_string(policy_modes, MPOL_MAX, str);
2817 	if (mode < 0)
2818 		goto out;
2819 
2820 	switch (mode) {
2821 	case MPOL_PREFERRED:
2822 		/*
2823 		 * Insist on a nodelist of one node only: later we use
2824 		 * first_node(nodes) to grab a single node, so the nodelist
2825 		 * (hence nodes) must not be empty here.
2826 		 */
2827 		if (nodelist) {
2828 			char *rest = nodelist;
2829 			while (isdigit(*rest))
2830 				rest++;
2831 			if (*rest)
2832 				goto out;
2833 			if (nodes_empty(nodes))
2834 				goto out;
2835 		}
2836 		break;
2837 	case MPOL_INTERLEAVE:
2838 		/*
2839 		 * Default to online nodes with memory if no nodelist
2840 		 */
2841 		if (!nodelist)
2842 			nodes = node_states[N_MEMORY];
2843 		break;
2844 	case MPOL_LOCAL:
2845 		/*
2846 		 * Don't allow a nodelist;  mpol_new() checks flags
2847 		 */
2848 		if (nodelist)
2849 			goto out;
2850 		mode = MPOL_PREFERRED;
2851 		break;
2852 	case MPOL_DEFAULT:
2853 		/*
2854 		 * Insist on an empty nodelist
2855 		 */
2856 		if (!nodelist)
2857 			err = 0;
2858 		goto out;
2859 	case MPOL_BIND:
2860 		/*
2861 		 * Insist on a nodelist
2862 		 */
2863 		if (!nodelist)
2864 			goto out;
2865 	}
2866 
2867 	mode_flags = 0;
2868 	if (flags) {
2869 		/*
2870 		 * Currently, we only support two mutually exclusive
2871 		 * mode flags.
2872 		 */
2873 		if (!strcmp(flags, "static"))
2874 			mode_flags |= MPOL_F_STATIC_NODES;
2875 		else if (!strcmp(flags, "relative"))
2876 			mode_flags |= MPOL_F_RELATIVE_NODES;
2877 		else
2878 			goto out;
2879 	}
2880 
2881 	new = mpol_new(mode, mode_flags, &nodes);
2882 	if (IS_ERR(new))
2883 		goto out;
2884 
2885 	/*
2886 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2887 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2888 	 */
2889 	if (mode != MPOL_PREFERRED)
2890 		new->v.nodes = nodes;
2891 	else if (nodelist)
2892 		new->v.preferred_node = first_node(nodes);
2893 	else
2894 		new->flags |= MPOL_F_LOCAL;
2895 
2896 	/*
2897 	 * Save nodes for contextualization: this will be used to "clone"
2898 	 * the mempolicy in a specific context [cpuset] at a later time.
2899 	 */
2900 	new->w.user_nodemask = nodes;
2901 
2902 	err = 0;
2903 
2904 out:
2905 	/* Restore string for error message */
2906 	if (nodelist)
2907 		*--nodelist = ':';
2908 	if (flags)
2909 		*--flags = '=';
2910 	if (!err)
2911 		*mpol = new;
2912 	return err;
2913 }
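/*
 * Example inputs (illustrative) in the <mode>[=<flags>][:<nodelist>] format
 * accepted above, as used by the tmpfs "mpol=" mount option:
 *
 *	"interleave"			interleave over all nodes with memory
 *	"interleave=static:0-3"		static interleave over nodes 0-3
 *	"bind:0,2"			allocate only on nodes 0 and 2
 *	"prefer:1"			prefer node 1, normal fallback otherwise
 *	"local"				allocate on the faulting node
 */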
2914 #endif /* CONFIG_TMPFS */
2915 
2916 /**
2917  * mpol_to_str - format a mempolicy structure for printing
2918  * @buffer:  to contain formatted mempolicy string
2919  * @maxlen:  length of @buffer
2920  * @pol:  pointer to mempolicy to be formatted
2921  *
2922  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2923  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2924  * longest flag, "relative", and to display at least a few node ids.
2925  */
2926 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2927 {
2928 	char *p = buffer;
2929 	nodemask_t nodes = NODE_MASK_NONE;
2930 	unsigned short mode = MPOL_DEFAULT;
2931 	unsigned short flags = 0;
2932 
2933 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2934 		mode = pol->mode;
2935 		flags = pol->flags;
2936 	}
2937 
2938 	switch (mode) {
2939 	case MPOL_DEFAULT:
2940 		break;
2941 	case MPOL_PREFERRED:
2942 		if (flags & MPOL_F_LOCAL)
2943 			mode = MPOL_LOCAL;
2944 		else
2945 			node_set(pol->v.preferred_node, nodes);
2946 		break;
2947 	case MPOL_BIND:
2948 	case MPOL_INTERLEAVE:
2949 		nodes = pol->v.nodes;
2950 		break;
2951 	default:
2952 		WARN_ON_ONCE(1);
2953 		snprintf(p, maxlen, "unknown");
2954 		return;
2955 	}
2956 
2957 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2958 
2959 	if (flags & MPOL_MODE_FLAGS) {
2960 		p += snprintf(p, buffer + maxlen - p, "=");
2961 
2962 		/*
2963 		 * Currently, the only defined flags are mutually exclusive
2964 		 */
2965 		if (flags & MPOL_F_STATIC_NODES)
2966 			p += snprintf(p, buffer + maxlen - p, "static");
2967 		else if (flags & MPOL_F_RELATIVE_NODES)
2968 			p += snprintf(p, buffer + maxlen - p, "relative");
2969 	}
2970 
2971 	if (!nodes_empty(nodes))
2972 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2973 			       nodemask_pr_args(&nodes));
2974 }
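/*
 * Usage sketch: callers such as the /proc/<pid>/numa_maps code format a
 * policy into a small on-stack buffer, e.g.
 *
 *	char buffer[64];
 *
 *	mpol_to_str(buffer, sizeof(buffer), pol);
 *	// typical results: "default", "bind:0,2", "interleave=static:0-3"
 */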
2975