• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple NUMA memory policy for the Linux kernel.
4  *
5  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy an process counter
20  *                is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind would truly restrict
26  *                the allocation to memory nodes instead
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case NUMA_NO_NODE here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * default        Allocate on the local node first, or when on a VMA
35  *                use the process policy. This is what Linux always did
36  *		  in a NUMA aware kernel and still does by, ahem, default.
37  *
38  * The process policy is applied for most non interrupt memory allocations
39  * in that process' context. Interrupts ignore the policies and always
40  * try to allocate on the local CPU. The VMA policy is only applied for memory
41  * allocations for a VMA in the VM.
42  *
43  * Currently there are a few corner cases in swapping where the policy
44  * is not applied, but the majority should be handled. When process policy
45  * is used it is not remembered over swap outs/swap ins.
46  *
47  * Only the highest zone in the zone hierarchy gets policied. Allocations
48  * requesting a lower zone just use default policy. This implies that
49  * on systems with highmem kernel lowmem allocation don't get policied.
50  * Same with GFP_DMA allocations.
51  *
52  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53  * all users and remembered even when nobody has memory mapped.
54  */
55 
56 /* Notebook:
57    fix mmap readahead to honour policy and enable policy for any page cache
58    object
59    statistics for bigpages
60    global policy for page cache? currently it uses process policy. Requires
61    first item above.
62    handle mremap for shared memory (currently ignored for the policy)
63    grows down?
64    make bind policy root only? It can trigger oom much faster and the
65    kernel is not always grateful with that.
66 */
67 
68 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69 
70 #include <linux/mempolicy.h>
71 #include <linux/pagewalk.h>
72 #include <linux/highmem.h>
73 #include <linux/hugetlb.h>
74 #include <linux/kernel.h>
75 #include <linux/sched.h>
76 #include <linux/sched/mm.h>
77 #include <linux/sched/numa_balancing.h>
78 #include <linux/sched/task.h>
79 #include <linux/nodemask.h>
80 #include <linux/cpuset.h>
81 #include <linux/slab.h>
82 #include <linux/string.h>
83 #include <linux/export.h>
84 #include <linux/nsproxy.h>
85 #include <linux/interrupt.h>
86 #include <linux/init.h>
87 #include <linux/compat.h>
88 #include <linux/ptrace.h>
89 #include <linux/swap.h>
90 #include <linux/seq_file.h>
91 #include <linux/proc_fs.h>
92 #include <linux/migrate.h>
93 #include <linux/ksm.h>
94 #include <linux/rmap.h>
95 #include <linux/security.h>
96 #include <linux/syscalls.h>
97 #include <linux/ctype.h>
98 #include <linux/mm_inline.h>
99 #include <linux/mmu_notifier.h>
100 #include <linux/printk.h>
101 #include <linux/swapops.h>
102 
103 #include <asm/tlbflush.h>
104 #include <linux/uaccess.h>
105 
106 #include "internal.h"
107 
108 /* Internal flags */
109 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
110 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111 
112 static struct kmem_cache *policy_cache;
113 static struct kmem_cache *sn_cache;
114 
115 /* Highest zone. An specific allocation for a zone below that is not
116    policied. */
117 enum zone_type policy_zone = 0;
118 
119 /*
120  * run-time system-wide default policy => local allocation
121  */
122 static struct mempolicy default_policy = {
123 	.refcnt = ATOMIC_INIT(1), /* never free it */
124 	.mode = MPOL_PREFERRED,
125 	.flags = MPOL_F_LOCAL,
126 };
127 
128 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
129 
get_task_policy(struct task_struct * p)130 struct mempolicy *get_task_policy(struct task_struct *p)
131 {
132 	struct mempolicy *pol = p->mempolicy;
133 	int node;
134 
135 	if (pol)
136 		return pol;
137 
138 	node = numa_node_id();
139 	if (node != NUMA_NO_NODE) {
140 		pol = &preferred_node_policy[node];
141 		/* preferred_node_policy is not initialised early in boot */
142 		if (pol->mode)
143 			return pol;
144 	}
145 
146 	return &default_policy;
147 }
148 
149 static const struct mempolicy_operations {
150 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
152 } mpol_ops[MPOL_MAX];
153 
mpol_store_user_nodemask(const struct mempolicy * pol)154 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155 {
156 	return pol->flags & MPOL_MODE_FLAGS;
157 }
158 
mpol_relative_nodemask(nodemask_t * ret,const nodemask_t * orig,const nodemask_t * rel)159 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
160 				   const nodemask_t *rel)
161 {
162 	nodemask_t tmp;
163 	nodes_fold(tmp, *orig, nodes_weight(*rel));
164 	nodes_onto(*ret, tmp, *rel);
165 }
166 
mpol_new_interleave(struct mempolicy * pol,const nodemask_t * nodes)167 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
168 {
169 	if (nodes_empty(*nodes))
170 		return -EINVAL;
171 	pol->v.nodes = *nodes;
172 	return 0;
173 }
174 
mpol_new_preferred(struct mempolicy * pol,const nodemask_t * nodes)175 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
176 {
177 	if (!nodes)
178 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
179 	else if (nodes_empty(*nodes))
180 		return -EINVAL;			/*  no allowed nodes */
181 	else
182 		pol->v.preferred_node = first_node(*nodes);
183 	return 0;
184 }
185 
mpol_new_bind(struct mempolicy * pol,const nodemask_t * nodes)186 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
187 {
188 	if (nodes_empty(*nodes))
189 		return -EINVAL;
190 	pol->v.nodes = *nodes;
191 	return 0;
192 }
193 
194 /*
195  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
196  * any, for the new policy.  mpol_new() has already validated the nodes
197  * parameter with respect to the policy mode and flags.  But, we need to
198  * handle an empty nodemask with MPOL_PREFERRED here.
199  *
200  * Must be called holding task's alloc_lock to protect task's mems_allowed
201  * and mempolicy.  May also be called holding the mmap_semaphore for write.
202  */
mpol_set_nodemask(struct mempolicy * pol,const nodemask_t * nodes,struct nodemask_scratch * nsc)203 static int mpol_set_nodemask(struct mempolicy *pol,
204 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
205 {
206 	int ret;
207 
208 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
209 	if (pol == NULL)
210 		return 0;
211 	/* Check N_MEMORY */
212 	nodes_and(nsc->mask1,
213 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
214 
215 	VM_BUG_ON(!nodes);
216 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
217 		nodes = NULL;	/* explicit local allocation */
218 	else {
219 		if (pol->flags & MPOL_F_RELATIVE_NODES)
220 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
221 		else
222 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
223 
224 		if (mpol_store_user_nodemask(pol))
225 			pol->w.user_nodemask = *nodes;
226 		else
227 			pol->w.cpuset_mems_allowed =
228 						cpuset_current_mems_allowed;
229 	}
230 
231 	if (nodes)
232 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
233 	else
234 		ret = mpol_ops[pol->mode].create(pol, NULL);
235 	return ret;
236 }
237 
238 /*
239  * This function just creates a new policy, does some check and simple
240  * initialization. You must invoke mpol_set_nodemask() to set nodes.
241  */
mpol_new(unsigned short mode,unsigned short flags,nodemask_t * nodes)242 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243 				  nodemask_t *nodes)
244 {
245 	struct mempolicy *policy;
246 
247 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
248 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
249 
250 	if (mode == MPOL_DEFAULT) {
251 		if (nodes && !nodes_empty(*nodes))
252 			return ERR_PTR(-EINVAL);
253 		return NULL;
254 	}
255 	VM_BUG_ON(!nodes);
256 
257 	/*
258 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
259 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
260 	 * All other modes require a valid pointer to a non-empty nodemask.
261 	 */
262 	if (mode == MPOL_PREFERRED) {
263 		if (nodes_empty(*nodes)) {
264 			if (((flags & MPOL_F_STATIC_NODES) ||
265 			     (flags & MPOL_F_RELATIVE_NODES)))
266 				return ERR_PTR(-EINVAL);
267 		}
268 	} else if (mode == MPOL_LOCAL) {
269 		if (!nodes_empty(*nodes) ||
270 		    (flags & MPOL_F_STATIC_NODES) ||
271 		    (flags & MPOL_F_RELATIVE_NODES))
272 			return ERR_PTR(-EINVAL);
273 		mode = MPOL_PREFERRED;
274 	} else if (nodes_empty(*nodes))
275 		return ERR_PTR(-EINVAL);
276 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
277 	if (!policy)
278 		return ERR_PTR(-ENOMEM);
279 	atomic_set(&policy->refcnt, 1);
280 	policy->mode = mode;
281 	policy->flags = flags;
282 
283 	return policy;
284 }
285 
286 /* Slow path of a mpol destructor. */
__mpol_put(struct mempolicy * p)287 void __mpol_put(struct mempolicy *p)
288 {
289 	if (!atomic_dec_and_test(&p->refcnt))
290 		return;
291 	kmem_cache_free(policy_cache, p);
292 }
293 
mpol_rebind_default(struct mempolicy * pol,const nodemask_t * nodes)294 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
295 {
296 }
297 
mpol_rebind_nodemask(struct mempolicy * pol,const nodemask_t * nodes)298 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
299 {
300 	nodemask_t tmp;
301 
302 	if (pol->flags & MPOL_F_STATIC_NODES)
303 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
304 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
305 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
306 	else {
307 		nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
308 								*nodes);
309 		pol->w.cpuset_mems_allowed = *nodes;
310 	}
311 
312 	if (nodes_empty(tmp))
313 		tmp = *nodes;
314 
315 	pol->v.nodes = tmp;
316 }
317 
mpol_rebind_preferred(struct mempolicy * pol,const nodemask_t * nodes)318 static void mpol_rebind_preferred(struct mempolicy *pol,
319 						const nodemask_t *nodes)
320 {
321 	nodemask_t tmp;
322 
323 	if (pol->flags & MPOL_F_STATIC_NODES) {
324 		int node = first_node(pol->w.user_nodemask);
325 
326 		if (node_isset(node, *nodes)) {
327 			pol->v.preferred_node = node;
328 			pol->flags &= ~MPOL_F_LOCAL;
329 		} else
330 			pol->flags |= MPOL_F_LOCAL;
331 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
332 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
333 		pol->v.preferred_node = first_node(tmp);
334 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
335 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
336 						   pol->w.cpuset_mems_allowed,
337 						   *nodes);
338 		pol->w.cpuset_mems_allowed = *nodes;
339 	}
340 }
341 
342 /*
343  * mpol_rebind_policy - Migrate a policy to a different set of nodes
344  *
345  * Per-vma policies are protected by mmap_sem. Allocations using per-task
346  * policies are protected by task->mems_allowed_seq to prevent a premature
347  * OOM/allocation failure due to parallel nodemask modification.
348  */
mpol_rebind_policy(struct mempolicy * pol,const nodemask_t * newmask)349 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
350 {
351 	if (!pol)
352 		return;
353 	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
354 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
355 		return;
356 
357 	mpol_ops[pol->mode].rebind(pol, newmask);
358 }
359 
360 /*
361  * Wrapper for mpol_rebind_policy() that just requires task
362  * pointer, and updates task mempolicy.
363  *
364  * Called with task's alloc_lock held.
365  */
366 
mpol_rebind_task(struct task_struct * tsk,const nodemask_t * new)367 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
368 {
369 	mpol_rebind_policy(tsk->mempolicy, new);
370 }
371 
372 /*
373  * Rebind each vma in mm to new nodemask.
374  *
375  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
376  */
377 
mpol_rebind_mm(struct mm_struct * mm,nodemask_t * new)378 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
379 {
380 	struct vm_area_struct *vma;
381 
382 	down_write(&mm->mmap_sem);
383 	for (vma = mm->mmap; vma; vma = vma->vm_next)
384 		mpol_rebind_policy(vma->vm_policy, new);
385 	up_write(&mm->mmap_sem);
386 }
387 
388 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
389 	[MPOL_DEFAULT] = {
390 		.rebind = mpol_rebind_default,
391 	},
392 	[MPOL_INTERLEAVE] = {
393 		.create = mpol_new_interleave,
394 		.rebind = mpol_rebind_nodemask,
395 	},
396 	[MPOL_PREFERRED] = {
397 		.create = mpol_new_preferred,
398 		.rebind = mpol_rebind_preferred,
399 	},
400 	[MPOL_BIND] = {
401 		.create = mpol_new_bind,
402 		.rebind = mpol_rebind_nodemask,
403 	},
404 };
405 
406 static int migrate_page_add(struct page *page, struct list_head *pagelist,
407 				unsigned long flags);
408 
409 struct queue_pages {
410 	struct list_head *pagelist;
411 	unsigned long flags;
412 	nodemask_t *nmask;
413 	struct vm_area_struct *prev;
414 };
415 
416 /*
417  * Check if the page's nid is in qp->nmask.
418  *
419  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
420  * in the invert of qp->nmask.
421  */
queue_pages_required(struct page * page,struct queue_pages * qp)422 static inline bool queue_pages_required(struct page *page,
423 					struct queue_pages *qp)
424 {
425 	int nid = page_to_nid(page);
426 	unsigned long flags = qp->flags;
427 
428 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
429 }
430 
431 /*
432  * queue_pages_pmd() has four possible return values:
433  * 0 - pages are placed on the right node or queued successfully.
434  * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
435  *     specified.
436  * 2 - THP was split.
437  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
438  *        existing page was already on a node that does not follow the
439  *        policy.
440  */
queue_pages_pmd(pmd_t * pmd,spinlock_t * ptl,unsigned long addr,unsigned long end,struct mm_walk * walk)441 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
442 				unsigned long end, struct mm_walk *walk)
443 {
444 	int ret = 0;
445 	struct page *page;
446 	struct queue_pages *qp = walk->private;
447 	unsigned long flags;
448 
449 	if (unlikely(is_pmd_migration_entry(*pmd))) {
450 		ret = -EIO;
451 		goto unlock;
452 	}
453 	page = pmd_page(*pmd);
454 	if (is_huge_zero_page(page)) {
455 		spin_unlock(ptl);
456 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
457 		ret = 2;
458 		goto out;
459 	}
460 	if (!queue_pages_required(page, qp))
461 		goto unlock;
462 
463 	flags = qp->flags;
464 	/* go to thp migration */
465 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
466 		if (!vma_migratable(walk->vma) ||
467 		    migrate_page_add(page, qp->pagelist, flags)) {
468 			ret = 1;
469 			goto unlock;
470 		}
471 	} else
472 		ret = -EIO;
473 unlock:
474 	spin_unlock(ptl);
475 out:
476 	return ret;
477 }
478 
479 /*
480  * Scan through pages checking if pages follow certain conditions,
481  * and move them to the pagelist if they do.
482  *
483  * queue_pages_pte_range() has three possible return values:
484  * 0 - pages are placed on the right node or queued successfully.
485  * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
486  *     specified.
487  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
488  *        on a node that does not follow the policy.
489  */
queue_pages_pte_range(pmd_t * pmd,unsigned long addr,unsigned long end,struct mm_walk * walk)490 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
491 			unsigned long end, struct mm_walk *walk)
492 {
493 	struct vm_area_struct *vma = walk->vma;
494 	struct page *page;
495 	struct queue_pages *qp = walk->private;
496 	unsigned long flags = qp->flags;
497 	int ret;
498 	bool has_unmovable = false;
499 	pte_t *pte;
500 	spinlock_t *ptl;
501 
502 	ptl = pmd_trans_huge_lock(pmd, vma);
503 	if (ptl) {
504 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
505 		if (ret != 2)
506 			return ret;
507 	}
508 	/* THP was split, fall through to pte walk */
509 
510 	if (pmd_trans_unstable(pmd))
511 		return 0;
512 
513 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
514 	for (; addr != end; pte++, addr += PAGE_SIZE) {
515 		if (!pte_present(*pte))
516 			continue;
517 		page = vm_normal_page(vma, addr, *pte);
518 		if (!page)
519 			continue;
520 		/*
521 		 * vm_normal_page() filters out zero pages, but there might
522 		 * still be PageReserved pages to skip, perhaps in a VDSO.
523 		 */
524 		if (PageReserved(page))
525 			continue;
526 		if (!queue_pages_required(page, qp))
527 			continue;
528 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
529 			/* MPOL_MF_STRICT must be specified if we get here */
530 			if (!vma_migratable(vma)) {
531 				has_unmovable = true;
532 				break;
533 			}
534 
535 			/*
536 			 * Do not abort immediately since there may be
537 			 * temporary off LRU pages in the range.  Still
538 			 * need migrate other LRU pages.
539 			 */
540 			if (migrate_page_add(page, qp->pagelist, flags))
541 				has_unmovable = true;
542 		} else
543 			break;
544 	}
545 	pte_unmap_unlock(pte - 1, ptl);
546 	cond_resched();
547 
548 	if (has_unmovable)
549 		return 1;
550 
551 	return addr != end ? -EIO : 0;
552 }
553 
queue_pages_hugetlb(pte_t * pte,unsigned long hmask,unsigned long addr,unsigned long end,struct mm_walk * walk)554 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
555 			       unsigned long addr, unsigned long end,
556 			       struct mm_walk *walk)
557 {
558 #ifdef CONFIG_HUGETLB_PAGE
559 	struct queue_pages *qp = walk->private;
560 	unsigned long flags = qp->flags;
561 	struct page *page;
562 	spinlock_t *ptl;
563 	pte_t entry;
564 
565 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
566 	entry = huge_ptep_get(pte);
567 	if (!pte_present(entry))
568 		goto unlock;
569 	page = pte_page(entry);
570 	if (!queue_pages_required(page, qp))
571 		goto unlock;
572 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
573 	if (flags & (MPOL_MF_MOVE_ALL) ||
574 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
575 		isolate_huge_page(page, qp->pagelist);
576 unlock:
577 	spin_unlock(ptl);
578 #else
579 	BUG();
580 #endif
581 	return 0;
582 }
583 
584 #ifdef CONFIG_NUMA_BALANCING
585 /*
586  * This is used to mark a range of virtual addresses to be inaccessible.
587  * These are later cleared by a NUMA hinting fault. Depending on these
588  * faults, pages may be migrated for better NUMA placement.
589  *
590  * This is assuming that NUMA faults are handled using PROT_NONE. If
591  * an architecture makes a different choice, it will need further
592  * changes to the core.
593  */
change_prot_numa(struct vm_area_struct * vma,unsigned long addr,unsigned long end)594 unsigned long change_prot_numa(struct vm_area_struct *vma,
595 			unsigned long addr, unsigned long end)
596 {
597 	int nr_updated;
598 
599 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
600 	if (nr_updated)
601 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
602 
603 	return nr_updated;
604 }
605 #else
change_prot_numa(struct vm_area_struct * vma,unsigned long addr,unsigned long end)606 static unsigned long change_prot_numa(struct vm_area_struct *vma,
607 			unsigned long addr, unsigned long end)
608 {
609 	return 0;
610 }
611 #endif /* CONFIG_NUMA_BALANCING */
612 
queue_pages_test_walk(unsigned long start,unsigned long end,struct mm_walk * walk)613 static int queue_pages_test_walk(unsigned long start, unsigned long end,
614 				struct mm_walk *walk)
615 {
616 	struct vm_area_struct *vma = walk->vma;
617 	struct queue_pages *qp = walk->private;
618 	unsigned long endvma = vma->vm_end;
619 	unsigned long flags = qp->flags;
620 
621 	/*
622 	 * Need check MPOL_MF_STRICT to return -EIO if possible
623 	 * regardless of vma_migratable
624 	 */
625 	if (!vma_migratable(vma) &&
626 	    !(flags & MPOL_MF_STRICT))
627 		return 1;
628 
629 	if (endvma > end)
630 		endvma = end;
631 	if (vma->vm_start > start)
632 		start = vma->vm_start;
633 
634 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
635 		if (!vma->vm_next && vma->vm_end < end)
636 			return -EFAULT;
637 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
638 			return -EFAULT;
639 	}
640 
641 	qp->prev = vma;
642 
643 	if (flags & MPOL_MF_LAZY) {
644 		/* Similar to task_numa_work, skip inaccessible VMAs */
645 		if (!is_vm_hugetlb_page(vma) &&
646 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
647 			!(vma->vm_flags & VM_MIXEDMAP))
648 			change_prot_numa(vma, start, endvma);
649 		return 1;
650 	}
651 
652 	/* queue pages from current vma */
653 	if (flags & MPOL_MF_VALID)
654 		return 0;
655 	return 1;
656 }
657 
658 static const struct mm_walk_ops queue_pages_walk_ops = {
659 	.hugetlb_entry		= queue_pages_hugetlb,
660 	.pmd_entry		= queue_pages_pte_range,
661 	.test_walk		= queue_pages_test_walk,
662 };
663 
664 /*
665  * Walk through page tables and collect pages to be migrated.
666  *
667  * If pages found in a given range are on a set of nodes (determined by
668  * @nodes and @flags,) it's isolated and queued to the pagelist which is
669  * passed via @private.
670  *
671  * queue_pages_range() has three possible return values:
672  * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
673  *     specified.
674  * 0 - queue pages successfully or no misplaced page.
675  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
676  *         memory range specified by nodemask and maxnode points outside
677  *         your accessible address space (-EFAULT)
678  */
679 static int
queue_pages_range(struct mm_struct * mm,unsigned long start,unsigned long end,nodemask_t * nodes,unsigned long flags,struct list_head * pagelist)680 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
681 		nodemask_t *nodes, unsigned long flags,
682 		struct list_head *pagelist)
683 {
684 	struct queue_pages qp = {
685 		.pagelist = pagelist,
686 		.flags = flags,
687 		.nmask = nodes,
688 		.prev = NULL,
689 	};
690 
691 	return walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
692 }
693 
694 /*
695  * Apply policy to a single VMA
696  * This must be called with the mmap_sem held for writing.
697  */
vma_replace_policy(struct vm_area_struct * vma,struct mempolicy * pol)698 static int vma_replace_policy(struct vm_area_struct *vma,
699 						struct mempolicy *pol)
700 {
701 	int err;
702 	struct mempolicy *old;
703 	struct mempolicy *new;
704 
705 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
706 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
707 		 vma->vm_ops, vma->vm_file,
708 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
709 
710 	new = mpol_dup(pol);
711 	if (IS_ERR(new))
712 		return PTR_ERR(new);
713 
714 	if (vma->vm_ops && vma->vm_ops->set_policy) {
715 		err = vma->vm_ops->set_policy(vma, new);
716 		if (err)
717 			goto err_out;
718 	}
719 
720 	old = vma->vm_policy;
721 	vma->vm_policy = new; /* protected by mmap_sem */
722 	mpol_put(old);
723 
724 	return 0;
725  err_out:
726 	mpol_put(new);
727 	return err;
728 }
729 
730 /* Step 2: apply policy to a range and do splits. */
mbind_range(struct mm_struct * mm,unsigned long start,unsigned long end,struct mempolicy * new_pol)731 static int mbind_range(struct mm_struct *mm, unsigned long start,
732 		       unsigned long end, struct mempolicy *new_pol)
733 {
734 	struct vm_area_struct *next;
735 	struct vm_area_struct *prev;
736 	struct vm_area_struct *vma;
737 	int err = 0;
738 	pgoff_t pgoff;
739 	unsigned long vmstart;
740 	unsigned long vmend;
741 
742 	vma = find_vma(mm, start);
743 	if (!vma || vma->vm_start > start)
744 		return -EFAULT;
745 
746 	prev = vma->vm_prev;
747 	if (start > vma->vm_start)
748 		prev = vma;
749 
750 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
751 		next = vma->vm_next;
752 		vmstart = max(start, vma->vm_start);
753 		vmend   = min(end, vma->vm_end);
754 
755 		if (mpol_equal(vma_policy(vma), new_pol))
756 			continue;
757 
758 		pgoff = vma->vm_pgoff +
759 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
760 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
761 				 vma->anon_vma, vma->vm_file, pgoff,
762 				 new_pol, vma->vm_userfaultfd_ctx,
763 				 vma_get_anon_name(vma));
764 		if (prev) {
765 			vma = prev;
766 			next = vma->vm_next;
767 			if (mpol_equal(vma_policy(vma), new_pol))
768 				continue;
769 			/* vma_merge() joined vma && vma->next, case 8 */
770 			goto replace;
771 		}
772 		if (vma->vm_start != vmstart) {
773 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
774 			if (err)
775 				goto out;
776 		}
777 		if (vma->vm_end != vmend) {
778 			err = split_vma(vma->vm_mm, vma, vmend, 0);
779 			if (err)
780 				goto out;
781 		}
782  replace:
783 		err = vma_replace_policy(vma, new_pol);
784 		if (err)
785 			goto out;
786 	}
787 
788  out:
789 	return err;
790 }
791 
792 /* Set the process memory policy */
do_set_mempolicy(unsigned short mode,unsigned short flags,nodemask_t * nodes)793 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
794 			     nodemask_t *nodes)
795 {
796 	struct mempolicy *new, *old;
797 	NODEMASK_SCRATCH(scratch);
798 	int ret;
799 
800 	if (!scratch)
801 		return -ENOMEM;
802 
803 	new = mpol_new(mode, flags, nodes);
804 	if (IS_ERR(new)) {
805 		ret = PTR_ERR(new);
806 		goto out;
807 	}
808 
809 	task_lock(current);
810 	ret = mpol_set_nodemask(new, nodes, scratch);
811 	if (ret) {
812 		task_unlock(current);
813 		mpol_put(new);
814 		goto out;
815 	}
816 	old = current->mempolicy;
817 	current->mempolicy = new;
818 	if (new && new->mode == MPOL_INTERLEAVE)
819 		current->il_prev = MAX_NUMNODES-1;
820 	task_unlock(current);
821 	mpol_put(old);
822 	ret = 0;
823 out:
824 	NODEMASK_SCRATCH_FREE(scratch);
825 	return ret;
826 }
827 
828 /*
829  * Return nodemask for policy for get_mempolicy() query
830  *
831  * Called with task's alloc_lock held
832  */
get_policy_nodemask(struct mempolicy * p,nodemask_t * nodes)833 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
834 {
835 	nodes_clear(*nodes);
836 	if (p == &default_policy)
837 		return;
838 
839 	switch (p->mode) {
840 	case MPOL_BIND:
841 		/* Fall through */
842 	case MPOL_INTERLEAVE:
843 		*nodes = p->v.nodes;
844 		break;
845 	case MPOL_PREFERRED:
846 		if (!(p->flags & MPOL_F_LOCAL))
847 			node_set(p->v.preferred_node, *nodes);
848 		/* else return empty node mask for local allocation */
849 		break;
850 	default:
851 		BUG();
852 	}
853 }
854 
lookup_node(struct mm_struct * mm,unsigned long addr)855 static int lookup_node(struct mm_struct *mm, unsigned long addr)
856 {
857 	struct page *p;
858 	int err;
859 
860 	int locked = 1;
861 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
862 	if (err >= 0) {
863 		err = page_to_nid(p);
864 		put_page(p);
865 	}
866 	if (locked)
867 		up_read(&mm->mmap_sem);
868 	return err;
869 }
870 
871 /* Retrieve NUMA policy */
do_get_mempolicy(int * policy,nodemask_t * nmask,unsigned long addr,unsigned long flags)872 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
873 			     unsigned long addr, unsigned long flags)
874 {
875 	int err;
876 	struct mm_struct *mm = current->mm;
877 	struct vm_area_struct *vma = NULL;
878 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
879 
880 	if (flags &
881 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
882 		return -EINVAL;
883 
884 	if (flags & MPOL_F_MEMS_ALLOWED) {
885 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
886 			return -EINVAL;
887 		*policy = 0;	/* just so it's initialized */
888 		task_lock(current);
889 		*nmask  = cpuset_current_mems_allowed;
890 		task_unlock(current);
891 		return 0;
892 	}
893 
894 	if (flags & MPOL_F_ADDR) {
895 		/*
896 		 * Do NOT fall back to task policy if the
897 		 * vma/shared policy at addr is NULL.  We
898 		 * want to return MPOL_DEFAULT in this case.
899 		 */
900 		down_read(&mm->mmap_sem);
901 		vma = find_vma_intersection(mm, addr, addr+1);
902 		if (!vma) {
903 			up_read(&mm->mmap_sem);
904 			return -EFAULT;
905 		}
906 		if (vma->vm_ops && vma->vm_ops->get_policy)
907 			pol = vma->vm_ops->get_policy(vma, addr);
908 		else
909 			pol = vma->vm_policy;
910 	} else if (addr)
911 		return -EINVAL;
912 
913 	if (!pol)
914 		pol = &default_policy;	/* indicates default behavior */
915 
916 	if (flags & MPOL_F_NODE) {
917 		if (flags & MPOL_F_ADDR) {
918 			/*
919 			 * Take a refcount on the mpol, lookup_node()
920 			 * wil drop the mmap_sem, so after calling
921 			 * lookup_node() only "pol" remains valid, "vma"
922 			 * is stale.
923 			 */
924 			pol_refcount = pol;
925 			vma = NULL;
926 			mpol_get(pol);
927 			err = lookup_node(mm, addr);
928 			if (err < 0)
929 				goto out;
930 			*policy = err;
931 		} else if (pol == current->mempolicy &&
932 				pol->mode == MPOL_INTERLEAVE) {
933 			*policy = next_node_in(current->il_prev, pol->v.nodes);
934 		} else {
935 			err = -EINVAL;
936 			goto out;
937 		}
938 	} else {
939 		*policy = pol == &default_policy ? MPOL_DEFAULT :
940 						pol->mode;
941 		/*
942 		 * Internal mempolicy flags must be masked off before exposing
943 		 * the policy to userspace.
944 		 */
945 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
946 	}
947 
948 	err = 0;
949 	if (nmask) {
950 		if (mpol_store_user_nodemask(pol)) {
951 			*nmask = pol->w.user_nodemask;
952 		} else {
953 			task_lock(current);
954 			get_policy_nodemask(pol, nmask);
955 			task_unlock(current);
956 		}
957 	}
958 
959  out:
960 	mpol_cond_put(pol);
961 	if (vma)
962 		up_read(&mm->mmap_sem);
963 	if (pol_refcount)
964 		mpol_put(pol_refcount);
965 	return err;
966 }
967 
968 #ifdef CONFIG_MIGRATION
969 /*
970  * page migration, thp tail pages can be passed.
971  */
migrate_page_add(struct page * page,struct list_head * pagelist,unsigned long flags)972 static int migrate_page_add(struct page *page, struct list_head *pagelist,
973 				unsigned long flags)
974 {
975 	struct page *head = compound_head(page);
976 	/*
977 	 * Avoid migrating a page that is shared with others.
978 	 */
979 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
980 		if (!isolate_lru_page(head)) {
981 			list_add_tail(&head->lru, pagelist);
982 			mod_node_page_state(page_pgdat(head),
983 				NR_ISOLATED_ANON + page_is_file_cache(head),
984 				hpage_nr_pages(head));
985 		} else if (flags & MPOL_MF_STRICT) {
986 			/*
987 			 * Non-movable page may reach here.  And, there may be
988 			 * temporary off LRU pages or non-LRU movable pages.
989 			 * Treat them as unmovable pages since they can't be
990 			 * isolated, so they can't be moved at the moment.  It
991 			 * should return -EIO for this case too.
992 			 */
993 			return -EIO;
994 		}
995 	}
996 
997 	return 0;
998 }
999 
1000 /* page allocation callback for NUMA node migration */
alloc_new_node_page(struct page * page,unsigned long node)1001 struct page *alloc_new_node_page(struct page *page, unsigned long node)
1002 {
1003 	if (PageHuge(page))
1004 		return alloc_huge_page_node(page_hstate(compound_head(page)),
1005 					node);
1006 	else if (PageTransHuge(page)) {
1007 		struct page *thp;
1008 
1009 		thp = alloc_pages_node(node,
1010 			(GFP_TRANSHUGE | __GFP_THISNODE),
1011 			HPAGE_PMD_ORDER);
1012 		if (!thp)
1013 			return NULL;
1014 		prep_transhuge_page(thp);
1015 		return thp;
1016 	} else
1017 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
1018 						    __GFP_THISNODE, 0);
1019 }
1020 
1021 /*
1022  * Migrate pages from one node to a target node.
1023  * Returns error or the number of pages not migrated.
1024  */
migrate_to_node(struct mm_struct * mm,int source,int dest,int flags)1025 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1026 			   int flags)
1027 {
1028 	nodemask_t nmask;
1029 	LIST_HEAD(pagelist);
1030 	int err = 0;
1031 
1032 	nodes_clear(nmask);
1033 	node_set(source, nmask);
1034 
1035 	/*
1036 	 * This does not "check" the range but isolates all pages that
1037 	 * need migration.  Between passing in the full user address
1038 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1039 	 */
1040 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1041 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1042 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1043 
1044 	if (!list_empty(&pagelist)) {
1045 		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
1046 					MIGRATE_SYNC, MR_SYSCALL);
1047 		if (err)
1048 			putback_movable_pages(&pagelist);
1049 	}
1050 
1051 	return err;
1052 }
1053 
1054 /*
1055  * Move pages between the two nodesets so as to preserve the physical
1056  * layout as much as possible.
1057  *
1058  * Returns the number of page that could not be moved.
1059  */
do_migrate_pages(struct mm_struct * mm,const nodemask_t * from,const nodemask_t * to,int flags)1060 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1061 		     const nodemask_t *to, int flags)
1062 {
1063 	int busy = 0;
1064 	int err;
1065 	nodemask_t tmp;
1066 
1067 	err = migrate_prep();
1068 	if (err)
1069 		return err;
1070 
1071 	down_read(&mm->mmap_sem);
1072 
1073 	/*
1074 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1075 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1076 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1077 	 * The pair of nodemasks 'to' and 'from' define the map.
1078 	 *
1079 	 * If no pair of bits is found that way, fallback to picking some
1080 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1081 	 * 'source' and 'dest' bits are the same, this represents a node
1082 	 * that will be migrating to itself, so no pages need move.
1083 	 *
1084 	 * If no bits are left in 'tmp', or if all remaining bits left
1085 	 * in 'tmp' correspond to the same bit in 'to', return false
1086 	 * (nothing left to migrate).
1087 	 *
1088 	 * This lets us pick a pair of nodes to migrate between, such that
1089 	 * if possible the dest node is not already occupied by some other
1090 	 * source node, minimizing the risk of overloading the memory on a
1091 	 * node that would happen if we migrated incoming memory to a node
1092 	 * before migrating outgoing memory source that same node.
1093 	 *
1094 	 * A single scan of tmp is sufficient.  As we go, we remember the
1095 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1096 	 * that not only moved, but what's better, moved to an empty slot
1097 	 * (d is not set in tmp), then we break out then, with that pair.
1098 	 * Otherwise when we finish scanning from_tmp, we at least have the
1099 	 * most recent <s, d> pair that moved.  If we get all the way through
1100 	 * the scan of tmp without finding any node that moved, much less
1101 	 * moved to an empty node, then there is nothing left worth migrating.
1102 	 */
1103 
1104 	tmp = *from;
1105 	while (!nodes_empty(tmp)) {
1106 		int s,d;
1107 		int source = NUMA_NO_NODE;
1108 		int dest = 0;
1109 
1110 		for_each_node_mask(s, tmp) {
1111 
1112 			/*
1113 			 * do_migrate_pages() tries to maintain the relative
1114 			 * node relationship of the pages established between
1115 			 * threads and memory areas.
1116                          *
1117 			 * However if the number of source nodes is not equal to
1118 			 * the number of destination nodes we can not preserve
1119 			 * this node relative relationship.  In that case, skip
1120 			 * copying memory from a node that is in the destination
1121 			 * mask.
1122 			 *
1123 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1124 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1125 			 */
1126 
1127 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1128 						(node_isset(s, *to)))
1129 				continue;
1130 
1131 			d = node_remap(s, *from, *to);
1132 			if (s == d)
1133 				continue;
1134 
1135 			source = s;	/* Node moved. Memorize */
1136 			dest = d;
1137 
1138 			/* dest not in remaining from nodes? */
1139 			if (!node_isset(dest, tmp))
1140 				break;
1141 		}
1142 		if (source == NUMA_NO_NODE)
1143 			break;
1144 
1145 		node_clear(source, tmp);
1146 		err = migrate_to_node(mm, source, dest, flags);
1147 		if (err > 0)
1148 			busy += err;
1149 		if (err < 0)
1150 			break;
1151 	}
1152 	up_read(&mm->mmap_sem);
1153 	if (err < 0)
1154 		return err;
1155 	return busy;
1156 
1157 }
1158 
1159 /*
1160  * Allocate a new page for page migration based on vma policy.
1161  * Start by assuming the page is mapped by the same vma as contains @start.
1162  * Search forward from there, if not.  N.B., this assumes that the
1163  * list of pages handed to migrate_pages()--which is how we get here--
1164  * is in virtual address order.
1165  */
new_page(struct page * page,unsigned long start)1166 static struct page *new_page(struct page *page, unsigned long start)
1167 {
1168 	struct vm_area_struct *vma;
1169 	unsigned long uninitialized_var(address);
1170 
1171 	vma = find_vma(current->mm, start);
1172 	while (vma) {
1173 		address = page_address_in_vma(page, vma);
1174 		if (address != -EFAULT)
1175 			break;
1176 		vma = vma->vm_next;
1177 	}
1178 
1179 	if (PageHuge(page)) {
1180 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1181 				vma, address);
1182 	} else if (PageTransHuge(page)) {
1183 		struct page *thp;
1184 
1185 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1186 					 HPAGE_PMD_ORDER);
1187 		if (!thp)
1188 			return NULL;
1189 		prep_transhuge_page(thp);
1190 		return thp;
1191 	}
1192 	/*
1193 	 * if !vma, alloc_page_vma() will use task or system default policy
1194 	 */
1195 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1196 			vma, address);
1197 }
1198 #else
1199 
migrate_page_add(struct page * page,struct list_head * pagelist,unsigned long flags)1200 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1201 				unsigned long flags)
1202 {
1203 	return -EIO;
1204 }
1205 
do_migrate_pages(struct mm_struct * mm,const nodemask_t * from,const nodemask_t * to,int flags)1206 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1207 		     const nodemask_t *to, int flags)
1208 {
1209 	return -ENOSYS;
1210 }
1211 
new_page(struct page * page,unsigned long start)1212 static struct page *new_page(struct page *page, unsigned long start)
1213 {
1214 	return NULL;
1215 }
1216 #endif
1217 
do_mbind(unsigned long start,unsigned long len,unsigned short mode,unsigned short mode_flags,nodemask_t * nmask,unsigned long flags)1218 static long do_mbind(unsigned long start, unsigned long len,
1219 		     unsigned short mode, unsigned short mode_flags,
1220 		     nodemask_t *nmask, unsigned long flags)
1221 {
1222 	struct mm_struct *mm = current->mm;
1223 	struct mempolicy *new;
1224 	unsigned long end;
1225 	int err;
1226 	int ret;
1227 	LIST_HEAD(pagelist);
1228 
1229 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1230 		return -EINVAL;
1231 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1232 		return -EPERM;
1233 
1234 	if (start & ~PAGE_MASK)
1235 		return -EINVAL;
1236 
1237 	if (mode == MPOL_DEFAULT)
1238 		flags &= ~MPOL_MF_STRICT;
1239 
1240 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1241 	end = start + len;
1242 
1243 	if (end < start)
1244 		return -EINVAL;
1245 	if (end == start)
1246 		return 0;
1247 
1248 	new = mpol_new(mode, mode_flags, nmask);
1249 	if (IS_ERR(new))
1250 		return PTR_ERR(new);
1251 
1252 	if (flags & MPOL_MF_LAZY)
1253 		new->flags |= MPOL_F_MOF;
1254 
1255 	/*
1256 	 * If we are using the default policy then operation
1257 	 * on discontinuous address spaces is okay after all
1258 	 */
1259 	if (!new)
1260 		flags |= MPOL_MF_DISCONTIG_OK;
1261 
1262 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1263 		 start, start + len, mode, mode_flags,
1264 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1265 
1266 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1267 
1268 		err = migrate_prep();
1269 		if (err)
1270 			goto mpol_out;
1271 	}
1272 	{
1273 		NODEMASK_SCRATCH(scratch);
1274 		if (scratch) {
1275 			down_write(&mm->mmap_sem);
1276 			task_lock(current);
1277 			err = mpol_set_nodemask(new, nmask, scratch);
1278 			task_unlock(current);
1279 			if (err)
1280 				up_write(&mm->mmap_sem);
1281 		} else
1282 			err = -ENOMEM;
1283 		NODEMASK_SCRATCH_FREE(scratch);
1284 	}
1285 	if (err)
1286 		goto mpol_out;
1287 
1288 	ret = queue_pages_range(mm, start, end, nmask,
1289 			  flags | MPOL_MF_INVERT, &pagelist);
1290 
1291 	if (ret < 0) {
1292 		err = ret;
1293 		goto up_out;
1294 	}
1295 
1296 	err = mbind_range(mm, start, end, new);
1297 
1298 	if (!err) {
1299 		int nr_failed = 0;
1300 
1301 		if (!list_empty(&pagelist)) {
1302 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1303 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1304 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1305 			if (nr_failed)
1306 				putback_movable_pages(&pagelist);
1307 		}
1308 
1309 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1310 			err = -EIO;
1311 	} else {
1312 up_out:
1313 		if (!list_empty(&pagelist))
1314 			putback_movable_pages(&pagelist);
1315 	}
1316 
1317 	up_write(&mm->mmap_sem);
1318 mpol_out:
1319 	mpol_put(new);
1320 	return err;
1321 }
1322 
1323 /*
1324  * User space interface with variable sized bitmaps for nodelists.
1325  */
1326 
1327 /* Copy a node mask from user space. */
get_nodes(nodemask_t * nodes,const unsigned long __user * nmask,unsigned long maxnode)1328 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1329 		     unsigned long maxnode)
1330 {
1331 	unsigned long k;
1332 	unsigned long t;
1333 	unsigned long nlongs;
1334 	unsigned long endmask;
1335 
1336 	--maxnode;
1337 	nodes_clear(*nodes);
1338 	if (maxnode == 0 || !nmask)
1339 		return 0;
1340 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1341 		return -EINVAL;
1342 
1343 	nlongs = BITS_TO_LONGS(maxnode);
1344 	if ((maxnode % BITS_PER_LONG) == 0)
1345 		endmask = ~0UL;
1346 	else
1347 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1348 
1349 	/*
1350 	 * When the user specified more nodes than supported just check
1351 	 * if the non supported part is all zero.
1352 	 *
1353 	 * If maxnode have more longs than MAX_NUMNODES, check
1354 	 * the bits in that area first. And then go through to
1355 	 * check the rest bits which equal or bigger than MAX_NUMNODES.
1356 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1357 	 */
1358 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1359 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1360 			if (get_user(t, nmask + k))
1361 				return -EFAULT;
1362 			if (k == nlongs - 1) {
1363 				if (t & endmask)
1364 					return -EINVAL;
1365 			} else if (t)
1366 				return -EINVAL;
1367 		}
1368 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1369 		endmask = ~0UL;
1370 	}
1371 
1372 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1373 		unsigned long valid_mask = endmask;
1374 
1375 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1376 		if (get_user(t, nmask + nlongs - 1))
1377 			return -EFAULT;
1378 		if (t & valid_mask)
1379 			return -EINVAL;
1380 	}
1381 
1382 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1383 		return -EFAULT;
1384 	nodes_addr(*nodes)[nlongs-1] &= endmask;
1385 	return 0;
1386 }
1387 
1388 /* Copy a kernel node mask to user space */
copy_nodes_to_user(unsigned long __user * mask,unsigned long maxnode,nodemask_t * nodes)1389 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1390 			      nodemask_t *nodes)
1391 {
1392 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1393 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1394 
1395 	if (copy > nbytes) {
1396 		if (copy > PAGE_SIZE)
1397 			return -EINVAL;
1398 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1399 			return -EFAULT;
1400 		copy = nbytes;
1401 	}
1402 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1403 }
1404 
kernel_mbind(unsigned long start,unsigned long len,unsigned long mode,const unsigned long __user * nmask,unsigned long maxnode,unsigned int flags)1405 static long kernel_mbind(unsigned long start, unsigned long len,
1406 			 unsigned long mode, const unsigned long __user *nmask,
1407 			 unsigned long maxnode, unsigned int flags)
1408 {
1409 	nodemask_t nodes;
1410 	int err;
1411 	unsigned short mode_flags;
1412 
1413 	start = untagged_addr(start);
1414 	mode_flags = mode & MPOL_MODE_FLAGS;
1415 	mode &= ~MPOL_MODE_FLAGS;
1416 	if (mode >= MPOL_MAX)
1417 		return -EINVAL;
1418 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1419 	    (mode_flags & MPOL_F_RELATIVE_NODES))
1420 		return -EINVAL;
1421 	err = get_nodes(&nodes, nmask, maxnode);
1422 	if (err)
1423 		return err;
1424 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1425 }
1426 
SYSCALL_DEFINE6(mbind,unsigned long,start,unsigned long,len,unsigned long,mode,const unsigned long __user *,nmask,unsigned long,maxnode,unsigned int,flags)1427 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1428 		unsigned long, mode, const unsigned long __user *, nmask,
1429 		unsigned long, maxnode, unsigned int, flags)
1430 {
1431 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1432 }
1433 
1434 /* Set the process memory policy */
kernel_set_mempolicy(int mode,const unsigned long __user * nmask,unsigned long maxnode)1435 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1436 				 unsigned long maxnode)
1437 {
1438 	int err;
1439 	nodemask_t nodes;
1440 	unsigned short flags;
1441 
1442 	flags = mode & MPOL_MODE_FLAGS;
1443 	mode &= ~MPOL_MODE_FLAGS;
1444 	if ((unsigned int)mode >= MPOL_MAX)
1445 		return -EINVAL;
1446 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1447 		return -EINVAL;
1448 	err = get_nodes(&nodes, nmask, maxnode);
1449 	if (err)
1450 		return err;
1451 	return do_set_mempolicy(mode, flags, &nodes);
1452 }
1453 
SYSCALL_DEFINE3(set_mempolicy,int,mode,const unsigned long __user *,nmask,unsigned long,maxnode)1454 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1455 		unsigned long, maxnode)
1456 {
1457 	return kernel_set_mempolicy(mode, nmask, maxnode);
1458 }
1459 
kernel_migrate_pages(pid_t pid,unsigned long maxnode,const unsigned long __user * old_nodes,const unsigned long __user * new_nodes)1460 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1461 				const unsigned long __user *old_nodes,
1462 				const unsigned long __user *new_nodes)
1463 {
1464 	struct mm_struct *mm = NULL;
1465 	struct task_struct *task;
1466 	nodemask_t task_nodes;
1467 	int err;
1468 	nodemask_t *old;
1469 	nodemask_t *new;
1470 	NODEMASK_SCRATCH(scratch);
1471 
1472 	if (!scratch)
1473 		return -ENOMEM;
1474 
1475 	old = &scratch->mask1;
1476 	new = &scratch->mask2;
1477 
1478 	err = get_nodes(old, old_nodes, maxnode);
1479 	if (err)
1480 		goto out;
1481 
1482 	err = get_nodes(new, new_nodes, maxnode);
1483 	if (err)
1484 		goto out;
1485 
1486 	/* Find the mm_struct */
1487 	rcu_read_lock();
1488 	task = pid ? find_task_by_vpid(pid) : current;
1489 	if (!task) {
1490 		rcu_read_unlock();
1491 		err = -ESRCH;
1492 		goto out;
1493 	}
1494 	get_task_struct(task);
1495 
1496 	err = -EINVAL;
1497 
1498 	/*
1499 	 * Check if this process has the right to modify the specified process.
1500 	 * Use the regular "ptrace_may_access()" checks.
1501 	 */
1502 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1503 		rcu_read_unlock();
1504 		err = -EPERM;
1505 		goto out_put;
1506 	}
1507 	rcu_read_unlock();
1508 
1509 	task_nodes = cpuset_mems_allowed(task);
1510 	/* Is the user allowed to access the target nodes? */
1511 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1512 		err = -EPERM;
1513 		goto out_put;
1514 	}
1515 
1516 	task_nodes = cpuset_mems_allowed(current);
1517 	nodes_and(*new, *new, task_nodes);
1518 	if (nodes_empty(*new))
1519 		goto out_put;
1520 
1521 	err = security_task_movememory(task);
1522 	if (err)
1523 		goto out_put;
1524 
1525 	mm = get_task_mm(task);
1526 	put_task_struct(task);
1527 
1528 	if (!mm) {
1529 		err = -EINVAL;
1530 		goto out;
1531 	}
1532 
1533 	err = do_migrate_pages(mm, old, new,
1534 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1535 
1536 	mmput(mm);
1537 out:
1538 	NODEMASK_SCRATCH_FREE(scratch);
1539 
1540 	return err;
1541 
1542 out_put:
1543 	put_task_struct(task);
1544 	goto out;
1545 
1546 }
1547 
SYSCALL_DEFINE4(migrate_pages,pid_t,pid,unsigned long,maxnode,const unsigned long __user *,old_nodes,const unsigned long __user *,new_nodes)1548 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1549 		const unsigned long __user *, old_nodes,
1550 		const unsigned long __user *, new_nodes)
1551 {
1552 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1553 }
1554 
1555 
1556 /* Retrieve NUMA policy */
kernel_get_mempolicy(int __user * policy,unsigned long __user * nmask,unsigned long maxnode,unsigned long addr,unsigned long flags)1557 static int kernel_get_mempolicy(int __user *policy,
1558 				unsigned long __user *nmask,
1559 				unsigned long maxnode,
1560 				unsigned long addr,
1561 				unsigned long flags)
1562 {
1563 	int err;
1564 	int uninitialized_var(pval);
1565 	nodemask_t nodes;
1566 
1567 	addr = untagged_addr(addr);
1568 
1569 	if (nmask != NULL && maxnode < nr_node_ids)
1570 		return -EINVAL;
1571 
1572 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1573 
1574 	if (err)
1575 		return err;
1576 
1577 	if (policy && put_user(pval, policy))
1578 		return -EFAULT;
1579 
1580 	if (nmask)
1581 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1582 
1583 	return err;
1584 }
1585 
SYSCALL_DEFINE5(get_mempolicy,int __user *,policy,unsigned long __user *,nmask,unsigned long,maxnode,unsigned long,addr,unsigned long,flags)1586 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1587 		unsigned long __user *, nmask, unsigned long, maxnode,
1588 		unsigned long, addr, unsigned long, flags)
1589 {
1590 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1591 }
1592 
1593 #ifdef CONFIG_COMPAT
1594 
COMPAT_SYSCALL_DEFINE5(get_mempolicy,int __user *,policy,compat_ulong_t __user *,nmask,compat_ulong_t,maxnode,compat_ulong_t,addr,compat_ulong_t,flags)1595 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1596 		       compat_ulong_t __user *, nmask,
1597 		       compat_ulong_t, maxnode,
1598 		       compat_ulong_t, addr, compat_ulong_t, flags)
1599 {
1600 	long err;
1601 	unsigned long __user *nm = NULL;
1602 	unsigned long nr_bits, alloc_size;
1603 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1604 
1605 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1606 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1607 
1608 	if (nmask)
1609 		nm = compat_alloc_user_space(alloc_size);
1610 
1611 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1612 
1613 	if (!err && nmask) {
1614 		unsigned long copy_size;
1615 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1616 		err = copy_from_user(bm, nm, copy_size);
1617 		/* ensure entire bitmap is zeroed */
1618 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1619 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1620 	}
1621 
1622 	return err;
1623 }
1624 
COMPAT_SYSCALL_DEFINE3(set_mempolicy,int,mode,compat_ulong_t __user *,nmask,compat_ulong_t,maxnode)1625 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1626 		       compat_ulong_t, maxnode)
1627 {
1628 	unsigned long __user *nm = NULL;
1629 	unsigned long nr_bits, alloc_size;
1630 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1631 
1632 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1633 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1634 
1635 	if (nmask) {
1636 		if (compat_get_bitmap(bm, nmask, nr_bits))
1637 			return -EFAULT;
1638 		nm = compat_alloc_user_space(alloc_size);
1639 		if (copy_to_user(nm, bm, alloc_size))
1640 			return -EFAULT;
1641 	}
1642 
1643 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
1644 }
1645 
COMPAT_SYSCALL_DEFINE6(mbind,compat_ulong_t,start,compat_ulong_t,len,compat_ulong_t,mode,compat_ulong_t __user *,nmask,compat_ulong_t,maxnode,compat_ulong_t,flags)1646 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1647 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1648 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
1649 {
1650 	unsigned long __user *nm = NULL;
1651 	unsigned long nr_bits, alloc_size;
1652 	nodemask_t bm;
1653 
1654 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1655 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1656 
1657 	if (nmask) {
1658 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1659 			return -EFAULT;
1660 		nm = compat_alloc_user_space(alloc_size);
1661 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1662 			return -EFAULT;
1663 	}
1664 
1665 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1666 }
1667 
COMPAT_SYSCALL_DEFINE4(migrate_pages,compat_pid_t,pid,compat_ulong_t,maxnode,const compat_ulong_t __user *,old_nodes,const compat_ulong_t __user *,new_nodes)1668 COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1669 		       compat_ulong_t, maxnode,
1670 		       const compat_ulong_t __user *, old_nodes,
1671 		       const compat_ulong_t __user *, new_nodes)
1672 {
1673 	unsigned long __user *old = NULL;
1674 	unsigned long __user *new = NULL;
1675 	nodemask_t tmp_mask;
1676 	unsigned long nr_bits;
1677 	unsigned long size;
1678 
1679 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1680 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1681 	if (old_nodes) {
1682 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1683 			return -EFAULT;
1684 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1685 		if (new_nodes)
1686 			new = old + size / sizeof(unsigned long);
1687 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1688 			return -EFAULT;
1689 	}
1690 	if (new_nodes) {
1691 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1692 			return -EFAULT;
1693 		if (new == NULL)
1694 			new = compat_alloc_user_space(size);
1695 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1696 			return -EFAULT;
1697 	}
1698 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1699 }
1700 
1701 #endif /* CONFIG_COMPAT */
1702 
__get_vma_policy(struct vm_area_struct * vma,unsigned long addr)1703 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1704 						unsigned long addr)
1705 {
1706 	struct mempolicy *pol = NULL;
1707 
1708 	if (vma) {
1709 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1710 			pol = vma->vm_ops->get_policy(vma, addr);
1711 		} else if (vma->vm_policy) {
1712 			pol = vma->vm_policy;
1713 
1714 			/*
1715 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1716 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1717 			 * count on these policies which will be dropped by
1718 			 * mpol_cond_put() later
1719 			 */
1720 			if (mpol_needs_cond_ref(pol))
1721 				mpol_get(pol);
1722 		}
1723 	}
1724 
1725 	return pol;
1726 }
1727 
1728 /*
1729  * get_vma_policy(@vma, @addr)
1730  * @vma: virtual memory area whose policy is sought
1731  * @addr: address in @vma for shared policy lookup
1732  *
1733  * Returns effective policy for a VMA at specified address.
1734  * Falls back to current->mempolicy or system default policy, as necessary.
1735  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1736  * count--added by the get_policy() vm_op, as appropriate--to protect against
1737  * freeing by another task.  It is the caller's responsibility to free the
1738  * extra reference for shared policies.
1739  */
get_vma_policy(struct vm_area_struct * vma,unsigned long addr)1740 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1741 						unsigned long addr)
1742 {
1743 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1744 
1745 	if (!pol)
1746 		pol = get_task_policy(current);
1747 
1748 	return pol;
1749 }
1750 
vma_policy_mof(struct vm_area_struct * vma)1751 bool vma_policy_mof(struct vm_area_struct *vma)
1752 {
1753 	struct mempolicy *pol;
1754 
1755 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1756 		bool ret = false;
1757 
1758 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1759 		if (pol && (pol->flags & MPOL_F_MOF))
1760 			ret = true;
1761 		mpol_cond_put(pol);
1762 
1763 		return ret;
1764 	}
1765 
1766 	pol = vma->vm_policy;
1767 	if (!pol)
1768 		pol = get_task_policy(current);
1769 
1770 	return pol->flags & MPOL_F_MOF;
1771 }
1772 
apply_policy_zone(struct mempolicy * policy,enum zone_type zone)1773 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1774 {
1775 	enum zone_type dynamic_policy_zone = policy_zone;
1776 
1777 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1778 
1779 	/*
1780 	 * if policy->v.nodes has movable memory only,
1781 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1782 	 *
1783 	 * policy->v.nodes is intersect with node_states[N_MEMORY].
1784 	 * so if the following test faile, it implies
1785 	 * policy->v.nodes has movable memory only.
1786 	 */
1787 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1788 		dynamic_policy_zone = ZONE_MOVABLE;
1789 
1790 	return zone >= dynamic_policy_zone;
1791 }
1792 
1793 /*
1794  * Return a nodemask representing a mempolicy for filtering nodes for
1795  * page allocation
1796  */
policy_nodemask(gfp_t gfp,struct mempolicy * policy)1797 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1798 {
1799 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1800 	if (unlikely(policy->mode == MPOL_BIND) &&
1801 			apply_policy_zone(policy, gfp_zone(gfp)) &&
1802 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1803 		return &policy->v.nodes;
1804 
1805 	return NULL;
1806 }
1807 
1808 /* Return the node id preferred by the given mempolicy, or the given id */
policy_node(gfp_t gfp,struct mempolicy * policy,int nd)1809 static int policy_node(gfp_t gfp, struct mempolicy *policy,
1810 								int nd)
1811 {
1812 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1813 		nd = policy->v.preferred_node;
1814 	else {
1815 		/*
1816 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1817 		 * because we might easily break the expectation to stay on the
1818 		 * requested node and not break the policy.
1819 		 */
1820 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1821 	}
1822 
1823 	return nd;
1824 }
1825 
1826 /* Do dynamic interleaving for a process */
interleave_nodes(struct mempolicy * policy)1827 static unsigned interleave_nodes(struct mempolicy *policy)
1828 {
1829 	unsigned next;
1830 	struct task_struct *me = current;
1831 
1832 	next = next_node_in(me->il_prev, policy->v.nodes);
1833 	if (next < MAX_NUMNODES)
1834 		me->il_prev = next;
1835 	return next;
1836 }
1837 
1838 /*
1839  * Depending on the memory policy provide a node from which to allocate the
1840  * next slab entry.
1841  */
mempolicy_slab_node(void)1842 unsigned int mempolicy_slab_node(void)
1843 {
1844 	struct mempolicy *policy;
1845 	int node = numa_mem_id();
1846 
1847 	if (in_interrupt())
1848 		return node;
1849 
1850 	policy = current->mempolicy;
1851 	if (!policy || policy->flags & MPOL_F_LOCAL)
1852 		return node;
1853 
1854 	switch (policy->mode) {
1855 	case MPOL_PREFERRED:
1856 		/*
1857 		 * handled MPOL_F_LOCAL above
1858 		 */
1859 		return policy->v.preferred_node;
1860 
1861 	case MPOL_INTERLEAVE:
1862 		return interleave_nodes(policy);
1863 
1864 	case MPOL_BIND: {
1865 		struct zoneref *z;
1866 
1867 		/*
1868 		 * Follow bind policy behavior and start allocation at the
1869 		 * first node.
1870 		 */
1871 		struct zonelist *zonelist;
1872 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1873 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1874 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1875 							&policy->v.nodes);
1876 		return z->zone ? zone_to_nid(z->zone) : node;
1877 	}
1878 
1879 	default:
1880 		BUG();
1881 	}
1882 }
1883 
1884 /*
1885  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1886  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1887  * number of present nodes.
1888  */
offset_il_node(struct mempolicy * pol,unsigned long n)1889 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1890 {
1891 	unsigned nnodes = nodes_weight(pol->v.nodes);
1892 	unsigned target;
1893 	int i;
1894 	int nid;
1895 
1896 	if (!nnodes)
1897 		return numa_node_id();
1898 	target = (unsigned int)n % nnodes;
1899 	nid = first_node(pol->v.nodes);
1900 	for (i = 0; i < target; i++)
1901 		nid = next_node(nid, pol->v.nodes);
1902 	return nid;
1903 }
1904 
1905 /* Determine a node number for interleave */
interleave_nid(struct mempolicy * pol,struct vm_area_struct * vma,unsigned long addr,int shift)1906 static inline unsigned interleave_nid(struct mempolicy *pol,
1907 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1908 {
1909 	if (vma) {
1910 		unsigned long off;
1911 
1912 		/*
1913 		 * for small pages, there is no difference between
1914 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1915 		 * for huge pages, since vm_pgoff is in units of small
1916 		 * pages, we need to shift off the always 0 bits to get
1917 		 * a useful offset.
1918 		 */
1919 		BUG_ON(shift < PAGE_SHIFT);
1920 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1921 		off += (addr - vma->vm_start) >> shift;
1922 		return offset_il_node(pol, off);
1923 	} else
1924 		return interleave_nodes(pol);
1925 }
1926 
1927 #ifdef CONFIG_HUGETLBFS
1928 /*
1929  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1930  * @vma: virtual memory area whose policy is sought
1931  * @addr: address in @vma for shared policy lookup and interleave policy
1932  * @gfp_flags: for requested zone
1933  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1934  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1935  *
1936  * Returns a nid suitable for a huge page allocation and a pointer
1937  * to the struct mempolicy for conditional unref after allocation.
1938  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1939  * @nodemask for filtering the zonelist.
1940  *
1941  * Must be protected by read_mems_allowed_begin()
1942  */
huge_node(struct vm_area_struct * vma,unsigned long addr,gfp_t gfp_flags,struct mempolicy ** mpol,nodemask_t ** nodemask)1943 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1944 				struct mempolicy **mpol, nodemask_t **nodemask)
1945 {
1946 	int nid;
1947 
1948 	*mpol = get_vma_policy(vma, addr);
1949 	*nodemask = NULL;	/* assume !MPOL_BIND */
1950 
1951 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1952 		nid = interleave_nid(*mpol, vma, addr,
1953 					huge_page_shift(hstate_vma(vma)));
1954 	} else {
1955 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
1956 		if ((*mpol)->mode == MPOL_BIND)
1957 			*nodemask = &(*mpol)->v.nodes;
1958 	}
1959 	return nid;
1960 }
1961 
1962 /*
1963  * init_nodemask_of_mempolicy
1964  *
1965  * If the current task's mempolicy is "default" [NULL], return 'false'
1966  * to indicate default policy.  Otherwise, extract the policy nodemask
1967  * for 'bind' or 'interleave' policy into the argument nodemask, or
1968  * initialize the argument nodemask to contain the single node for
1969  * 'preferred' or 'local' policy and return 'true' to indicate presence
1970  * of non-default mempolicy.
1971  *
1972  * We don't bother with reference counting the mempolicy [mpol_get/put]
1973  * because the current task is examining it's own mempolicy and a task's
1974  * mempolicy is only ever changed by the task itself.
1975  *
1976  * N.B., it is the caller's responsibility to free a returned nodemask.
1977  */
init_nodemask_of_mempolicy(nodemask_t * mask)1978 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1979 {
1980 	struct mempolicy *mempolicy;
1981 	int nid;
1982 
1983 	if (!(mask && current->mempolicy))
1984 		return false;
1985 
1986 	task_lock(current);
1987 	mempolicy = current->mempolicy;
1988 	switch (mempolicy->mode) {
1989 	case MPOL_PREFERRED:
1990 		if (mempolicy->flags & MPOL_F_LOCAL)
1991 			nid = numa_node_id();
1992 		else
1993 			nid = mempolicy->v.preferred_node;
1994 		init_nodemask_of_node(mask, nid);
1995 		break;
1996 
1997 	case MPOL_BIND:
1998 		/* Fall through */
1999 	case MPOL_INTERLEAVE:
2000 		*mask =  mempolicy->v.nodes;
2001 		break;
2002 
2003 	default:
2004 		BUG();
2005 	}
2006 	task_unlock(current);
2007 
2008 	return true;
2009 }
2010 #endif
2011 
2012 /*
2013  * mempolicy_nodemask_intersects
2014  *
2015  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2016  * policy.  Otherwise, check for intersection between mask and the policy
2017  * nodemask for 'bind' or 'interleave' policy.  For 'perferred' or 'local'
2018  * policy, always return true since it may allocate elsewhere on fallback.
2019  *
2020  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2021  */
mempolicy_nodemask_intersects(struct task_struct * tsk,const nodemask_t * mask)2022 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2023 					const nodemask_t *mask)
2024 {
2025 	struct mempolicy *mempolicy;
2026 	bool ret = true;
2027 
2028 	if (!mask)
2029 		return ret;
2030 	task_lock(tsk);
2031 	mempolicy = tsk->mempolicy;
2032 	if (!mempolicy)
2033 		goto out;
2034 
2035 	switch (mempolicy->mode) {
2036 	case MPOL_PREFERRED:
2037 		/*
2038 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
2039 		 * allocate from, they may fallback to other nodes when oom.
2040 		 * Thus, it's possible for tsk to have allocated memory from
2041 		 * nodes in mask.
2042 		 */
2043 		break;
2044 	case MPOL_BIND:
2045 	case MPOL_INTERLEAVE:
2046 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
2047 		break;
2048 	default:
2049 		BUG();
2050 	}
2051 out:
2052 	task_unlock(tsk);
2053 	return ret;
2054 }
2055 
2056 /* Allocate a page in interleaved policy.
2057    Own path because it needs to do special accounting. */
alloc_page_interleave(gfp_t gfp,unsigned order,unsigned nid)2058 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2059 					unsigned nid)
2060 {
2061 	struct page *page;
2062 
2063 	page = __alloc_pages(gfp, order, nid);
2064 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2065 	if (!static_branch_likely(&vm_numa_stat_key))
2066 		return page;
2067 	if (page && page_to_nid(page) == nid) {
2068 		preempt_disable();
2069 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2070 		preempt_enable();
2071 	}
2072 	return page;
2073 }
2074 
2075 /**
2076  * 	alloc_pages_vma	- Allocate a page for a VMA.
2077  *
2078  * 	@gfp:
2079  *      %GFP_USER    user allocation.
2080  *      %GFP_KERNEL  kernel allocations,
2081  *      %GFP_HIGHMEM highmem/user allocations,
2082  *      %GFP_FS      allocation should not call back into a file system.
2083  *      %GFP_ATOMIC  don't sleep.
2084  *
2085  *	@order:Order of the GFP allocation.
2086  * 	@vma:  Pointer to VMA or NULL if not available.
2087  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2088  *	@node: Which node to prefer for allocation (modulo policy).
2089  *	@hugepage: for hugepages try only the preferred node if possible
2090  *
2091  * 	This function allocates a page from the kernel page pool and applies
2092  *	a NUMA policy associated with the VMA or the current process.
2093  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
2094  *	mm_struct of the VMA to prevent it from going away. Should be used for
2095  *	all allocations for pages that will be mapped into user space. Returns
2096  *	NULL when no page can be allocated.
2097  */
2098 struct page *
alloc_pages_vma(gfp_t gfp,int order,struct vm_area_struct * vma,unsigned long addr,int node,bool hugepage)2099 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2100 		unsigned long addr, int node, bool hugepage)
2101 {
2102 	struct mempolicy *pol;
2103 	struct page *page;
2104 	int preferred_nid;
2105 	nodemask_t *nmask;
2106 
2107 	pol = get_vma_policy(vma, addr);
2108 
2109 	if (pol->mode == MPOL_INTERLEAVE) {
2110 		unsigned nid;
2111 
2112 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2113 		mpol_cond_put(pol);
2114 		page = alloc_page_interleave(gfp, order, nid);
2115 		goto out;
2116 	}
2117 
2118 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2119 		int hpage_node = node;
2120 
2121 		/*
2122 		 * For hugepage allocation and non-interleave policy which
2123 		 * allows the current node (or other explicitly preferred
2124 		 * node) we only try to allocate from the current/preferred
2125 		 * node and don't fall back to other nodes, as the cost of
2126 		 * remote accesses would likely offset THP benefits.
2127 		 *
2128 		 * If the policy is interleave, or does not allow the current
2129 		 * node in its nodemask, we allocate the standard way.
2130 		 */
2131 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2132 			hpage_node = pol->v.preferred_node;
2133 
2134 		nmask = policy_nodemask(gfp, pol);
2135 		if (!nmask || node_isset(hpage_node, *nmask)) {
2136 			mpol_cond_put(pol);
2137 			page = __alloc_pages_node(hpage_node,
2138 						gfp | __GFP_THISNODE, order);
2139 
2140 			/*
2141 			 * If hugepage allocations are configured to always
2142 			 * synchronous compact or the vma has been madvised
2143 			 * to prefer hugepage backing, retry allowing remote
2144 			 * memory as well.
2145 			 */
2146 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2147 				page = __alloc_pages_node(hpage_node,
2148 						gfp | __GFP_NORETRY, order);
2149 
2150 			goto out;
2151 		}
2152 	}
2153 
2154 	nmask = policy_nodemask(gfp, pol);
2155 	preferred_nid = policy_node(gfp, pol, node);
2156 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2157 	mpol_cond_put(pol);
2158 out:
2159 	return page;
2160 }
2161 EXPORT_SYMBOL(alloc_pages_vma);
2162 
2163 /**
2164  * 	alloc_pages_current - Allocate pages.
2165  *
2166  *	@gfp:
2167  *		%GFP_USER   user allocation,
2168  *      	%GFP_KERNEL kernel allocation,
2169  *      	%GFP_HIGHMEM highmem allocation,
2170  *      	%GFP_FS     don't call back into a file system.
2171  *      	%GFP_ATOMIC don't sleep.
2172  *	@order: Power of two of allocation size in pages. 0 is a single page.
2173  *
2174  *	Allocate a page from the kernel page pool.  When not in
2175  *	interrupt context and apply the current process NUMA policy.
2176  *	Returns NULL when no page can be allocated.
2177  */
alloc_pages_current(gfp_t gfp,unsigned order)2178 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2179 {
2180 	struct mempolicy *pol = &default_policy;
2181 	struct page *page;
2182 
2183 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2184 		pol = get_task_policy(current);
2185 
2186 	/*
2187 	 * No reference counting needed for current->mempolicy
2188 	 * nor system default_policy
2189 	 */
2190 	if (pol->mode == MPOL_INTERLEAVE)
2191 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2192 	else
2193 		page = __alloc_pages_nodemask(gfp, order,
2194 				policy_node(gfp, pol, numa_node_id()),
2195 				policy_nodemask(gfp, pol));
2196 
2197 	return page;
2198 }
2199 EXPORT_SYMBOL(alloc_pages_current);
2200 
vma_dup_policy(struct vm_area_struct * src,struct vm_area_struct * dst)2201 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2202 {
2203 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2204 
2205 	if (IS_ERR(pol))
2206 		return PTR_ERR(pol);
2207 	dst->vm_policy = pol;
2208 	return 0;
2209 }
2210 
2211 /*
2212  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2213  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
2214  * with the mems_allowed returned by cpuset_mems_allowed().  This
2215  * keeps mempolicies cpuset relative after its cpuset moves.  See
2216  * further kernel/cpuset.c update_nodemask().
2217  *
2218  * current's mempolicy may be rebinded by the other task(the task that changes
2219  * cpuset's mems), so we needn't do rebind work for current task.
2220  */
2221 
2222 /* Slow path of a mempolicy duplicate */
__mpol_dup(struct mempolicy * old)2223 struct mempolicy *__mpol_dup(struct mempolicy *old)
2224 {
2225 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2226 
2227 	if (!new)
2228 		return ERR_PTR(-ENOMEM);
2229 
2230 	/* task's mempolicy is protected by alloc_lock */
2231 	if (old == current->mempolicy) {
2232 		task_lock(current);
2233 		*new = *old;
2234 		task_unlock(current);
2235 	} else
2236 		*new = *old;
2237 
2238 	if (current_cpuset_is_being_rebound()) {
2239 		nodemask_t mems = cpuset_mems_allowed(current);
2240 		mpol_rebind_policy(new, &mems);
2241 	}
2242 	atomic_set(&new->refcnt, 1);
2243 	return new;
2244 }
2245 
2246 /* Slow path of a mempolicy comparison */
__mpol_equal(struct mempolicy * a,struct mempolicy * b)2247 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2248 {
2249 	if (!a || !b)
2250 		return false;
2251 	if (a->mode != b->mode)
2252 		return false;
2253 	if (a->flags != b->flags)
2254 		return false;
2255 	if (mpol_store_user_nodemask(a))
2256 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2257 			return false;
2258 
2259 	switch (a->mode) {
2260 	case MPOL_BIND:
2261 		/* Fall through */
2262 	case MPOL_INTERLEAVE:
2263 		return !!nodes_equal(a->v.nodes, b->v.nodes);
2264 	case MPOL_PREFERRED:
2265 		/* a's ->flags is the same as b's */
2266 		if (a->flags & MPOL_F_LOCAL)
2267 			return true;
2268 		return a->v.preferred_node == b->v.preferred_node;
2269 	default:
2270 		BUG();
2271 		return false;
2272 	}
2273 }
2274 
2275 /*
2276  * Shared memory backing store policy support.
2277  *
2278  * Remember policies even when nobody has shared memory mapped.
2279  * The policies are kept in Red-Black tree linked from the inode.
2280  * They are protected by the sp->lock rwlock, which should be held
2281  * for any accesses to the tree.
2282  */
2283 
2284 /*
2285  * lookup first element intersecting start-end.  Caller holds sp->lock for
2286  * reading or for writing
2287  */
2288 static struct sp_node *
sp_lookup(struct shared_policy * sp,unsigned long start,unsigned long end)2289 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2290 {
2291 	struct rb_node *n = sp->root.rb_node;
2292 
2293 	while (n) {
2294 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2295 
2296 		if (start >= p->end)
2297 			n = n->rb_right;
2298 		else if (end <= p->start)
2299 			n = n->rb_left;
2300 		else
2301 			break;
2302 	}
2303 	if (!n)
2304 		return NULL;
2305 	for (;;) {
2306 		struct sp_node *w = NULL;
2307 		struct rb_node *prev = rb_prev(n);
2308 		if (!prev)
2309 			break;
2310 		w = rb_entry(prev, struct sp_node, nd);
2311 		if (w->end <= start)
2312 			break;
2313 		n = prev;
2314 	}
2315 	return rb_entry(n, struct sp_node, nd);
2316 }
2317 
2318 /*
2319  * Insert a new shared policy into the list.  Caller holds sp->lock for
2320  * writing.
2321  */
sp_insert(struct shared_policy * sp,struct sp_node * new)2322 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2323 {
2324 	struct rb_node **p = &sp->root.rb_node;
2325 	struct rb_node *parent = NULL;
2326 	struct sp_node *nd;
2327 
2328 	while (*p) {
2329 		parent = *p;
2330 		nd = rb_entry(parent, struct sp_node, nd);
2331 		if (new->start < nd->start)
2332 			p = &(*p)->rb_left;
2333 		else if (new->end > nd->end)
2334 			p = &(*p)->rb_right;
2335 		else
2336 			BUG();
2337 	}
2338 	rb_link_node(&new->nd, parent, p);
2339 	rb_insert_color(&new->nd, &sp->root);
2340 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2341 		 new->policy ? new->policy->mode : 0);
2342 }
2343 
2344 /* Find shared policy intersecting idx */
2345 struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy * sp,unsigned long idx)2346 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2347 {
2348 	struct mempolicy *pol = NULL;
2349 	struct sp_node *sn;
2350 
2351 	if (!sp->root.rb_node)
2352 		return NULL;
2353 	read_lock(&sp->lock);
2354 	sn = sp_lookup(sp, idx, idx+1);
2355 	if (sn) {
2356 		mpol_get(sn->policy);
2357 		pol = sn->policy;
2358 	}
2359 	read_unlock(&sp->lock);
2360 	return pol;
2361 }
2362 
sp_free(struct sp_node * n)2363 static void sp_free(struct sp_node *n)
2364 {
2365 	mpol_put(n->policy);
2366 	kmem_cache_free(sn_cache, n);
2367 }
2368 
2369 /**
2370  * mpol_misplaced - check whether current page node is valid in policy
2371  *
2372  * @page: page to be checked
2373  * @vma: vm area where page mapped
2374  * @addr: virtual address where page mapped
2375  *
2376  * Lookup current policy node id for vma,addr and "compare to" page's
2377  * node id.
2378  *
2379  * Returns:
2380  *	-1	- not misplaced, page is in the right node
2381  *	node	- node id where the page should be
2382  *
2383  * Policy determination "mimics" alloc_page_vma().
2384  * Called from fault path where we know the vma and faulting address.
2385  */
mpol_misplaced(struct page * page,struct vm_area_struct * vma,unsigned long addr)2386 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2387 {
2388 	struct mempolicy *pol;
2389 	struct zoneref *z;
2390 	int curnid = page_to_nid(page);
2391 	unsigned long pgoff;
2392 	int thiscpu = raw_smp_processor_id();
2393 	int thisnid = cpu_to_node(thiscpu);
2394 	int polnid = NUMA_NO_NODE;
2395 	int ret = -1;
2396 
2397 	pol = get_vma_policy(vma, addr);
2398 	if (!(pol->flags & MPOL_F_MOF))
2399 		goto out;
2400 
2401 	switch (pol->mode) {
2402 	case MPOL_INTERLEAVE:
2403 		pgoff = vma->vm_pgoff;
2404 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2405 		polnid = offset_il_node(pol, pgoff);
2406 		break;
2407 
2408 	case MPOL_PREFERRED:
2409 		if (pol->flags & MPOL_F_LOCAL)
2410 			polnid = numa_node_id();
2411 		else
2412 			polnid = pol->v.preferred_node;
2413 		break;
2414 
2415 	case MPOL_BIND:
2416 
2417 		/*
2418 		 * allows binding to multiple nodes.
2419 		 * use current page if in policy nodemask,
2420 		 * else select nearest allowed node, if any.
2421 		 * If no allowed nodes, use current [!misplaced].
2422 		 */
2423 		if (node_isset(curnid, pol->v.nodes))
2424 			goto out;
2425 		z = first_zones_zonelist(
2426 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2427 				gfp_zone(GFP_HIGHUSER),
2428 				&pol->v.nodes);
2429 		polnid = zone_to_nid(z->zone);
2430 		break;
2431 
2432 	default:
2433 		BUG();
2434 	}
2435 
2436 	/* Migrate the page towards the node whose CPU is referencing it */
2437 	if (pol->flags & MPOL_F_MORON) {
2438 		polnid = thisnid;
2439 
2440 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2441 			goto out;
2442 	}
2443 
2444 	if (curnid != polnid)
2445 		ret = polnid;
2446 out:
2447 	mpol_cond_put(pol);
2448 
2449 	return ret;
2450 }
2451 
2452 /*
2453  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2454  * dropped after task->mempolicy is set to NULL so that any allocation done as
2455  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2456  * policy.
2457  */
mpol_put_task_policy(struct task_struct * task)2458 void mpol_put_task_policy(struct task_struct *task)
2459 {
2460 	struct mempolicy *pol;
2461 
2462 	task_lock(task);
2463 	pol = task->mempolicy;
2464 	task->mempolicy = NULL;
2465 	task_unlock(task);
2466 	mpol_put(pol);
2467 }
2468 
sp_delete(struct shared_policy * sp,struct sp_node * n)2469 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2470 {
2471 	pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2472 	rb_erase(&n->nd, &sp->root);
2473 	sp_free(n);
2474 }
2475 
sp_node_init(struct sp_node * node,unsigned long start,unsigned long end,struct mempolicy * pol)2476 static void sp_node_init(struct sp_node *node, unsigned long start,
2477 			unsigned long end, struct mempolicy *pol)
2478 {
2479 	node->start = start;
2480 	node->end = end;
2481 	node->policy = pol;
2482 }
2483 
sp_alloc(unsigned long start,unsigned long end,struct mempolicy * pol)2484 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2485 				struct mempolicy *pol)
2486 {
2487 	struct sp_node *n;
2488 	struct mempolicy *newpol;
2489 
2490 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2491 	if (!n)
2492 		return NULL;
2493 
2494 	newpol = mpol_dup(pol);
2495 	if (IS_ERR(newpol)) {
2496 		kmem_cache_free(sn_cache, n);
2497 		return NULL;
2498 	}
2499 	newpol->flags |= MPOL_F_SHARED;
2500 	sp_node_init(n, start, end, newpol);
2501 
2502 	return n;
2503 }
2504 
2505 /* Replace a policy range. */
shared_policy_replace(struct shared_policy * sp,unsigned long start,unsigned long end,struct sp_node * new)2506 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2507 				 unsigned long end, struct sp_node *new)
2508 {
2509 	struct sp_node *n;
2510 	struct sp_node *n_new = NULL;
2511 	struct mempolicy *mpol_new = NULL;
2512 	int ret = 0;
2513 
2514 restart:
2515 	write_lock(&sp->lock);
2516 	n = sp_lookup(sp, start, end);
2517 	/* Take care of old policies in the same range. */
2518 	while (n && n->start < end) {
2519 		struct rb_node *next = rb_next(&n->nd);
2520 		if (n->start >= start) {
2521 			if (n->end <= end)
2522 				sp_delete(sp, n);
2523 			else
2524 				n->start = end;
2525 		} else {
2526 			/* Old policy spanning whole new range. */
2527 			if (n->end > end) {
2528 				if (!n_new)
2529 					goto alloc_new;
2530 
2531 				*mpol_new = *n->policy;
2532 				atomic_set(&mpol_new->refcnt, 1);
2533 				sp_node_init(n_new, end, n->end, mpol_new);
2534 				n->end = start;
2535 				sp_insert(sp, n_new);
2536 				n_new = NULL;
2537 				mpol_new = NULL;
2538 				break;
2539 			} else
2540 				n->end = start;
2541 		}
2542 		if (!next)
2543 			break;
2544 		n = rb_entry(next, struct sp_node, nd);
2545 	}
2546 	if (new)
2547 		sp_insert(sp, new);
2548 	write_unlock(&sp->lock);
2549 	ret = 0;
2550 
2551 err_out:
2552 	if (mpol_new)
2553 		mpol_put(mpol_new);
2554 	if (n_new)
2555 		kmem_cache_free(sn_cache, n_new);
2556 
2557 	return ret;
2558 
2559 alloc_new:
2560 	write_unlock(&sp->lock);
2561 	ret = -ENOMEM;
2562 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2563 	if (!n_new)
2564 		goto err_out;
2565 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2566 	if (!mpol_new)
2567 		goto err_out;
2568 	goto restart;
2569 }
2570 
2571 /**
2572  * mpol_shared_policy_init - initialize shared policy for inode
2573  * @sp: pointer to inode shared policy
2574  * @mpol:  struct mempolicy to install
2575  *
2576  * Install non-NULL @mpol in inode's shared policy rb-tree.
2577  * On entry, the current task has a reference on a non-NULL @mpol.
2578  * This must be released on exit.
2579  * This is called at get_inode() calls and we can use GFP_KERNEL.
2580  */
mpol_shared_policy_init(struct shared_policy * sp,struct mempolicy * mpol)2581 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2582 {
2583 	int ret;
2584 
2585 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2586 	rwlock_init(&sp->lock);
2587 
2588 	if (mpol) {
2589 		struct vm_area_struct pvma;
2590 		struct mempolicy *new;
2591 		NODEMASK_SCRATCH(scratch);
2592 
2593 		if (!scratch)
2594 			goto put_mpol;
2595 		/* contextualize the tmpfs mount point mempolicy */
2596 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2597 		if (IS_ERR(new))
2598 			goto free_scratch; /* no valid nodemask intersection */
2599 
2600 		task_lock(current);
2601 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2602 		task_unlock(current);
2603 		if (ret)
2604 			goto put_new;
2605 
2606 		/* Create pseudo-vma that contains just the policy */
2607 		vma_init(&pvma, NULL);
2608 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2609 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2610 
2611 put_new:
2612 		mpol_put(new);			/* drop initial ref */
2613 free_scratch:
2614 		NODEMASK_SCRATCH_FREE(scratch);
2615 put_mpol:
2616 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2617 	}
2618 }
2619 
mpol_set_shared_policy(struct shared_policy * info,struct vm_area_struct * vma,struct mempolicy * npol)2620 int mpol_set_shared_policy(struct shared_policy *info,
2621 			struct vm_area_struct *vma, struct mempolicy *npol)
2622 {
2623 	int err;
2624 	struct sp_node *new = NULL;
2625 	unsigned long sz = vma_pages(vma);
2626 
2627 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2628 		 vma->vm_pgoff,
2629 		 sz, npol ? npol->mode : -1,
2630 		 npol ? npol->flags : -1,
2631 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2632 
2633 	if (npol) {
2634 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2635 		if (!new)
2636 			return -ENOMEM;
2637 	}
2638 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2639 	if (err && new)
2640 		sp_free(new);
2641 	return err;
2642 }
2643 
2644 /* Free a backing policy store on inode delete. */
mpol_free_shared_policy(struct shared_policy * p)2645 void mpol_free_shared_policy(struct shared_policy *p)
2646 {
2647 	struct sp_node *n;
2648 	struct rb_node *next;
2649 
2650 	if (!p->root.rb_node)
2651 		return;
2652 	write_lock(&p->lock);
2653 	next = rb_first(&p->root);
2654 	while (next) {
2655 		n = rb_entry(next, struct sp_node, nd);
2656 		next = rb_next(&n->nd);
2657 		sp_delete(p, n);
2658 	}
2659 	write_unlock(&p->lock);
2660 }
2661 
2662 #ifdef CONFIG_NUMA_BALANCING
2663 static int __initdata numabalancing_override;
2664 
check_numabalancing_enable(void)2665 static void __init check_numabalancing_enable(void)
2666 {
2667 	bool numabalancing_default = false;
2668 
2669 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2670 		numabalancing_default = true;
2671 
2672 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2673 	if (numabalancing_override)
2674 		set_numabalancing_state(numabalancing_override == 1);
2675 
2676 	if (num_online_nodes() > 1 && !numabalancing_override) {
2677 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2678 			numabalancing_default ? "Enabling" : "Disabling");
2679 		set_numabalancing_state(numabalancing_default);
2680 	}
2681 }
2682 
setup_numabalancing(char * str)2683 static int __init setup_numabalancing(char *str)
2684 {
2685 	int ret = 0;
2686 	if (!str)
2687 		goto out;
2688 
2689 	if (!strcmp(str, "enable")) {
2690 		numabalancing_override = 1;
2691 		ret = 1;
2692 	} else if (!strcmp(str, "disable")) {
2693 		numabalancing_override = -1;
2694 		ret = 1;
2695 	}
2696 out:
2697 	if (!ret)
2698 		pr_warn("Unable to parse numa_balancing=\n");
2699 
2700 	return ret;
2701 }
2702 __setup("numa_balancing=", setup_numabalancing);
2703 #else
check_numabalancing_enable(void)2704 static inline void __init check_numabalancing_enable(void)
2705 {
2706 }
2707 #endif /* CONFIG_NUMA_BALANCING */
2708 
2709 /* assumes fs == KERNEL_DS */
numa_policy_init(void)2710 void __init numa_policy_init(void)
2711 {
2712 	nodemask_t interleave_nodes;
2713 	unsigned long largest = 0;
2714 	int nid, prefer = 0;
2715 
2716 	policy_cache = kmem_cache_create("numa_policy",
2717 					 sizeof(struct mempolicy),
2718 					 0, SLAB_PANIC, NULL);
2719 
2720 	sn_cache = kmem_cache_create("shared_policy_node",
2721 				     sizeof(struct sp_node),
2722 				     0, SLAB_PANIC, NULL);
2723 
2724 	for_each_node(nid) {
2725 		preferred_node_policy[nid] = (struct mempolicy) {
2726 			.refcnt = ATOMIC_INIT(1),
2727 			.mode = MPOL_PREFERRED,
2728 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2729 			.v = { .preferred_node = nid, },
2730 		};
2731 	}
2732 
2733 	/*
2734 	 * Set interleaving policy for system init. Interleaving is only
2735 	 * enabled across suitably sized nodes (default is >= 16MB), or
2736 	 * fall back to the largest node if they're all smaller.
2737 	 */
2738 	nodes_clear(interleave_nodes);
2739 	for_each_node_state(nid, N_MEMORY) {
2740 		unsigned long total_pages = node_present_pages(nid);
2741 
2742 		/* Preserve the largest node */
2743 		if (largest < total_pages) {
2744 			largest = total_pages;
2745 			prefer = nid;
2746 		}
2747 
2748 		/* Interleave this node? */
2749 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2750 			node_set(nid, interleave_nodes);
2751 	}
2752 
2753 	/* All too small, use the largest */
2754 	if (unlikely(nodes_empty(interleave_nodes)))
2755 		node_set(prefer, interleave_nodes);
2756 
2757 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2758 		pr_err("%s: interleaving failed\n", __func__);
2759 
2760 	check_numabalancing_enable();
2761 }
2762 
2763 /* Reset policy of current process to default */
numa_default_policy(void)2764 void numa_default_policy(void)
2765 {
2766 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2767 }
2768 
2769 /*
2770  * Parse and format mempolicy from/to strings
2771  */
2772 
2773 /*
2774  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2775  */
2776 static const char * const policy_modes[] =
2777 {
2778 	[MPOL_DEFAULT]    = "default",
2779 	[MPOL_PREFERRED]  = "prefer",
2780 	[MPOL_BIND]       = "bind",
2781 	[MPOL_INTERLEAVE] = "interleave",
2782 	[MPOL_LOCAL]      = "local",
2783 };
2784 
2785 
2786 #ifdef CONFIG_TMPFS
2787 /**
2788  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2789  * @str:  string containing mempolicy to parse
2790  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2791  *
2792  * Format of input:
2793  *	<mode>[=<flags>][:<nodelist>]
2794  *
2795  * On success, returns 0, else 1
2796  */
mpol_parse_str(char * str,struct mempolicy ** mpol)2797 int mpol_parse_str(char *str, struct mempolicy **mpol)
2798 {
2799 	struct mempolicy *new = NULL;
2800 	unsigned short mode_flags;
2801 	nodemask_t nodes;
2802 	char *nodelist = strchr(str, ':');
2803 	char *flags = strchr(str, '=');
2804 	int err = 1, mode;
2805 
2806 	if (nodelist) {
2807 		/* NUL-terminate mode or flags string */
2808 		*nodelist++ = '\0';
2809 		if (nodelist_parse(nodelist, nodes))
2810 			goto out;
2811 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2812 			goto out;
2813 	} else
2814 		nodes_clear(nodes);
2815 
2816 	if (flags)
2817 		*flags++ = '\0';	/* terminate mode string */
2818 
2819 	mode = match_string(policy_modes, MPOL_MAX, str);
2820 	if (mode < 0)
2821 		goto out;
2822 
2823 	switch (mode) {
2824 	case MPOL_PREFERRED:
2825 		/*
2826 		 * Insist on a nodelist of one node only
2827 		 */
2828 		if (nodelist) {
2829 			char *rest = nodelist;
2830 			while (isdigit(*rest))
2831 				rest++;
2832 			if (*rest)
2833 				goto out;
2834 		}
2835 		break;
2836 	case MPOL_INTERLEAVE:
2837 		/*
2838 		 * Default to online nodes with memory if no nodelist
2839 		 */
2840 		if (!nodelist)
2841 			nodes = node_states[N_MEMORY];
2842 		break;
2843 	case MPOL_LOCAL:
2844 		/*
2845 		 * Don't allow a nodelist;  mpol_new() checks flags
2846 		 */
2847 		if (nodelist)
2848 			goto out;
2849 		mode = MPOL_PREFERRED;
2850 		break;
2851 	case MPOL_DEFAULT:
2852 		/*
2853 		 * Insist on a empty nodelist
2854 		 */
2855 		if (!nodelist)
2856 			err = 0;
2857 		goto out;
2858 	case MPOL_BIND:
2859 		/*
2860 		 * Insist on a nodelist
2861 		 */
2862 		if (!nodelist)
2863 			goto out;
2864 	}
2865 
2866 	mode_flags = 0;
2867 	if (flags) {
2868 		/*
2869 		 * Currently, we only support two mutually exclusive
2870 		 * mode flags.
2871 		 */
2872 		if (!strcmp(flags, "static"))
2873 			mode_flags |= MPOL_F_STATIC_NODES;
2874 		else if (!strcmp(flags, "relative"))
2875 			mode_flags |= MPOL_F_RELATIVE_NODES;
2876 		else
2877 			goto out;
2878 	}
2879 
2880 	new = mpol_new(mode, mode_flags, &nodes);
2881 	if (IS_ERR(new))
2882 		goto out;
2883 
2884 	/*
2885 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2886 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2887 	 */
2888 	if (mode != MPOL_PREFERRED)
2889 		new->v.nodes = nodes;
2890 	else if (nodelist)
2891 		new->v.preferred_node = first_node(nodes);
2892 	else
2893 		new->flags |= MPOL_F_LOCAL;
2894 
2895 	/*
2896 	 * Save nodes for contextualization: this will be used to "clone"
2897 	 * the mempolicy in a specific context [cpuset] at a later time.
2898 	 */
2899 	new->w.user_nodemask = nodes;
2900 
2901 	err = 0;
2902 
2903 out:
2904 	/* Restore string for error message */
2905 	if (nodelist)
2906 		*--nodelist = ':';
2907 	if (flags)
2908 		*--flags = '=';
2909 	if (!err)
2910 		*mpol = new;
2911 	return err;
2912 }
2913 #endif /* CONFIG_TMPFS */
2914 
2915 /**
2916  * mpol_to_str - format a mempolicy structure for printing
2917  * @buffer:  to contain formatted mempolicy string
2918  * @maxlen:  length of @buffer
2919  * @pol:  pointer to mempolicy to be formatted
2920  *
2921  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2922  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2923  * longest flag, "relative", and to display at least a few node ids.
2924  */
mpol_to_str(char * buffer,int maxlen,struct mempolicy * pol)2925 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2926 {
2927 	char *p = buffer;
2928 	nodemask_t nodes = NODE_MASK_NONE;
2929 	unsigned short mode = MPOL_DEFAULT;
2930 	unsigned short flags = 0;
2931 
2932 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2933 		mode = pol->mode;
2934 		flags = pol->flags;
2935 	}
2936 
2937 	switch (mode) {
2938 	case MPOL_DEFAULT:
2939 		break;
2940 	case MPOL_PREFERRED:
2941 		if (flags & MPOL_F_LOCAL)
2942 			mode = MPOL_LOCAL;
2943 		else
2944 			node_set(pol->v.preferred_node, nodes);
2945 		break;
2946 	case MPOL_BIND:
2947 	case MPOL_INTERLEAVE:
2948 		nodes = pol->v.nodes;
2949 		break;
2950 	default:
2951 		WARN_ON_ONCE(1);
2952 		snprintf(p, maxlen, "unknown");
2953 		return;
2954 	}
2955 
2956 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2957 
2958 	if (flags & MPOL_MODE_FLAGS) {
2959 		p += snprintf(p, buffer + maxlen - p, "=");
2960 
2961 		/*
2962 		 * Currently, the only defined flags are mutually exclusive
2963 		 */
2964 		if (flags & MPOL_F_STATIC_NODES)
2965 			p += snprintf(p, buffer + maxlen - p, "static");
2966 		else if (flags & MPOL_F_RELATIVE_NODES)
2967 			p += snprintf(p, buffer + maxlen - p, "relative");
2968 	}
2969 
2970 	if (!nodes_empty(nodes))
2971 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2972 			       nodemask_pr_args(&nodes));
2973 }
2974