1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple NUMA memory policy for the Linux kernel.
4  *
5  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7  *
8  * NUMA policy allows the user to give hints about which node(s) memory should
9  * be allocated on.
10  *
11  * Support the following memory policies, per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy a process counter
20  *                is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind would truly restrict
26  *                the allocation to memory nodes instead
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case NUMA_NO_NODE here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * preferred many Try a set of nodes first before normal fallback. This is
35  *                similar to preferred without the special case.
36  *
37  * default        Allocate on the local node first, or when on a VMA
38  *                use the process policy. This is what Linux always did
39  *		  in a NUMA aware kernel and still does by, ahem, default.
40  *
41  * The process policy is applied for most non interrupt memory allocations
42  * in that process' context. Interrupts ignore the policies and always
43  * try to allocate on the local CPU. The VMA policy is only applied for memory
44  * allocations for a VMA in the VM.
45  *
46  * Currently there are a few corner cases in swapping where the policy
47  * is not applied, but the majority should be handled. When process policy
48  * is used it is not remembered over swap outs/swap ins.
49  *
50  * Only the highest zone in the zone hierarchy gets policied. Allocations
51  * requesting a lower zone just use default policy. This implies that
52  * on systems with highmem kernel lowmem allocations don't get policied.
53  * Same with GFP_DMA allocations.
54  *
55  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
56  * all users and remembered even when nobody has memory mapped.
57  */
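
/*
 * Illustrative userspace usage (editor's sketch, not part of the original
 * file): the modes above are selected from userspace with set_mempolicy(2)
 * for the process policy and mbind(2) for a per-VMA policy.  A minimal
 * example, assuming <numaif.h> from libnuma and a machine with nodes 0,1:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long both = 0x3;	// nodemask with nodes 0 and 1
 *	// interleave this process's future allocations across nodes 0,1
 *	set_mempolicy(MPOL_INTERLEAVE, &both, 8 * sizeof(both));
 *
 *	// bind one anonymous mapping to node 0 only
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 0x1;
 *	mbind(p, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 */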
58 
59 /* Notebook:
60    fix mmap readahead to honour policy and enable policy for any page cache
61    object
62    statistics for bigpages
63    global policy for page cache? currently it uses process policy. Requires
64    first item above.
65    handle mremap for shared memory (currently ignored for the policy)
66    grows down?
67    make bind policy root only? It can trigger oom much faster and the
68    kernel is not always grateful with that.
69 */
70 
71 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72 
73 #include <linux/mempolicy.h>
74 #include <linux/pagewalk.h>
75 #include <linux/highmem.h>
76 #include <linux/hugetlb.h>
77 #include <linux/kernel.h>
78 #include <linux/sched.h>
79 #include <linux/sched/mm.h>
80 #include <linux/sched/numa_balancing.h>
81 #include <linux/sched/task.h>
82 #include <linux/nodemask.h>
83 #include <linux/cpuset.h>
84 #include <linux/slab.h>
85 #include <linux/string.h>
86 #include <linux/export.h>
87 #include <linux/nsproxy.h>
88 #include <linux/interrupt.h>
89 #include <linux/init.h>
90 #include <linux/compat.h>
91 #include <linux/ptrace.h>
92 #include <linux/swap.h>
93 #include <linux/seq_file.h>
94 #include <linux/proc_fs.h>
95 #include <linux/migrate.h>
96 #include <linux/ksm.h>
97 #include <linux/rmap.h>
98 #include <linux/security.h>
99 #include <linux/syscalls.h>
100 #include <linux/ctype.h>
101 #include <linux/mm_inline.h>
102 #include <linux/mmu_notifier.h>
103 #include <linux/printk.h>
104 #include <linux/swapops.h>
105 
106 #include <asm/tlbflush.h>
107 #include <asm/tlb.h>
108 #include <linux/uaccess.h>
109 
110 #include "internal.h"
111 
112 /* Internal flags */
113 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
114 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
115 
116 static struct kmem_cache *policy_cache;
117 static struct kmem_cache *sn_cache;
118 
119 /* Highest zone. A specific allocation for a zone below that is not
120    policied. */
121 enum zone_type policy_zone = 0;
122 
123 /*
124  * run-time system-wide default policy => local allocation
125  */
126 static struct mempolicy default_policy = {
127 	.refcnt = ATOMIC_INIT(1), /* never free it */
128 	.mode = MPOL_LOCAL,
129 };
130 
131 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
132 
133 /**
134  * numa_map_to_online_node - Find closest online node
135  * @node: Node id to start the search
136  *
137  * Lookup the next closest node by distance if @nid is not online.
138  */
139 int numa_map_to_online_node(int node)
140 {
141 	int min_dist = INT_MAX, dist, n, min_node;
142 
143 	if (node == NUMA_NO_NODE || node_online(node))
144 		return node;
145 
146 	min_node = node;
147 	for_each_online_node(n) {
148 		dist = node_distance(node, n);
149 		if (dist < min_dist) {
150 			min_dist = dist;
151 			min_node = n;
152 		}
153 	}
154 
155 	return min_node;
156 }
157 EXPORT_SYMBOL_GPL(numa_map_to_online_node);
158 
159 struct mempolicy *get_task_policy(struct task_struct *p)
160 {
161 	struct mempolicy *pol = p->mempolicy;
162 	int node;
163 
164 	if (pol)
165 		return pol;
166 
167 	node = numa_node_id();
168 	if (node != NUMA_NO_NODE) {
169 		pol = &preferred_node_policy[node];
170 		/* preferred_node_policy is not initialised early in boot */
171 		if (pol->mode)
172 			return pol;
173 	}
174 
175 	return &default_policy;
176 }
177 
178 static const struct mempolicy_operations {
179 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
180 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
181 } mpol_ops[MPOL_MAX];
182 
183 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
184 {
185 	return pol->flags & MPOL_MODE_FLAGS;
186 }
187 
188 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
189 				   const nodemask_t *rel)
190 {
191 	nodemask_t tmp;
192 	nodes_fold(tmp, *orig, nodes_weight(*rel));
193 	nodes_onto(*ret, tmp, *rel);
194 }
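
/*
 * Worked example (editor's illustration) of the MPOL_F_RELATIVE_NODES
 * remapping above: with a user mask of {0,2} and a cpuset mask of {4,5,6},
 * nodes_fold() wraps the user mask modulo the cpuset weight (3), giving
 * {0,2}, and nodes_onto() then maps those relative positions onto the
 * cpuset's set bits, yielding {4,6}.
 */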
195 
196 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
197 {
198 	if (nodes_empty(*nodes))
199 		return -EINVAL;
200 	pol->nodes = *nodes;
201 	return 0;
202 }
203 
204 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
205 {
206 	if (nodes_empty(*nodes))
207 		return -EINVAL;
208 
209 	nodes_clear(pol->nodes);
210 	node_set(first_node(*nodes), pol->nodes);
211 	return 0;
212 }
213 
214 /*
215  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
216  * any, for the new policy.  mpol_new() has already validated the nodes
217  * parameter with respect to the policy mode and flags.
218  *
219  * Must be called holding task's alloc_lock to protect task's mems_allowed
220  * and mempolicy.  May also be called holding the mmap_lock for write.
221  */
222 static int mpol_set_nodemask(struct mempolicy *pol,
223 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
224 {
225 	int ret;
226 
227 	/*
228 	 * Default (pol==NULL) resp. local memory policies are not a
229 	 * subject of any remapping. They also do not need any special
230 	 * constructor.
231 	 */
232 	if (!pol || pol->mode == MPOL_LOCAL)
233 		return 0;
234 
235 	/* Check N_MEMORY */
236 	nodes_and(nsc->mask1,
237 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
238 
239 	VM_BUG_ON(!nodes);
240 
241 	if (pol->flags & MPOL_F_RELATIVE_NODES)
242 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
243 	else
244 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
245 
246 	if (mpol_store_user_nodemask(pol))
247 		pol->w.user_nodemask = *nodes;
248 	else
249 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
250 
251 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
252 	return ret;
253 }
254 
255 /*
256  * This function just creates a new policy, does some checks and simple
257  * initialization. You must invoke mpol_set_nodemask() to set nodes.
258  */
259 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
260 				  nodemask_t *nodes)
261 {
262 	struct mempolicy *policy;
263 
264 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
265 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
266 
267 	if (mode == MPOL_DEFAULT) {
268 		if (nodes && !nodes_empty(*nodes))
269 			return ERR_PTR(-EINVAL);
270 		return NULL;
271 	}
272 	VM_BUG_ON(!nodes);
273 
274 	/*
275 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
276 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
277 	 * All other modes require a valid pointer to a non-empty nodemask.
278 	 */
279 	if (mode == MPOL_PREFERRED) {
280 		if (nodes_empty(*nodes)) {
281 			if (((flags & MPOL_F_STATIC_NODES) ||
282 			     (flags & MPOL_F_RELATIVE_NODES)))
283 				return ERR_PTR(-EINVAL);
284 
285 			mode = MPOL_LOCAL;
286 		}
287 	} else if (mode == MPOL_LOCAL) {
288 		if (!nodes_empty(*nodes) ||
289 		    (flags & MPOL_F_STATIC_NODES) ||
290 		    (flags & MPOL_F_RELATIVE_NODES))
291 			return ERR_PTR(-EINVAL);
292 	} else if (nodes_empty(*nodes))
293 		return ERR_PTR(-EINVAL);
294 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
295 	if (!policy)
296 		return ERR_PTR(-ENOMEM);
297 	atomic_set(&policy->refcnt, 1);
298 	policy->mode = mode;
299 	policy->flags = flags;
300 
301 	return policy;
302 }
303 
304 /* Slow path of a mpol destructor. */
305 void __mpol_put(struct mempolicy *p)
306 {
307 	if (!atomic_dec_and_test(&p->refcnt))
308 		return;
309 	kmem_cache_free(policy_cache, p);
310 }
311 
312 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
313 {
314 }
315 
316 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
317 {
318 	nodemask_t tmp;
319 
320 	if (pol->flags & MPOL_F_STATIC_NODES)
321 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
322 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
323 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
324 	else {
325 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
326 								*nodes);
327 		pol->w.cpuset_mems_allowed = *nodes;
328 	}
329 
330 	if (nodes_empty(tmp))
331 		tmp = *nodes;
332 
333 	pol->nodes = tmp;
334 }
335 
336 static void mpol_rebind_preferred(struct mempolicy *pol,
337 						const nodemask_t *nodes)
338 {
339 	pol->w.cpuset_mems_allowed = *nodes;
340 }
341 
342 /*
343  * mpol_rebind_policy - Migrate a policy to a different set of nodes
344  *
345  * Per-vma policies are protected by mmap_lock. Allocations using per-task
346  * policies are protected by task->mems_allowed_seq to prevent a premature
347  * OOM/allocation failure due to parallel nodemask modification.
348  */
349 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
350 {
351 	if (!pol || pol->mode == MPOL_LOCAL)
352 		return;
353 	if (!mpol_store_user_nodemask(pol) &&
354 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
355 		return;
356 
357 	mpol_ops[pol->mode].rebind(pol, newmask);
358 }
359 
360 /*
361  * Wrapper for mpol_rebind_policy() that just requires task
362  * pointer, and updates task mempolicy.
363  *
364  * Called with task's alloc_lock held.
365  */
366 
367 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
368 {
369 	mpol_rebind_policy(tsk->mempolicy, new);
370 }
371 
372 /*
373  * Rebind each vma in mm to new nodemask.
374  *
375  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
376  */
377 
378 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
379 {
380 	struct vm_area_struct *vma;
381 
382 	mmap_write_lock(mm);
383 	for (vma = mm->mmap; vma; vma = vma->vm_next)
384 		mpol_rebind_policy(vma->vm_policy, new);
385 	mmap_write_unlock(mm);
386 }
387 
388 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
389 	[MPOL_DEFAULT] = {
390 		.rebind = mpol_rebind_default,
391 	},
392 	[MPOL_INTERLEAVE] = {
393 		.create = mpol_new_nodemask,
394 		.rebind = mpol_rebind_nodemask,
395 	},
396 	[MPOL_PREFERRED] = {
397 		.create = mpol_new_preferred,
398 		.rebind = mpol_rebind_preferred,
399 	},
400 	[MPOL_BIND] = {
401 		.create = mpol_new_nodemask,
402 		.rebind = mpol_rebind_nodemask,
403 	},
404 	[MPOL_LOCAL] = {
405 		.rebind = mpol_rebind_default,
406 	},
407 	[MPOL_PREFERRED_MANY] = {
408 		.create = mpol_new_nodemask,
409 		.rebind = mpol_rebind_preferred,
410 	},
411 };
412 
413 static int migrate_page_add(struct page *page, struct list_head *pagelist,
414 				unsigned long flags);
415 
416 struct queue_pages {
417 	struct list_head *pagelist;
418 	unsigned long flags;
419 	nodemask_t *nmask;
420 	unsigned long start;
421 	unsigned long end;
422 	struct vm_area_struct *first;
423 };
424 
425 /*
426  * Check if the page's nid is in qp->nmask.
427  *
428  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
429  * in the complement of qp->nmask.
430  */
431 static inline bool queue_pages_required(struct page *page,
432 					struct queue_pages *qp)
433 {
434 	int nid = page_to_nid(page);
435 	unsigned long flags = qp->flags;
436 
437 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
438 }
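
/*
 * Note (editor's illustration): do_mbind() calls queue_pages_range() with
 * MPOL_MF_INVERT set, so a page "qualifies" here when its node is *not* in
 * the requested nodemask, i.e. it is misplaced and a migration candidate.
 */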
439 
440 /*
441  * queue_pages_pmd() has four possible return values:
442  * 0 - pages are placed on the right node or queued successfully, or
443  *     special page is met, i.e. huge zero page.
444  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
445  *     specified.
446  * 2 - THP was split.
447  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
448  *        existing page was already on a node that does not follow the
449  *        policy.
450  */
451 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
452 				unsigned long end, struct mm_walk *walk)
453 	__releases(ptl)
454 {
455 	int ret = 0;
456 	struct page *page;
457 	struct queue_pages *qp = walk->private;
458 	unsigned long flags;
459 
460 	if (unlikely(is_pmd_migration_entry(*pmd))) {
461 		ret = -EIO;
462 		goto unlock;
463 	}
464 	page = pmd_page(*pmd);
465 	if (is_huge_zero_page(page)) {
466 		spin_unlock(ptl);
467 		walk->action = ACTION_CONTINUE;
468 		goto out;
469 	}
470 	if (!queue_pages_required(page, qp))
471 		goto unlock;
472 
473 	flags = qp->flags;
474 	/* go to thp migration */
475 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
476 		if (!vma_migratable(walk->vma) ||
477 		    migrate_page_add(page, qp->pagelist, flags)) {
478 			ret = 1;
479 			goto unlock;
480 		}
481 	} else
482 		ret = -EIO;
483 unlock:
484 	spin_unlock(ptl);
485 out:
486 	return ret;
487 }
488 
489 /*
490  * Scan through pages checking if pages follow certain conditions,
491  * and move them to the pagelist if they do.
492  *
493  * queue_pages_pte_range() has three possible return values:
494  * 0 - pages are placed on the right node or queued successfully, or
495  *     special page is met, i.e. zero page.
496  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
497  *     specified.
498  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
499  *        on a node that does not follow the policy.
500  */
501 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
502 			unsigned long end, struct mm_walk *walk)
503 {
504 	struct vm_area_struct *vma = walk->vma;
505 	struct page *page;
506 	struct queue_pages *qp = walk->private;
507 	unsigned long flags = qp->flags;
508 	int ret;
509 	bool has_unmovable = false;
510 	pte_t *pte, *mapped_pte;
511 	spinlock_t *ptl;
512 
513 	ptl = pmd_trans_huge_lock(pmd, vma);
514 	if (ptl) {
515 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
516 		if (ret != 2)
517 			return ret;
518 	}
519 	/* THP was split, fall through to pte walk */
520 
521 	if (pmd_trans_unstable(pmd))
522 		return 0;
523 
524 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
525 	for (; addr != end; pte++, addr += PAGE_SIZE) {
526 		if (!pte_present(*pte))
527 			continue;
528 		page = vm_normal_page(vma, addr, *pte);
529 		if (!page)
530 			continue;
531 		/*
532 		 * vm_normal_page() filters out zero pages, but there might
533 		 * still be PageReserved pages to skip, perhaps in a VDSO.
534 		 */
535 		if (PageReserved(page))
536 			continue;
537 		if (!queue_pages_required(page, qp))
538 			continue;
539 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
540 			/* MPOL_MF_STRICT must be specified if we get here */
541 			if (!vma_migratable(vma)) {
542 				has_unmovable = true;
543 				break;
544 			}
545 
546 			/*
547 			 * Do not abort immediately since there may be
548 			 * temporary off LRU pages in the range.  Still
549 			 * need to migrate other LRU pages.
550 			 */
551 			if (migrate_page_add(page, qp->pagelist, flags))
552 				has_unmovable = true;
553 		} else
554 			break;
555 	}
556 	pte_unmap_unlock(mapped_pte, ptl);
557 	cond_resched();
558 
559 	if (has_unmovable)
560 		return 1;
561 
562 	return addr != end ? -EIO : 0;
563 }
564 
565 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
566 			       unsigned long addr, unsigned long end,
567 			       struct mm_walk *walk)
568 {
569 	int ret = 0;
570 #ifdef CONFIG_HUGETLB_PAGE
571 	struct queue_pages *qp = walk->private;
572 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
573 	struct page *page;
574 	spinlock_t *ptl;
575 	pte_t entry;
576 
577 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
578 	entry = huge_ptep_get(pte);
579 	if (!pte_present(entry))
580 		goto unlock;
581 	page = pte_page(entry);
582 	if (!queue_pages_required(page, qp))
583 		goto unlock;
584 
585 	if (flags == MPOL_MF_STRICT) {
586 		/*
587 		 * STRICT alone means only detecting misplaced page and no
588 		 * need to further check other vma.
589 		 */
590 		ret = -EIO;
591 		goto unlock;
592 	}
593 
594 	if (!vma_migratable(walk->vma)) {
595 		/*
596 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
597 		 * stopped walking the current vma.
598 		 * Detect the misplaced page but allow migrating pages which
599 		 * have been queued.
600 		 */
601 		ret = 1;
602 		goto unlock;
603 	}
604 
605 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
606 	if (flags & (MPOL_MF_MOVE_ALL) ||
607 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
608 	     !hugetlb_pmd_shared(pte))) {
609 		if (isolate_hugetlb(page, qp->pagelist) &&
610 			(flags & MPOL_MF_STRICT))
611 			/*
612 			 * Failed to isolate page but allow migrating pages
613 			 * which have been queued.
614 			 */
615 			ret = 1;
616 	}
617 unlock:
618 	spin_unlock(ptl);
619 #else
620 	BUG();
621 #endif
622 	return ret;
623 }
624 
625 #ifdef CONFIG_NUMA_BALANCING
626 /*
627  * This is used to mark a range of virtual addresses to be inaccessible.
628  * These are later cleared by a NUMA hinting fault. Depending on these
629  * faults, pages may be migrated for better NUMA placement.
630  *
631  * This is assuming that NUMA faults are handled using PROT_NONE. If
632  * an architecture makes a different choice, it will need further
633  * changes to the core.
634  */
635 unsigned long change_prot_numa(struct vm_area_struct *vma,
636 			unsigned long addr, unsigned long end)
637 {
638 	struct mmu_gather tlb;
639 	int nr_updated;
640 
641 	tlb_gather_mmu(&tlb, vma->vm_mm);
642 
643 	nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
644 				       MM_CP_PROT_NUMA);
645 	if (nr_updated)
646 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
647 
648 	tlb_finish_mmu(&tlb);
649 
650 	return nr_updated;
651 }
652 #else
653 static unsigned long change_prot_numa(struct vm_area_struct *vma,
654 			unsigned long addr, unsigned long end)
655 {
656 	return 0;
657 }
658 #endif /* CONFIG_NUMA_BALANCING */
659 
660 static int queue_pages_test_walk(unsigned long start, unsigned long end,
661 				struct mm_walk *walk)
662 {
663 	struct vm_area_struct *vma = walk->vma;
664 	struct queue_pages *qp = walk->private;
665 	unsigned long endvma = vma->vm_end;
666 	unsigned long flags = qp->flags;
667 
668 	/* range check first */
669 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
670 
671 	if (!qp->first) {
672 		qp->first = vma;
673 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
674 			(qp->start < vma->vm_start))
675 			/* hole at head side of range */
676 			return -EFAULT;
677 	}
678 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
679 		((vma->vm_end < qp->end) &&
680 		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
681 		/* hole at middle or tail of range */
682 		return -EFAULT;
683 
684 	/*
685 	 * Need to check MPOL_MF_STRICT to return -EIO if possible
686 	 * regardless of vma_migratable
687 	 */
688 	if (!vma_migratable(vma) &&
689 	    !(flags & MPOL_MF_STRICT))
690 		return 1;
691 
692 	if (endvma > end)
693 		endvma = end;
694 
695 	if (flags & MPOL_MF_LAZY) {
696 		/* Similar to task_numa_work, skip inaccessible VMAs */
697 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
698 			!(vma->vm_flags & VM_MIXEDMAP))
699 			change_prot_numa(vma, start, endvma);
700 		return 1;
701 	}
702 
703 	/* queue pages from current vma */
704 	if (flags & MPOL_MF_VALID)
705 		return 0;
706 	return 1;
707 }
708 
709 static const struct mm_walk_ops queue_pages_walk_ops = {
710 	.hugetlb_entry		= queue_pages_hugetlb,
711 	.pmd_entry		= queue_pages_pte_range,
712 	.test_walk		= queue_pages_test_walk,
713 };
714 
715 /*
716  * Walk through page tables and collect pages to be migrated.
717  *
718  * If pages found in a given range are on a set of nodes (determined by
719  * @nodes and @flags), they are isolated and queued to the pagelist which is
720  * passed via @private.
721  *
722  * queue_pages_range() has three possible return values:
723  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
724  *     specified.
725  * 0 - queue pages successfully or no misplaced page.
726  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
727  *         memory range specified by nodemask and maxnode points outside
728  *         your accessible address space (-EFAULT)
729  */
730 static int
731 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
732 		nodemask_t *nodes, unsigned long flags,
733 		struct list_head *pagelist)
734 {
735 	int err;
736 	struct queue_pages qp = {
737 		.pagelist = pagelist,
738 		.flags = flags,
739 		.nmask = nodes,
740 		.start = start,
741 		.end = end,
742 		.first = NULL,
743 	};
744 
745 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
746 
747 	if (!qp.first)
748 		/* whole range in hole */
749 		err = -EFAULT;
750 
751 	return err;
752 }
753 
754 /*
755  * Apply policy to a single VMA
756  * This must be called with the mmap_lock held for writing.
757  */
758 static int vma_replace_policy(struct vm_area_struct *vma,
759 						struct mempolicy *pol)
760 {
761 	int err;
762 	struct mempolicy *old;
763 	struct mempolicy *new;
764 
765 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
766 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
767 		 vma->vm_ops, vma->vm_file,
768 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
769 
770 	new = mpol_dup(pol);
771 	if (IS_ERR(new))
772 		return PTR_ERR(new);
773 
774 	if (vma->vm_ops && vma->vm_ops->set_policy) {
775 		err = vma->vm_ops->set_policy(vma, new);
776 		if (err)
777 			goto err_out;
778 	}
779 
780 	old = vma->vm_policy;
781 	vma->vm_policy = new; /* protected by mmap_lock */
782 	mpol_put(old);
783 
784 	return 0;
785  err_out:
786 	mpol_put(new);
787 	return err;
788 }
789 
790 /* Step 2: apply policy to a range and do splits. */
791 static int mbind_range(struct mm_struct *mm, unsigned long start,
792 		       unsigned long end, struct mempolicy *new_pol)
793 {
794 	struct vm_area_struct *prev;
795 	struct vm_area_struct *vma;
796 	int err = 0;
797 	pgoff_t pgoff;
798 	unsigned long vmstart;
799 	unsigned long vmend;
800 
801 	vma = find_vma(mm, start);
802 	VM_BUG_ON(!vma);
803 
804 	prev = vma->vm_prev;
805 	if (start > vma->vm_start)
806 		prev = vma;
807 
808 	for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {
809 		vmstart = max(start, vma->vm_start);
810 		vmend   = min(end, vma->vm_end);
811 
812 		if (mpol_equal(vma_policy(vma), new_pol))
813 			continue;
814 
815 		pgoff = vma->vm_pgoff +
816 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
817 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
818 				 vma->anon_vma, vma->vm_file, pgoff,
819 				 new_pol, vma->vm_userfaultfd_ctx,
820 				 anon_vma_name(vma));
821 		if (prev) {
822 			vma = prev;
823 			goto replace;
824 		}
825 		if (vma->vm_start != vmstart) {
826 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
827 			if (err)
828 				goto out;
829 		}
830 		if (vma->vm_end != vmend) {
831 			err = split_vma(vma->vm_mm, vma, vmend, 0);
832 			if (err)
833 				goto out;
834 		}
835  replace:
836 		err = vma_replace_policy(vma, new_pol);
837 		if (err)
838 			goto out;
839 	}
840 
841  out:
842 	return err;
843 }
844 
845 /* Set the process memory policy */
846 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
847 			     nodemask_t *nodes)
848 {
849 	struct mempolicy *new, *old;
850 	NODEMASK_SCRATCH(scratch);
851 	int ret;
852 
853 	if (!scratch)
854 		return -ENOMEM;
855 
856 	new = mpol_new(mode, flags, nodes);
857 	if (IS_ERR(new)) {
858 		ret = PTR_ERR(new);
859 		goto out;
860 	}
861 
862 	ret = mpol_set_nodemask(new, nodes, scratch);
863 	if (ret) {
864 		mpol_put(new);
865 		goto out;
866 	}
867 	task_lock(current);
868 	old = current->mempolicy;
869 	current->mempolicy = new;
870 	if (new && new->mode == MPOL_INTERLEAVE)
871 		current->il_prev = MAX_NUMNODES-1;
872 	task_unlock(current);
873 	mpol_put(old);
874 	ret = 0;
875 out:
876 	NODEMASK_SCRATCH_FREE(scratch);
877 	return ret;
878 }
879 
880 /*
881  * Return nodemask for policy for get_mempolicy() query
882  *
883  * Called with task's alloc_lock held
884  */
885 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
886 {
887 	nodes_clear(*nodes);
888 	if (p == &default_policy)
889 		return;
890 
891 	switch (p->mode) {
892 	case MPOL_BIND:
893 	case MPOL_INTERLEAVE:
894 	case MPOL_PREFERRED:
895 	case MPOL_PREFERRED_MANY:
896 		*nodes = p->nodes;
897 		break;
898 	case MPOL_LOCAL:
899 		/* return empty node mask for local allocation */
900 		break;
901 	default:
902 		BUG();
903 	}
904 }
905 
906 static int lookup_node(struct mm_struct *mm, unsigned long addr)
907 {
908 	struct page *p = NULL;
909 	int err;
910 
911 	int locked = 1;
912 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
913 	if (err > 0) {
914 		err = page_to_nid(p);
915 		put_page(p);
916 	}
917 	if (locked)
918 		mmap_read_unlock(mm);
919 	return err;
920 }
921 
922 /* Retrieve NUMA policy */
923 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
924 			     unsigned long addr, unsigned long flags)
925 {
926 	int err;
927 	struct mm_struct *mm = current->mm;
928 	struct vm_area_struct *vma = NULL;
929 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
930 
931 	if (flags &
932 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
933 		return -EINVAL;
934 
935 	if (flags & MPOL_F_MEMS_ALLOWED) {
936 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
937 			return -EINVAL;
938 		*policy = 0;	/* just so it's initialized */
939 		task_lock(current);
940 		*nmask  = cpuset_current_mems_allowed;
941 		task_unlock(current);
942 		return 0;
943 	}
944 
945 	if (flags & MPOL_F_ADDR) {
946 		/*
947 		 * Do NOT fall back to task policy if the
948 		 * vma/shared policy at addr is NULL.  We
949 		 * want to return MPOL_DEFAULT in this case.
950 		 */
951 		mmap_read_lock(mm);
952 		vma = vma_lookup(mm, addr);
953 		if (!vma) {
954 			mmap_read_unlock(mm);
955 			return -EFAULT;
956 		}
957 		if (vma->vm_ops && vma->vm_ops->get_policy)
958 			pol = vma->vm_ops->get_policy(vma, addr);
959 		else
960 			pol = vma->vm_policy;
961 	} else if (addr)
962 		return -EINVAL;
963 
964 	if (!pol)
965 		pol = &default_policy;	/* indicates default behavior */
966 
967 	if (flags & MPOL_F_NODE) {
968 		if (flags & MPOL_F_ADDR) {
969 			/*
970 			 * Take a refcount on the mpol, lookup_node()
971 			 * will drop the mmap_lock, so after calling
972 			 * lookup_node() only "pol" remains valid, "vma"
973 			 * is stale.
974 			 */
975 			pol_refcount = pol;
976 			vma = NULL;
977 			mpol_get(pol);
978 			err = lookup_node(mm, addr);
979 			if (err < 0)
980 				goto out;
981 			*policy = err;
982 		} else if (pol == current->mempolicy &&
983 				pol->mode == MPOL_INTERLEAVE) {
984 			*policy = next_node_in(current->il_prev, pol->nodes);
985 		} else {
986 			err = -EINVAL;
987 			goto out;
988 		}
989 	} else {
990 		*policy = pol == &default_policy ? MPOL_DEFAULT :
991 						pol->mode;
992 		/*
993 		 * Internal mempolicy flags must be masked off before exposing
994 		 * the policy to userspace.
995 		 */
996 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
997 	}
998 
999 	err = 0;
1000 	if (nmask) {
1001 		if (mpol_store_user_nodemask(pol)) {
1002 			*nmask = pol->w.user_nodemask;
1003 		} else {
1004 			task_lock(current);
1005 			get_policy_nodemask(pol, nmask);
1006 			task_unlock(current);
1007 		}
1008 	}
1009 
1010  out:
1011 	mpol_cond_put(pol);
1012 	if (vma)
1013 		mmap_read_unlock(mm);
1014 	if (pol_refcount)
1015 		mpol_put(pol_refcount);
1016 	return err;
1017 }
1018 
1019 #ifdef CONFIG_MIGRATION
1020 /*
1021  * page migration, thp tail pages can be passed.
1022  */
1023 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1024 				unsigned long flags)
1025 {
1026 	struct page *head = compound_head(page);
1027 	/*
1028 	 * Avoid migrating a page that is shared with others.
1029 	 */
1030 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1031 		if (!isolate_lru_page(head)) {
1032 			list_add_tail(&head->lru, pagelist);
1033 			mod_node_page_state(page_pgdat(head),
1034 				NR_ISOLATED_ANON + page_is_file_lru(head),
1035 				thp_nr_pages(head));
1036 		} else if (flags & MPOL_MF_STRICT) {
1037 			/*
1038 			 * Non-movable page may reach here.  And, there may be
1039 			 * temporary off LRU pages or non-LRU movable pages.
1040 			 * Treat them as unmovable pages since they can't be
1041 			 * isolated, so they can't be moved at the moment.  It
1042 			 * should return -EIO for this case too.
1043 			 */
1044 			return -EIO;
1045 		}
1046 	}
1047 
1048 	return 0;
1049 }
1050 
1051 /*
1052  * Migrate pages from one node to a target node.
1053  * Returns error or the number of pages not migrated.
1054  */
1055 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1056 			   int flags)
1057 {
1058 	nodemask_t nmask;
1059 	LIST_HEAD(pagelist);
1060 	int err = 0;
1061 	struct migration_target_control mtc = {
1062 		.nid = dest,
1063 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1064 	};
1065 
1066 	nodes_clear(nmask);
1067 	node_set(source, nmask);
1068 
1069 	/*
1070 	 * This does not "check" the range but isolates all pages that
1071 	 * need migration.  Between passing in the full user address
1072 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1073 	 */
1074 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1075 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1076 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1077 
1078 	if (!list_empty(&pagelist)) {
1079 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1080 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1081 		if (err)
1082 			putback_movable_pages(&pagelist);
1083 	}
1084 
1085 	return err;
1086 }
1087 
1088 /*
1089  * Move pages between the two nodesets so as to preserve the physical
1090  * layout as much as possible.
1091  *
1093  * Returns the number of pages that could not be moved.
1093  */
1094 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1095 		     const nodemask_t *to, int flags)
1096 {
1097 	int busy = 0;
1098 	int err = 0;
1099 	nodemask_t tmp;
1100 
1101 	lru_cache_disable();
1102 
1103 	mmap_read_lock(mm);
1104 
1105 	/*
1106 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1107 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1108 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1109 	 * The pair of nodemasks 'to' and 'from' define the map.
1110 	 *
1111 	 * If no pair of bits is found that way, fallback to picking some
1112 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1113 	 * 'source' and 'dest' bits are the same, this represents a node
1114 	 * that will be migrating to itself, so no pages need move.
1115 	 *
1116 	 * If no bits are left in 'tmp', or if all remaining bits left
1117 	 * in 'tmp' correspond to the same bit in 'to', return false
1118 	 * (nothing left to migrate).
1119 	 *
1120 	 * This lets us pick a pair of nodes to migrate between, such that
1121 	 * if possible the dest node is not already occupied by some other
1122 	 * source node, minimizing the risk of overloading the memory on a
1123 	 * node that would happen if we migrated incoming memory to a node
1124 	 * before migrating outgoing memory sourced from that same node.
1125 	 *
1126 	 * A single scan of tmp is sufficient.  As we go, we remember the
1127 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1128 	 * that not only moved, but what's better, moved to an empty slot
1129 	 * (d is not set in tmp), then we break out then, with that pair.
1130 	 * Otherwise when we finish scanning from_tmp, we at least have the
1131 	 * most recent <s, d> pair that moved.  If we get all the way through
1132 	 * the scan of tmp without finding any node that moved, much less
1133 	 * moved to an empty node, then there is nothing left worth migrating.
1134 	 */
1135 
1136 	tmp = *from;
1137 	while (!nodes_empty(tmp)) {
1138 		int s, d;
1139 		int source = NUMA_NO_NODE;
1140 		int dest = 0;
1141 
1142 		for_each_node_mask(s, tmp) {
1143 
1144 			/*
1145 			 * do_migrate_pages() tries to maintain the relative
1146 			 * node relationship of the pages established between
1147 			 * threads and memory areas.
1148 			 *
1149 			 * However if the number of source nodes is not equal to
1150 			 * the number of destination nodes we can not preserve
1151 			 * this node relative relationship.  In that case, skip
1152 			 * copying memory from a node that is in the destination
1153 			 * mask.
1154 			 *
1155 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1156 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1157 			 */
1158 
1159 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1160 						(node_isset(s, *to)))
1161 				continue;
1162 
1163 			d = node_remap(s, *from, *to);
1164 			if (s == d)
1165 				continue;
1166 
1167 			source = s;	/* Node moved. Memorize */
1168 			dest = d;
1169 
1170 			/* dest not in remaining from nodes? */
1171 			if (!node_isset(dest, tmp))
1172 				break;
1173 		}
1174 		if (source == NUMA_NO_NODE)
1175 			break;
1176 
1177 		node_clear(source, tmp);
1178 		err = migrate_to_node(mm, source, dest, flags);
1179 		if (err > 0)
1180 			busy += err;
1181 		if (err < 0)
1182 			break;
1183 	}
1184 	mmap_read_unlock(mm);
1185 
1186 	lru_cache_enable();
1187 	if (err < 0)
1188 		return err;
1189 	return busy;
1190 
1191 }
1192 
1193 /*
1194  * Allocate a new page for page migration based on vma policy.
1195  * Start by assuming the page is mapped by the same vma as contains @start.
1196  * Search forward from there, if not.  N.B., this assumes that the
1197  * list of pages handed to migrate_pages()--which is how we get here--
1198  * is in virtual address order.
1199  */
1200 static struct page *new_page(struct page *page, unsigned long start)
1201 {
1202 	struct vm_area_struct *vma;
1203 	unsigned long address;
1204 
1205 	vma = find_vma(current->mm, start);
1206 	while (vma) {
1207 		address = page_address_in_vma(page, vma);
1208 		if (address != -EFAULT)
1209 			break;
1210 		vma = vma->vm_next;
1211 	}
1212 
1213 	if (PageHuge(page)) {
1214 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1215 				vma, address);
1216 	} else if (PageTransHuge(page)) {
1217 		struct page *thp;
1218 
1219 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1220 					 HPAGE_PMD_ORDER);
1221 		if (!thp)
1222 			return NULL;
1223 		prep_transhuge_page(thp);
1224 		return thp;
1225 	}
1226 	/*
1227 	 * if !vma, alloc_page_vma() will use task or system default policy
1228 	 */
1229 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1230 			vma, address);
1231 }
1232 #else
1233 
1234 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1235 				unsigned long flags)
1236 {
1237 	return -EIO;
1238 }
1239 
1240 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1241 		     const nodemask_t *to, int flags)
1242 {
1243 	return -ENOSYS;
1244 }
1245 
1246 static struct page *new_page(struct page *page, unsigned long start)
1247 {
1248 	return NULL;
1249 }
1250 #endif
1251 
1252 static long do_mbind(unsigned long start, unsigned long len,
1253 		     unsigned short mode, unsigned short mode_flags,
1254 		     nodemask_t *nmask, unsigned long flags)
1255 {
1256 	struct mm_struct *mm = current->mm;
1257 	struct mempolicy *new;
1258 	unsigned long end;
1259 	int err;
1260 	int ret;
1261 	LIST_HEAD(pagelist);
1262 
1263 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1264 		return -EINVAL;
1265 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1266 		return -EPERM;
1267 
1268 	if (start & ~PAGE_MASK)
1269 		return -EINVAL;
1270 
1271 	if (mode == MPOL_DEFAULT)
1272 		flags &= ~MPOL_MF_STRICT;
1273 
1274 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1275 	end = start + len;
1276 
1277 	if (end < start)
1278 		return -EINVAL;
1279 	if (end == start)
1280 		return 0;
1281 
1282 	new = mpol_new(mode, mode_flags, nmask);
1283 	if (IS_ERR(new))
1284 		return PTR_ERR(new);
1285 
1286 	if (flags & MPOL_MF_LAZY)
1287 		new->flags |= MPOL_F_MOF;
1288 
1289 	/*
1290 	 * If we are using the default policy then operation
1291 	 * on discontinuous address spaces is okay after all
1292 	 */
1293 	if (!new)
1294 		flags |= MPOL_MF_DISCONTIG_OK;
1295 
1296 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1297 		 start, start + len, mode, mode_flags,
1298 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1299 
1300 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1301 
1302 		lru_cache_disable();
1303 	}
1304 	{
1305 		NODEMASK_SCRATCH(scratch);
1306 		if (scratch) {
1307 			mmap_write_lock(mm);
1308 			err = mpol_set_nodemask(new, nmask, scratch);
1309 			if (err)
1310 				mmap_write_unlock(mm);
1311 		} else
1312 			err = -ENOMEM;
1313 		NODEMASK_SCRATCH_FREE(scratch);
1314 	}
1315 	if (err)
1316 		goto mpol_out;
1317 
1318 	ret = queue_pages_range(mm, start, end, nmask,
1319 			  flags | MPOL_MF_INVERT, &pagelist);
1320 
1321 	if (ret < 0) {
1322 		err = ret;
1323 		goto up_out;
1324 	}
1325 
1326 	err = mbind_range(mm, start, end, new);
1327 
1328 	if (!err) {
1329 		int nr_failed = 0;
1330 
1331 		if (!list_empty(&pagelist)) {
1332 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1333 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1334 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1335 			if (nr_failed)
1336 				putback_movable_pages(&pagelist);
1337 		}
1338 
1339 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1340 			err = -EIO;
1341 	} else {
1342 up_out:
1343 		if (!list_empty(&pagelist))
1344 			putback_movable_pages(&pagelist);
1345 	}
1346 
1347 	mmap_write_unlock(mm);
1348 mpol_out:
1349 	mpol_put(new);
1350 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1351 		lru_cache_enable();
1352 	return err;
1353 }
1354 
1355 /*
1356  * User space interface with variable sized bitmaps for nodelists.
1357  */
1358 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1359 		      unsigned long maxnode)
1360 {
1361 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1362 	int ret;
1363 
1364 	if (in_compat_syscall())
1365 		ret = compat_get_bitmap(mask,
1366 					(const compat_ulong_t __user *)nmask,
1367 					maxnode);
1368 	else
1369 		ret = copy_from_user(mask, nmask,
1370 				     nlongs * sizeof(unsigned long));
1371 
1372 	if (ret)
1373 		return -EFAULT;
1374 
1375 	if (maxnode % BITS_PER_LONG)
1376 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1377 
1378 	return 0;
1379 }
1380 
1381 /* Copy a node mask from user space. */
1382 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1383 		     unsigned long maxnode)
1384 {
1385 	--maxnode;
1386 	nodes_clear(*nodes);
1387 	if (maxnode == 0 || !nmask)
1388 		return 0;
1389 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1390 		return -EINVAL;
1391 
1392 	/*
1393 	 * When the user specifies more nodes than supported, just check
1394 	 * that the unsupported part is all zero, one word at a time,
1395 	 * starting at the end.
1396 	 */
1397 	while (maxnode > MAX_NUMNODES) {
1398 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1399 		unsigned long t;
1400 
1401 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1402 			return -EFAULT;
1403 
1404 		if (maxnode - bits >= MAX_NUMNODES) {
1405 			maxnode -= bits;
1406 		} else {
1407 			maxnode = MAX_NUMNODES;
1408 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1409 		}
1410 		if (t)
1411 			return -EINVAL;
1412 	}
1413 
1414 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1415 }
1416 
1417 /* Copy a kernel node mask to user space */
1418 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1419 			      nodemask_t *nodes)
1420 {
1421 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1422 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1423 	bool compat = in_compat_syscall();
1424 
1425 	if (compat)
1426 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1427 
1428 	if (copy > nbytes) {
1429 		if (copy > PAGE_SIZE)
1430 			return -EINVAL;
1431 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1432 			return -EFAULT;
1433 		copy = nbytes;
1434 		maxnode = nr_node_ids;
1435 	}
1436 
1437 	if (compat)
1438 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1439 					 nodes_addr(*nodes), maxnode);
1440 
1441 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1442 }
1443 
1444 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1445 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1446 {
1447 	*flags = *mode & MPOL_MODE_FLAGS;
1448 	*mode &= ~MPOL_MODE_FLAGS;
1449 
1450 	if ((unsigned int)(*mode) >=  MPOL_MAX)
1451 		return -EINVAL;
1452 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1453 		return -EINVAL;
1454 	if (*flags & MPOL_F_NUMA_BALANCING) {
1455 		if (*mode != MPOL_BIND)
1456 			return -EINVAL;
1457 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
1458 	}
1459 	return 0;
1460 }
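
/*
 * Example (editor's illustration): userspace passes the optional mode flags
 * OR-ed into the mode argument, e.g.
 *
 *	set_mempolicy(MPOL_BIND | MPOL_F_STATIC_NODES, &mask, maxnode);
 *
 * sanitize_mpol_flags() splits that back into mode == MPOL_BIND and
 * flags == MPOL_F_STATIC_NODES before the policy is constructed.
 */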
1461 
1462 static long kernel_mbind(unsigned long start, unsigned long len,
1463 			 unsigned long mode, const unsigned long __user *nmask,
1464 			 unsigned long maxnode, unsigned int flags)
1465 {
1466 	unsigned short mode_flags;
1467 	nodemask_t nodes;
1468 	int lmode = mode;
1469 	int err;
1470 
1471 	start = untagged_addr(start);
1472 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1473 	if (err)
1474 		return err;
1475 
1476 	err = get_nodes(&nodes, nmask, maxnode);
1477 	if (err)
1478 		return err;
1479 
1480 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1481 }
1482 
1483 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1484 		unsigned long, mode, const unsigned long __user *, nmask,
1485 		unsigned long, maxnode, unsigned int, flags)
1486 {
1487 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1488 }
1489 
1490 /* Set the process memory policy */
1491 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1492 				 unsigned long maxnode)
1493 {
1494 	unsigned short mode_flags;
1495 	nodemask_t nodes;
1496 	int lmode = mode;
1497 	int err;
1498 
1499 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1500 	if (err)
1501 		return err;
1502 
1503 	err = get_nodes(&nodes, nmask, maxnode);
1504 	if (err)
1505 		return err;
1506 
1507 	return do_set_mempolicy(lmode, mode_flags, &nodes);
1508 }
1509 
1510 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1511 		unsigned long, maxnode)
1512 {
1513 	return kernel_set_mempolicy(mode, nmask, maxnode);
1514 }
1515 
1516 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1517 				const unsigned long __user *old_nodes,
1518 				const unsigned long __user *new_nodes)
1519 {
1520 	struct mm_struct *mm = NULL;
1521 	struct task_struct *task;
1522 	nodemask_t task_nodes;
1523 	int err;
1524 	nodemask_t *old;
1525 	nodemask_t *new;
1526 	NODEMASK_SCRATCH(scratch);
1527 
1528 	if (!scratch)
1529 		return -ENOMEM;
1530 
1531 	old = &scratch->mask1;
1532 	new = &scratch->mask2;
1533 
1534 	err = get_nodes(old, old_nodes, maxnode);
1535 	if (err)
1536 		goto out;
1537 
1538 	err = get_nodes(new, new_nodes, maxnode);
1539 	if (err)
1540 		goto out;
1541 
1542 	/* Find the mm_struct */
1543 	rcu_read_lock();
1544 	task = pid ? find_task_by_vpid(pid) : current;
1545 	if (!task) {
1546 		rcu_read_unlock();
1547 		err = -ESRCH;
1548 		goto out;
1549 	}
1550 	get_task_struct(task);
1551 
1552 	err = -EINVAL;
1553 
1554 	/*
1555 	 * Check if this process has the right to modify the specified process.
1556 	 * Use the regular "ptrace_may_access()" checks.
1557 	 */
1558 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1559 		rcu_read_unlock();
1560 		err = -EPERM;
1561 		goto out_put;
1562 	}
1563 	rcu_read_unlock();
1564 
1565 	task_nodes = cpuset_mems_allowed(task);
1566 	/* Is the user allowed to access the target nodes? */
1567 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1568 		err = -EPERM;
1569 		goto out_put;
1570 	}
1571 
1572 	task_nodes = cpuset_mems_allowed(current);
1573 	nodes_and(*new, *new, task_nodes);
1574 	if (nodes_empty(*new))
1575 		goto out_put;
1576 
1577 	err = security_task_movememory(task);
1578 	if (err)
1579 		goto out_put;
1580 
1581 	mm = get_task_mm(task);
1582 	put_task_struct(task);
1583 
1584 	if (!mm) {
1585 		err = -EINVAL;
1586 		goto out;
1587 	}
1588 
1589 	err = do_migrate_pages(mm, old, new,
1590 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1591 
1592 	mmput(mm);
1593 out:
1594 	NODEMASK_SCRATCH_FREE(scratch);
1595 
1596 	return err;
1597 
1598 out_put:
1599 	put_task_struct(task);
1600 	goto out;
1601 
1602 }
1603 
1604 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1605 		const unsigned long __user *, old_nodes,
1606 		const unsigned long __user *, new_nodes)
1607 {
1608 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1609 }
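
/*
 * Illustrative userspace call (editor's sketch): move the pages of process
 * @pid that currently sit on node 0 over to node 1, assuming <numaif.h>:
 *
 *	unsigned long old = 0x1, new = 0x2;
 *	migrate_pages(pid, 8 * sizeof(unsigned long), &old, &new);
 */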
1610 
1611 
1612 /* Retrieve NUMA policy */
1613 static int kernel_get_mempolicy(int __user *policy,
1614 				unsigned long __user *nmask,
1615 				unsigned long maxnode,
1616 				unsigned long addr,
1617 				unsigned long flags)
1618 {
1619 	int err;
1620 	int pval;
1621 	nodemask_t nodes;
1622 
1623 	if (nmask != NULL && maxnode < nr_node_ids)
1624 		return -EINVAL;
1625 
1626 	addr = untagged_addr(addr);
1627 
1628 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1629 
1630 	if (err)
1631 		return err;
1632 
1633 	if (policy && put_user(pval, policy))
1634 		return -EFAULT;
1635 
1636 	if (nmask)
1637 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1638 
1639 	return err;
1640 }
1641 
1642 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1643 		unsigned long __user *, nmask, unsigned long, maxnode,
1644 		unsigned long, addr, unsigned long, flags)
1645 {
1646 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1647 }
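
/*
 * Illustrative userspace call (editor's sketch): query which policy governs
 * the page backing @addr, assuming <numaif.h> and at most 64 nodes:
 *
 *	int mode;
 *	unsigned long nodes = 0;
 *	get_mempolicy(&mode, &nodes, 8 * sizeof(nodes), addr, MPOL_F_ADDR);
 *	// mode is e.g. MPOL_BIND; nodes holds that policy's nodemask
 */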
1648 
1649 bool vma_migratable(struct vm_area_struct *vma)
1650 {
1651 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1652 		return false;
1653 
1654 	/*
1655 	 * DAX device mappings require predictable access latency, so avoid
1656 	 * incurring periodic faults.
1657 	 */
1658 	if (vma_is_dax(vma))
1659 		return false;
1660 
1661 	if (is_vm_hugetlb_page(vma) &&
1662 		!hugepage_migration_supported(hstate_vma(vma)))
1663 		return false;
1664 
1665 	/*
1666 	 * Migration allocates pages in the highest zone. If we cannot
1667 	 * do so then migration (at least from node to node) is not
1668 	 * possible.
1669 	 */
1670 	if (vma->vm_file &&
1671 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1672 			< policy_zone)
1673 		return false;
1674 	return true;
1675 }
1676 
1677 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1678 						unsigned long addr)
1679 {
1680 	struct mempolicy *pol = NULL;
1681 
1682 	if (vma) {
1683 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1684 			pol = vma->vm_ops->get_policy(vma, addr);
1685 		} else if (vma->vm_policy) {
1686 			pol = vma->vm_policy;
1687 
1688 			/*
1689 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1690 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1691 			 * count on these policies which will be dropped by
1692 			 * mpol_cond_put() later
1693 			 */
1694 			if (mpol_needs_cond_ref(pol))
1695 				mpol_get(pol);
1696 		}
1697 	}
1698 
1699 	return pol;
1700 }
1701 
1702 /*
1703  * get_vma_policy(@vma, @addr)
1704  * @vma: virtual memory area whose policy is sought
1705  * @addr: address in @vma for shared policy lookup
1706  *
1707  * Returns effective policy for a VMA at specified address.
1708  * Falls back to current->mempolicy or system default policy, as necessary.
1709  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1710  * count--added by the get_policy() vm_op, as appropriate--to protect against
1711  * freeing by another task.  It is the caller's responsibility to free the
1712  * extra reference for shared policies.
1713  */
1714 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1715 						unsigned long addr)
1716 {
1717 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1718 
1719 	if (!pol)
1720 		pol = get_task_policy(current);
1721 
1722 	return pol;
1723 }
1724 
1725 bool vma_policy_mof(struct vm_area_struct *vma)
1726 {
1727 	struct mempolicy *pol;
1728 
1729 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1730 		bool ret = false;
1731 
1732 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1733 		if (pol && (pol->flags & MPOL_F_MOF))
1734 			ret = true;
1735 		mpol_cond_put(pol);
1736 
1737 		return ret;
1738 	}
1739 
1740 	pol = vma->vm_policy;
1741 	if (!pol)
1742 		pol = get_task_policy(current);
1743 
1744 	return pol->flags & MPOL_F_MOF;
1745 }
1746 
1747 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1748 {
1749 	enum zone_type dynamic_policy_zone = policy_zone;
1750 
1751 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1752 
1753 	/*
1754 	 * if policy->nodes has movable memory only,
1755 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1756 	 *
1757 	 * policy->nodes is intersected with node_states[N_MEMORY],
1758 	 * so if the following test fails, it implies
1759 	 * policy->nodes has movable memory only.
1760 	 */
1761 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1762 		dynamic_policy_zone = ZONE_MOVABLE;
1763 
1764 	return zone >= dynamic_policy_zone;
1765 }
1766 
1767 /*
1768  * Return a nodemask representing a mempolicy for filtering nodes for
1769  * page allocation
1770  */
1771 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1772 {
1773 	int mode = policy->mode;
1774 
1775 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1776 	if (unlikely(mode == MPOL_BIND) &&
1777 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1778 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1779 		return &policy->nodes;
1780 
1781 	if (mode == MPOL_PREFERRED_MANY)
1782 		return &policy->nodes;
1783 
1784 	return NULL;
1785 }
1786 
1787 /*
1788  * Return the preferred node id for 'prefer' mempolicy, and return
1789  * the given id for all other policies.
1790  *
1791  * policy_node() is always coupled with policy_nodemask(), which
1792  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1793  */
1794 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1795 {
1796 	if (policy->mode == MPOL_PREFERRED) {
1797 		nd = first_node(policy->nodes);
1798 	} else {
1799 		/*
1800 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1801 		 * because we might easily break the expectation to stay on the
1802 		 * requested node and not break the policy.
1803 		 */
1804 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1805 	}
1806 
1807 	return nd;
1808 }
1809 
1810 /* Do dynamic interleaving for a process */
1811 static unsigned interleave_nodes(struct mempolicy *policy)
1812 {
1813 	unsigned next;
1814 	struct task_struct *me = current;
1815 
1816 	next = next_node_in(me->il_prev, policy->nodes);
1817 	if (next < MAX_NUMNODES)
1818 		me->il_prev = next;
1819 	return next;
1820 }
1821 
1822 /*
1823  * Depending on the memory policy, provide a node from which to allocate the
1824  * next slab entry.
1825  */
1826 unsigned int mempolicy_slab_node(void)
1827 {
1828 	struct mempolicy *policy;
1829 	int node = numa_mem_id();
1830 
1831 	if (!in_task())
1832 		return node;
1833 
1834 	policy = current->mempolicy;
1835 	if (!policy)
1836 		return node;
1837 
1838 	switch (policy->mode) {
1839 	case MPOL_PREFERRED:
1840 		return first_node(policy->nodes);
1841 
1842 	case MPOL_INTERLEAVE:
1843 		return interleave_nodes(policy);
1844 
1845 	case MPOL_BIND:
1846 	case MPOL_PREFERRED_MANY:
1847 	{
1848 		struct zoneref *z;
1849 
1850 		/*
1851 		 * Follow bind policy behavior and start allocation at the
1852 		 * first node.
1853 		 */
1854 		struct zonelist *zonelist;
1855 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1856 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1857 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1858 							&policy->nodes);
1859 		return z->zone ? zone_to_nid(z->zone) : node;
1860 	}
1861 	case MPOL_LOCAL:
1862 		return node;
1863 
1864 	default:
1865 		BUG();
1866 	}
1867 }
1868 
1869 /*
1870  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1871  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1872  * number of present nodes.
1873  */
1874 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1875 {
1876 	nodemask_t nodemask = pol->nodes;
1877 	unsigned int target, nnodes;
1878 	int i;
1879 	int nid;
1880 	/*
1881 	 * The barrier will stabilize the nodemask in a register or on
1882 	 * the stack so that it will stop changing under the code.
1883 	 *
1884 	 * Between first_node() and next_node(), pol->nodes could be changed
1885 	 * by other threads, so we operate on the local copy taken above.
1886 	 */
1887 	barrier();
1888 
1889 	nnodes = nodes_weight(nodemask);
1890 	if (!nnodes)
1891 		return numa_node_id();
1892 	target = (unsigned int)n % nnodes;
1893 	nid = first_node(nodemask);
1894 	for (i = 0; i < target; i++)
1895 		nid = next_node(nid, nodemask);
1896 	return nid;
1897 }
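/*
 * Worked example (illustrative, not from the original source): with
 * pol->nodes = {0,2,5} and n = 4, nnodes = 3 and target = 4 % 3 = 1,
 * so we start at node 0 and advance once, returning node 2.
 */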
1898 
1899 /* Determine a node number for interleave */
1900 static inline unsigned interleave_nid(struct mempolicy *pol,
1901 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1902 {
1903 	if (vma) {
1904 		unsigned long off;
1905 
1906 		/*
1907 		 * for small pages, there is no difference between
1908 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1909 		 * for huge pages, since vm_pgoff is in units of small
1910 		 * pages, we need to shift off the always 0 bits to get
1911 		 * a useful offset.
1912 		 */
1913 		BUG_ON(shift < PAGE_SHIFT);
1914 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1915 		off += (addr - vma->vm_start) >> shift;
1916 		return offset_il_node(pol, off);
1917 	} else
1918 		return interleave_nodes(pol);
1919 }
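/*
 * Worked example (illustrative, assuming 4KB base pages): for a 2MB huge
 * page, shift = 21 and PAGE_SHIFT = 12, so vm_pgoff is shifted right by 9
 * to convert it from 4KB units into 2MB units before the in-VMA huge-page
 * index ((addr - vm_start) >> 21) is added.
 */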
1920 
1921 #ifdef CONFIG_HUGETLBFS
1922 /*
1923  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1924  * @vma: virtual memory area whose policy is sought
1925  * @addr: address in @vma for shared policy lookup and interleave policy
1926  * @gfp_flags: for requested zone
1927  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1928  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
1929  *
1930  * Returns a nid suitable for a huge page allocation and a pointer
1931  * to the struct mempolicy for conditional unref after allocation.
1932  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
1933  * to the mempolicy's @nodemask for filtering the zonelist.
1934  *
1935  * Must be protected by read_mems_allowed_begin()
1936  */
1937 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1938 				struct mempolicy **mpol, nodemask_t **nodemask)
1939 {
1940 	int nid;
1941 	int mode;
1942 
1943 	*mpol = get_vma_policy(vma, addr);
1944 	*nodemask = NULL;
1945 	mode = (*mpol)->mode;
1946 
1947 	if (unlikely(mode == MPOL_INTERLEAVE)) {
1948 		nid = interleave_nid(*mpol, vma, addr,
1949 					huge_page_shift(hstate_vma(vma)));
1950 	} else {
1951 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
1952 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
1953 			*nodemask = &(*mpol)->nodes;
1954 	}
1955 	return nid;
1956 }
1957 
1958 /*
1959  * init_nodemask_of_mempolicy
1960  *
1961  * If the current task's mempolicy is "default" [NULL], return 'false'
1962  * to indicate default policy.  Otherwise, extract the policy nodemask
1963  * for 'bind' or 'interleave' policy into the argument nodemask, or
1964  * initialize the argument nodemask to contain the single node for
1965  * 'preferred' or 'local' policy and return 'true' to indicate presence
1966  * of non-default mempolicy.
1967  *
1968  * We don't bother with reference counting the mempolicy [mpol_get/put]
1969  * because the current task is examining its own mempolicy and a task's
1970  * mempolicy is only ever changed by the task itself.
1971  *
1972  * N.B., it is the caller's responsibility to free a returned nodemask.
1973  */
1974 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1975 {
1976 	struct mempolicy *mempolicy;
1977 
1978 	if (!(mask && current->mempolicy))
1979 		return false;
1980 
1981 	task_lock(current);
1982 	mempolicy = current->mempolicy;
1983 	switch (mempolicy->mode) {
1984 	case MPOL_PREFERRED:
1985 	case MPOL_PREFERRED_MANY:
1986 	case MPOL_BIND:
1987 	case MPOL_INTERLEAVE:
1988 		*mask = mempolicy->nodes;
1989 		break;
1990 
1991 	case MPOL_LOCAL:
1992 		init_nodemask_of_node(mask, numa_node_id());
1993 		break;
1994 
1995 	default:
1996 		BUG();
1997 	}
1998 	task_unlock(current);
1999 
2000 	return true;
2001 }
2002 #endif
2003 
2004 /*
2005  * mempolicy_in_oom_domain
2006  *
2007  * If tsk's mempolicy is "bind", check for intersection between mask and
2008  * the policy nodemask. Otherwise, return true for all other policies
2009  * including "interleave", as a tsk with "interleave" policy may have
2010  * memory allocated from any node in the system.
2011  *
2012  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2013  */
2014 bool mempolicy_in_oom_domain(struct task_struct *tsk,
2015 					const nodemask_t *mask)
2016 {
2017 	struct mempolicy *mempolicy;
2018 	bool ret = true;
2019 
2020 	if (!mask)
2021 		return ret;
2022 
2023 	task_lock(tsk);
2024 	mempolicy = tsk->mempolicy;
2025 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2026 		ret = nodes_intersects(mempolicy->nodes, *mask);
2027 	task_unlock(tsk);
2028 
2029 	return ret;
2030 }
2031 
2032 /* Allocate a page in interleaved policy.
2033    Own path because it needs to do special accounting. */
2034 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2035 					unsigned nid)
2036 {
2037 	struct page *page;
2038 
2039 	page = __alloc_pages(gfp, order, nid, NULL);
2040 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2041 	if (!static_branch_likely(&vm_numa_stat_key))
2042 		return page;
2043 	if (page && page_to_nid(page) == nid) {
2044 		preempt_disable();
2045 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2046 		preempt_enable();
2047 	}
2048 	return page;
2049 }
2050 
2051 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2052 						int nid, struct mempolicy *pol)
2053 {
2054 	struct page *page;
2055 	gfp_t preferred_gfp;
2056 
2057 	/*
2058 	 * This is a two pass approach. The first pass will only try the
2059 	 * preferred nodes but skip the direct reclaim and allow the
2060 	 * allocation to fail, while the second pass will try all the
2061 	 * nodes in system.
2062 	 * nodes in the system.
2063 	preferred_gfp = gfp | __GFP_NOWARN;
2064 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2065 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2066 	if (!page)
2067 		page = __alloc_pages(gfp, order, numa_node_id(), NULL);
2068 
2069 	return page;
2070 }
2071 
2072 /**
2073  * alloc_pages_vma - Allocate a page for a VMA.
2074  * @gfp: GFP flags.
2075  * @order: Order of the GFP allocation.
2076  * @vma: Pointer to VMA or NULL if not available.
2077  * @addr: Virtual address of the allocation.  Must be inside @vma.
2078  * @node: Which node to prefer for allocation (modulo policy).
2079  * @hugepage: For hugepages try only the preferred node if possible.
2080  *
2081  * Allocate a page for a specific address in @vma, using the appropriate
2082  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2083  * of the mm_struct of the VMA to prevent it from going away.  Should be
2084  * used for all allocations for pages that will be mapped into user space.
2085  *
2086  * Return: The page on success or NULL if allocation fails.
2087  */
2088 struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2089 		unsigned long addr, int node, bool hugepage)
2090 {
2091 	struct mempolicy *pol;
2092 	struct page *page;
2093 	int preferred_nid;
2094 	nodemask_t *nmask;
2095 
2096 	pol = get_vma_policy(vma, addr);
2097 
2098 	if (pol->mode == MPOL_INTERLEAVE) {
2099 		unsigned nid;
2100 
2101 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2102 		mpol_cond_put(pol);
2103 		page = alloc_page_interleave(gfp, order, nid);
2104 		goto out;
2105 	}
2106 
2107 	if (pol->mode == MPOL_PREFERRED_MANY) {
2108 		page = alloc_pages_preferred_many(gfp, order, node, pol);
2109 		mpol_cond_put(pol);
2110 		goto out;
2111 	}
2112 
2113 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2114 		int hpage_node = node;
2115 
2116 		/*
2117 		 * For hugepage allocation and non-interleave policy which
2118 		 * allows the current node (or other explicitly preferred
2119 		 * node) we only try to allocate from the current/preferred
2120 		 * node and don't fall back to other nodes, as the cost of
2121 		 * remote accesses would likely offset THP benefits.
2122 		 *
2123 		 * If the policy is interleave or does not allow the current
2124 		 * node in its nodemask, we allocate the standard way.
2125 		 */
2126 		if (pol->mode == MPOL_PREFERRED)
2127 			hpage_node = first_node(pol->nodes);
2128 
2129 		nmask = policy_nodemask(gfp, pol);
2130 		if (!nmask || node_isset(hpage_node, *nmask)) {
2131 			mpol_cond_put(pol);
2132 			/*
2133 			 * First, try to allocate THP only on local node, but
2134 			 * don't reclaim unnecessarily, just compact.
2135 			 */
2136 			page = __alloc_pages_node(hpage_node,
2137 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2138 
2139 			/*
2140 			 * If hugepage allocations are configured to always
2141 			 * compact synchronously, or the vma has been madvised
2142 			 * to prefer hugepage backing, retry allowing remote
2143 			 * memory with both reclaim and compaction as well.
2144 			 */
2145 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2146 				page = __alloc_pages(gfp, order, hpage_node, nmask);
2147 
2148 			goto out;
2149 		}
2150 	}
2151 
2152 	nmask = policy_nodemask(gfp, pol);
2153 	preferred_nid = policy_node(gfp, pol, node);
2154 	page = __alloc_pages(gfp, order, preferred_nid, nmask);
2155 	mpol_cond_put(pol);
2156 out:
2157 	return page;
2158 }
2159 EXPORT_SYMBOL(alloc_pages_vma);
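/*
 * Illustrative call (a sketch, not taken from this file): a fault handler
 * allocating one user page for @vma at @addr might do
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);
 *
 * while holding the mmap_lock, so the VMA and its policy cannot go away.
 */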
2160 
2161 /**
2162  * alloc_pages - Allocate pages.
2163  * @gfp: GFP flags.
2164  * @order: Power of two of number of pages to allocate.
2165  *
2166  * Allocate 1 << @order contiguous pages.  The physical address of the
2167  * first page is naturally aligned (eg an order-3 allocation will be aligned
2168  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2169  * process is honoured when in process context.
2170  *
2171  * Context: Can be called from any context, providing the appropriate GFP
2172  * flags are used.
2173  * Return: The page on success or NULL if allocation fails.
2174  */
2175 struct page *alloc_pages(gfp_t gfp, unsigned order)
2176 {
2177 	struct mempolicy *pol = &default_policy;
2178 	struct page *page;
2179 
2180 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2181 		pol = get_task_policy(current);
2182 
2183 	/*
2184 	 * No reference counting needed for current->mempolicy
2185 	 * nor system default_policy
2186 	 */
2187 	if (pol->mode == MPOL_INTERLEAVE)
2188 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2189 	else if (pol->mode == MPOL_PREFERRED_MANY)
2190 		page = alloc_pages_preferred_many(gfp, order,
2191 				numa_node_id(), pol);
2192 	else
2193 		page = __alloc_pages(gfp, order,
2194 				policy_node(gfp, pol, numa_node_id()),
2195 				policy_nodemask(gfp, pol));
2196 
2197 	return page;
2198 }
2199 EXPORT_SYMBOL(alloc_pages);
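/*
 * Illustrative call (a sketch): an order-2 request returns 4 contiguous
 * pages aligned to 4 * PAGE_SIZE, placed according to the caller's
 * mempolicy when in process context:
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (page)
 *		__free_pages(page, 2);
 */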
2200 
2201 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2202 {
2203 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2204 
2205 	if (IS_ERR(pol))
2206 		return PTR_ERR(pol);
2207 	dst->vm_policy = pol;
2208 	return 0;
2209 }
2210 
2211 /*
2212  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2213  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2214  * with the mems_allowed returned by cpuset_mems_allowed().  This
2215  * keeps mempolicies cpuset relative after its cpuset moves.  See
2216  * further kernel/cpuset.c update_nodemask().
2217  *
2218  * current's mempolicy may be rebound by another task (the task that changes
2219  * the cpuset's mems), so we needn't do the rebind work for the current task.
2220  */
2221 
2222 /* Slow path of a mempolicy duplicate */
2223 struct mempolicy *__mpol_dup(struct mempolicy *old)
2224 {
2225 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2226 
2227 	if (!new)
2228 		return ERR_PTR(-ENOMEM);
2229 
2230 	/* task's mempolicy is protected by alloc_lock */
2231 	if (old == current->mempolicy) {
2232 		task_lock(current);
2233 		*new = *old;
2234 		task_unlock(current);
2235 	} else
2236 		*new = *old;
2237 
2238 	if (current_cpuset_is_being_rebound()) {
2239 		nodemask_t mems = cpuset_mems_allowed(current);
2240 		mpol_rebind_policy(new, &mems);
2241 	}
2242 	atomic_set(&new->refcnt, 1);
2243 	return new;
2244 }
2245 
2246 /* Slow path of a mempolicy comparison */
2247 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2248 {
2249 	if (!a || !b)
2250 		return false;
2251 	if (a->mode != b->mode)
2252 		return false;
2253 	if (a->flags != b->flags)
2254 		return false;
2255 	if (mpol_store_user_nodemask(a))
2256 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2257 			return false;
2258 
2259 	switch (a->mode) {
2260 	case MPOL_BIND:
2261 	case MPOL_INTERLEAVE:
2262 	case MPOL_PREFERRED:
2263 	case MPOL_PREFERRED_MANY:
2264 		return !!nodes_equal(a->nodes, b->nodes);
2265 	case MPOL_LOCAL:
2266 		return true;
2267 	default:
2268 		BUG();
2269 		return false;
2270 	}
2271 }
2272 
2273 /*
2274  * Shared memory backing store policy support.
2275  *
2276  * Remember policies even when nobody has shared memory mapped.
2277  * The policies are kept in Red-Black tree linked from the inode.
2278  * They are protected by the sp->lock rwlock, which should be held
2279  * for any accesses to the tree.
2280  */
2281 
2282 /*
2283  * lookup first element intersecting start-end.  Caller holds sp->lock for
2284  * reading or for writing
2285  */
2286 static struct sp_node *
2287 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2288 {
2289 	struct rb_node *n = sp->root.rb_node;
2290 
2291 	while (n) {
2292 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2293 
2294 		if (start >= p->end)
2295 			n = n->rb_right;
2296 		else if (end <= p->start)
2297 			n = n->rb_left;
2298 		else
2299 			break;
2300 	}
2301 	if (!n)
2302 		return NULL;
2303 	for (;;) {
2304 		struct sp_node *w = NULL;
2305 		struct rb_node *prev = rb_prev(n);
2306 		if (!prev)
2307 			break;
2308 		w = rb_entry(prev, struct sp_node, nd);
2309 		if (w->end <= start)
2310 			break;
2311 		n = prev;
2312 	}
2313 	return rb_entry(n, struct sp_node, nd);
2314 }
2315 
2316 /*
2317  * Insert a new shared policy into the list.  Caller holds sp->lock for
2318  * writing.
2319  */
2320 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2321 {
2322 	struct rb_node **p = &sp->root.rb_node;
2323 	struct rb_node *parent = NULL;
2324 	struct sp_node *nd;
2325 
2326 	while (*p) {
2327 		parent = *p;
2328 		nd = rb_entry(parent, struct sp_node, nd);
2329 		if (new->start < nd->start)
2330 			p = &(*p)->rb_left;
2331 		else if (new->end > nd->end)
2332 			p = &(*p)->rb_right;
2333 		else
2334 			BUG();
2335 	}
2336 	rb_link_node(&new->nd, parent, p);
2337 	rb_insert_color(&new->nd, &sp->root);
2338 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2339 		 new->policy ? new->policy->mode : 0);
2340 }
2341 
2342 /* Find shared policy intersecting idx */
2343 struct mempolicy *
2344 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2345 {
2346 	struct mempolicy *pol = NULL;
2347 	struct sp_node *sn;
2348 
2349 	if (!sp->root.rb_node)
2350 		return NULL;
2351 	read_lock(&sp->lock);
2352 	sn = sp_lookup(sp, idx, idx+1);
2353 	if (sn) {
2354 		mpol_get(sn->policy);
2355 		pol = sn->policy;
2356 	}
2357 	read_unlock(&sp->lock);
2358 	return pol;
2359 }
2360 
2361 static void sp_free(struct sp_node *n)
2362 {
2363 	mpol_put(n->policy);
2364 	kmem_cache_free(sn_cache, n);
2365 }
2366 
2367 /**
2368  * mpol_misplaced - check whether current page node is valid in policy
2369  *
2370  * @page: page to be checked
2371  * @vma: vm area where page mapped
2372  * @addr: virtual address where page mapped
2373  *
2374  * Look up the current policy node id for vma,addr and "compare to" the page's
2375  * node id.  Policy determination "mimics" alloc_page_vma().
2376  * Called from fault path where we know the vma and faulting address.
2377  *
2378  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2379  * policy, or a suitable node ID to allocate a replacement page from.
2380  */
2381 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2382 {
2383 	struct mempolicy *pol;
2384 	struct zoneref *z;
2385 	int curnid = page_to_nid(page);
2386 	unsigned long pgoff;
2387 	int thiscpu = raw_smp_processor_id();
2388 	int thisnid = cpu_to_node(thiscpu);
2389 	int polnid = NUMA_NO_NODE;
2390 	int ret = NUMA_NO_NODE;
2391 
2392 	pol = get_vma_policy(vma, addr);
2393 	if (!(pol->flags & MPOL_F_MOF))
2394 		goto out;
2395 
2396 	switch (pol->mode) {
2397 	case MPOL_INTERLEAVE:
2398 		pgoff = vma->vm_pgoff;
2399 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2400 		polnid = offset_il_node(pol, pgoff);
2401 		break;
2402 
2403 	case MPOL_PREFERRED:
2404 		if (node_isset(curnid, pol->nodes))
2405 			goto out;
2406 		polnid = first_node(pol->nodes);
2407 		break;
2408 
2409 	case MPOL_LOCAL:
2410 		polnid = numa_node_id();
2411 		break;
2412 
2413 	case MPOL_BIND:
2414 		/* Optimize placement among multiple nodes via NUMA balancing */
2415 		if (pol->flags & MPOL_F_MORON) {
2416 			if (node_isset(thisnid, pol->nodes))
2417 				break;
2418 			goto out;
2419 		}
2420 		fallthrough;
2421 
2422 	case MPOL_PREFERRED_MANY:
2423 		/*
2424 		 * use current page if in policy nodemask,
2425 		 * else select nearest allowed node, if any.
2426 		 * If no allowed nodes, use current [!misplaced].
2427 		 */
2428 		if (node_isset(curnid, pol->nodes))
2429 			goto out;
2430 		z = first_zones_zonelist(
2431 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2432 				gfp_zone(GFP_HIGHUSER),
2433 				&pol->nodes);
2434 		polnid = zone_to_nid(z->zone);
2435 		break;
2436 
2437 	default:
2438 		BUG();
2439 	}
2440 
2441 	/* Migrate the page towards the node whose CPU is referencing it */
2442 	if (pol->flags & MPOL_F_MORON) {
2443 		polnid = thisnid;
2444 
2445 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2446 			goto out;
2447 	}
2448 
2449 	if (curnid != polnid)
2450 		ret = polnid;
2451 out:
2452 	mpol_cond_put(pol);
2453 
2454 	return ret;
2455 }
2456 
2457 /*
2458  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2459  * dropped after task->mempolicy is set to NULL so that any allocation done as
2460  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2461  * policy.
2462  */
2463 void mpol_put_task_policy(struct task_struct *task)
2464 {
2465 	struct mempolicy *pol;
2466 
2467 	task_lock(task);
2468 	pol = task->mempolicy;
2469 	task->mempolicy = NULL;
2470 	task_unlock(task);
2471 	mpol_put(pol);
2472 }
2473 
2474 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2475 {
2476 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2477 	rb_erase(&n->nd, &sp->root);
2478 	sp_free(n);
2479 }
2480 
2481 static void sp_node_init(struct sp_node *node, unsigned long start,
2482 			unsigned long end, struct mempolicy *pol)
2483 {
2484 	node->start = start;
2485 	node->end = end;
2486 	node->policy = pol;
2487 }
2488 
2489 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2490 				struct mempolicy *pol)
2491 {
2492 	struct sp_node *n;
2493 	struct mempolicy *newpol;
2494 
2495 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2496 	if (!n)
2497 		return NULL;
2498 
2499 	newpol = mpol_dup(pol);
2500 	if (IS_ERR(newpol)) {
2501 		kmem_cache_free(sn_cache, n);
2502 		return NULL;
2503 	}
2504 	newpol->flags |= MPOL_F_SHARED;
2505 	sp_node_init(n, start, end, newpol);
2506 
2507 	return n;
2508 }
2509 
2510 /* Replace a policy range. */
2511 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2512 				 unsigned long end, struct sp_node *new)
2513 {
2514 	struct sp_node *n;
2515 	struct sp_node *n_new = NULL;
2516 	struct mempolicy *mpol_new = NULL;
2517 	int ret = 0;
2518 
2519 restart:
2520 	write_lock(&sp->lock);
2521 	n = sp_lookup(sp, start, end);
2522 	/* Take care of old policies in the same range. */
2523 	while (n && n->start < end) {
2524 		struct rb_node *next = rb_next(&n->nd);
2525 		if (n->start >= start) {
2526 			if (n->end <= end)
2527 				sp_delete(sp, n);
2528 			else
2529 				n->start = end;
2530 		} else {
2531 			/* Old policy spanning whole new range. */
2532 			if (n->end > end) {
2533 				if (!n_new)
2534 					goto alloc_new;
2535 
2536 				*mpol_new = *n->policy;
2537 				atomic_set(&mpol_new->refcnt, 1);
2538 				sp_node_init(n_new, end, n->end, mpol_new);
2539 				n->end = start;
2540 				sp_insert(sp, n_new);
2541 				n_new = NULL;
2542 				mpol_new = NULL;
2543 				break;
2544 			} else
2545 				n->end = start;
2546 		}
2547 		if (!next)
2548 			break;
2549 		n = rb_entry(next, struct sp_node, nd);
2550 	}
2551 	if (new)
2552 		sp_insert(sp, new);
2553 	write_unlock(&sp->lock);
2554 	ret = 0;
2555 
2556 err_out:
2557 	if (mpol_new)
2558 		mpol_put(mpol_new);
2559 	if (n_new)
2560 		kmem_cache_free(sn_cache, n_new);
2561 
2562 	return ret;
2563 
2564 alloc_new:
2565 	write_unlock(&sp->lock);
2566 	ret = -ENOMEM;
2567 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2568 	if (!n_new)
2569 		goto err_out;
2570 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2571 	if (!mpol_new)
2572 		goto err_out;
2573 	atomic_set(&mpol_new->refcnt, 1);
2574 	goto restart;
2575 }
2576 
2577 /**
2578  * mpol_shared_policy_init - initialize shared policy for inode
2579  * @sp: pointer to inode shared policy
2580  * @mpol:  struct mempolicy to install
2581  *
2582  * Install non-NULL @mpol in inode's shared policy rb-tree.
2583  * On entry, the current task has a reference on a non-NULL @mpol.
2584  * This must be released on exit.
2585  * This is called during get_inode() calls, so we can use GFP_KERNEL.
2586  */
2587 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2588 {
2589 	int ret;
2590 
2591 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2592 	rwlock_init(&sp->lock);
2593 
2594 	if (mpol) {
2595 		struct vm_area_struct pvma;
2596 		struct mempolicy *new;
2597 		NODEMASK_SCRATCH(scratch);
2598 
2599 		if (!scratch)
2600 			goto put_mpol;
2601 		/* contextualize the tmpfs mount point mempolicy */
2602 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2603 		if (IS_ERR(new))
2604 			goto free_scratch; /* no valid nodemask intersection */
2605 
2606 		task_lock(current);
2607 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2608 		task_unlock(current);
2609 		if (ret)
2610 			goto put_new;
2611 
2612 		/* Create pseudo-vma that contains just the policy */
2613 		vma_init(&pvma, NULL);
2614 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2615 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2616 
2617 put_new:
2618 		mpol_put(new);			/* drop initial ref */
2619 free_scratch:
2620 		NODEMASK_SCRATCH_FREE(scratch);
2621 put_mpol:
2622 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2623 	}
2624 }
2625 
2626 int mpol_set_shared_policy(struct shared_policy *info,
2627 			struct vm_area_struct *vma, struct mempolicy *npol)
2628 {
2629 	int err;
2630 	struct sp_node *new = NULL;
2631 	unsigned long sz = vma_pages(vma);
2632 
2633 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2634 		 vma->vm_pgoff,
2635 		 sz, npol ? npol->mode : -1,
2636 		 npol ? npol->flags : -1,
2637 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2638 
2639 	if (npol) {
2640 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2641 		if (!new)
2642 			return -ENOMEM;
2643 	}
2644 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2645 	if (err && new)
2646 		sp_free(new);
2647 	return err;
2648 }
2649 
2650 /* Free a backing policy store on inode delete. */
2651 void mpol_free_shared_policy(struct shared_policy *p)
2652 {
2653 	struct sp_node *n;
2654 	struct rb_node *next;
2655 
2656 	if (!p->root.rb_node)
2657 		return;
2658 	write_lock(&p->lock);
2659 	next = rb_first(&p->root);
2660 	while (next) {
2661 		n = rb_entry(next, struct sp_node, nd);
2662 		next = rb_next(&n->nd);
2663 		sp_delete(p, n);
2664 	}
2665 	write_unlock(&p->lock);
2666 }
2667 
2668 #ifdef CONFIG_NUMA_BALANCING
2669 static int __initdata numabalancing_override;
2670 
2671 static void __init check_numabalancing_enable(void)
2672 {
2673 	bool numabalancing_default = false;
2674 
2675 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2676 		numabalancing_default = true;
2677 
2678 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2679 	if (numabalancing_override)
2680 		set_numabalancing_state(numabalancing_override == 1);
2681 
2682 	if (num_online_nodes() > 1 && !numabalancing_override) {
2683 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2684 			numabalancing_default ? "Enabling" : "Disabling");
2685 		set_numabalancing_state(numabalancing_default);
2686 	}
2687 }
2688 
2689 static int __init setup_numabalancing(char *str)
2690 {
2691 	int ret = 0;
2692 	if (!str)
2693 		goto out;
2694 
2695 	if (!strcmp(str, "enable")) {
2696 		numabalancing_override = 1;
2697 		ret = 1;
2698 	} else if (!strcmp(str, "disable")) {
2699 		numabalancing_override = -1;
2700 		ret = 1;
2701 	}
2702 out:
2703 	if (!ret)
2704 		pr_warn("Unable to parse numa_balancing=\n");
2705 
2706 	return ret;
2707 }
2708 __setup("numa_balancing=", setup_numabalancing);
2709 #else
2710 static inline void __init check_numabalancing_enable(void)
2711 {
2712 }
2713 #endif /* CONFIG_NUMA_BALANCING */
2714 
2715 /* assumes fs == KERNEL_DS */
2716 void __init numa_policy_init(void)
2717 {
2718 	nodemask_t interleave_nodes;
2719 	unsigned long largest = 0;
2720 	int nid, prefer = 0;
2721 
2722 	policy_cache = kmem_cache_create("numa_policy",
2723 					 sizeof(struct mempolicy),
2724 					 0, SLAB_PANIC, NULL);
2725 
2726 	sn_cache = kmem_cache_create("shared_policy_node",
2727 				     sizeof(struct sp_node),
2728 				     0, SLAB_PANIC, NULL);
2729 
2730 	for_each_node(nid) {
2731 		preferred_node_policy[nid] = (struct mempolicy) {
2732 			.refcnt = ATOMIC_INIT(1),
2733 			.mode = MPOL_PREFERRED,
2734 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2735 			.nodes = nodemask_of_node(nid),
2736 		};
2737 	}
2738 
2739 	/*
2740 	 * Set interleaving policy for system init. Interleaving is only
2741 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2742 	 * back to the largest node if they're all smaller.
2743 	 */
2744 	nodes_clear(interleave_nodes);
2745 	for_each_node_state(nid, N_MEMORY) {
2746 		unsigned long total_pages = node_present_pages(nid);
2747 
2748 		/* Preserve the largest node */
2749 		if (largest < total_pages) {
2750 			largest = total_pages;
2751 			prefer = nid;
2752 		}
2753 
2754 		/* Interleave this node? */
2755 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2756 			node_set(nid, interleave_nodes);
2757 	}
2758 
2759 	/* All too small, use the largest */
2760 	if (unlikely(nodes_empty(interleave_nodes)))
2761 		node_set(prefer, interleave_nodes);
2762 
2763 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2764 		pr_err("%s: interleaving failed\n", __func__);
2765 
2766 	check_numabalancing_enable();
2767 }
2768 
2769 /* Reset policy of current process to default */
2770 void numa_default_policy(void)
2771 {
2772 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2773 }
2774 
2775 /*
2776  * Parse and format mempolicy from/to strings
2777  */
2778 
2779 static const char * const policy_modes[] =
2780 {
2781 	[MPOL_DEFAULT]    = "default",
2782 	[MPOL_PREFERRED]  = "prefer",
2783 	[MPOL_BIND]       = "bind",
2784 	[MPOL_INTERLEAVE] = "interleave",
2785 	[MPOL_LOCAL]      = "local",
2786 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2787 };
2788 
2789 
2790 #ifdef CONFIG_TMPFS
2791 /**
2792  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2793  * @str:  string containing mempolicy to parse
2794  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2795  *
2796  * Format of input:
2797  *	<mode>[=<flags>][:<nodelist>]
2798  *
2799  * On success, returns 0, else 1
2800  */
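/*
 * Example strings (illustrative): "interleave:0-3", "bind=static:0,2",
 * "prefer:1", "local" -- the same syntax accepted by the tmpfs "mpol="
 * mount option.
 */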
2801 int mpol_parse_str(char *str, struct mempolicy **mpol)
2802 {
2803 	struct mempolicy *new = NULL;
2804 	unsigned short mode_flags;
2805 	nodemask_t nodes;
2806 	char *nodelist = strchr(str, ':');
2807 	char *flags = strchr(str, '=');
2808 	int err = 1, mode;
2809 
2810 	if (flags)
2811 		*flags++ = '\0';	/* terminate mode string */
2812 
2813 	if (nodelist) {
2814 		/* NUL-terminate mode or flags string */
2815 		*nodelist++ = '\0';
2816 		if (nodelist_parse(nodelist, nodes))
2817 			goto out;
2818 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2819 			goto out;
2820 	} else
2821 		nodes_clear(nodes);
2822 
2823 	mode = match_string(policy_modes, MPOL_MAX, str);
2824 	if (mode < 0)
2825 		goto out;
2826 
2827 	switch (mode) {
2828 	case MPOL_PREFERRED:
2829 		/*
2830 		 * Insist on a nodelist of one node only, although later
2831 		 * we use first_node(nodes) to grab a single node, so here
2832 		 * nodelist (or nodes) cannot be empty.
2833 		 */
2834 		if (nodelist) {
2835 			char *rest = nodelist;
2836 			while (isdigit(*rest))
2837 				rest++;
2838 			if (*rest)
2839 				goto out;
2840 			if (nodes_empty(nodes))
2841 				goto out;
2842 		}
2843 		break;
2844 	case MPOL_INTERLEAVE:
2845 		/*
2846 		 * Default to online nodes with memory if no nodelist
2847 		 */
2848 		if (!nodelist)
2849 			nodes = node_states[N_MEMORY];
2850 		break;
2851 	case MPOL_LOCAL:
2852 		/*
2853 		 * Don't allow a nodelist;  mpol_new() checks flags
2854 		 */
2855 		if (nodelist)
2856 			goto out;
2857 		break;
2858 	case MPOL_DEFAULT:
2859 		/*
2860 		 * Insist on an empty nodelist
2861 		 */
2862 		if (!nodelist)
2863 			err = 0;
2864 		goto out;
2865 	case MPOL_PREFERRED_MANY:
2866 	case MPOL_BIND:
2867 		/*
2868 		 * Insist on a nodelist
2869 		 */
2870 		if (!nodelist)
2871 			goto out;
2872 	}
2873 
2874 	mode_flags = 0;
2875 	if (flags) {
2876 		/*
2877 		 * Currently, we only support two mutually exclusive
2878 		 * mode flags.
2879 		 */
2880 		if (!strcmp(flags, "static"))
2881 			mode_flags |= MPOL_F_STATIC_NODES;
2882 		else if (!strcmp(flags, "relative"))
2883 			mode_flags |= MPOL_F_RELATIVE_NODES;
2884 		else
2885 			goto out;
2886 	}
2887 
2888 	new = mpol_new(mode, mode_flags, &nodes);
2889 	if (IS_ERR(new))
2890 		goto out;
2891 
2892 	/*
2893 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2894 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2895 	 */
2896 	if (mode != MPOL_PREFERRED) {
2897 		new->nodes = nodes;
2898 	} else if (nodelist) {
2899 		nodes_clear(new->nodes);
2900 		node_set(first_node(nodes), new->nodes);
2901 	} else {
2902 		new->mode = MPOL_LOCAL;
2903 	}
2904 
2905 	/*
2906 	 * Save nodes for contextualization: this will be used to "clone"
2907 	 * the mempolicy in a specific context [cpuset] at a later time.
2908 	 */
2909 	new->w.user_nodemask = nodes;
2910 
2911 	err = 0;
2912 
2913 out:
2914 	/* Restore string for error message */
2915 	if (nodelist)
2916 		*--nodelist = ':';
2917 	if (flags)
2918 		*--flags = '=';
2919 	if (!err)
2920 		*mpol = new;
2921 	return err;
2922 }
2923 #endif /* CONFIG_TMPFS */
2924 
2925 /**
2926  * mpol_to_str - format a mempolicy structure for printing
2927  * @buffer:  to contain formatted mempolicy string
2928  * @maxlen:  length of @buffer
2929  * @pol:  pointer to mempolicy to be formatted
2930  *
2931  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2932  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2933  * longest flag, "relative", and to display at least a few node ids.
2934  */
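/*
 * Example output (illustrative): "default", "bind:0-3",
 * "interleave=relative:0,2", "prefer (many):1,3" -- the mode name, an
 * optional "=static"/"=relative" flag, then ":" and the nodelist when the
 * nodemask is non-empty.
 */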
2935 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2936 {
2937 	char *p = buffer;
2938 	nodemask_t nodes = NODE_MASK_NONE;
2939 	unsigned short mode = MPOL_DEFAULT;
2940 	unsigned short flags = 0;
2941 
2942 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2943 		mode = pol->mode;
2944 		flags = pol->flags;
2945 	}
2946 
2947 	switch (mode) {
2948 	case MPOL_DEFAULT:
2949 	case MPOL_LOCAL:
2950 		break;
2951 	case MPOL_PREFERRED:
2952 	case MPOL_PREFERRED_MANY:
2953 	case MPOL_BIND:
2954 	case MPOL_INTERLEAVE:
2955 		nodes = pol->nodes;
2956 		break;
2957 	default:
2958 		WARN_ON_ONCE(1);
2959 		snprintf(p, maxlen, "unknown");
2960 		return;
2961 	}
2962 
2963 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2964 
2965 	if (flags & MPOL_MODE_FLAGS) {
2966 		p += snprintf(p, buffer + maxlen - p, "=");
2967 
2968 		/*
2969 		 * Currently, the only defined flags are mutually exclusive
2970 		 */
2971 		if (flags & MPOL_F_STATIC_NODES)
2972 			p += snprintf(p, buffer + maxlen - p, "static");
2973 		else if (flags & MPOL_F_RELATIVE_NODES)
2974 			p += snprintf(p, buffer + maxlen - p, "relative");
2975 	}
2976 
2977 	if (!nodes_empty(nodes))
2978 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2979 			       nodemask_pr_args(&nodes));
2980 }
2981 
2982 bool numa_demotion_enabled = false;
2983 
2984 #ifdef CONFIG_SYSFS
2985 static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
2986 					  struct kobj_attribute *attr, char *buf)
2987 {
2988 	return sysfs_emit(buf, "%s\n",
2989 			  numa_demotion_enabled ? "true" : "false");
2990 }
2991 
2992 static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
2993 					   struct kobj_attribute *attr,
2994 					   const char *buf, size_t count)
2995 {
2996 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
2997 		numa_demotion_enabled = true;
2998 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
2999 		numa_demotion_enabled = false;
3000 	else
3001 		return -EINVAL;
3002 
3003 	return count;
3004 }
3005 
3006 static struct kobj_attribute numa_demotion_enabled_attr =
3007 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
3008 	       numa_demotion_enabled_store);
3009 
3010 static struct attribute *numa_attrs[] = {
3011 	&numa_demotion_enabled_attr.attr,
3012 	NULL,
3013 };
3014 
3015 static const struct attribute_group numa_attr_group = {
3016 	.attrs = numa_attrs,
3017 };
3018 
3019 static int __init numa_init_sysfs(void)
3020 {
3021 	int err;
3022 	struct kobject *numa_kobj;
3023 
3024 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
3025 	if (!numa_kobj) {
3026 		pr_err("failed to create numa kobject\n");
3027 		return -ENOMEM;
3028 	}
3029 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
3030 	if (err) {
3031 		pr_err("failed to register numa group\n");
3032 		goto delete_obj;
3033 	}
3034 	return 0;
3035 
3036 delete_obj:
3037 	kobject_put(numa_kobj);
3038 	return err;
3039 }
3040 subsys_initcall(numa_init_sysfs);
3041 #endif
3042