1 /*
2  * Simple NUMA memory policy for the Linux kernel.
3  *
4  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6  * Subject to the GNU Public License, version 2.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy a process counter
20  *                is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind would truly restrict
26  *                the allocation to memory nodes instead
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case node -1 here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * default        Allocate on the local node first, or when on a VMA
35  *                use the process policy. This is what Linux always did
36  *		  in a NUMA aware kernel and still does by, ahem, default.
37  *
38  * The process policy is applied for most non-interrupt memory allocations
39  * in that process' context. Interrupts ignore the policies and always
40  * try to allocate on the local CPU. The VMA policy is only applied for memory
41  * allocations for a VMA in the VM.
42  *
43  * Currently there are a few corner cases in swapping where the policy
44  * is not applied, but the majority should be handled. When process policy
45  * is used it is not remembered over swap outs/swap ins.
46  *
47  * Only the highest zone in the zone hierarchy gets policied. Allocations
48  * requesting a lower zone just use default policy. This implies that
49  * on systems with highmem, kernel lowmem allocations don't get policied.
50  * Same with GFP_DMA allocations.
51  *
52  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53  * all users and remembered even when nobody has memory mapped.
54  */
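
/*
 * Example (userspace, for illustration only): the policies above are
 * normally selected through the set_mempolicy(2) and mbind(2) system
 * calls implemented further down in this file.  A minimal sketch using
 * the libnuma <numaif.h> wrappers might look like:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	// Interleave future allocations of this process across
 *	// nodes 0 and 1 (process policy).
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// Restrict an existing mapping to node 0 only (VMA policy).
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, 8 * sizeof(node0),
 *	      MPOL_MF_STRICT);
 *
 * addr and length here are hypothetical; maxnode counts the bits that
 * are meaningful in the mask, as parsed by get_nodes() below.
 */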
55 
56 /* Notebook:
57    fix mmap readahead to honour policy and enable policy for any page cache
58    object
59    statistics for bigpages
60    global policy for page cache? currently it uses process policy. Requires
61    first item above.
62    handle mremap for shared memory (currently ignored for the policy)
63    grows down?
64    make bind policy root only? It can trigger oom much faster and the
65    kernel is not always grateful with that.
66 */
67 
68 #include <linux/mempolicy.h>
69 #include <linux/mm.h>
70 #include <linux/highmem.h>
71 #include <linux/hugetlb.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/nodemask.h>
75 #include <linux/cpuset.h>
76 #include <linux/gfp.h>
77 #include <linux/slab.h>
78 #include <linux/string.h>
79 #include <linux/module.h>
80 #include <linux/nsproxy.h>
81 #include <linux/interrupt.h>
82 #include <linux/init.h>
83 #include <linux/compat.h>
84 #include <linux/swap.h>
85 #include <linux/seq_file.h>
86 #include <linux/proc_fs.h>
87 #include <linux/migrate.h>
88 #include <linux/rmap.h>
89 #include <linux/security.h>
90 #include <linux/syscalls.h>
91 #include <linux/ctype.h>
92 
93 #include <asm/tlbflush.h>
94 #include <asm/uaccess.h>
95 
96 #include "internal.h"
97 
98 /* Internal flags */
99 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
100 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
101 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */
102 
103 static struct kmem_cache *policy_cache;
104 static struct kmem_cache *sn_cache;
105 
106 /* Highest zone. A specific allocation for a zone below that is not
107    policied. */
108 enum zone_type policy_zone = 0;
109 
110 /*
111  * run-time system-wide default policy => local allocation
112  */
113 struct mempolicy default_policy = {
114 	.refcnt = ATOMIC_INIT(1), /* never free it */
115 	.mode = MPOL_PREFERRED,
116 	.flags = MPOL_F_LOCAL,
117 };
118 
119 static const struct mempolicy_operations {
120 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
121 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
122 } mpol_ops[MPOL_MAX];
123 
124 /* Check that the nodemask contains at least one populated zone */
125 static int is_valid_nodemask(const nodemask_t *nodemask)
126 {
127 	int nd, k;
128 
129 	/* Check that there is something useful in this mask */
130 	k = policy_zone;
131 
132 	for_each_node_mask(nd, *nodemask) {
133 		struct zone *z;
134 
135 		for (k = 0; k <= policy_zone; k++) {
136 			z = &NODE_DATA(nd)->node_zones[k];
137 			if (z->present_pages > 0)
138 				return 1;
139 		}
140 	}
141 
142 	return 0;
143 }
144 
145 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
146 {
147 	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
148 }
149 
150 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
151 				   const nodemask_t *rel)
152 {
153 	nodemask_t tmp;
154 	nodes_fold(tmp, *orig, nodes_weight(*rel));
155 	nodes_onto(*ret, tmp, *rel);
156 }
157 
158 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
159 {
160 	if (nodes_empty(*nodes))
161 		return -EINVAL;
162 	pol->v.nodes = *nodes;
163 	return 0;
164 }
165 
166 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
167 {
168 	if (!nodes)
169 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
170 	else if (nodes_empty(*nodes))
171 		return -EINVAL;			/*  no allowed nodes */
172 	else
173 		pol->v.preferred_node = first_node(*nodes);
174 	return 0;
175 }
176 
177 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
178 {
179 	if (!is_valid_nodemask(nodes))
180 		return -EINVAL;
181 	pol->v.nodes = *nodes;
182 	return 0;
183 }
184 
185 /* Create a new policy */
186 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
187 				  nodemask_t *nodes)
188 {
189 	struct mempolicy *policy;
190 	nodemask_t cpuset_context_nmask;
191 	int ret;
192 
193 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
194 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
195 
196 	if (mode == MPOL_DEFAULT) {
197 		if (nodes && !nodes_empty(*nodes))
198 			return ERR_PTR(-EINVAL);
199 		return NULL;	/* simply delete any existing policy */
200 	}
201 	VM_BUG_ON(!nodes);
202 
203 	/*
204 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
205 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
206 	 * All other modes require a valid pointer to a non-empty nodemask.
207 	 */
208 	if (mode == MPOL_PREFERRED) {
209 		if (nodes_empty(*nodes)) {
210 			if (((flags & MPOL_F_STATIC_NODES) ||
211 			     (flags & MPOL_F_RELATIVE_NODES)))
212 				return ERR_PTR(-EINVAL);
213 			nodes = NULL;	/* flag local alloc */
214 		}
215 	} else if (nodes_empty(*nodes))
216 		return ERR_PTR(-EINVAL);
217 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
218 	if (!policy)
219 		return ERR_PTR(-ENOMEM);
220 	atomic_set(&policy->refcnt, 1);
221 	policy->mode = mode;
222 	policy->flags = flags;
223 
224 	if (nodes) {
225 		/*
226 		 * cpuset related setup doesn't apply to local allocation
227 		 */
228 		cpuset_update_task_memory_state();
229 		if (flags & MPOL_F_RELATIVE_NODES)
230 			mpol_relative_nodemask(&cpuset_context_nmask, nodes,
231 					       &cpuset_current_mems_allowed);
232 		else
233 			nodes_and(cpuset_context_nmask, *nodes,
234 				  cpuset_current_mems_allowed);
235 		if (mpol_store_user_nodemask(policy))
236 			policy->w.user_nodemask = *nodes;
237 		else
238 			policy->w.cpuset_mems_allowed =
239 						cpuset_mems_allowed(current);
240 	}
241 
242 	ret = mpol_ops[mode].create(policy,
243 				nodes ? &cpuset_context_nmask : NULL);
244 	if (ret < 0) {
245 		kmem_cache_free(policy_cache, policy);
246 		return ERR_PTR(ret);
247 	}
248 	return policy;
249 }
250 
251 /* Slow path of a mpol destructor. */
252 void __mpol_put(struct mempolicy *p)
253 {
254 	if (!atomic_dec_and_test(&p->refcnt))
255 		return;
256 	kmem_cache_free(policy_cache, p);
257 }
258 
259 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
260 {
261 }
262 
263 static void mpol_rebind_nodemask(struct mempolicy *pol,
264 				 const nodemask_t *nodes)
265 {
266 	nodemask_t tmp;
267 
268 	if (pol->flags & MPOL_F_STATIC_NODES)
269 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
270 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
271 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
272 	else {
273 		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
274 			    *nodes);
275 		pol->w.cpuset_mems_allowed = *nodes;
276 	}
277 
278 	pol->v.nodes = tmp;
279 	if (!node_isset(current->il_next, tmp)) {
280 		current->il_next = next_node(current->il_next, tmp);
281 		if (current->il_next >= MAX_NUMNODES)
282 			current->il_next = first_node(tmp);
283 		if (current->il_next >= MAX_NUMNODES)
284 			current->il_next = numa_node_id();
285 	}
286 }
287 
288 static void mpol_rebind_preferred(struct mempolicy *pol,
289 				  const nodemask_t *nodes)
290 {
291 	nodemask_t tmp;
292 
293 	if (pol->flags & MPOL_F_STATIC_NODES) {
294 		int node = first_node(pol->w.user_nodemask);
295 
296 		if (node_isset(node, *nodes)) {
297 			pol->v.preferred_node = node;
298 			pol->flags &= ~MPOL_F_LOCAL;
299 		} else
300 			pol->flags |= MPOL_F_LOCAL;
301 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
302 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
303 		pol->v.preferred_node = first_node(tmp);
304 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
305 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
306 						   pol->w.cpuset_mems_allowed,
307 						   *nodes);
308 		pol->w.cpuset_mems_allowed = *nodes;
309 	}
310 }
311 
312 /* Migrate a policy to a different set of nodes */
313 static void mpol_rebind_policy(struct mempolicy *pol,
314 			       const nodemask_t *newmask)
315 {
316 	if (!pol)
317 		return;
318 	if (!mpol_store_user_nodemask(pol) &&
319 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
320 		return;
321 	mpol_ops[pol->mode].rebind(pol, newmask);
322 }
323 
324 /*
325  * Wrapper for mpol_rebind_policy() that just requires task
326  * pointer, and updates task mempolicy.
327  */
328 
329 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
330 {
331 	mpol_rebind_policy(tsk->mempolicy, new);
332 }
333 
334 /*
335  * Rebind each vma in mm to new nodemask.
336  *
337  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
338  */
339 
340 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
341 {
342 	struct vm_area_struct *vma;
343 
344 	down_write(&mm->mmap_sem);
345 	for (vma = mm->mmap; vma; vma = vma->vm_next)
346 		mpol_rebind_policy(vma->vm_policy, new);
347 	up_write(&mm->mmap_sem);
348 }
349 
350 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
351 	[MPOL_DEFAULT] = {
352 		.rebind = mpol_rebind_default,
353 	},
354 	[MPOL_INTERLEAVE] = {
355 		.create = mpol_new_interleave,
356 		.rebind = mpol_rebind_nodemask,
357 	},
358 	[MPOL_PREFERRED] = {
359 		.create = mpol_new_preferred,
360 		.rebind = mpol_rebind_preferred,
361 	},
362 	[MPOL_BIND] = {
363 		.create = mpol_new_bind,
364 		.rebind = mpol_rebind_nodemask,
365 	},
366 };
367 
368 static void gather_stats(struct page *, void *, int pte_dirty);
369 static void migrate_page_add(struct page *page, struct list_head *pagelist,
370 				unsigned long flags);
371 
372 /* Scan through pages, checking if they follow certain conditions. */
373 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
374 		unsigned long addr, unsigned long end,
375 		const nodemask_t *nodes, unsigned long flags,
376 		void *private)
377 {
378 	pte_t *orig_pte;
379 	pte_t *pte;
380 	spinlock_t *ptl;
381 
382 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
383 	do {
384 		struct page *page;
385 		int nid;
386 
387 		if (!pte_present(*pte))
388 			continue;
389 		page = vm_normal_page(vma, addr, *pte);
390 		if (!page)
391 			continue;
392 		/*
393 		 * The check for PageReserved here is important to avoid
394 		 * handling zero pages and other pages that may have been
395 		 * marked special by the system.
396 		 *
397 		 * If PageReserved were not checked here then, for example,
398 		 * the location of the zero page could influence
399 		 * MPOL_MF_STRICT, zero pages would be counted in
400 		 * the per node stats, and there would be useless attempts
401 		 * to put zero pages on the migration list.
402 		 */
403 		if (PageReserved(page))
404 			continue;
405 		nid = page_to_nid(page);
406 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
407 			continue;
408 
409 		if (flags & MPOL_MF_STATS)
410 			gather_stats(page, private, pte_dirty(*pte));
411 		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
412 			migrate_page_add(page, private, flags);
413 		else
414 			break;
415 	} while (pte++, addr += PAGE_SIZE, addr != end);
416 	pte_unmap_unlock(orig_pte, ptl);
417 	return addr != end;
418 }
419 
420 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
421 		unsigned long addr, unsigned long end,
422 		const nodemask_t *nodes, unsigned long flags,
423 		void *private)
424 {
425 	pmd_t *pmd;
426 	unsigned long next;
427 
428 	pmd = pmd_offset(pud, addr);
429 	do {
430 		next = pmd_addr_end(addr, end);
431 		if (pmd_none_or_clear_bad(pmd))
432 			continue;
433 		if (check_pte_range(vma, pmd, addr, next, nodes,
434 				    flags, private))
435 			return -EIO;
436 	} while (pmd++, addr = next, addr != end);
437 	return 0;
438 }
439 
440 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
441 		unsigned long addr, unsigned long end,
442 		const nodemask_t *nodes, unsigned long flags,
443 		void *private)
444 {
445 	pud_t *pud;
446 	unsigned long next;
447 
448 	pud = pud_offset(pgd, addr);
449 	do {
450 		next = pud_addr_end(addr, end);
451 		if (pud_none_or_clear_bad(pud))
452 			continue;
453 		if (check_pmd_range(vma, pud, addr, next, nodes,
454 				    flags, private))
455 			return -EIO;
456 	} while (pud++, addr = next, addr != end);
457 	return 0;
458 }
459 
460 static inline int check_pgd_range(struct vm_area_struct *vma,
461 		unsigned long addr, unsigned long end,
462 		const nodemask_t *nodes, unsigned long flags,
463 		void *private)
464 {
465 	pgd_t *pgd;
466 	unsigned long next;
467 
468 	pgd = pgd_offset(vma->vm_mm, addr);
469 	do {
470 		next = pgd_addr_end(addr, end);
471 		if (pgd_none_or_clear_bad(pgd))
472 			continue;
473 		if (check_pud_range(vma, pgd, addr, next, nodes,
474 				    flags, private))
475 			return -EIO;
476 	} while (pgd++, addr = next, addr != end);
477 	return 0;
478 }
479 
480 /*
481  * Check if all pages in a range are on a set of nodes.
482  * If pagelist != NULL then isolate pages from the LRU and
483  * put them on the pagelist.
484  */
485 static struct vm_area_struct *
486 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
487 		const nodemask_t *nodes, unsigned long flags, void *private)
488 {
489 	int err;
490 	struct vm_area_struct *first, *vma, *prev;
491 
492 
493 	first = find_vma(mm, start);
494 	if (!first)
495 		return ERR_PTR(-EFAULT);
496 	prev = NULL;
497 	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
498 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
499 			if (!vma->vm_next && vma->vm_end < end)
500 				return ERR_PTR(-EFAULT);
501 			if (prev && prev->vm_end < vma->vm_start)
502 				return ERR_PTR(-EFAULT);
503 		}
504 		if (!is_vm_hugetlb_page(vma) &&
505 		    ((flags & MPOL_MF_STRICT) ||
506 		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
507 				vma_migratable(vma)))) {
508 			unsigned long endvma = vma->vm_end;
509 
510 			if (endvma > end)
511 				endvma = end;
512 			if (vma->vm_start > start)
513 				start = vma->vm_start;
514 			err = check_pgd_range(vma, start, endvma, nodes,
515 						flags, private);
516 			if (err) {
517 				first = ERR_PTR(err);
518 				break;
519 			}
520 		}
521 		prev = vma;
522 	}
523 	return first;
524 }
525 
526 /* Apply policy to a single VMA */
527 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
528 {
529 	int err = 0;
530 	struct mempolicy *old = vma->vm_policy;
531 
532 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
533 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
534 		 vma->vm_ops, vma->vm_file,
535 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
536 
537 	if (vma->vm_ops && vma->vm_ops->set_policy)
538 		err = vma->vm_ops->set_policy(vma, new);
539 	if (!err) {
540 		mpol_get(new);
541 		vma->vm_policy = new;
542 		mpol_put(old);
543 	}
544 	return err;
545 }
546 
547 /* Step 2: apply policy to a range and do splits. */
548 static int mbind_range(struct vm_area_struct *vma, unsigned long start,
549 		       unsigned long end, struct mempolicy *new)
550 {
551 	struct vm_area_struct *next;
552 	int err;
553 
554 	err = 0;
555 	for (; vma && vma->vm_start < end; vma = next) {
556 		next = vma->vm_next;
557 		if (vma->vm_start < start)
558 			err = split_vma(vma->vm_mm, vma, start, 1);
559 		if (!err && vma->vm_end > end)
560 			err = split_vma(vma->vm_mm, vma, end, 0);
561 		if (!err)
562 			err = policy_vma(vma, new);
563 		if (err)
564 			break;
565 	}
566 	return err;
567 }
568 
569 /*
570  * Update task->flags PF_MEMPOLICY bit: set iff non-default
571  * mempolicy.  Allows more rapid checking of this (combined perhaps
572  * with other PF_* flag bits) on memory allocation hot code paths.
573  *
574  * If called from outside this file, the task 'p' should -only- be
575  * a newly forked child not yet visible on the task list, because
576  * manipulating the task flags of a visible task is not safe.
577  *
578  * The above limitation is why this routine has the funny name
579  * mpol_fix_fork_child_flag().
580  *
581  * It is also safe to call this with a task pointer of current,
582  * which the static wrapper mpol_set_task_struct_flag() does,
583  * for use within this file.
584  */
585 
586 void mpol_fix_fork_child_flag(struct task_struct *p)
587 {
588 	if (p->mempolicy)
589 		p->flags |= PF_MEMPOLICY;
590 	else
591 		p->flags &= ~PF_MEMPOLICY;
592 }
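
/*
 * Call-site sketch (illustrative): the fork path is expected to call
 *
 *	mpol_fix_fork_child_flag(p);
 *
 * on the newly forked child before it becomes visible, so that the
 * child's PF_MEMPOLICY bit matches its p->mempolicy from the start.
 */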
593 
594 static void mpol_set_task_struct_flag(void)
595 {
596 	mpol_fix_fork_child_flag(current);
597 }
598 
599 /* Set the process memory policy */
600 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
601 			     nodemask_t *nodes)
602 {
603 	struct mempolicy *new;
604 	struct mm_struct *mm = current->mm;
605 
606 	new = mpol_new(mode, flags, nodes);
607 	if (IS_ERR(new))
608 		return PTR_ERR(new);
609 
610 	/*
611 	 * prevent changing our mempolicy while show_numa_maps()
612 	 * is using it.
613 	 * Note:  do_set_mempolicy() can be called at init time
614 	 * with no 'mm'.
615 	 */
616 	if (mm)
617 		down_write(&mm->mmap_sem);
618 	mpol_put(current->mempolicy);
619 	current->mempolicy = new;
620 	mpol_set_task_struct_flag();
621 	if (new && new->mode == MPOL_INTERLEAVE &&
622 	    nodes_weight(new->v.nodes))
623 		current->il_next = first_node(new->v.nodes);
624 	if (mm)
625 		up_write(&mm->mmap_sem);
626 
627 	return 0;
628 }
629 
630 /*
631  * Return nodemask for policy for get_mempolicy() query
632  */
633 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
634 {
635 	nodes_clear(*nodes);
636 	if (p == &default_policy)
637 		return;
638 
639 	switch (p->mode) {
640 	case MPOL_BIND:
641 		/* Fall through */
642 	case MPOL_INTERLEAVE:
643 		*nodes = p->v.nodes;
644 		break;
645 	case MPOL_PREFERRED:
646 		if (!(p->flags & MPOL_F_LOCAL))
647 			node_set(p->v.preferred_node, *nodes);
648 		/* else return empty node mask for local allocation */
649 		break;
650 	default:
651 		BUG();
652 	}
653 }
654 
655 static int lookup_node(struct mm_struct *mm, unsigned long addr)
656 {
657 	struct page *p;
658 	int err;
659 
660 	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
661 	if (err >= 0) {
662 		err = page_to_nid(p);
663 		put_page(p);
664 	}
665 	return err;
666 }
667 
668 /* Retrieve NUMA policy */
669 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
670 			     unsigned long addr, unsigned long flags)
671 {
672 	int err;
673 	struct mm_struct *mm = current->mm;
674 	struct vm_area_struct *vma = NULL;
675 	struct mempolicy *pol = current->mempolicy;
676 
677 	cpuset_update_task_memory_state();
678 	if (flags &
679 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
680 		return -EINVAL;
681 
682 	if (flags & MPOL_F_MEMS_ALLOWED) {
683 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
684 			return -EINVAL;
685 		*policy = 0;	/* just so it's initialized */
686 		*nmask  = cpuset_current_mems_allowed;
687 		return 0;
688 	}
689 
690 	if (flags & MPOL_F_ADDR) {
691 		/*
692 		 * Do NOT fall back to task policy if the
693 		 * vma/shared policy at addr is NULL.  We
694 		 * want to return MPOL_DEFAULT in this case.
695 		 */
696 		down_read(&mm->mmap_sem);
697 		vma = find_vma_intersection(mm, addr, addr+1);
698 		if (!vma) {
699 			up_read(&mm->mmap_sem);
700 			return -EFAULT;
701 		}
702 		if (vma->vm_ops && vma->vm_ops->get_policy)
703 			pol = vma->vm_ops->get_policy(vma, addr);
704 		else
705 			pol = vma->vm_policy;
706 	} else if (addr)
707 		return -EINVAL;
708 
709 	if (!pol)
710 		pol = &default_policy;	/* indicates default behavior */
711 
712 	if (flags & MPOL_F_NODE) {
713 		if (flags & MPOL_F_ADDR) {
714 			err = lookup_node(mm, addr);
715 			if (err < 0)
716 				goto out;
717 			*policy = err;
718 		} else if (pol == current->mempolicy &&
719 				pol->mode == MPOL_INTERLEAVE) {
720 			*policy = current->il_next;
721 		} else {
722 			err = -EINVAL;
723 			goto out;
724 		}
725 	} else {
726 		*policy = pol == &default_policy ? MPOL_DEFAULT :
727 						pol->mode;
728 		/*
729 		 * Internal mempolicy flags must be masked off before exposing
730 		 * the policy to userspace.
731 		 */
732 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
733 	}
734 
735 	if (vma) {
736 		up_read(&current->mm->mmap_sem);
737 		vma = NULL;
738 	}
739 
740 	err = 0;
741 	if (nmask)
742 		get_policy_nodemask(pol, nmask);
743 
744  out:
745 	mpol_cond_put(pol);
746 	if (vma)
747 		up_read(&current->mm->mmap_sem);
748 	return err;
749 }
750 
751 #ifdef CONFIG_MIGRATION
752 /*
753  * page migration
754  */
755 static void migrate_page_add(struct page *page, struct list_head *pagelist,
756 				unsigned long flags)
757 {
758 	/*
759 	 * Avoid migrating a page that is shared with others.
760 	 */
761 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
762 		if (!isolate_lru_page(page)) {
763 			list_add_tail(&page->lru, pagelist);
764 		}
765 	}
766 }
767 
768 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
769 {
770 	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
771 }
772 
773 /*
774  * Migrate pages from one node to a target node.
775  * Returns error or the number of pages not migrated.
776  */
777 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
778 			   int flags)
779 {
780 	nodemask_t nmask;
781 	LIST_HEAD(pagelist);
782 	int err = 0;
783 
784 	nodes_clear(nmask);
785 	node_set(source, nmask);
786 
787 	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
788 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
789 
790 	if (!list_empty(&pagelist))
791 		err = migrate_pages(&pagelist, new_node_page, dest);
792 
793 	return err;
794 }
795 
796 /*
797  * Move pages between the two nodesets so as to preserve the physical
798  * layout as much as possible.
799  *
800  * Returns the number of pages that could not be moved.
801  */
802 int do_migrate_pages(struct mm_struct *mm,
803 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
804 {
805 	int busy = 0;
806 	int err;
807 	nodemask_t tmp;
808 
809 	err = migrate_prep();
810 	if (err)
811 		return err;
812 
813 	down_read(&mm->mmap_sem);
814 
815 	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
816 	if (err)
817 		goto out;
818 
819 /*
820  * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
821  * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
822  * bit in 'tmp', and return that <source, dest> pair for migration.
823  * The pair of nodemasks 'to' and 'from' define the map.
824  *
825  * If no pair of bits is found that way, fallback to picking some
826  * pair of 'source' and 'dest' bits that are not the same.  If the
827  * 'source' and 'dest' bits are the same, this represents a node
828  * that will be migrating to itself, so no pages need move.
829  *
830  * If no bits are left in 'tmp', or if all remaining bits left
831  * in 'tmp' correspond to the same bit in 'to', return false
832  * (nothing left to migrate).
833  *
834  * This lets us pick a pair of nodes to migrate between, such that
835  * if possible the dest node is not already occupied by some other
836  * source node, minimizing the risk of overloading the memory on a
837  * node that would happen if we migrated incoming memory to a node
838  * before migrating outgoing memory from that same node.
839  *
840  * A single scan of tmp is sufficient.  As we go, we remember the
841  * most recent <s, d> pair that moved (s != d).  If we find a pair
842  * that not only moved, but what's better, moved to an empty slot
843  * (d is not set in tmp), then we break out then, with that pair.
844  * Otherwise when we finish scanning 'tmp', we at least have the
845  * most recent <s, d> pair that moved.  If we get all the way through
846  * the scan of tmp without finding any node that moved, much less
847  * moved to an empty node, then there is nothing left worth migrating.
848  */
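
/*
 * Illustration (hypothetical node numbers): with from_nodes = {0,1}
 * and to_nodes = {1,2}, the first scan of tmp = {0,1} sees 0 -> 1 but
 * finds dest 1 still set in tmp, keeps scanning, and picks 1 -> 2
 * because 2 is not in tmp; <1,2> is migrated first and 1 cleared.
 * The next pass migrates <0,1>, so node 1 is drained before pages
 * are moved onto it.
 */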
849 
850 	tmp = *from_nodes;
851 	while (!nodes_empty(tmp)) {
852 		int s,d;
853 		int source = -1;
854 		int dest = 0;
855 
856 		for_each_node_mask(s, tmp) {
857 			d = node_remap(s, *from_nodes, *to_nodes);
858 			if (s == d)
859 				continue;
860 
861 			source = s;	/* Node moved. Memorize */
862 			dest = d;
863 
864 			/* dest not in remaining from nodes? */
865 			if (!node_isset(dest, tmp))
866 				break;
867 		}
868 		if (source == -1)
869 			break;
870 
871 		node_clear(source, tmp);
872 		err = migrate_to_node(mm, source, dest, flags);
873 		if (err > 0)
874 			busy += err;
875 		if (err < 0)
876 			break;
877 	}
878 out:
879 	up_read(&mm->mmap_sem);
880 	if (err < 0)
881 		return err;
882 	return busy;
883 
884 }
885 
886 /*
887  * Allocate a new page for page migration based on vma policy.
888  * Start assuming that page is mapped by vma pointed to by @private.
889  * Search forward from there, if not.  N.B., this assumes that the
890  * list of pages handed to migrate_pages()--which is how we get here--
891  * is in virtual address order.
892  */
893 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
894 {
895 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
896 	unsigned long uninitialized_var(address);
897 
898 	while (vma) {
899 		address = page_address_in_vma(page, vma);
900 		if (address != -EFAULT)
901 			break;
902 		vma = vma->vm_next;
903 	}
904 
905 	/*
906 	 * if !vma, alloc_page_vma() will use task or system default policy
907 	 */
908 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
909 }
910 #else
911 
912 static void migrate_page_add(struct page *page, struct list_head *pagelist,
913 				unsigned long flags)
914 {
915 }
916 
917 int do_migrate_pages(struct mm_struct *mm,
918 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
919 {
920 	return -ENOSYS;
921 }
922 
923 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
924 {
925 	return NULL;
926 }
927 #endif
928 
929 static long do_mbind(unsigned long start, unsigned long len,
930 		     unsigned short mode, unsigned short mode_flags,
931 		     nodemask_t *nmask, unsigned long flags)
932 {
933 	struct vm_area_struct *vma;
934 	struct mm_struct *mm = current->mm;
935 	struct mempolicy *new;
936 	unsigned long end;
937 	int err;
938 	LIST_HEAD(pagelist);
939 
940 	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
941 				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
942 		return -EINVAL;
943 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
944 		return -EPERM;
945 
946 	if (start & ~PAGE_MASK)
947 		return -EINVAL;
948 
949 	if (mode == MPOL_DEFAULT)
950 		flags &= ~MPOL_MF_STRICT;
951 
952 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
953 	end = start + len;
954 
955 	if (end < start)
956 		return -EINVAL;
957 	if (end == start)
958 		return 0;
959 
960 	new = mpol_new(mode, mode_flags, nmask);
961 	if (IS_ERR(new))
962 		return PTR_ERR(new);
963 
964 	/*
965 	 * If we are using the default policy then operation
966 	 * on discontinuous address spaces is okay after all
967 	 */
968 	if (!new)
969 		flags |= MPOL_MF_DISCONTIG_OK;
970 
971 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
972 		 start, start + len, mode, mode_flags,
973 		 nmask ? nodes_addr(*nmask)[0] : -1);
974 
975 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
976 
977 		err = migrate_prep();
978 		if (err)
979 			return err;
980 	}
981 	down_write(&mm->mmap_sem);
982 	vma = check_range(mm, start, end, nmask,
983 			  flags | MPOL_MF_INVERT, &pagelist);
984 
985 	err = PTR_ERR(vma);
986 	if (!IS_ERR(vma)) {
987 		int nr_failed = 0;
988 
989 		err = mbind_range(vma, start, end, new);
990 
991 		if (!list_empty(&pagelist))
992 			nr_failed = migrate_pages(&pagelist, new_vma_page,
993 						(unsigned long)vma);
994 
995 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
996 			err = -EIO;
997 	}
998 
999 	up_write(&mm->mmap_sem);
1000 	mpol_put(new);
1001 	return err;
1002 }
1003 
1004 /*
1005  * User space interface with variable sized bitmaps for nodelists.
1006  */
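
/*
 * Convention, for illustration (hypothetical values): userspace hands
 * in an array of unsigned longs plus 'maxnode', the number of bits it
 * considers meaningful.  A mask of 0x5 with maxnode = 65, for example,
 * requests nodes 0 and 2; get_nodes() below copies
 * BITS_TO_LONGS(maxnode - 1) longs and masks off any bits at or above
 * bit maxnode - 1 in the last long.
 */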
1007 
1008 /* Copy a node mask from user space. */
1009 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1010 		     unsigned long maxnode)
1011 {
1012 	unsigned long k;
1013 	unsigned long nlongs;
1014 	unsigned long endmask;
1015 
1016 	--maxnode;
1017 	nodes_clear(*nodes);
1018 	if (maxnode == 0 || !nmask)
1019 		return 0;
1020 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1021 		return -EINVAL;
1022 
1023 	nlongs = BITS_TO_LONGS(maxnode);
1024 	if ((maxnode % BITS_PER_LONG) == 0)
1025 		endmask = ~0UL;
1026 	else
1027 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1028 
1029 	/* When the user specified more nodes than supported, just check
1030 	   that the unsupported part is all zero. */
1031 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1032 		if (nlongs > PAGE_SIZE/sizeof(long))
1033 			return -EINVAL;
1034 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1035 			unsigned long t;
1036 			if (get_user(t, nmask + k))
1037 				return -EFAULT;
1038 			if (k == nlongs - 1) {
1039 				if (t & endmask)
1040 					return -EINVAL;
1041 			} else if (t)
1042 				return -EINVAL;
1043 		}
1044 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1045 		endmask = ~0UL;
1046 	}
1047 
1048 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1049 		return -EFAULT;
1050 	nodes_addr(*nodes)[nlongs-1] &= endmask;
1051 	return 0;
1052 }
1053 
1054 /* Copy a kernel node mask to user space */
1055 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1056 			      nodemask_t *nodes)
1057 {
1058 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1059 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1060 
1061 	if (copy > nbytes) {
1062 		if (copy > PAGE_SIZE)
1063 			return -EINVAL;
1064 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1065 			return -EFAULT;
1066 		copy = nbytes;
1067 	}
1068 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1069 }
1070 
1071 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1072 		unsigned long, mode, unsigned long __user *, nmask,
1073 		unsigned long, maxnode, unsigned, flags)
1074 {
1075 	nodemask_t nodes;
1076 	int err;
1077 	unsigned short mode_flags;
1078 
1079 	mode_flags = mode & MPOL_MODE_FLAGS;
1080 	mode &= ~MPOL_MODE_FLAGS;
1081 	if (mode >= MPOL_MAX)
1082 		return -EINVAL;
1083 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1084 	    (mode_flags & MPOL_F_RELATIVE_NODES))
1085 		return -EINVAL;
1086 	err = get_nodes(&nodes, nmask, maxnode);
1087 	if (err)
1088 		return err;
1089 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1090 }
1091 
1092 /* Set the process memory policy */
1093 SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1094 		unsigned long, maxnode)
1095 {
1096 	int err;
1097 	nodemask_t nodes;
1098 	unsigned short flags;
1099 
1100 	flags = mode & MPOL_MODE_FLAGS;
1101 	mode &= ~MPOL_MODE_FLAGS;
1102 	if ((unsigned int)mode >= MPOL_MAX)
1103 		return -EINVAL;
1104 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1105 		return -EINVAL;
1106 	err = get_nodes(&nodes, nmask, maxnode);
1107 	if (err)
1108 		return err;
1109 	return do_set_mempolicy(mode, flags, &nodes);
1110 }
1111 
1112 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1113 		const unsigned long __user *, old_nodes,
1114 		const unsigned long __user *, new_nodes)
1115 {
1116 	const struct cred *cred = current_cred(), *tcred;
1117 	struct mm_struct *mm;
1118 	struct task_struct *task;
1119 	nodemask_t old;
1120 	nodemask_t new;
1121 	nodemask_t task_nodes;
1122 	int err;
1123 
1124 	err = get_nodes(&old, old_nodes, maxnode);
1125 	if (err)
1126 		return err;
1127 
1128 	err = get_nodes(&new, new_nodes, maxnode);
1129 	if (err)
1130 		return err;
1131 
1132 	/* Find the mm_struct */
1133 	read_lock(&tasklist_lock);
1134 	task = pid ? find_task_by_vpid(pid) : current;
1135 	if (!task) {
1136 		read_unlock(&tasklist_lock);
1137 		return -ESRCH;
1138 	}
1139 	mm = get_task_mm(task);
1140 	read_unlock(&tasklist_lock);
1141 
1142 	if (!mm)
1143 		return -EINVAL;
1144 
1145 	/*
1146 	 * Check if this process has the right to modify the specified
1147 	 * process. The right exists if the process has administrative
1148 	 * capabilities, superuser privileges or the same
1149 	 * userid as the target process.
1150 	 */
1151 	rcu_read_lock();
1152 	tcred = __task_cred(task);
1153 	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1154 	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
1155 	    !capable(CAP_SYS_NICE)) {
1156 		rcu_read_unlock();
1157 		err = -EPERM;
1158 		goto out;
1159 	}
1160 	rcu_read_unlock();
1161 
1162 	task_nodes = cpuset_mems_allowed(task);
1163 	/* Is the user allowed to access the target nodes? */
1164 	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
1165 		err = -EPERM;
1166 		goto out;
1167 	}
1168 
1169 	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
1170 		err = -EINVAL;
1171 		goto out;
1172 	}
1173 
1174 	err = security_task_movememory(task);
1175 	if (err)
1176 		goto out;
1177 
1178 	err = do_migrate_pages(mm, &old, &new,
1179 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1180 out:
1181 	mmput(mm);
1182 	return err;
1183 }
1184 
1185 
1186 /* Retrieve NUMA policy */
1187 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1188 		unsigned long __user *, nmask, unsigned long, maxnode,
1189 		unsigned long, addr, unsigned long, flags)
1190 {
1191 	int err;
1192 	int uninitialized_var(pval);
1193 	nodemask_t nodes;
1194 
1195 	if (nmask != NULL && maxnode < MAX_NUMNODES)
1196 		return -EINVAL;
1197 
1198 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1199 
1200 	if (err)
1201 		return err;
1202 
1203 	if (policy && put_user(pval, policy))
1204 		return -EFAULT;
1205 
1206 	if (nmask)
1207 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1208 
1209 	return err;
1210 }
1211 
1212 #ifdef CONFIG_COMPAT
1213 
1214 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1215 				     compat_ulong_t __user *nmask,
1216 				     compat_ulong_t maxnode,
1217 				     compat_ulong_t addr, compat_ulong_t flags)
1218 {
1219 	long err;
1220 	unsigned long __user *nm = NULL;
1221 	unsigned long nr_bits, alloc_size;
1222 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1223 
1224 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1225 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1226 
1227 	if (nmask)
1228 		nm = compat_alloc_user_space(alloc_size);
1229 
1230 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1231 
1232 	if (!err && nmask) {
1233 		err = copy_from_user(bm, nm, alloc_size);
1234 		/* ensure entire bitmap is zeroed */
1235 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1236 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1237 	}
1238 
1239 	return err;
1240 }
1241 
1242 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1243 				     compat_ulong_t maxnode)
1244 {
1245 	long err = 0;
1246 	unsigned long __user *nm = NULL;
1247 	unsigned long nr_bits, alloc_size;
1248 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1249 
1250 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1251 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1252 
1253 	if (nmask) {
1254 		err = compat_get_bitmap(bm, nmask, nr_bits);
1255 		nm = compat_alloc_user_space(alloc_size);
1256 		err |= copy_to_user(nm, bm, alloc_size);
1257 	}
1258 
1259 	if (err)
1260 		return -EFAULT;
1261 
1262 	return sys_set_mempolicy(mode, nm, nr_bits+1);
1263 }
1264 
1265 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1266 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
1267 			     compat_ulong_t maxnode, compat_ulong_t flags)
1268 {
1269 	long err = 0;
1270 	unsigned long __user *nm = NULL;
1271 	unsigned long nr_bits, alloc_size;
1272 	nodemask_t bm;
1273 
1274 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1275 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1276 
1277 	if (nmask) {
1278 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1279 		nm = compat_alloc_user_space(alloc_size);
1280 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1281 	}
1282 
1283 	if (err)
1284 		return -EFAULT;
1285 
1286 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1287 }
1288 
1289 #endif
1290 
1291 /*
1292  * get_vma_policy(@task, @vma, @addr)
1293  * @task - task for fallback if vma policy == default
1294  * @vma   - virtual memory area whose policy is sought
1295  * @addr  - address in @vma for shared policy lookup
1296  *
1297  * Returns effective policy for a VMA at specified address.
1298  * Falls back to @task or system default policy, as necessary.
1299  * Current or other task's task mempolicy and non-shared vma policies
1300  * are protected by the task's mmap_sem, which must be held for read by
1301  * the caller.
1302  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1303  * count--added by the get_policy() vm_op, as appropriate--to protect against
1304  * freeing by another task.  It is the caller's responsibility to free the
1305  * extra reference for shared policies.
1306  */
1307 static struct mempolicy *get_vma_policy(struct task_struct *task,
1308 		struct vm_area_struct *vma, unsigned long addr)
1309 {
1310 	struct mempolicy *pol = task->mempolicy;
1311 
1312 	if (vma) {
1313 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1314 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1315 									addr);
1316 			if (vpol)
1317 				pol = vpol;
1318 		} else if (vma->vm_policy)
1319 			pol = vma->vm_policy;
1320 	}
1321 	if (!pol)
1322 		pol = &default_policy;
1323 	return pol;
1324 }
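
/*
 * Typical call pattern (sketch; alloc_page_vma() below does exactly
 * this), run with the relevant mmap_sem held for read:
 *
 *	pol = get_vma_policy(current, vma, addr);
 *	... pick a node/zonelist from pol ...
 *	mpol_cond_put(pol);	// drops the ref only for shared policies
 */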
1325 
1326 /*
1327  * Return a nodemask representing a mempolicy for filtering nodes for
1328  * page allocation
1329  */
1330 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1331 {
1332 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1333 	if (unlikely(policy->mode == MPOL_BIND) &&
1334 			gfp_zone(gfp) >= policy_zone &&
1335 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1336 		return &policy->v.nodes;
1337 
1338 	return NULL;
1339 }
1340 
1341 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1342 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
1343 {
1344 	int nd = numa_node_id();
1345 
1346 	switch (policy->mode) {
1347 	case MPOL_PREFERRED:
1348 		if (!(policy->flags & MPOL_F_LOCAL))
1349 			nd = policy->v.preferred_node;
1350 		break;
1351 	case MPOL_BIND:
1352 		/*
1353 		 * Normally, MPOL_BIND allocations are node-local within the
1354 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1355 		 * current node is part of the mask, we use the zonelist for
1356 		 * the first node in the mask instead.
1357 		 */
1358 		if (unlikely(gfp & __GFP_THISNODE) &&
1359 				unlikely(!node_isset(nd, policy->v.nodes)))
1360 			nd = first_node(policy->v.nodes);
1361 		break;
1362 	case MPOL_INTERLEAVE: /* should not happen */
1363 		break;
1364 	default:
1365 		BUG();
1366 	}
1367 	return node_zonelist(nd, gfp);
1368 }
1369 
1370 /* Do dynamic interleaving for a process */
1371 static unsigned interleave_nodes(struct mempolicy *policy)
1372 {
1373 	unsigned nid, next;
1374 	struct task_struct *me = current;
1375 
1376 	nid = me->il_next;
1377 	next = next_node(nid, policy->v.nodes);
1378 	if (next >= MAX_NUMNODES)
1379 		next = first_node(policy->v.nodes);
1380 	if (next < MAX_NUMNODES)
1381 		me->il_next = next;
1382 	return nid;
1383 }
1384 
1385 /*
1386  * Depending on the memory policy provide a node from which to allocate the
1387  * next slab entry.
1388  * @policy must be protected from freeing by the caller.  If @policy is
1389  * the current task's mempolicy, this protection is implicit, as only the
1390  * task can change its policy.  The system default policy requires no
1391  * such protection.
1392  */
1393 unsigned slab_node(struct mempolicy *policy)
1394 {
1395 	if (!policy || policy->flags & MPOL_F_LOCAL)
1396 		return numa_node_id();
1397 
1398 	switch (policy->mode) {
1399 	case MPOL_PREFERRED:
1400 		/*
1401 		 * handled MPOL_F_LOCAL above
1402 		 */
1403 		return policy->v.preferred_node;
1404 
1405 	case MPOL_INTERLEAVE:
1406 		return interleave_nodes(policy);
1407 
1408 	case MPOL_BIND: {
1409 		/*
1410 		 * Follow bind policy behavior and start allocation at the
1411 		 * first node.
1412 		 */
1413 		struct zonelist *zonelist;
1414 		struct zone *zone;
1415 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1416 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1417 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
1418 							&policy->v.nodes,
1419 							&zone);
1420 		return zone->node;
1421 	}
1422 
1423 	default:
1424 		BUG();
1425 	}
1426 }
1427 
1428 /* Do static interleaving for a VMA with known offset. */
1429 static unsigned offset_il_node(struct mempolicy *pol,
1430 		struct vm_area_struct *vma, unsigned long off)
1431 {
1432 	unsigned nnodes = nodes_weight(pol->v.nodes);
1433 	unsigned target;
1434 	int c;
1435 	int nid = -1;
1436 
1437 	if (!nnodes)
1438 		return numa_node_id();
1439 	target = (unsigned int)off % nnodes;
1440 	c = 0;
1441 	do {
1442 		nid = next_node(nid, pol->v.nodes);
1443 		c++;
1444 	} while (c <= target);
1445 	return nid;
1446 }
1447 
1448 /* Determine a node number for interleave */
1449 static inline unsigned interleave_nid(struct mempolicy *pol,
1450 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1451 {
1452 	if (vma) {
1453 		unsigned long off;
1454 
1455 		/*
1456 		 * for small pages, there is no difference between
1457 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1458 		 * for huge pages, since vm_pgoff is in units of small
1459 		 * pages, we need to shift off the always 0 bits to get
1460 		 * a useful offset.
1461 		 */
1462 		BUG_ON(shift < PAGE_SHIFT);
1463 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1464 		off += (addr - vma->vm_start) >> shift;
1465 		return offset_il_node(pol, vma, off);
1466 	} else
1467 		return interleave_nodes(pol);
1468 }
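
/*
 * Worked example (hypothetical numbers): for a 2MB huge page VMA,
 * shift = 21 and PAGE_SHIFT = 12, so the code above computes
 *
 *	off = (vm_pgoff >> 9) + ((addr - vma->vm_start) >> 21);
 *
 * i.e. the huge-page index from the start of the backing object,
 * which offset_il_node() then folds over the interleave nodemask.
 */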
1469 
1470 #ifdef CONFIG_HUGETLBFS
1471 /*
1472  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1473  * @vma = virtual memory area whose policy is sought
1474  * @addr = address in @vma for shared policy lookup and interleave policy
1475  * @gfp_flags = for requested zone
1476  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1477  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1478  *
1479  * Returns a zonelist suitable for a huge page allocation and a pointer
1480  * to the struct mempolicy for conditional unref after allocation.
1481  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1482  * @nodemask for filtering the zonelist.
1483  */
1484 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1485 				gfp_t gfp_flags, struct mempolicy **mpol,
1486 				nodemask_t **nodemask)
1487 {
1488 	struct zonelist *zl;
1489 
1490 	*mpol = get_vma_policy(current, vma, addr);
1491 	*nodemask = NULL;	/* assume !MPOL_BIND */
1492 
1493 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1494 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1495 				huge_page_shift(hstate_vma(vma))), gfp_flags);
1496 	} else {
1497 		zl = policy_zonelist(gfp_flags, *mpol);
1498 		if ((*mpol)->mode == MPOL_BIND)
1499 			*nodemask = &(*mpol)->v.nodes;
1500 	}
1501 	return zl;
1502 }
1503 #endif
1504 
1505 /* Allocate a page in interleaved policy.
1506    Own path because it needs to do special accounting. */
1507 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1508 					unsigned nid)
1509 {
1510 	struct zonelist *zl;
1511 	struct page *page;
1512 
1513 	zl = node_zonelist(nid, gfp);
1514 	page = __alloc_pages(gfp, order, zl);
1515 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1516 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1517 	return page;
1518 }
1519 
1520 /**
1521  * 	alloc_page_vma	- Allocate a page for a VMA.
1522  *
1523  * 	@gfp:
1524  *      %GFP_USER    user allocation.
1525  *      %GFP_KERNEL  kernel allocations,
1526  *      %GFP_HIGHMEM highmem/user allocations,
1527  *      %GFP_FS      allocation should not call back into a file system.
1528  *      %GFP_ATOMIC  don't sleep.
1529  *
1530  * 	@vma:  Pointer to VMA or NULL if not available.
1531  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1532  *
1533  * 	This function allocates a page from the kernel page pool and applies
1534  *	a NUMA policy associated with the VMA or the current process.
1535  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
1536  *	mm_struct of the VMA to prevent it from going away. Should be used for
1537  *	all allocations for pages that will be mapped into
1538  * 	user space. Returns NULL when no page can be allocated.
1539  *
1540  *	Should be called with the mmap_sem of the vma's mm held.
1541  */
1542 struct page *
1543 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1544 {
1545 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1546 	struct zonelist *zl;
1547 
1548 	cpuset_update_task_memory_state();
1549 
1550 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1551 		unsigned nid;
1552 
1553 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1554 		mpol_cond_put(pol);
1555 		return alloc_page_interleave(gfp, 0, nid);
1556 	}
1557 	zl = policy_zonelist(gfp, pol);
1558 	if (unlikely(mpol_needs_cond_ref(pol))) {
1559 		/*
1560 		 * slow path: ref counted shared policy
1561 		 */
1562 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
1563 						zl, policy_nodemask(gfp, pol));
1564 		__mpol_put(pol);
1565 		return page;
1566 	}
1567 	/*
1568 	 * fast path:  default or task policy
1569 	 */
1570 	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
1571 }
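
/*
 * Example call (sketch; new_vma_page() earlier in this file does the
 * same), from a context holding the vma's mmap_sem for read:
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		... fail or retry the fault ...
 */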
1572 
1573 /**
1574  * 	alloc_pages_current - Allocate pages.
1575  *
1576  *	@gfp:
1577  *		%GFP_USER   user allocation,
1578  *      	%GFP_KERNEL kernel allocation,
1579  *      	%GFP_HIGHMEM highmem allocation,
1580  *      	%GFP_FS     don't call back into a file system.
1581  *      	%GFP_ATOMIC don't sleep.
1582  *	@order: Power of two of allocation size in pages. 0 is a single page.
1583  *
1584  *	Allocate a page from the kernel page pool.  When not in
1585  *	interrupt context, apply the current process' NUMA policy.
1586  *	Returns NULL when no page can be allocated.
1587  *
1588  *	Don't call cpuset_update_task_memory_state() unless
1589  *	1) it's ok to take cpuset_sem (can WAIT), and
1590  *	2) allocating for current task (not interrupt).
1591  */
1592 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1593 {
1594 	struct mempolicy *pol = current->mempolicy;
1595 
1596 	if ((gfp & __GFP_WAIT) && !in_interrupt())
1597 		cpuset_update_task_memory_state();
1598 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1599 		pol = &default_policy;
1600 
1601 	/*
1602 	 * No reference counting needed for current->mempolicy
1603 	 * nor system default_policy
1604 	 */
1605 	if (pol->mode == MPOL_INTERLEAVE)
1606 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1607 	return __alloc_pages_nodemask(gfp, order,
1608 			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1609 }
1610 EXPORT_SYMBOL(alloc_pages_current);
1611 
1612 /*
1613  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1614  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1615  * with the mems_allowed returned by cpuset_mems_allowed().  This
1616  * keeps mempolicies cpuset relative after its cpuset moves.  See
1617  * further kernel/cpuset.c update_nodemask().
1618  */
1619 
1620 /* Slow path of a mempolicy duplicate */
1621 struct mempolicy *__mpol_dup(struct mempolicy *old)
1622 {
1623 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1624 
1625 	if (!new)
1626 		return ERR_PTR(-ENOMEM);
1627 	if (current_cpuset_is_being_rebound()) {
1628 		nodemask_t mems = cpuset_mems_allowed(current);
1629 		mpol_rebind_policy(old, &mems);
1630 	}
1631 	*new = *old;
1632 	atomic_set(&new->refcnt, 1);
1633 	return new;
1634 }
1635 
1636 /*
1637  * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
1638  * eliminate the MPOL_F_* flags that require conditional ref and
1639  * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
1640  * after return.  Use the returned value.
1641  *
1642  * Allows use of a mempolicy for, e.g., multiple allocations with a single
1643  * policy lookup, even if the policy needs/has extra ref on lookup.
1644  * shmem_readahead needs this.
1645  */
1646 struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1647 						struct mempolicy *frompol)
1648 {
1649 	if (!mpol_needs_cond_ref(frompol))
1650 		return frompol;
1651 
1652 	*tompol = *frompol;
1653 	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
1654 	__mpol_put(frompol);
1655 	return tompol;
1656 }
1657 
1658 static int mpol_match_intent(const struct mempolicy *a,
1659 			     const struct mempolicy *b)
1660 {
1661 	if (a->flags != b->flags)
1662 		return 0;
1663 	if (!mpol_store_user_nodemask(a))
1664 		return 1;
1665 	return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1666 }
1667 
1668 /* Slow path of a mempolicy comparison */
1669 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1670 {
1671 	if (!a || !b)
1672 		return 0;
1673 	if (a->mode != b->mode)
1674 		return 0;
1675 	if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
1676 		return 0;
1677 	switch (a->mode) {
1678 	case MPOL_BIND:
1679 		/* Fall through */
1680 	case MPOL_INTERLEAVE:
1681 		return nodes_equal(a->v.nodes, b->v.nodes);
1682 	case MPOL_PREFERRED:
1683 		return a->v.preferred_node == b->v.preferred_node &&
1684 			a->flags == b->flags;
1685 	default:
1686 		BUG();
1687 		return 0;
1688 	}
1689 }
1690 
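/*
 * Illustrative sketch (not part of this file): callers normally go
 * through the mpol_equal() fast-path wrapper; VMA merging is a typical
 * user, refusing to merge areas whose policies differ:
 *
 *	if (!mpol_equal(vma_policy(prev), vma_policy(next)))
 *		return 0;	(cannot merge across a policy change)
 */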
1691 /*
1692  * Shared memory backing store policy support.
1693  *
1694  * Remember policies even when nobody has shared memory mapped.
1695  * The policies are kept in Red-Black tree linked from the inode.
1696  * They are protected by the sp->lock spinlock, which should be held
1697  * for any accesses to the tree.
1698  */
1699 
1700 /* lookup first element intersecting start-end */
1701 /* Caller holds sp->lock */
1702 static struct sp_node *
1703 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1704 {
1705 	struct rb_node *n = sp->root.rb_node;
1706 
1707 	while (n) {
1708 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
1709 
1710 		if (start >= p->end)
1711 			n = n->rb_right;
1712 		else if (end <= p->start)
1713 			n = n->rb_left;
1714 		else
1715 			break;
1716 	}
1717 	if (!n)
1718 		return NULL;
1719 	for (;;) {
1720 		struct sp_node *w = NULL;
1721 		struct rb_node *prev = rb_prev(n);
1722 		if (!prev)
1723 			break;
1724 		w = rb_entry(prev, struct sp_node, nd);
1725 		if (w->end <= start)
1726 			break;
1727 		n = prev;
1728 	}
1729 	return rb_entry(n, struct sp_node, nd);
1730 }
1731 
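/*
 * Illustrative example: with nodes covering [2, 4), [4, 8) and [8, 12)
 * in the tree, sp_lookup(sp, 3, 9) may first hit [4, 8) on the way down;
 * the backward walk then returns [2, 4), the lowest node intersecting
 * the requested range [3, 9).
 */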
1732 /* Insert a new shared policy into the list. */
1733 /* Caller holds sp->lock */
1734 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1735 {
1736 	struct rb_node **p = &sp->root.rb_node;
1737 	struct rb_node *parent = NULL;
1738 	struct sp_node *nd;
1739 
1740 	while (*p) {
1741 		parent = *p;
1742 		nd = rb_entry(parent, struct sp_node, nd);
1743 		if (new->start < nd->start)
1744 			p = &(*p)->rb_left;
1745 		else if (new->end > nd->end)
1746 			p = &(*p)->rb_right;
1747 		else
1748 			BUG();
1749 	}
1750 	rb_link_node(&new->nd, parent, p);
1751 	rb_insert_color(&new->nd, &sp->root);
1752 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1753 		 new->policy ? new->policy->mode : 0);
1754 }
1755 
1756 /* Find shared policy intersecting idx */
1757 struct mempolicy *
1758 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1759 {
1760 	struct mempolicy *pol = NULL;
1761 	struct sp_node *sn;
1762 
1763 	if (!sp->root.rb_node)
1764 		return NULL;
1765 	spin_lock(&sp->lock);
1766 	sn = sp_lookup(sp, idx, idx+1);
1767 	if (sn) {
1768 		mpol_get(sn->policy);
1769 		pol = sn->policy;
1770 	}
1771 	spin_unlock(&sp->lock);
1772 	return pol;
1773 }
1774 
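/*
 * Illustrative sketch (hypothetical callback; the my_get_policy() and
 * my_inode_policy() names are assumptions, modeled on what a tmpfs-like
 * filesystem does in its vm_operations_struct ->get_policy hook): map the
 * faulting address to a file index and consult the inode's shared tree:
 *
 *	static struct mempolicy *my_get_policy(struct vm_area_struct *vma,
 *					       unsigned long addr)
 *	{
 *		unsigned long idx = ((addr - vma->vm_start) >> PAGE_SHIFT)
 *					+ vma->vm_pgoff;
 *		return mpol_shared_policy_lookup(my_inode_policy(vma), idx);
 *	}
 */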
1775 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1776 {
1777 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
1778 	rb_erase(&n->nd, &sp->root);
1779 	mpol_put(n->policy);
1780 	kmem_cache_free(sn_cache, n);
1781 }
1782 
1783 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1784 				struct mempolicy *pol)
1785 {
1786 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1787 
1788 	if (!n)
1789 		return NULL;
1790 	n->start = start;
1791 	n->end = end;
1792 	mpol_get(pol);
1793 	pol->flags |= MPOL_F_SHARED;	/* for unref */
1794 	n->policy = pol;
1795 	return n;
1796 }
1797 
1798 /* Replace a policy range. */
1799 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1800 				 unsigned long end, struct sp_node *new)
1801 {
1802 	struct sp_node *n, *new2 = NULL;
1803 
1804 restart:
1805 	spin_lock(&sp->lock);
1806 	n = sp_lookup(sp, start, end);
1807 	/* Take care of old policies in the same range. */
1808 	while (n && n->start < end) {
1809 		struct rb_node *next = rb_next(&n->nd);
1810 		if (n->start >= start) {
1811 			if (n->end <= end)
1812 				sp_delete(sp, n);
1813 			else
1814 				n->start = end;
1815 		} else {
1816 			/* Old policy spanning whole new range. */
1817 			if (n->end > end) {
1818 				if (!new2) {
1819 					spin_unlock(&sp->lock);
1820 					new2 = sp_alloc(end, n->end, n->policy);
1821 					if (!new2)
1822 						return -ENOMEM;
1823 					goto restart;
1824 				}
1825 				n->end = start;
1826 				sp_insert(sp, new2);
1827 				new2 = NULL;
1828 				break;
1829 			} else
1830 				n->end = start;
1831 		}
1832 		if (!next)
1833 			break;
1834 		n = rb_entry(next, struct sp_node, nd);
1835 	}
1836 	if (new)
1837 		sp_insert(sp, new);
1838 	spin_unlock(&sp->lock);
1839 	if (new2) {
1840 		mpol_put(new2->policy);
1841 		kmem_cache_free(sn_cache, new2);
1842 	}
1843 	return 0;
1844 }
1845 
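/*
 * Worked example (illustrative): with a single old policy A covering
 * file pages [0, 16) in the tree, replacing the range [4, 8) with a new
 * policy B leaves three nodes behind:
 *
 *	[0, 4)  -> A	(old node, its end trimmed to the new start)
 *	[4, 8)  -> B	(the inserted node)
 *	[8, 16) -> A	(new2, allocated above to carry the old tail)
 */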
1846 /**
1847  * mpol_shared_policy_init - initialize shared policy for inode
1848  * @sp: pointer to inode shared policy
1849  * @mpol:  struct mempolicy to install
1850  *
1851  * Install non-NULL @mpol in inode's shared policy rb-tree.
1852  * On entry, the current task has a reference on a non-NULL @mpol.
1853  * This must be released on exit.
1854  */
1855 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1856 {
1857 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
1858 	spin_lock_init(&sp->lock);
1859 
1860 	if (mpol) {
1861 		struct vm_area_struct pvma;
1862 		struct mempolicy *new;
1863 
1864 		/* contextualize the tmpfs mount point mempolicy */
1865 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
1866 		mpol_put(mpol);	/* drop our ref on sb mpol */
1867 		if (IS_ERR(new))
1868 			return;		/* no valid nodemask intersection */
1869 
1870 		/* Create pseudo-vma that contains just the policy */
1871 		memset(&pvma, 0, sizeof(struct vm_area_struct));
1872 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
1873 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
1874 		mpol_put(new);			/* drop initial ref */
1875 	}
1876 }
1877 
1878 int mpol_set_shared_policy(struct shared_policy *info,
1879 			struct vm_area_struct *vma, struct mempolicy *npol)
1880 {
1881 	int err;
1882 	struct sp_node *new = NULL;
1883 	unsigned long sz = vma_pages(vma);
1884 
1885 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1886 		 vma->vm_pgoff,
1887 		 sz, npol ? npol->mode : -1,
1888 		 npol ? npol->flags : -1,
1889 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1890 
1891 	if (npol) {
1892 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1893 		if (!new)
1894 			return -ENOMEM;
1895 	}
1896 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1897 	if (err && new)
1898 		kmem_cache_free(sn_cache, new);
1899 	return err;
1900 }
1901 
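/*
 * Illustrative sketch (hypothetical callback; my_set_policy() and
 * my_inode_policy() are assumed names, modeled on a tmpfs-like
 * ->set_policy hook): mbind() on a mapping of a shared object reaches
 * the backing store like this:
 *
 *	static int my_set_policy(struct vm_area_struct *vma,
 *				 struct mempolicy *mpol)
 *	{
 *		return mpol_set_shared_policy(my_inode_policy(vma), vma, mpol);
 *	}
 */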
1902 /* Free a backing policy store on inode delete. */
1903 void mpol_free_shared_policy(struct shared_policy *p)
1904 {
1905 	struct sp_node *n;
1906 	struct rb_node *next;
1907 
1908 	if (!p->root.rb_node)
1909 		return;
1910 	spin_lock(&p->lock);
1911 	next = rb_first(&p->root);
1912 	while (next) {
1913 		n = rb_entry(next, struct sp_node, nd);
1914 		next = rb_next(&n->nd);
1915 		rb_erase(&n->nd, &p->root);
1916 		mpol_put(n->policy);
1917 		kmem_cache_free(sn_cache, n);
1918 	}
1919 	spin_unlock(&p->lock);
1920 }
1921 
1922 /* assumes fs == KERNEL_DS */
1923 void __init numa_policy_init(void)
1924 {
1925 	nodemask_t interleave_nodes;
1926 	unsigned long largest = 0;
1927 	int nid, prefer = 0;
1928 
1929 	policy_cache = kmem_cache_create("numa_policy",
1930 					 sizeof(struct mempolicy),
1931 					 0, SLAB_PANIC, NULL);
1932 
1933 	sn_cache = kmem_cache_create("shared_policy_node",
1934 				     sizeof(struct sp_node),
1935 				     0, SLAB_PANIC, NULL);
1936 
1937 	/*
1938 	 * Set interleaving policy for system init. Interleaving is only
1939 	 * enabled across suitably sized nodes (default is >= 16MB), or
1940 	 * fall back to the largest node if they're all smaller.
1941 	 */
1942 	nodes_clear(interleave_nodes);
1943 	for_each_node_state(nid, N_HIGH_MEMORY) {
1944 		unsigned long total_pages = node_present_pages(nid);
1945 
1946 		/* Preserve the largest node */
1947 		if (largest < total_pages) {
1948 			largest = total_pages;
1949 			prefer = nid;
1950 		}
1951 
1952 		/* Interleave this node? */
1953 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1954 			node_set(nid, interleave_nodes);
1955 	}
1956 
1957 	/* All too small, use the largest */
1958 	if (unlikely(nodes_empty(interleave_nodes)))
1959 		node_set(prefer, interleave_nodes);
1960 
1961 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
1962 		printk("numa_policy_init: interleaving failed\n");
1963 }
1964 
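/*
 * Illustrative arithmetic for the threshold used above:
 * (total_pages << PAGE_SHIFT) >= (16 << 20) requires at least 16MB of
 * present memory for a node to join the init-time interleave set; with
 * 4KB pages (PAGE_SHIFT == 12) that is 16MB / 4KB = 4096 pages.
 */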
1965 /* Reset policy of current process to default */
1966 void numa_default_policy(void)
1967 {
1968 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1969 }
1970 
1971 /*
1972  * Parse and format mempolicy from/to strings
1973  */
1974 
1975 /*
1976  * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
1977  * Used only for mpol_parse_str() and mpol_to_str()
1978  */
1979 #define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
1980 static const char * const policy_types[] =
1981 	{ "default", "prefer", "bind", "interleave", "local" };
1982 
1983 
1984 #ifdef CONFIG_TMPFS
1985 /**
1986  * mpol_parse_str - parse string to mempolicy
1987  * @str:  string containing mempolicy to parse
1988  * @mpol:  pointer to struct mempolicy pointer, returned on success.
1989  * @no_context:  flag whether to "contextualize" the mempolicy
1990  *
1991  * Format of input:
1992  *	<mode>[=<flags>][:<nodelist>]
1993  *
1994  * if @no_context is true, save the input nodemask in w.user_nodemask in
1995  * the returned mempolicy.  This will be used to "clone" the mempolicy in
1996  * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
1997  * mount option.  Note that if 'static' or 'relative' mode flags were
1998  * specified, the input nodemask will already have been saved.  Saving
1999  * it again is redundant, but safe.
2000  *
2001  * On success, returns 0, else 1
2002  */
2003 int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2004 {
2005 	struct mempolicy *new = NULL;
2006 	unsigned short uninitialized_var(mode);
2007 	unsigned short uninitialized_var(mode_flags);
2008 	nodemask_t nodes;
2009 	char *nodelist = strchr(str, ':');
2010 	char *flags = strchr(str, '=');
2011 	int i;
2012 	int err = 1;
2013 
2014 	if (nodelist) {
2015 		/* NUL-terminate mode or flags string */
2016 		*nodelist++ = '\0';
2017 		if (nodelist_parse(nodelist, nodes))
2018 			goto out;
2019 		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2020 			goto out;
2021 	} else
2022 		nodes_clear(nodes);
2023 
2024 	if (flags)
2025 		*flags++ = '\0';	/* terminate mode string */
2026 
2027 	for (i = 0; i <= MPOL_LOCAL; i++) {
2028 		if (!strcmp(str, policy_types[i])) {
2029 			mode = i;
2030 			break;
2031 		}
2032 	}
2033 	if (i > MPOL_LOCAL)
2034 		goto out;
2035 
2036 	switch (mode) {
2037 	case MPOL_PREFERRED:
2038 		/*
2039 		 * Insist on a nodelist of one node only
2040 		 */
2041 		if (nodelist) {
2042 			char *rest = nodelist;
2043 			while (isdigit(*rest))
2044 				rest++;
2045 			if (!*rest)
2046 				err = 0;
2047 		}
2048 		break;
2049 	case MPOL_INTERLEAVE:
2050 		/*
2051 		 * Default to online nodes with memory if no nodelist
2052 		 */
2053 		if (!nodelist)
2054 			nodes = node_states[N_HIGH_MEMORY];
2055 		err = 0;
2056 		break;
2057 	case MPOL_LOCAL:
2058 		/*
2059 		 * Don't allow a nodelist;  mpol_new() checks flags
2060 		 */
2061 		if (nodelist)
2062 			goto out;
2063 		mode = MPOL_PREFERRED;
2064 		break;
2065 
2066 	/*
2067 	 * case MPOL_BIND:    mpol_new() enforces non-empty nodemask.
2068 	 * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
2069 	 */
2070 	}
2071 
2072 	mode_flags = 0;
2073 	if (flags) {
2074 		/*
2075 		 * Currently, we only support two mutually exclusive
2076 		 * mode flags.
2077 		 */
2078 		if (!strcmp(flags, "static"))
2079 			mode_flags |= MPOL_F_STATIC_NODES;
2080 		else if (!strcmp(flags, "relative"))
2081 			mode_flags |= MPOL_F_RELATIVE_NODES;
2082 		else
2083 			err = 1;
2084 	}
2085 
2086 	new = mpol_new(mode, mode_flags, &nodes);
2087 	if (IS_ERR(new))
2088 		err = 1;
2089 	else if (no_context)
2090 		new->w.user_nodemask = nodes;	/* save for contextualization */
2091 
2092 out:
2093 	/* Restore string for error message */
2094 	if (nodelist)
2095 		*--nodelist = ':';
2096 	if (flags)
2097 		*--flags = '=';
2098 	if (!err)
2099 		*mpol = new;
2100 	return err;
2101 }
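
/*
 * Example inputs (illustrative; this is the tmpfs "mpol=" mount option
 * format described above):
 *
 *	"default"		-> MPOL_DEFAULT
 *	"prefer:2"		-> MPOL_PREFERRED, node 2
 *	"bind=static:0-3"	-> MPOL_BIND over nodes 0-3, static nodemask
 *	"interleave:0-3"	-> MPOL_INTERLEAVE over nodes 0-3
 *	"interleave"		-> interleave over all nodes with memory
 *	"local"			-> MPOL_PREFERRED on the local node
 */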
2102 #endif /* CONFIG_TMPFS */
2103 
2104 /**
2105  * mpol_to_str - format a mempolicy structure for printing
2106  * @buffer:  to contain formatted mempolicy string
2107  * @maxlen:  length of @buffer
2108  * @pol:  pointer to mempolicy to be formatted
2109  * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2110  *
2111  * Convert a mempolicy into a string.
2112  * Returns the number of characters in buffer (if positive)
2113  * or an error (negative)
2114  */
2115 int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2116 {
2117 	char *p = buffer;
2118 	int l;
2119 	nodemask_t nodes;
2120 	unsigned short mode;
2121 	unsigned short flags = pol ? pol->flags : 0;
2122 
2123 	/*
2124 	 * Sanity check:  room for longest mode, flag and some nodes
2125 	 */
2126 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2127 
2128 	if (!pol || pol == &default_policy)
2129 		mode = MPOL_DEFAULT;
2130 	else
2131 		mode = pol->mode;
2132 
2133 	switch (mode) {
2134 	case MPOL_DEFAULT:
2135 		nodes_clear(nodes);
2136 		break;
2137 
2138 	case MPOL_PREFERRED:
2139 		nodes_clear(nodes);
2140 		if (flags & MPOL_F_LOCAL)
2141 			mode = MPOL_LOCAL;	/* pseudo-policy */
2142 		else
2143 			node_set(pol->v.preferred_node, nodes);
2144 		break;
2145 
2146 	case MPOL_BIND:
2147 		/* Fall through */
2148 	case MPOL_INTERLEAVE:
2149 		if (no_context)
2150 			nodes = pol->w.user_nodemask;
2151 		else
2152 			nodes = pol->v.nodes;
2153 		break;
2154 
2155 	default:
2156 		BUG();
2157 	}
2158 
2159 	l = strlen(policy_types[mode]);
2160 	if (buffer + maxlen < p + l + 1)
2161 		return -ENOSPC;
2162 
2163 	strcpy(p, policy_types[mode]);
2164 	p += l;
2165 
2166 	if (flags & MPOL_MODE_FLAGS) {
2167 		if (buffer + maxlen < p + 2)
2168 			return -ENOSPC;
2169 		*p++ = '=';
2170 
2171 		/*
2172 		 * Currently, the only defined flags are mutually exclusive
2173 		 */
2174 		if (flags & MPOL_F_STATIC_NODES)
2175 			p += snprintf(p, buffer + maxlen - p, "static");
2176 		else if (flags & MPOL_F_RELATIVE_NODES)
2177 			p += snprintf(p, buffer + maxlen - p, "relative");
2178 	}
2179 
2180 	if (!nodes_empty(nodes)) {
2181 		if (buffer + maxlen < p + 2)
2182 			return -ENOSPC;
2183 		*p++ = ':';
2184 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2185 	}
2186 	return p - buffer;
2187 }
2188 
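/*
 * Example outputs (illustrative), mirroring the mpol_parse_str() format:
 *
 *	MPOL_DEFAULT				-> "default"
 *	MPOL_PREFERRED, node 2			-> "prefer:2"
 *	MPOL_PREFERRED with MPOL_F_LOCAL	-> "local"
 *	MPOL_BIND, nodes 0-3, static flag	-> "bind=static:0-3"
 *	MPOL_INTERLEAVE, nodes 0-3		-> "interleave:0-3"
 */
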
2189 struct numa_maps {
2190 	unsigned long pages;
2191 	unsigned long anon;
2192 	unsigned long active;
2193 	unsigned long writeback;
2194 	unsigned long mapcount_max;
2195 	unsigned long dirty;
2196 	unsigned long swapcache;
2197 	unsigned long node[MAX_NUMNODES];
2198 };
2199 
2200 static void gather_stats(struct page *page, void *private, int pte_dirty)
2201 {
2202 	struct numa_maps *md = private;
2203 	int count = page_mapcount(page);
2204 
2205 	md->pages++;
2206 	if (pte_dirty || PageDirty(page))
2207 		md->dirty++;
2208 
2209 	if (PageSwapCache(page))
2210 		md->swapcache++;
2211 
2212 	if (PageActive(page) || PageUnevictable(page))
2213 		md->active++;
2214 
2215 	if (PageWriteback(page))
2216 		md->writeback++;
2217 
2218 	if (PageAnon(page))
2219 		md->anon++;
2220 
2221 	if (count > md->mapcount_max)
2222 		md->mapcount_max = count;
2223 
2224 	md->node[page_to_nid(page)]++;
2225 }
2226 
2227 #ifdef CONFIG_HUGETLB_PAGE
2228 static void check_huge_range(struct vm_area_struct *vma,
2229 		unsigned long start, unsigned long end,
2230 		struct numa_maps *md)
2231 {
2232 	unsigned long addr;
2233 	struct page *page;
2234 	struct hstate *h = hstate_vma(vma);
2235 	unsigned long sz = huge_page_size(h);
2236 
2237 	for (addr = start; addr < end; addr += sz) {
2238 		pte_t *ptep = huge_pte_offset(vma->vm_mm,
2239 						addr & huge_page_mask(h));
2240 		pte_t pte;
2241 
2242 		if (!ptep)
2243 			continue;
2244 
2245 		pte = *ptep;
2246 		if (pte_none(pte))
2247 			continue;
2248 
2249 		page = pte_page(pte);
2250 		if (!page)
2251 			continue;
2252 
2253 		gather_stats(page, md, pte_dirty(*ptep));
2254 	}
2255 }
2256 #else
2257 static inline void check_huge_range(struct vm_area_struct *vma,
2258 		unsigned long start, unsigned long end,
2259 		struct numa_maps *md)
2260 {
2261 }
2262 #endif
2263 
2264 /*
2265  * Display pages allocated per node and memory policy via /proc.
2266  */
2267 int show_numa_map(struct seq_file *m, void *v)
2268 {
2269 	struct proc_maps_private *priv = m->private;
2270 	struct vm_area_struct *vma = v;
2271 	struct numa_maps *md;
2272 	struct file *file = vma->vm_file;
2273 	struct mm_struct *mm = vma->vm_mm;
2274 	struct mempolicy *pol;
2275 	int n;
2276 	char buffer[50];
2277 
2278 	if (!mm)
2279 		return 0;
2280 
2281 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2282 	if (!md)
2283 		return 0;
2284 
2285 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
2286 	mpol_to_str(buffer, sizeof(buffer), pol, 0);
2287 	mpol_cond_put(pol);
2288 
2289 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2290 
2291 	if (file) {
2292 		seq_printf(m, " file=");
2293 		seq_path(m, &file->f_path, "\n\t= ");
2294 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2295 		seq_printf(m, " heap");
2296 	} else if (vma->vm_start <= mm->start_stack &&
2297 			vma->vm_end >= mm->start_stack) {
2298 		seq_printf(m, " stack");
2299 	}
2300 
2301 	if (is_vm_hugetlb_page(vma)) {
2302 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2303 		seq_printf(m, " huge");
2304 	} else {
2305 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
2306 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2307 	}
2308 
2309 	if (!md->pages)
2310 		goto out;
2311 
2312 	if (md->anon)
2313 		seq_printf(m, " anon=%lu", md->anon);
2314 
2315 	if (md->dirty)
2316 		seq_printf(m, " dirty=%lu", md->dirty);
2317 
2318 	if (md->pages != md->anon && md->pages != md->dirty)
2319 		seq_printf(m, " mapped=%lu", md->pages);
2320 
2321 	if (md->mapcount_max > 1)
2322 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2323 
2324 	if (md->swapcache)
2325 		seq_printf(m, " swapcache=%lu", md->swapcache);
2326 
2327 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2328 		seq_printf(m, " active=%lu", md->active);
2329 
2330 	if (md->writeback)
2331 		seq_printf(m, " writeback=%lu", md->writeback);
2332 
2333 	for_each_node_state(n, N_HIGH_MEMORY)
2334 		if (md->node[n])
2335 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2336 out:
2337 	seq_putc(m, '\n');
2338 	kfree(md);
2339 
2340 	if (m->count < m->size)
2341 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2342 	return 0;
2343 }
2344
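/*
 * Example /proc/<pid>/numa_maps line as emitted above (values are
 * illustrative): start address, the policy string from mpol_to_str(),
 * then only the non-zero counters gathered by gather_stats():
 *
 *	7f2a40000000 interleave:0-3 anon=512 dirty=512 active=384 N0=128 N1=128 N2=128 N3=128
 */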