1 /*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints about which node(s) memory
9 * should be allocated on.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind truly restricted
26 * the allocation to the specified memory nodes instead.
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
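/*
 * The block below is a minimal userspace sketch (not kernel code) of how the
 * policies described above are typically requested through the
 * set_mempolicy(2) and mbind(2) system calls implemented later in this file.
 * It assumes the <numaif.h> wrappers from libnuma and an already mmap()ed
 * region "buf" of "len" bytes; error handling is omitted.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *	unsigned long node0   = 1UL << 0;
 *
 *	// process policy: interleave new allocations across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, 8 * sizeof(nodes01));
 *
 *	// VMA policy: bind a mapped range to node 0, failing hard on violation
 *	mbind(buf, len, MPOL_BIND, &node0, 8 * sizeof(node0), MPOL_MF_STRICT);
 *
 *	// back to the default (local) policy for the process
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */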
55
56 /* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66 */
67
68 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
70 #include <linux/mempolicy.h>
71 #include <linux/mm.h>
72 #include <linux/highmem.h>
73 #include <linux/hugetlb.h>
74 #include <linux/kernel.h>
75 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/slab.h>
79 #include <linux/string.h>
80 #include <linux/export.h>
81 #include <linux/nsproxy.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
85 #include <linux/swap.h>
86 #include <linux/seq_file.h>
87 #include <linux/proc_fs.h>
88 #include <linux/migrate.h>
89 #include <linux/ksm.h>
90 #include <linux/rmap.h>
91 #include <linux/security.h>
92 #include <linux/syscalls.h>
93 #include <linux/ctype.h>
94 #include <linux/mm_inline.h>
95 #include <linux/mmu_notifier.h>
96 #include <linux/printk.h>
97
98 #include <asm/tlbflush.h>
99 #include <asm/uaccess.h>
100 #include <linux/random.h>
101
102 #include "internal.h"
103
104 /* Internal flags */
105 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
106 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
107
108 static struct kmem_cache *policy_cache;
109 static struct kmem_cache *sn_cache;
110
111 /* Highest zone. A specific allocation for a zone below that is not
112 policied. */
113 enum zone_type policy_zone = 0;
114
115 /*
116 * run-time system-wide default policy => local allocation
117 */
118 static struct mempolicy default_policy = {
119 .refcnt = ATOMIC_INIT(1), /* never free it */
120 .mode = MPOL_PREFERRED,
121 .flags = MPOL_F_LOCAL,
122 };
123
124 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
125
126 struct mempolicy *get_task_policy(struct task_struct *p)
127 {
128 struct mempolicy *pol = p->mempolicy;
129 int node;
130
131 if (pol)
132 return pol;
133
134 node = numa_node_id();
135 if (node != NUMA_NO_NODE) {
136 pol = &preferred_node_policy[node];
137 /* preferred_node_policy is not initialised early in boot */
138 if (pol->mode)
139 return pol;
140 }
141
142 return &default_policy;
143 }
144
145 static const struct mempolicy_operations {
146 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
147 /*
148 * If the read-side task has no lock to protect task->mempolicy, the
149 * write-side task will rebind task->mempolicy in two steps. The first
150 * step sets all the newly allowed nodes, and the second step clears all
151 * the disallowed nodes. This way we avoid ever being left with no node
152 * from which to allocate a page.
153 * If a lock protects task->mempolicy on the read side, we rebind
154 * directly.
155 *
156 * step:
157 * MPOL_REBIND_ONCE - do rebind work at once
158 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
159 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
160 */
161 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
162 enum mpol_rebind_step step);
163 } mpol_ops[MPOL_MAX];
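/*
 * Worked example of the two-step rebind (a sketch; see mpol_rebind_nodemask()
 * below): an MPOL_BIND policy over nodes {0,1} whose cpuset is changed to
 * {2,3}. MPOL_REBIND_STEP1 remaps {0,1} to {2,3} and ORs it in, leaving the
 * policy temporarily on {0,1,2,3} so concurrent readers always see some
 * allowed node; MPOL_REBIND_STEP2 then drops the disallowed nodes, leaving
 * {2,3}.
 */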
164
165 /* Check that the nodemask contains at least one populated zone */
166 static int is_valid_nodemask(const nodemask_t *nodemask)
167 {
168 return nodes_intersects(*nodemask, node_states[N_MEMORY]);
169 }
170
171 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
172 {
173 return pol->flags & MPOL_MODE_FLAGS;
174 }
175
176 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
177 const nodemask_t *rel)
178 {
179 nodemask_t tmp;
180 nodes_fold(tmp, *orig, nodes_weight(*rel));
181 nodes_onto(*ret, tmp, *rel);
182 }
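/*
 * Worked example for the helper above (a sketch of the MPOL_F_RELATIVE_NODES
 * remapping): a user-supplied relative mask of {0,2} with an allowed set of
 * {4,5,6} is first folded onto the 3 allowed bits by nodes_fold(), staying
 * {0,2}, and then mapped onto the allowed set by nodes_onto(), yielding
 * {4,6}.
 */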
183
184 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
185 {
186 if (nodes_empty(*nodes))
187 return -EINVAL;
188 pol->v.nodes = *nodes;
189 return 0;
190 }
191
192 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
193 {
194 if (!nodes)
195 pol->flags |= MPOL_F_LOCAL; /* local allocation */
196 else if (nodes_empty(*nodes))
197 return -EINVAL; /* no allowed nodes */
198 else
199 pol->v.preferred_node = first_node(*nodes);
200 return 0;
201 }
202
203 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
204 {
205 if (!is_valid_nodemask(nodes))
206 return -EINVAL;
207 pol->v.nodes = *nodes;
208 return 0;
209 }
210
211 /*
212 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
213 * any, for the new policy. mpol_new() has already validated the nodes
214 * parameter with respect to the policy mode and flags. But, we need to
215 * handle an empty nodemask with MPOL_PREFERRED here.
216 *
217 * Must be called holding task's alloc_lock to protect task's mems_allowed
218 * and mempolicy. May also be called holding the mmap_semaphore for write.
219 */
220 static int mpol_set_nodemask(struct mempolicy *pol,
221 const nodemask_t *nodes, struct nodemask_scratch *nsc)
222 {
223 int ret;
224
225 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
226 if (pol == NULL)
227 return 0;
228 /* Check N_MEMORY */
229 nodes_and(nsc->mask1,
230 cpuset_current_mems_allowed, node_states[N_MEMORY]);
231
232 VM_BUG_ON(!nodes);
233 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
234 nodes = NULL; /* explicit local allocation */
235 else {
236 if (pol->flags & MPOL_F_RELATIVE_NODES)
237 mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
238 else
239 nodes_and(nsc->mask2, *nodes, nsc->mask1);
240
241 if (mpol_store_user_nodemask(pol))
242 pol->w.user_nodemask = *nodes;
243 else
244 pol->w.cpuset_mems_allowed =
245 cpuset_current_mems_allowed;
246 }
247
248 if (nodes)
249 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
250 else
251 ret = mpol_ops[pol->mode].create(pol, NULL);
252 return ret;
253 }
254
255 /*
256 * This function just creates a new policy, does some checks and simple
257 * initialization. You must invoke mpol_set_nodemask() to set nodes.
258 */
259 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
260 nodemask_t *nodes)
261 {
262 struct mempolicy *policy;
263
264 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
265 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
266
267 if (mode == MPOL_DEFAULT) {
268 if (nodes && !nodes_empty(*nodes))
269 return ERR_PTR(-EINVAL);
270 return NULL;
271 }
272 VM_BUG_ON(!nodes);
273
274 /*
275 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
276 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
277 * All other modes require a valid pointer to a non-empty nodemask.
278 */
279 if (mode == MPOL_PREFERRED) {
280 if (nodes_empty(*nodes)) {
281 if (((flags & MPOL_F_STATIC_NODES) ||
282 (flags & MPOL_F_RELATIVE_NODES)))
283 return ERR_PTR(-EINVAL);
284 }
285 } else if (mode == MPOL_LOCAL) {
286 if (!nodes_empty(*nodes))
287 return ERR_PTR(-EINVAL);
288 mode = MPOL_PREFERRED;
289 } else if (nodes_empty(*nodes))
290 return ERR_PTR(-EINVAL);
291 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
292 if (!policy)
293 return ERR_PTR(-ENOMEM);
294 atomic_set(&policy->refcnt, 1);
295 policy->mode = mode;
296 policy->flags = flags;
297
298 return policy;
299 }
300
301 /* Slow path of a mpol destructor. */
302 void __mpol_put(struct mempolicy *p)
303 {
304 if (!atomic_dec_and_test(&p->refcnt))
305 return;
306 kmem_cache_free(policy_cache, p);
307 }
308
309 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
310 enum mpol_rebind_step step)
311 {
312 }
313
314 /*
315 * step:
316 * MPOL_REBIND_ONCE - do rebind work at once
317 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
318 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
319 */
320 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
321 enum mpol_rebind_step step)
322 {
323 nodemask_t tmp;
324
325 if (pol->flags & MPOL_F_STATIC_NODES)
326 nodes_and(tmp, pol->w.user_nodemask, *nodes);
327 else if (pol->flags & MPOL_F_RELATIVE_NODES)
328 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
329 else {
330 /*
331 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
332 * result
333 */
334 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
335 nodes_remap(tmp, pol->v.nodes,
336 pol->w.cpuset_mems_allowed, *nodes);
337 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
338 } else if (step == MPOL_REBIND_STEP2) {
339 tmp = pol->w.cpuset_mems_allowed;
340 pol->w.cpuset_mems_allowed = *nodes;
341 } else
342 BUG();
343 }
344
345 if (nodes_empty(tmp))
346 tmp = *nodes;
347
348 if (step == MPOL_REBIND_STEP1)
349 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
350 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
351 pol->v.nodes = tmp;
352 else
353 BUG();
354
355 if (!node_isset(current->il_next, tmp)) {
356 current->il_next = next_node(current->il_next, tmp);
357 if (current->il_next >= MAX_NUMNODES)
358 current->il_next = first_node(tmp);
359 if (current->il_next >= MAX_NUMNODES)
360 current->il_next = numa_node_id();
361 }
362 }
363
364 static void mpol_rebind_preferred(struct mempolicy *pol,
365 const nodemask_t *nodes,
366 enum mpol_rebind_step step)
367 {
368 nodemask_t tmp;
369
370 if (pol->flags & MPOL_F_STATIC_NODES) {
371 int node = first_node(pol->w.user_nodemask);
372
373 if (node_isset(node, *nodes)) {
374 pol->v.preferred_node = node;
375 pol->flags &= ~MPOL_F_LOCAL;
376 } else
377 pol->flags |= MPOL_F_LOCAL;
378 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
379 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
380 pol->v.preferred_node = first_node(tmp);
381 } else if (!(pol->flags & MPOL_F_LOCAL)) {
382 pol->v.preferred_node = node_remap(pol->v.preferred_node,
383 pol->w.cpuset_mems_allowed,
384 *nodes);
385 pol->w.cpuset_mems_allowed = *nodes;
386 }
387 }
388
389 /*
390 * mpol_rebind_policy - Migrate a policy to a different set of nodes
391 *
392 * If the read-side task has no lock to protect task->mempolicy, the
393 * write-side task will rebind task->mempolicy in two steps. The first
394 * step sets all the newly allowed nodes, and the second step clears all
395 * the disallowed nodes. This way we avoid ever being left with no node
396 * from which to allocate a page.
397 * If a lock protects task->mempolicy on the read side, we rebind
398 * directly.
399 *
400 * step:
401 * MPOL_REBIND_ONCE - do rebind work at once
402 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
403 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
404 */
405 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
406 enum mpol_rebind_step step)
407 {
408 if (!pol)
409 return;
410 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
411 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
412 return;
413
414 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
415 return;
416
417 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
418 BUG();
419
420 if (step == MPOL_REBIND_STEP1)
421 pol->flags |= MPOL_F_REBINDING;
422 else if (step == MPOL_REBIND_STEP2)
423 pol->flags &= ~MPOL_F_REBINDING;
424 else if (step >= MPOL_REBIND_NSTEP)
425 BUG();
426
427 mpol_ops[pol->mode].rebind(pol, newmask, step);
428 }
429
430 /*
431 * Wrapper for mpol_rebind_policy() that just requires task
432 * pointer, and updates task mempolicy.
433 *
434 * Called with task's alloc_lock held.
435 */
436
437 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
438 enum mpol_rebind_step step)
439 {
440 mpol_rebind_policy(tsk->mempolicy, new, step);
441 }
442
443 /*
444 * Rebind each vma in mm to new nodemask.
445 *
446 * Call holding a reference to mm. Takes mm->mmap_sem during call.
447 */
448
449 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
450 {
451 struct vm_area_struct *vma;
452
453 down_write(&mm->mmap_sem);
454 for (vma = mm->mmap; vma; vma = vma->vm_next)
455 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
456 up_write(&mm->mmap_sem);
457 }
458
459 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
460 [MPOL_DEFAULT] = {
461 .rebind = mpol_rebind_default,
462 },
463 [MPOL_INTERLEAVE] = {
464 .create = mpol_new_interleave,
465 .rebind = mpol_rebind_nodemask,
466 },
467 [MPOL_PREFERRED] = {
468 .create = mpol_new_preferred,
469 .rebind = mpol_rebind_preferred,
470 },
471 [MPOL_BIND] = {
472 .create = mpol_new_bind,
473 .rebind = mpol_rebind_nodemask,
474 },
475 };
476
477 static void migrate_page_add(struct page *page, struct list_head *pagelist,
478 unsigned long flags);
479
480 /*
481 * Scan through pages checking if pages follow certain conditions,
482 * and move them to the pagelist if they do.
483 */
484 static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
485 unsigned long addr, unsigned long end,
486 const nodemask_t *nodes, unsigned long flags,
487 void *private)
488 {
489 pte_t *orig_pte;
490 pte_t *pte;
491 spinlock_t *ptl;
492
493 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
494 do {
495 struct page *page;
496 int nid;
497
498 if (!pte_present(*pte))
499 continue;
500 page = vm_normal_page(vma, addr, *pte);
501 if (!page)
502 continue;
503 /*
504 * vm_normal_page() filters out zero pages, but there might
505 * still be PageReserved pages to skip, perhaps in a VDSO.
506 */
507 if (PageReserved(page))
508 continue;
509 nid = page_to_nid(page);
510 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
511 continue;
512
513 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
514 migrate_page_add(page, private, flags);
515 else
516 break;
517 } while (pte++, addr += PAGE_SIZE, addr != end);
518 pte_unmap_unlock(orig_pte, ptl);
519 return addr != end;
520 }
521
522 static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
523 pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
524 void *private)
525 {
526 #ifdef CONFIG_HUGETLB_PAGE
527 int nid;
528 struct page *page;
529 spinlock_t *ptl;
530 pte_t entry;
531
532 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
533 entry = huge_ptep_get((pte_t *)pmd);
534 if (!pte_present(entry))
535 goto unlock;
536 page = pte_page(entry);
537 nid = page_to_nid(page);
538 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
539 goto unlock;
540 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
541 if (flags & (MPOL_MF_MOVE_ALL) ||
542 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
543 isolate_huge_page(page, private);
544 unlock:
545 spin_unlock(ptl);
546 #else
547 BUG();
548 #endif
549 }
550
551 static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
552 unsigned long addr, unsigned long end,
553 const nodemask_t *nodes, unsigned long flags,
554 void *private)
555 {
556 pmd_t *pmd;
557 unsigned long next;
558
559 pmd = pmd_offset(pud, addr);
560 do {
561 next = pmd_addr_end(addr, end);
562 if (!pmd_present(*pmd))
563 continue;
564 if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
565 queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
566 flags, private);
567 continue;
568 }
569 split_huge_page_pmd(vma, addr, pmd);
570 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
571 continue;
572 if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
573 flags, private))
574 return -EIO;
575 } while (pmd++, addr = next, addr != end);
576 return 0;
577 }
578
579 static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
580 unsigned long addr, unsigned long end,
581 const nodemask_t *nodes, unsigned long flags,
582 void *private)
583 {
584 pud_t *pud;
585 unsigned long next;
586
587 pud = pud_offset(pgd, addr);
588 do {
589 next = pud_addr_end(addr, end);
590 if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
591 continue;
592 if (pud_none_or_clear_bad(pud))
593 continue;
594 if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
595 flags, private))
596 return -EIO;
597 } while (pud++, addr = next, addr != end);
598 return 0;
599 }
600
601 static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
602 unsigned long addr, unsigned long end,
603 const nodemask_t *nodes, unsigned long flags,
604 void *private)
605 {
606 pgd_t *pgd;
607 unsigned long next;
608
609 pgd = pgd_offset(vma->vm_mm, addr);
610 do {
611 next = pgd_addr_end(addr, end);
612 if (pgd_none_or_clear_bad(pgd))
613 continue;
614 if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
615 flags, private))
616 return -EIO;
617 } while (pgd++, addr = next, addr != end);
618 return 0;
619 }
620
621 #ifdef CONFIG_NUMA_BALANCING
622 /*
623 * This is used to mark a range of virtual addresses to be inaccessible.
624 * These are later cleared by a NUMA hinting fault. Depending on these
625 * faults, pages may be migrated for better NUMA placement.
626 *
627 * This is assuming that NUMA faults are handled using PROT_NONE. If
628 * an architecture makes a different choice, it will need further
629 * changes to the core.
630 */
631 unsigned long change_prot_numa(struct vm_area_struct *vma,
632 unsigned long addr, unsigned long end)
633 {
634 int nr_updated;
635
636 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
637 if (nr_updated)
638 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
639
640 return nr_updated;
641 }
642 #else
643 static unsigned long change_prot_numa(struct vm_area_struct *vma,
644 unsigned long addr, unsigned long end)
645 {
646 return 0;
647 }
648 #endif /* CONFIG_NUMA_BALANCING */
649
650 /*
651 * Walk through page tables and collect pages to be migrated.
652 *
653 * If pages found in a given range are on the set of nodes determined by
654 * @nodes and @flags, they are isolated and queued onto the pagelist
655 * passed in via @private.
656 */
657 static int
658 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
659 const nodemask_t *nodes, unsigned long flags, void *private)
660 {
661 int err = 0;
662 struct vm_area_struct *vma, *prev;
663
664 vma = find_vma(mm, start);
665 if (!vma)
666 return -EFAULT;
667 prev = NULL;
668 for (; vma && vma->vm_start < end; vma = vma->vm_next) {
669 unsigned long endvma = vma->vm_end;
670
671 if (endvma > end)
672 endvma = end;
673 if (vma->vm_start > start)
674 start = vma->vm_start;
675
676 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
677 if (!vma->vm_next && vma->vm_end < end)
678 return -EFAULT;
679 if (prev && prev->vm_end < vma->vm_start)
680 return -EFAULT;
681 }
682
683 if (flags & MPOL_MF_LAZY) {
684 /* Similar to task_numa_work, skip inaccessible VMAs */
685 if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
686 change_prot_numa(vma, start, endvma);
687 goto next;
688 }
689
690 if ((flags & MPOL_MF_STRICT) ||
691 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
692 vma_migratable(vma))) {
693
694 err = queue_pages_pgd_range(vma, start, endvma, nodes,
695 flags, private);
696 if (err)
697 break;
698 }
699 next:
700 prev = vma;
701 }
702 return err;
703 }
704
705 /*
706 * Apply policy to a single VMA
707 * This must be called with the mmap_sem held for writing.
708 */
709 static int vma_replace_policy(struct vm_area_struct *vma,
710 struct mempolicy *pol)
711 {
712 int err;
713 struct mempolicy *old;
714 struct mempolicy *new;
715
716 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
717 vma->vm_start, vma->vm_end, vma->vm_pgoff,
718 vma->vm_ops, vma->vm_file,
719 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
720
721 new = mpol_dup(pol);
722 if (IS_ERR(new))
723 return PTR_ERR(new);
724
725 if (vma->vm_ops && vma->vm_ops->set_policy) {
726 err = vma->vm_ops->set_policy(vma, new);
727 if (err)
728 goto err_out;
729 }
730
731 old = vma->vm_policy;
732 vma->vm_policy = new; /* protected by mmap_sem */
733 mpol_put(old);
734
735 return 0;
736 err_out:
737 mpol_put(new);
738 return err;
739 }
740
741 /* Step 2: apply policy to a range and do splits. */
742 static int mbind_range(struct mm_struct *mm, unsigned long start,
743 unsigned long end, struct mempolicy *new_pol)
744 {
745 struct vm_area_struct *next;
746 struct vm_area_struct *prev;
747 struct vm_area_struct *vma;
748 int err = 0;
749 pgoff_t pgoff;
750 unsigned long vmstart;
751 unsigned long vmend;
752
753 vma = find_vma(mm, start);
754 if (!vma || vma->vm_start > start)
755 return -EFAULT;
756
757 prev = vma->vm_prev;
758 if (start > vma->vm_start)
759 prev = vma;
760
761 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
762 next = vma->vm_next;
763 vmstart = max(start, vma->vm_start);
764 vmend = min(end, vma->vm_end);
765
766 if (mpol_equal(vma_policy(vma), new_pol))
767 continue;
768
769 pgoff = vma->vm_pgoff +
770 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
771 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
772 vma->anon_vma, vma->vm_file, pgoff,
773 new_pol, vma_get_anon_name(vma));
774 if (prev) {
775 vma = prev;
776 next = vma->vm_next;
777 if (mpol_equal(vma_policy(vma), new_pol))
778 continue;
779 /* vma_merge() joined vma && vma->next, case 8 */
780 goto replace;
781 }
782 if (vma->vm_start != vmstart) {
783 err = split_vma(vma->vm_mm, vma, vmstart, 1);
784 if (err)
785 goto out;
786 }
787 if (vma->vm_end != vmend) {
788 err = split_vma(vma->vm_mm, vma, vmend, 0);
789 if (err)
790 goto out;
791 }
792 replace:
793 err = vma_replace_policy(vma, new_pol);
794 if (err)
795 goto out;
796 }
797
798 out:
799 return err;
800 }
801
802 /* Set the process memory policy */
803 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
804 nodemask_t *nodes)
805 {
806 struct mempolicy *new, *old;
807 NODEMASK_SCRATCH(scratch);
808 int ret;
809
810 if (!scratch)
811 return -ENOMEM;
812
813 new = mpol_new(mode, flags, nodes);
814 if (IS_ERR(new)) {
815 ret = PTR_ERR(new);
816 goto out;
817 }
818
819 task_lock(current);
820 ret = mpol_set_nodemask(new, nodes, scratch);
821 if (ret) {
822 task_unlock(current);
823 mpol_put(new);
824 goto out;
825 }
826 old = current->mempolicy;
827 current->mempolicy = new;
828 if (new && new->mode == MPOL_INTERLEAVE &&
829 nodes_weight(new->v.nodes))
830 current->il_next = first_node(new->v.nodes);
831 task_unlock(current);
832 mpol_put(old);
833 ret = 0;
834 out:
835 NODEMASK_SCRATCH_FREE(scratch);
836 return ret;
837 }
838
839 /*
840 * Return nodemask for policy for get_mempolicy() query
841 *
842 * Called with task's alloc_lock held
843 */
844 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
845 {
846 nodes_clear(*nodes);
847 if (p == &default_policy)
848 return;
849
850 switch (p->mode) {
851 case MPOL_BIND:
852 /* Fall through */
853 case MPOL_INTERLEAVE:
854 *nodes = p->v.nodes;
855 break;
856 case MPOL_PREFERRED:
857 if (!(p->flags & MPOL_F_LOCAL))
858 node_set(p->v.preferred_node, *nodes);
859 /* else return empty node mask for local allocation */
860 break;
861 default:
862 BUG();
863 }
864 }
865
866 static int lookup_node(struct mm_struct *mm, unsigned long addr)
867 {
868 struct page *p;
869 int err;
870
871 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
872 if (err >= 0) {
873 err = page_to_nid(p);
874 put_page(p);
875 }
876 return err;
877 }
878
879 /* Retrieve NUMA policy */
880 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
881 unsigned long addr, unsigned long flags)
882 {
883 int err;
884 struct mm_struct *mm = current->mm;
885 struct vm_area_struct *vma = NULL;
886 struct mempolicy *pol = current->mempolicy;
887
888 if (flags &
889 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
890 return -EINVAL;
891
892 if (flags & MPOL_F_MEMS_ALLOWED) {
893 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
894 return -EINVAL;
895 *policy = 0; /* just so it's initialized */
896 task_lock(current);
897 *nmask = cpuset_current_mems_allowed;
898 task_unlock(current);
899 return 0;
900 }
901
902 if (flags & MPOL_F_ADDR) {
903 /*
904 * Do NOT fall back to task policy if the
905 * vma/shared policy at addr is NULL. We
906 * want to return MPOL_DEFAULT in this case.
907 */
908 down_read(&mm->mmap_sem);
909 vma = find_vma_intersection(mm, addr, addr+1);
910 if (!vma) {
911 up_read(&mm->mmap_sem);
912 return -EFAULT;
913 }
914 if (vma->vm_ops && vma->vm_ops->get_policy)
915 pol = vma->vm_ops->get_policy(vma, addr);
916 else
917 pol = vma->vm_policy;
918 } else if (addr)
919 return -EINVAL;
920
921 if (!pol)
922 pol = &default_policy; /* indicates default behavior */
923
924 if (flags & MPOL_F_NODE) {
925 if (flags & MPOL_F_ADDR) {
926 err = lookup_node(mm, addr);
927 if (err < 0)
928 goto out;
929 *policy = err;
930 } else if (pol == current->mempolicy &&
931 pol->mode == MPOL_INTERLEAVE) {
932 *policy = current->il_next;
933 } else {
934 err = -EINVAL;
935 goto out;
936 }
937 } else {
938 *policy = pol == &default_policy ? MPOL_DEFAULT :
939 pol->mode;
940 /*
941 * Internal mempolicy flags must be masked off before exposing
942 * the policy to userspace.
943 */
944 *policy |= (pol->flags & MPOL_MODE_FLAGS);
945 }
946
947 err = 0;
948 if (nmask) {
949 if (mpol_store_user_nodemask(pol)) {
950 *nmask = pol->w.user_nodemask;
951 } else {
952 task_lock(current);
953 get_policy_nodemask(pol, nmask);
954 task_unlock(current);
955 }
956 }
957
958 out:
959 mpol_cond_put(pol);
960 if (vma)
961 up_read(&current->mm->mmap_sem);
962 return err;
963 }
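/*
 * Userspace sketch (not kernel code): the MPOL_F_NODE | MPOL_F_ADDR path
 * above is what lets a process ask which node currently backs a mapping,
 * via the get_mempolicy(2) syscall defined later in this file. Assuming the
 * <numaif.h> wrapper from libnuma and some mapped address "addr":
 *
 *	int node = -1;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *	// on success, "node" is the NUMA node of the page backing addr;
 *	// lookup_node() pins the page with get_user_pages(), faulting it
 *	// in if necessary.
 */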
964
965 #ifdef CONFIG_MIGRATION
966 /*
967 * page migration
968 */
969 static void migrate_page_add(struct page *page, struct list_head *pagelist,
970 unsigned long flags)
971 {
972 /*
973 * Avoid migrating a page that is shared with others.
974 */
975 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
976 if (!isolate_lru_page(page)) {
977 list_add_tail(&page->lru, pagelist);
978 inc_zone_page_state(page, NR_ISOLATED_ANON +
979 page_is_file_cache(page));
980 }
981 }
982 }
983
984 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
985 {
986 if (PageHuge(page))
987 return alloc_huge_page_node(page_hstate(compound_head(page)),
988 node);
989 else
990 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
991 }
992
993 /*
994 * Migrate pages from one node to a target node.
995 * Returns error or the number of pages not migrated.
996 */
997 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
998 int flags)
999 {
1000 nodemask_t nmask;
1001 LIST_HEAD(pagelist);
1002 int err = 0;
1003
1004 nodes_clear(nmask);
1005 node_set(source, nmask);
1006
1007 /*
1008 * This does not "check" the range but isolates all pages that
1009 * need migration. Between passing in the full user address
1010 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
1011 */
1012 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1013 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1014 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1015
1016 if (!list_empty(&pagelist)) {
1017 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
1018 MIGRATE_SYNC, MR_SYSCALL);
1019 if (err)
1020 putback_movable_pages(&pagelist);
1021 }
1022
1023 return err;
1024 }
1025
1026 /*
1027 * Move pages between the two nodesets so as to preserve the physical
1028 * layout as much as possible.
1029 *
1030 * Returns the number of pages that could not be moved.
1031 */
1032 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1033 const nodemask_t *to, int flags)
1034 {
1035 int busy = 0;
1036 int err;
1037 nodemask_t tmp;
1038
1039 err = migrate_prep();
1040 if (err)
1041 return err;
1042
1043 down_read(&mm->mmap_sem);
1044
1045 err = migrate_vmas(mm, from, to, flags);
1046 if (err)
1047 goto out;
1048
1049 /*
1050 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1051 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1052 * bit in 'tmp', and return that <source, dest> pair for migration.
1053 * The pair of nodemasks 'to' and 'from' define the map.
1054 *
1055 * If no pair of bits is found that way, fallback to picking some
1056 * pair of 'source' and 'dest' bits that are not the same. If the
1057 * 'source' and 'dest' bits are the same, this represents a node
1058 * that will be migrating to itself, so no pages need move.
1059 *
1060 * If no bits are left in 'tmp', or if all remaining bits left
1061 * in 'tmp' correspond to the same bit in 'to', return false
1062 * (nothing left to migrate).
1063 *
1064 * This lets us pick a pair of nodes to migrate between, such that
1065 * if possible the dest node is not already occupied by some other
1066 * source node, minimizing the risk of overloading the memory on a
1067 * node that would happen if we migrated incoming memory to a node
1068 * before migrating outgoing memory sourced from that same node.
1069 *
1070 * A single scan of tmp is sufficient. As we go, we remember the
1071 * most recent <s, d> pair that moved (s != d). If we find a pair
1072 * that not only moved, but what's better, moved to an empty slot
1073 * (d is not set in tmp), then we break out then, with that pair.
1074 * Otherwise when we finish scanning tmp, we at least have the
1075 * most recent <s, d> pair that moved. If we get all the way through
1076 * the scan of tmp without finding any node that moved, much less
1077 * moved to an empty node, then there is nothing left worth migrating.
1078 */
1079
1080 tmp = *from;
1081 while (!nodes_empty(tmp)) {
1082 int s,d;
1083 int source = NUMA_NO_NODE;
1084 int dest = 0;
1085
1086 for_each_node_mask(s, tmp) {
1087
1088 /*
1089 * do_migrate_pages() tries to maintain the relative
1090 * node relationship of the pages established between
1091 * threads and memory areas.
1092 *
1093 * However if the number of source nodes is not equal to
1094 * the number of destination nodes we can not preserve
1095 * this node relative relationship. In that case, skip
1096 * copying memory from a node that is in the destination
1097 * mask.
1098 *
1099 * Example: [2,3,4] -> [3,4,5] moves everything.
1100 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1101 */
1102
1103 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1104 (node_isset(s, *to)))
1105 continue;
1106
1107 d = node_remap(s, *from, *to);
1108 if (s == d)
1109 continue;
1110
1111 source = s; /* Node moved. Memorize */
1112 dest = d;
1113
1114 /* dest not in remaining from nodes? */
1115 if (!node_isset(dest, tmp))
1116 break;
1117 }
1118 if (source == NUMA_NO_NODE)
1119 break;
1120
1121 node_clear(source, tmp);
1122 err = migrate_to_node(mm, source, dest, flags);
1123 if (err > 0)
1124 busy += err;
1125 if (err < 0)
1126 break;
1127 }
1128 out:
1129 up_read(&mm->mmap_sem);
1130 if (err < 0)
1131 return err;
1132 return busy;
1133
1134 }
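/*
 * Userspace sketch (not kernel code): the pair-wise node migration above
 * backs the migrate_pages(2) syscall defined later in this file. Assuming
 * the <numaif.h> wrapper from libnuma and a target pid, moving a task's
 * pages from node 0 to node 1 looks roughly like:
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	long left = migrate_pages(pid, 8 * sizeof(from), &from, &to);
 *	// negative on error, otherwise the number of pages left unmigrated
 */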
1135
1136 /*
1137 * Allocate a new page for page migration based on vma policy.
1138 * Start by assuming the page is mapped by the same vma that contains @start.
1139 * Search forward from there, if not. N.B., this assumes that the
1140 * list of pages handed to migrate_pages()--which is how we get here--
1141 * is in virtual address order.
1142 */
1143 static struct page *new_page(struct page *page, unsigned long start, int **x)
1144 {
1145 struct vm_area_struct *vma;
1146 unsigned long uninitialized_var(address);
1147
1148 vma = find_vma(current->mm, start);
1149 while (vma) {
1150 address = page_address_in_vma(page, vma);
1151 if (address != -EFAULT)
1152 break;
1153 vma = vma->vm_next;
1154 }
1155
1156 if (PageHuge(page)) {
1157 BUG_ON(!vma);
1158 return alloc_huge_page_noerr(vma, address, 1);
1159 }
1160 /*
1161 * if !vma, alloc_page_vma() will use task or system default policy
1162 */
1163 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1164 }
1165 #else
1166
1167 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1168 unsigned long flags)
1169 {
1170 }
1171
1172 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1173 const nodemask_t *to, int flags)
1174 {
1175 return -ENOSYS;
1176 }
1177
1178 static struct page *new_page(struct page *page, unsigned long start, int **x)
1179 {
1180 return NULL;
1181 }
1182 #endif
1183
1184 static long do_mbind(unsigned long start, unsigned long len,
1185 unsigned short mode, unsigned short mode_flags,
1186 nodemask_t *nmask, unsigned long flags)
1187 {
1188 struct mm_struct *mm = current->mm;
1189 struct mempolicy *new;
1190 unsigned long end;
1191 int err;
1192 LIST_HEAD(pagelist);
1193
1194 if (flags & ~(unsigned long)MPOL_MF_VALID)
1195 return -EINVAL;
1196 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1197 return -EPERM;
1198
1199 if (start & ~PAGE_MASK)
1200 return -EINVAL;
1201
1202 if (mode == MPOL_DEFAULT)
1203 flags &= ~MPOL_MF_STRICT;
1204
1205 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1206 end = start + len;
1207
1208 if (end < start)
1209 return -EINVAL;
1210 if (end == start)
1211 return 0;
1212
1213 new = mpol_new(mode, mode_flags, nmask);
1214 if (IS_ERR(new))
1215 return PTR_ERR(new);
1216
1217 if (flags & MPOL_MF_LAZY)
1218 new->flags |= MPOL_F_MOF;
1219
1220 /*
1221 * If we are using the default policy then operation
1222 * on discontinuous address spaces is okay after all
1223 */
1224 if (!new)
1225 flags |= MPOL_MF_DISCONTIG_OK;
1226
1227 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1228 start, start + len, mode, mode_flags,
1229 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1230
1231 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1232
1233 err = migrate_prep();
1234 if (err)
1235 goto mpol_out;
1236 }
1237 {
1238 NODEMASK_SCRATCH(scratch);
1239 if (scratch) {
1240 down_write(&mm->mmap_sem);
1241 task_lock(current);
1242 err = mpol_set_nodemask(new, nmask, scratch);
1243 task_unlock(current);
1244 if (err)
1245 up_write(&mm->mmap_sem);
1246 } else
1247 err = -ENOMEM;
1248 NODEMASK_SCRATCH_FREE(scratch);
1249 }
1250 if (err)
1251 goto mpol_out;
1252
1253 err = queue_pages_range(mm, start, end, nmask,
1254 flags | MPOL_MF_INVERT, &pagelist);
1255 if (!err)
1256 err = mbind_range(mm, start, end, new);
1257
1258 if (!err) {
1259 int nr_failed = 0;
1260
1261 if (!list_empty(&pagelist)) {
1262 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1263 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1264 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1265 if (nr_failed)
1266 putback_movable_pages(&pagelist);
1267 }
1268
1269 if (nr_failed && (flags & MPOL_MF_STRICT))
1270 err = -EIO;
1271 } else
1272 putback_movable_pages(&pagelist);
1273
1274 up_write(&mm->mmap_sem);
1275 mpol_out:
1276 mpol_put(new);
1277 return err;
1278 }
1279
1280 /*
1281 * User space interface with variable sized bitmaps for nodelists.
1282 */
1283
1284 /* Copy a node mask from user space. */
1285 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1286 unsigned long maxnode)
1287 {
1288 unsigned long k;
1289 unsigned long nlongs;
1290 unsigned long endmask;
1291
1292 --maxnode;
1293 nodes_clear(*nodes);
1294 if (maxnode == 0 || !nmask)
1295 return 0;
1296 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1297 return -EINVAL;
1298
1299 nlongs = BITS_TO_LONGS(maxnode);
1300 if ((maxnode % BITS_PER_LONG) == 0)
1301 endmask = ~0UL;
1302 else
1303 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1304
1305 /* When the user specified more nodes than supported, just check
1306 that the unsupported part is all zero. */
1307 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1308 if (nlongs > PAGE_SIZE/sizeof(long))
1309 return -EINVAL;
1310 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1311 unsigned long t;
1312 if (get_user(t, nmask + k))
1313 return -EFAULT;
1314 if (k == nlongs - 1) {
1315 if (t & endmask)
1316 return -EINVAL;
1317 } else if (t)
1318 return -EINVAL;
1319 }
1320 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1321 endmask = ~0UL;
1322 }
1323
1324 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1325 return -EFAULT;
1326 nodes_addr(*nodes)[nlongs-1] &= endmask;
1327 return 0;
1328 }
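/*
 * Worked example for the copy-in above (a sketch): a caller passing
 * maxnode = 65 provides 64 usable bits (bits 0..63); since
 * 64 % BITS_PER_LONG == 0 on a 64-bit kernel, endmask is ~0UL and exactly
 * one long is copied from userspace.
 */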
1329
1330 /* Copy a kernel node mask to user space */
1331 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1332 nodemask_t *nodes)
1333 {
1334 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1335 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1336
1337 if (copy > nbytes) {
1338 if (copy > PAGE_SIZE)
1339 return -EINVAL;
1340 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1341 return -EFAULT;
1342 copy = nbytes;
1343 }
1344 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1345 }
1346
1347 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1348 unsigned long, mode, const unsigned long __user *, nmask,
1349 unsigned long, maxnode, unsigned, flags)
1350 {
1351 nodemask_t nodes;
1352 int err;
1353 unsigned short mode_flags;
1354
1355 mode_flags = mode & MPOL_MODE_FLAGS;
1356 mode &= ~MPOL_MODE_FLAGS;
1357 if (mode >= MPOL_MAX)
1358 return -EINVAL;
1359 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1360 (mode_flags & MPOL_F_RELATIVE_NODES))
1361 return -EINVAL;
1362 err = get_nodes(&nodes, nmask, maxnode);
1363 if (err)
1364 return err;
1365 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1366 }
1367
1368 /* Set the process memory policy */
1369 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1370 unsigned long, maxnode)
1371 {
1372 int err;
1373 nodemask_t nodes;
1374 unsigned short flags;
1375
1376 flags = mode & MPOL_MODE_FLAGS;
1377 mode &= ~MPOL_MODE_FLAGS;
1378 if ((unsigned int)mode >= MPOL_MAX)
1379 return -EINVAL;
1380 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1381 return -EINVAL;
1382 err = get_nodes(&nodes, nmask, maxnode);
1383 if (err)
1384 return err;
1385 return do_set_mempolicy(mode, flags, &nodes);
1386 }
1387
1388 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1389 const unsigned long __user *, old_nodes,
1390 const unsigned long __user *, new_nodes)
1391 {
1392 const struct cred *cred = current_cred(), *tcred;
1393 struct mm_struct *mm = NULL;
1394 struct task_struct *task;
1395 nodemask_t task_nodes;
1396 int err;
1397 nodemask_t *old;
1398 nodemask_t *new;
1399 NODEMASK_SCRATCH(scratch);
1400
1401 if (!scratch)
1402 return -ENOMEM;
1403
1404 old = &scratch->mask1;
1405 new = &scratch->mask2;
1406
1407 err = get_nodes(old, old_nodes, maxnode);
1408 if (err)
1409 goto out;
1410
1411 err = get_nodes(new, new_nodes, maxnode);
1412 if (err)
1413 goto out;
1414
1415 /* Find the mm_struct */
1416 rcu_read_lock();
1417 task = pid ? find_task_by_vpid(pid) : current;
1418 if (!task) {
1419 rcu_read_unlock();
1420 err = -ESRCH;
1421 goto out;
1422 }
1423 get_task_struct(task);
1424
1425 err = -EINVAL;
1426
1427 /*
1428 * Check if this process has the right to modify the specified
1429 * process. The right exists if the process has administrative
1430 * capabilities, superuser privileges or the same
1431 * userid as the target process.
1432 */
1433 tcred = __task_cred(task);
1434 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1435 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1436 !capable(CAP_SYS_NICE)) {
1437 rcu_read_unlock();
1438 err = -EPERM;
1439 goto out_put;
1440 }
1441 rcu_read_unlock();
1442
1443 task_nodes = cpuset_mems_allowed(task);
1444 /* Is the user allowed to access the target nodes? */
1445 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1446 err = -EPERM;
1447 goto out_put;
1448 }
1449
1450 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1451 err = -EINVAL;
1452 goto out_put;
1453 }
1454
1455 err = security_task_movememory(task);
1456 if (err)
1457 goto out_put;
1458
1459 mm = get_task_mm(task);
1460 put_task_struct(task);
1461
1462 if (!mm) {
1463 err = -EINVAL;
1464 goto out;
1465 }
1466
1467 err = do_migrate_pages(mm, old, new,
1468 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1469
1470 mmput(mm);
1471 out:
1472 NODEMASK_SCRATCH_FREE(scratch);
1473
1474 return err;
1475
1476 out_put:
1477 put_task_struct(task);
1478 goto out;
1479
1480 }
1481
1482
1483 /* Retrieve NUMA policy */
1484 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1485 unsigned long __user *, nmask, unsigned long, maxnode,
1486 unsigned long, addr, unsigned long, flags)
1487 {
1488 int err;
1489 int uninitialized_var(pval);
1490 nodemask_t nodes;
1491
1492 if (nmask != NULL && maxnode < MAX_NUMNODES)
1493 return -EINVAL;
1494
1495 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1496
1497 if (err)
1498 return err;
1499
1500 if (policy && put_user(pval, policy))
1501 return -EFAULT;
1502
1503 if (nmask)
1504 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1505
1506 return err;
1507 }
1508
1509 #ifdef CONFIG_COMPAT
1510
1511 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1512 compat_ulong_t __user *, nmask,
1513 compat_ulong_t, maxnode,
1514 compat_ulong_t, addr, compat_ulong_t, flags)
1515 {
1516 long err;
1517 unsigned long __user *nm = NULL;
1518 unsigned long nr_bits, alloc_size;
1519 DECLARE_BITMAP(bm, MAX_NUMNODES);
1520
1521 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1522 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1523
1524 if (nmask)
1525 nm = compat_alloc_user_space(alloc_size);
1526
1527 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1528
1529 if (!err && nmask) {
1530 unsigned long copy_size;
1531 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1532 err = copy_from_user(bm, nm, copy_size);
1533 /* ensure entire bitmap is zeroed */
1534 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1535 err |= compat_put_bitmap(nmask, bm, nr_bits);
1536 }
1537
1538 return err;
1539 }
1540
1541 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1542 compat_ulong_t, maxnode)
1543 {
1544 unsigned long __user *nm = NULL;
1545 unsigned long nr_bits, alloc_size;
1546 DECLARE_BITMAP(bm, MAX_NUMNODES);
1547
1548 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1549 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1550
1551 if (nmask) {
1552 if (compat_get_bitmap(bm, nmask, nr_bits))
1553 return -EFAULT;
1554 nm = compat_alloc_user_space(alloc_size);
1555 if (copy_to_user(nm, bm, alloc_size))
1556 return -EFAULT;
1557 }
1558
1559 return sys_set_mempolicy(mode, nm, nr_bits+1);
1560 }
1561
1562 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1563 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1564 compat_ulong_t, maxnode, compat_ulong_t, flags)
1565 {
1566 unsigned long __user *nm = NULL;
1567 unsigned long nr_bits, alloc_size;
1568 nodemask_t bm;
1569
1570 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1571 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1572
1573 if (nmask) {
1574 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1575 return -EFAULT;
1576 nm = compat_alloc_user_space(alloc_size);
1577 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1578 return -EFAULT;
1579 }
1580
1581 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1582 }
1583
1584 #endif
1585
1586 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1587 unsigned long addr)
1588 {
1589 struct mempolicy *pol = NULL;
1590
1591 if (vma) {
1592 if (vma->vm_ops && vma->vm_ops->get_policy) {
1593 pol = vma->vm_ops->get_policy(vma, addr);
1594 } else if (vma->vm_policy) {
1595 pol = vma->vm_policy;
1596
1597 /*
1598 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1599 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1600 * count on these policies which will be dropped by
1601 * mpol_cond_put() later
1602 */
1603 if (mpol_needs_cond_ref(pol))
1604 mpol_get(pol);
1605 }
1606 }
1607
1608 return pol;
1609 }
1610
1611 /*
1612 * get_vma_policy(@vma, @addr)
1613 * @vma: virtual memory area whose policy is sought
1614 * @addr: address in @vma for shared policy lookup
1615 *
1616 * Returns effective policy for a VMA at specified address.
1617 * Falls back to current->mempolicy or system default policy, as necessary.
1618 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1619 * count--added by the get_policy() vm_op, as appropriate--to protect against
1620 * freeing by another task. It is the caller's responsibility to free the
1621 * extra reference for shared policies.
1622 */
1623 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1624 unsigned long addr)
1625 {
1626 struct mempolicy *pol = __get_vma_policy(vma, addr);
1627
1628 if (!pol)
1629 pol = get_task_policy(current);
1630
1631 return pol;
1632 }
1633
1634 bool vma_policy_mof(struct vm_area_struct *vma)
1635 {
1636 struct mempolicy *pol;
1637
1638 if (vma->vm_ops && vma->vm_ops->get_policy) {
1639 bool ret = false;
1640
1641 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1642 if (pol && (pol->flags & MPOL_F_MOF))
1643 ret = true;
1644 mpol_cond_put(pol);
1645
1646 return ret;
1647 }
1648
1649 pol = vma->vm_policy;
1650 if (!pol)
1651 pol = get_task_policy(current);
1652
1653 return pol->flags & MPOL_F_MOF;
1654 }
1655
1656 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1657 {
1658 enum zone_type dynamic_policy_zone = policy_zone;
1659
1660 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1661
1662 /*
1663 * If policy->v.nodes has movable memory only,
1664 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1665 *
1666 * policy->v.nodes is intersected with node_states[N_MEMORY],
1667 * so if the following test fails, it implies
1668 * policy->v.nodes has movable memory only.
1669 */
1670 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1671 dynamic_policy_zone = ZONE_MOVABLE;
1672
1673 return zone >= dynamic_policy_zone;
1674 }
1675
1676 /*
1677 * Return a nodemask representing a mempolicy for filtering nodes for
1678 * page allocation
1679 */
1680 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1681 {
1682 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1683 if (unlikely(policy->mode == MPOL_BIND) &&
1684 apply_policy_zone(policy, gfp_zone(gfp)) &&
1685 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1686 return &policy->v.nodes;
1687
1688 return NULL;
1689 }
1690
1691 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1692 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1693 int nd)
1694 {
1695 switch (policy->mode) {
1696 case MPOL_PREFERRED:
1697 if (!(policy->flags & MPOL_F_LOCAL))
1698 nd = policy->v.preferred_node;
1699 break;
1700 case MPOL_BIND:
1701 /*
1702 * Normally, MPOL_BIND allocations are node-local within the
1703 * allowed nodemask. However, if __GFP_THISNODE is set and the
1704 * current node isn't part of the mask, we use the zonelist for
1705 * the first node in the mask instead.
1706 */
1707 if (unlikely(gfp & __GFP_THISNODE) &&
1708 unlikely(!node_isset(nd, policy->v.nodes)))
1709 nd = first_node(policy->v.nodes);
1710 break;
1711 default:
1712 BUG();
1713 }
1714 return node_zonelist(nd, gfp);
1715 }
1716
1717 /* Do dynamic interleaving for a process */
1718 static unsigned interleave_nodes(struct mempolicy *policy)
1719 {
1720 unsigned nid, next;
1721 struct task_struct *me = current;
1722
1723 nid = me->il_next;
1724 next = next_node(nid, policy->v.nodes);
1725 if (next >= MAX_NUMNODES)
1726 next = first_node(policy->v.nodes);
1727 if (next < MAX_NUMNODES)
1728 me->il_next = next;
1729 return nid;
1730 }
1731
1732 /*
1733 * Depending on the memory policy provide a node from which to allocate the
1734 * next slab entry.
1735 */
1736 unsigned int mempolicy_slab_node(void)
1737 {
1738 struct mempolicy *policy;
1739 int node = numa_mem_id();
1740
1741 if (in_interrupt())
1742 return node;
1743
1744 policy = current->mempolicy;
1745 if (!policy || policy->flags & MPOL_F_LOCAL)
1746 return node;
1747
1748 switch (policy->mode) {
1749 case MPOL_PREFERRED:
1750 /*
1751 * handled MPOL_F_LOCAL above
1752 */
1753 return policy->v.preferred_node;
1754
1755 case MPOL_INTERLEAVE:
1756 return interleave_nodes(policy);
1757
1758 case MPOL_BIND: {
1759 /*
1760 * Follow bind policy behavior and start allocation at the
1761 * first node.
1762 */
1763 struct zonelist *zonelist;
1764 struct zone *zone;
1765 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1766 zonelist = &NODE_DATA(node)->node_zonelists[0];
1767 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1768 &policy->v.nodes,
1769 &zone);
1770 return zone ? zone->node : node;
1771 }
1772
1773 default:
1774 BUG();
1775 }
1776 }
1777
1778 /* Do static interleaving for a VMA with known offset. */
1779 static unsigned offset_il_node(struct mempolicy *pol,
1780 struct vm_area_struct *vma, unsigned long off)
1781 {
1782 unsigned nnodes = nodes_weight(pol->v.nodes);
1783 unsigned target;
1784 int c;
1785 int nid = NUMA_NO_NODE;
1786
1787 if (!nnodes)
1788 return numa_node_id();
1789 target = (unsigned int)off % nnodes;
1790 c = 0;
1791 do {
1792 nid = next_node(nid, pol->v.nodes);
1793 c++;
1794 } while (c <= target);
1795 return nid;
1796 }
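/*
 * Worked example for the lookup above (a sketch): with pol->v.nodes = {1,3,6}
 * and off = 7, nnodes = 3 and target = 7 % 3 = 1, so the loop walks past the
 * first node in the mask and returns the second one, node 3.
 */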
1797
1798 /* Determine a node number for interleave */
1799 static inline unsigned interleave_nid(struct mempolicy *pol,
1800 struct vm_area_struct *vma, unsigned long addr, int shift)
1801 {
1802 if (vma) {
1803 unsigned long off;
1804
1805 /*
1806 * for small pages, there is no difference between
1807 * shift and PAGE_SHIFT, so the bit-shift is safe.
1808 * for huge pages, since vm_pgoff is in units of small
1809 * pages, we need to shift off the always 0 bits to get
1810 * a useful offset.
1811 */
1812 BUG_ON(shift < PAGE_SHIFT);
1813 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1814 off += (addr - vma->vm_start) >> shift;
1815 return offset_il_node(pol, vma, off);
1816 } else
1817 return interleave_nodes(pol);
1818 }
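/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): for a 2MB huge
 * page VMA, shift == 21, so a vm_pgoff of 1024 small pages contributes
 * 1024 >> 9 == 2 huge-page units, and (addr - vm_start) >> 21 adds the
 * huge-page index within the mapping before the sum is handed to
 * offset_il_node().
 */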
1819
1820 /*
1821 * Return the bit number of a random bit set in the nodemask.
1822 * (returns NUMA_NO_NODE if nodemask is empty)
1823 */
1824 int node_random(const nodemask_t *maskp)
1825 {
1826 int w, bit = NUMA_NO_NODE;
1827
1828 w = nodes_weight(*maskp);
1829 if (w)
1830 bit = bitmap_ord_to_pos(maskp->bits,
1831 get_random_int() % w, MAX_NUMNODES);
1832 return bit;
1833 }
1834
1835 #ifdef CONFIG_HUGETLBFS
1836 /*
1837 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1838 * @vma: virtual memory area whose policy is sought
1839 * @addr: address in @vma for shared policy lookup and interleave policy
1840 * @gfp_flags: for requested zone
1841 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1842 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1843 *
1844 * Returns a zonelist suitable for a huge page allocation and a pointer
1845 * to the struct mempolicy for conditional unref after allocation.
1846 * If the effective policy is 'bind', returns a pointer to the mempolicy's
1847 * @nodemask for filtering the zonelist.
1848 *
1849 * Must be protected by read_mems_allowed_begin()
1850 */
1851 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1852 gfp_t gfp_flags, struct mempolicy **mpol,
1853 nodemask_t **nodemask)
1854 {
1855 struct zonelist *zl;
1856
1857 *mpol = get_vma_policy(vma, addr);
1858 *nodemask = NULL; /* assume !MPOL_BIND */
1859
1860 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1861 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1862 huge_page_shift(hstate_vma(vma))), gfp_flags);
1863 } else {
1864 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1865 if ((*mpol)->mode == MPOL_BIND)
1866 *nodemask = &(*mpol)->v.nodes;
1867 }
1868 return zl;
1869 }
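/*
 * Illustrative usage sketch (mirrors the intended hugetlb caller; the helper
 * dequeue_huge_page_from_zonelist() and the hstate pointer 'h' are
 * assumptions, not code from this file):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl;
 *	unsigned int cookie;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		zl = huge_zonelist(vma, addr, htlb_alloc_mask(h),
 *				   &mpol, &nodemask);
 *		page = dequeue_huge_page_from_zonelist(zl, nodemask);
 *	} while (!page && read_mems_allowed_retry(cookie));
 *	mpol_cond_put(mpol);
 */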
1870
1871 /*
1872 * init_nodemask_of_mempolicy
1873 *
1874 * If the current task's mempolicy is "default" [NULL], return 'false'
1875 * to indicate default policy. Otherwise, extract the policy nodemask
1876 * for 'bind' or 'interleave' policy into the argument nodemask, or
1877 * initialize the argument nodemask to contain the single node for
1878 * 'preferred' or 'local' policy and return 'true' to indicate presence
1879 * of non-default mempolicy.
1880 *
1881 * We don't bother with reference counting the mempolicy [mpol_get/put]
1882 * because the current task is examining its own mempolicy and a task's
1883 * mempolicy is only ever changed by the task itself.
1884 *
1885 * N.B., it is the caller's responsibility to free a returned nodemask.
1886 */
1887 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1888 {
1889 struct mempolicy *mempolicy;
1890 int nid;
1891
1892 if (!(mask && current->mempolicy))
1893 return false;
1894
1895 task_lock(current);
1896 mempolicy = current->mempolicy;
1897 switch (mempolicy->mode) {
1898 case MPOL_PREFERRED:
1899 if (mempolicy->flags & MPOL_F_LOCAL)
1900 nid = numa_node_id();
1901 else
1902 nid = mempolicy->v.preferred_node;
1903 init_nodemask_of_node(mask, nid);
1904 break;
1905
1906 case MPOL_BIND:
1907 /* Fall through */
1908 case MPOL_INTERLEAVE:
1909 *mask = mempolicy->v.nodes;
1910 break;
1911
1912 default:
1913 BUG();
1914 }
1915 task_unlock(current);
1916
1917 return true;
1918 }
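/*
 * Illustrative usage sketch (in the spirit of the hugetlb sysfs code; the
 * surrounding context is assumed, not shown in this file):
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL);
 *	if (!(nodes_allowed && init_nodemask_of_mempolicy(nodes_allowed))) {
 *		NODEMASK_FREE(nodes_allowed);
 *		nodes_allowed = &node_states[N_MEMORY];
 *	}
 *	... size per-node pools over nodes_allowed, then free it if allocated ...
 */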
1919 #endif
1920
1921 /*
1922 * mempolicy_nodemask_intersects
1923 *
1924 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1925 * policy. Otherwise, check for intersection between mask and the policy
1926 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1927 * policy, always return true since it may allocate elsewhere on fallback.
1928 *
1929 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1930 */
1931 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1932 const nodemask_t *mask)
1933 {
1934 struct mempolicy *mempolicy;
1935 bool ret = true;
1936
1937 if (!mask)
1938 return ret;
1939 task_lock(tsk);
1940 mempolicy = tsk->mempolicy;
1941 if (!mempolicy)
1942 goto out;
1943
1944 switch (mempolicy->mode) {
1945 case MPOL_PREFERRED:
1946 /*
1947 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1948 * allocate from; they may fall back to other nodes when OOM.
1949 * Thus, it's possible for tsk to have allocated memory from
1950 * nodes in mask.
1951 */
1952 break;
1953 case MPOL_BIND:
1954 case MPOL_INTERLEAVE:
1955 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1956 break;
1957 default:
1958 BUG();
1959 }
1960 out:
1961 task_unlock(tsk);
1962 return ret;
1963 }
1964
1965 /* Allocate a page in interleaved policy.
1966 Own path because it needs to do special accounting. */
1967 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1968 unsigned nid)
1969 {
1970 struct zonelist *zl;
1971 struct page *page;
1972
1973 zl = node_zonelist(nid, gfp);
1974 page = __alloc_pages(gfp, order, zl);
1975 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1976 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1977 return page;
1978 }
1979
1980 /**
1981 * alloc_pages_vma - Allocate a page for a VMA.
1982 *
1983 * @gfp:
1984 * %GFP_USER user allocation.
1985 * %GFP_KERNEL kernel allocations,
1986 * %GFP_HIGHMEM highmem/user allocations,
1987 * %GFP_FS allocation should not call back into a file system.
1988 * %GFP_ATOMIC don't sleep.
1989 *
1990 * @order:Order of the GFP allocation.
1991 * @vma: Pointer to VMA or NULL if not available.
1992 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1993 *
1994 * This function allocates a page from the kernel page pool and applies
1995 * a NUMA policy associated with the VMA or the current process.
1996 * When @vma is not NULL, the caller must hold down_read on the mmap_sem of
1997 * the VMA's mm_struct to prevent it from going away. Should be used for
1998 * all allocations for pages that will be mapped into
1999 * user space. Returns NULL when no page can be allocated.
2000 *
2001 * Should be called with the mmap_sem of the vma held.
2002 */
2003 struct page *
2004 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2005 unsigned long addr, int node)
2006 {
2007 struct mempolicy *pol;
2008 struct page *page;
2009 unsigned int cpuset_mems_cookie;
2010
2011 retry_cpuset:
2012 pol = get_vma_policy(vma, addr);
2013 cpuset_mems_cookie = read_mems_allowed_begin();
2014
2015 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
2016 unsigned nid;
2017
2018 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2019 mpol_cond_put(pol);
2020 page = alloc_page_interleave(gfp, order, nid);
2021 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2022 goto retry_cpuset;
2023
2024 return page;
2025 }
2026 page = __alloc_pages_nodemask(gfp, order,
2027 policy_zonelist(gfp, pol, node),
2028 policy_nodemask(gfp, pol));
2029 mpol_cond_put(pol);
2030 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2031 goto retry_cpuset;
2032 return page;
2033 }
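/*
 * Illustrative usage sketch (assumes the caller is in a fault path, holds
 * mmap_sem for read and knows addr lies inside vma):
 *
 *	struct page *page;
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id());
 *	if (!page)
 *		return VM_FAULT_OOM;
 */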
2034
2035 /**
2036 * alloc_pages_current - Allocate pages.
2037 *
2038 * @gfp:
2039 * %GFP_USER user allocation,
2040 * %GFP_KERNEL kernel allocation,
2041 * %GFP_HIGHMEM highmem allocation,
2042 * %GFP_FS don't call back into a file system.
2043 * %GFP_ATOMIC don't sleep.
2044 * @order: Power of two of allocation size in pages. 0 is a single page.
2045 *
2046 * Allocate a page from the kernel page pool. When not in
2047 * interrupt context, apply the current process' NUMA policy.
2048 * Returns NULL when no page can be allocated.
2049 *
2050 * Don't call cpuset_update_task_memory_state() unless
2051 * 1) it's ok to take cpuset_sem (can WAIT), and
2052 * 2) allocating for current task (not interrupt).
2053 */
2054 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2055 {
2056 struct mempolicy *pol = &default_policy;
2057 struct page *page;
2058 unsigned int cpuset_mems_cookie;
2059
2060 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2061 pol = get_task_policy(current);
2062
2063 retry_cpuset:
2064 cpuset_mems_cookie = read_mems_allowed_begin();
2065
2066 /*
2067 * No reference counting needed for current->mempolicy
2068 * nor system default_policy
2069 */
2070 if (pol->mode == MPOL_INTERLEAVE)
2071 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2072 else
2073 page = __alloc_pages_nodemask(gfp, order,
2074 policy_zonelist(gfp, pol, numa_node_id()),
2075 policy_nodemask(gfp, pol));
2076
2077 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2078 goto retry_cpuset;
2079
2080 return page;
2081 }
2082 EXPORT_SYMBOL(alloc_pages_current);
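/*
 * Note (illustrative): on CONFIG_NUMA kernels the generic alloc_pages(gfp,
 * order) wrapper resolves to alloc_pages_current(), so e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 * already honours the calling task's interleave/bind/preferred policy,
 * except in interrupt context or when __GFP_THISNODE is passed.
 */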
2083
2084 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2085 {
2086 struct mempolicy *pol = mpol_dup(vma_policy(src));
2087
2088 if (IS_ERR(pol))
2089 return PTR_ERR(pol);
2090 dst->vm_policy = pol;
2091 return 0;
2092 }
2093
2094 /*
2095 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2096 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2097 * with the mems_allowed returned by cpuset_mems_allowed(). This
2098 * keeps mempolicies cpuset relative after its cpuset moves. See
2099 * further kernel/cpuset.c update_nodemask().
2100 *
2101 * current's mempolicy may be rebound by another task (the task that changes
2102 * the cpuset's mems), so we needn't do rebind work for the current task.
2103 */
2104
2105 /* Slow path of a mempolicy duplicate */
2106 struct mempolicy *__mpol_dup(struct mempolicy *old)
2107 {
2108 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2109
2110 if (!new)
2111 return ERR_PTR(-ENOMEM);
2112
2113 /* task's mempolicy is protected by alloc_lock */
2114 if (old == current->mempolicy) {
2115 task_lock(current);
2116 *new = *old;
2117 task_unlock(current);
2118 } else
2119 *new = *old;
2120
2121 if (current_cpuset_is_being_rebound()) {
2122 nodemask_t mems = cpuset_mems_allowed(current);
2123 if (new->flags & MPOL_F_REBINDING)
2124 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2125 else
2126 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2127 }
2128 atomic_set(&new->refcnt, 1);
2129 return new;
2130 }
2131
2132 /* Slow path of a mempolicy comparison */
2133 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2134 {
2135 if (!a || !b)
2136 return false;
2137 if (a->mode != b->mode)
2138 return false;
2139 if (a->flags != b->flags)
2140 return false;
2141 if (mpol_store_user_nodemask(a))
2142 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2143 return false;
2144
2145 switch (a->mode) {
2146 case MPOL_BIND:
2147 /* Fall through */
2148 case MPOL_INTERLEAVE:
2149 return !!nodes_equal(a->v.nodes, b->v.nodes);
2150 case MPOL_PREFERRED:
2151 return a->v.preferred_node == b->v.preferred_node;
2152 default:
2153 BUG();
2154 return false;
2155 }
2156 }
2157
2158 /*
2159 * Shared memory backing store policy support.
2160 *
2161 * Remember policies even when nobody has shared memory mapped.
2162 * The policies are kept in Red-Black tree linked from the inode.
2163 * They are protected by the sp->lock spinlock, which should be held
2164 * for any accesses to the tree.
2165 */
2166
2167 /* lookup first element intersecting start-end */
2168 /* Caller holds sp->lock */
2169 static struct sp_node *
2170 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2171 {
2172 struct rb_node *n = sp->root.rb_node;
2173
2174 while (n) {
2175 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2176
2177 if (start >= p->end)
2178 n = n->rb_right;
2179 else if (end <= p->start)
2180 n = n->rb_left;
2181 else
2182 break;
2183 }
2184 if (!n)
2185 return NULL;
2186 for (;;) {
2187 struct sp_node *w = NULL;
2188 struct rb_node *prev = rb_prev(n);
2189 if (!prev)
2190 break;
2191 w = rb_entry(prev, struct sp_node, nd);
2192 if (w->end <= start)
2193 break;
2194 n = prev;
2195 }
2196 return rb_entry(n, struct sp_node, nd);
2197 }
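/*
 * Worked example (illustrative): with stored ranges [0,4), [4,8) and [10,16),
 * sp_lookup(sp, 3, 12) first finds some intersecting node and then walks
 * rb_prev() until the predecessor ends at or before 3, so it returns the
 * node for [0,4), the first range intersecting [3,12).
 */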
2198
2199 /* Insert a new shared policy into the list. */
2200 /* Caller holds sp->lock */
2201 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2202 {
2203 struct rb_node **p = &sp->root.rb_node;
2204 struct rb_node *parent = NULL;
2205 struct sp_node *nd;
2206
2207 while (*p) {
2208 parent = *p;
2209 nd = rb_entry(parent, struct sp_node, nd);
2210 if (new->start < nd->start)
2211 p = &(*p)->rb_left;
2212 else if (new->end > nd->end)
2213 p = &(*p)->rb_right;
2214 else
2215 BUG();
2216 }
2217 rb_link_node(&new->nd, parent, p);
2218 rb_insert_color(&new->nd, &sp->root);
2219 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2220 new->policy ? new->policy->mode : 0);
2221 }
2222
2223 /* Find shared policy intersecting idx */
2224 struct mempolicy *
2225 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2226 {
2227 struct mempolicy *pol = NULL;
2228 struct sp_node *sn;
2229
2230 if (!sp->root.rb_node)
2231 return NULL;
2232 spin_lock(&sp->lock);
2233 sn = sp_lookup(sp, idx, idx+1);
2234 if (sn) {
2235 mpol_get(sn->policy);
2236 pol = sn->policy;
2237 }
2238 spin_unlock(&sp->lock);
2239 return pol;
2240 }
2241
2242 static void sp_free(struct sp_node *n)
2243 {
2244 mpol_put(n->policy);
2245 kmem_cache_free(sn_cache, n);
2246 }
2247
2248 /**
2249 * mpol_misplaced - check whether current page node is valid in policy
2250 *
2251 * @page: page to be checked
2252 * @vma: vm area where page mapped
2253 * @addr: virtual address where page mapped
2254 *
2255 * Lookup current policy node id for vma,addr and "compare to" page's
2256 * node id.
2257 *
2258 * Returns:
2259 * -1 - not misplaced, page is in the right node
2260 * node - node id where the page should be
2261 *
2262 * Policy determination "mimics" alloc_page_vma().
2263 * Called from fault path where we know the vma and faulting address.
2264 */
2265 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2266 {
2267 struct mempolicy *pol;
2268 struct zone *zone;
2269 int curnid = page_to_nid(page);
2270 unsigned long pgoff;
2271 int thiscpu = raw_smp_processor_id();
2272 int thisnid = cpu_to_node(thiscpu);
2273 int polnid = -1;
2274 int ret = -1;
2275
2276 BUG_ON(!vma);
2277
2278 pol = get_vma_policy(vma, addr);
2279 if (!(pol->flags & MPOL_F_MOF))
2280 goto out;
2281
2282 switch (pol->mode) {
2283 case MPOL_INTERLEAVE:
2284 BUG_ON(addr >= vma->vm_end);
2285 BUG_ON(addr < vma->vm_start);
2286
2287 pgoff = vma->vm_pgoff;
2288 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2289 polnid = offset_il_node(pol, vma, pgoff);
2290 break;
2291
2292 case MPOL_PREFERRED:
2293 if (pol->flags & MPOL_F_LOCAL)
2294 polnid = numa_node_id();
2295 else
2296 polnid = pol->v.preferred_node;
2297 break;
2298
2299 case MPOL_BIND:
2300 /*
2301 * allows binding to multiple nodes.
2302 * use current page if in policy nodemask,
2303 * else select nearest allowed node, if any.
2304 * If no allowed nodes, use current [!misplaced].
2305 */
2306 if (node_isset(curnid, pol->v.nodes))
2307 goto out;
2308 (void)first_zones_zonelist(
2309 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2310 gfp_zone(GFP_HIGHUSER),
2311 &pol->v.nodes, &zone);
2312 polnid = zone->node;
2313 break;
2314
2315 default:
2316 BUG();
2317 }
2318
2319 /* Migrate the page towards the node whose CPU is referencing it */
2320 if (pol->flags & MPOL_F_MORON) {
2321 polnid = thisnid;
2322
2323 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2324 goto out;
2325 }
2326
2327 if (curnid != polnid)
2328 ret = polnid;
2329 out:
2330 mpol_cond_put(pol);
2331
2332 return ret;
2333 }
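/*
 * Illustrative usage sketch (the NUMA hinting fault path is the intended
 * caller; page locking and error handling are elided):
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		migrate_misplaced_page(page, vma, target_nid);
 */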
2334
2335 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2336 {
2337 pr_debug("deleting %lx-%lx\n", n->start, n->end);
2338 rb_erase(&n->nd, &sp->root);
2339 sp_free(n);
2340 }
2341
2342 static void sp_node_init(struct sp_node *node, unsigned long start,
2343 unsigned long end, struct mempolicy *pol)
2344 {
2345 node->start = start;
2346 node->end = end;
2347 node->policy = pol;
2348 }
2349
2350 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2351 struct mempolicy *pol)
2352 {
2353 struct sp_node *n;
2354 struct mempolicy *newpol;
2355
2356 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2357 if (!n)
2358 return NULL;
2359
2360 newpol = mpol_dup(pol);
2361 if (IS_ERR(newpol)) {
2362 kmem_cache_free(sn_cache, n);
2363 return NULL;
2364 }
2365 newpol->flags |= MPOL_F_SHARED;
2366 sp_node_init(n, start, end, newpol);
2367
2368 return n;
2369 }
2370
2371 /* Replace a policy range. */
2372 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2373 unsigned long end, struct sp_node *new)
2374 {
2375 struct sp_node *n;
2376 struct sp_node *n_new = NULL;
2377 struct mempolicy *mpol_new = NULL;
2378 int ret = 0;
2379
2380 restart:
2381 spin_lock(&sp->lock);
2382 n = sp_lookup(sp, start, end);
2383 /* Take care of old policies in the same range. */
2384 while (n && n->start < end) {
2385 struct rb_node *next = rb_next(&n->nd);
2386 if (n->start >= start) {
2387 if (n->end <= end)
2388 sp_delete(sp, n);
2389 else
2390 n->start = end;
2391 } else {
2392 /* Old policy spanning whole new range. */
2393 if (n->end > end) {
2394 if (!n_new)
2395 goto alloc_new;
2396
2397 *mpol_new = *n->policy;
2398 atomic_set(&mpol_new->refcnt, 1);
2399 sp_node_init(n_new, end, n->end, mpol_new);
2400 n->end = start;
2401 sp_insert(sp, n_new);
2402 n_new = NULL;
2403 mpol_new = NULL;
2404 break;
2405 } else
2406 n->end = start;
2407 }
2408 if (!next)
2409 break;
2410 n = rb_entry(next, struct sp_node, nd);
2411 }
2412 if (new)
2413 sp_insert(sp, new);
2414 spin_unlock(&sp->lock);
2415 ret = 0;
2416
2417 err_out:
2418 if (mpol_new)
2419 mpol_put(mpol_new);
2420 if (n_new)
2421 kmem_cache_free(sn_cache, n_new);
2422
2423 return ret;
2424
2425 alloc_new:
2426 spin_unlock(&sp->lock);
2427 ret = -ENOMEM;
2428 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2429 if (!n_new)
2430 goto err_out;
2431 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2432 if (!mpol_new)
2433 goto err_out;
2434 goto restart;
2435 }
2436
2437 /**
2438 * mpol_shared_policy_init - initialize shared policy for inode
2439 * @sp: pointer to inode shared policy
2440 * @mpol: struct mempolicy to install
2441 *
2442 * Install non-NULL @mpol in inode's shared policy rb-tree.
2443 * On entry, the current task has a reference on a non-NULL @mpol.
2444 * This must be released on exit.
2445 * This is called at get_inode() time, so we can use GFP_KERNEL.
2446 */
2447 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2448 {
2449 int ret;
2450
2451 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2452 spin_lock_init(&sp->lock);
2453
2454 if (mpol) {
2455 struct vm_area_struct pvma;
2456 struct mempolicy *new;
2457 NODEMASK_SCRATCH(scratch);
2458
2459 if (!scratch)
2460 goto put_mpol;
2461 /* contextualize the tmpfs mount point mempolicy */
2462 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2463 if (IS_ERR(new))
2464 goto free_scratch; /* no valid nodemask intersection */
2465
2466 task_lock(current);
2467 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2468 task_unlock(current);
2469 if (ret)
2470 goto put_new;
2471
2472 /* Create pseudo-vma that contains just the policy */
2473 memset(&pvma, 0, sizeof(struct vm_area_struct));
2474 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2475 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2476
2477 put_new:
2478 mpol_put(new); /* drop initial ref */
2479 free_scratch:
2480 NODEMASK_SCRATCH_FREE(scratch);
2481 put_mpol:
2482 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2483 }
2484 }
2485
2486 int mpol_set_shared_policy(struct shared_policy *info,
2487 struct vm_area_struct *vma, struct mempolicy *npol)
2488 {
2489 int err;
2490 struct sp_node *new = NULL;
2491 unsigned long sz = vma_pages(vma);
2492
2493 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2494 vma->vm_pgoff,
2495 sz, npol ? npol->mode : -1,
2496 npol ? npol->flags : -1,
2497 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2498
2499 if (npol) {
2500 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2501 if (!new)
2502 return -ENOMEM;
2503 }
2504 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2505 if (err && new)
2506 sp_free(new);
2507 return err;
2508 }
2509
2510 /* Free a backing policy store on inode delete. */
2511 void mpol_free_shared_policy(struct shared_policy *p)
2512 {
2513 struct sp_node *n;
2514 struct rb_node *next;
2515
2516 if (!p->root.rb_node)
2517 return;
2518 spin_lock(&p->lock);
2519 next = rb_first(&p->root);
2520 while (next) {
2521 n = rb_entry(next, struct sp_node, nd);
2522 next = rb_next(&n->nd);
2523 sp_delete(p, n);
2524 }
2525 spin_unlock(&p->lock);
2526 }
2527
2528 #ifdef CONFIG_NUMA_BALANCING
2529 static int __initdata numabalancing_override;
2530
2531 static void __init check_numabalancing_enable(void)
2532 {
2533 bool numabalancing_default = false;
2534
2535 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2536 numabalancing_default = true;
2537
2538 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2539 if (numabalancing_override)
2540 set_numabalancing_state(numabalancing_override == 1);
2541
2542 if (num_online_nodes() > 1 && !numabalancing_override) {
2543 pr_info("%s automatic NUMA balancing. "
2544 "Configure with numa_balancing= or the "
2545 "kernel.numa_balancing sysctl",
2546 numabalancing_default ? "Enabling" : "Disabling");
2547 set_numabalancing_state(numabalancing_default);
2548 }
2549 }
2550
2551 static int __init setup_numabalancing(char *str)
2552 {
2553 int ret = 0;
2554 if (!str)
2555 goto out;
2556
2557 if (!strcmp(str, "enable")) {
2558 numabalancing_override = 1;
2559 ret = 1;
2560 } else if (!strcmp(str, "disable")) {
2561 numabalancing_override = -1;
2562 ret = 1;
2563 }
2564 out:
2565 if (!ret)
2566 pr_warn("Unable to parse numa_balancing=\n");
2567
2568 return ret;
2569 }
2570 __setup("numa_balancing=", setup_numabalancing);
2571 #else
2572 static inline void __init check_numabalancing_enable(void)
2573 {
2574 }
2575 #endif /* CONFIG_NUMA_BALANCING */
2576
2577 /* assumes fs == KERNEL_DS */
2578 void __init numa_policy_init(void)
2579 {
2580 nodemask_t interleave_nodes;
2581 unsigned long largest = 0;
2582 int nid, prefer = 0;
2583
2584 policy_cache = kmem_cache_create("numa_policy",
2585 sizeof(struct mempolicy),
2586 0, SLAB_PANIC, NULL);
2587
2588 sn_cache = kmem_cache_create("shared_policy_node",
2589 sizeof(struct sp_node),
2590 0, SLAB_PANIC, NULL);
2591
2592 for_each_node(nid) {
2593 preferred_node_policy[nid] = (struct mempolicy) {
2594 .refcnt = ATOMIC_INIT(1),
2595 .mode = MPOL_PREFERRED,
2596 .flags = MPOL_F_MOF | MPOL_F_MORON,
2597 .v = { .preferred_node = nid, },
2598 };
2599 }
2600
2601 /*
2602 * Set interleaving policy for system init. Interleaving is only
2603 * enabled across suitably sized nodes (default is >= 16MB), or
2604 * fall back to the largest node if they're all smaller.
2605 */
2606 nodes_clear(interleave_nodes);
2607 for_each_node_state(nid, N_MEMORY) {
2608 unsigned long total_pages = node_present_pages(nid);
2609
2610 /* Preserve the largest node */
2611 if (largest < total_pages) {
2612 largest = total_pages;
2613 prefer = nid;
2614 }
2615
2616 /* Interleave this node? */
2617 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2618 node_set(nid, interleave_nodes);
2619 }
2620
2621 /* All too small, use the largest */
2622 if (unlikely(nodes_empty(interleave_nodes)))
2623 node_set(prefer, interleave_nodes);
2624
2625 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2626 pr_err("%s: interleaving failed\n", __func__);
2627
2628 check_numabalancing_enable();
2629 }
2630
2631 /* Reset policy of current process to default */
2632 void numa_default_policy(void)
2633 {
2634 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2635 }
2636
2637 /*
2638 * Parse and format mempolicy from/to strings
2639 */
2640
2641 /*
2642 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2643 */
2644 static const char * const policy_modes[] =
2645 {
2646 [MPOL_DEFAULT] = "default",
2647 [MPOL_PREFERRED] = "prefer",
2648 [MPOL_BIND] = "bind",
2649 [MPOL_INTERLEAVE] = "interleave",
2650 [MPOL_LOCAL] = "local",
2651 };
2652
2653
2654 #ifdef CONFIG_TMPFS
2655 /**
2656 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2657 * @str: string containing mempolicy to parse
2658 * @mpol: pointer to struct mempolicy pointer, returned on success.
2659 *
2660 * Format of input:
2661 * <mode>[=<flags>][:<nodelist>]
2662 *
2663 * On success, returns 0, else 1
2664 */
2665 int mpol_parse_str(char *str, struct mempolicy **mpol)
2666 {
2667 struct mempolicy *new = NULL;
2668 unsigned short mode;
2669 unsigned short mode_flags;
2670 nodemask_t nodes;
2671 char *nodelist = strchr(str, ':');
2672 char *flags = strchr(str, '=');
2673 int err = 1;
2674
2675 if (nodelist) {
2676 /* NUL-terminate mode or flags string */
2677 *nodelist++ = '\0';
2678 if (nodelist_parse(nodelist, nodes))
2679 goto out;
2680 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2681 goto out;
2682 } else
2683 nodes_clear(nodes);
2684
2685 if (flags)
2686 *flags++ = '\0'; /* terminate mode string */
2687
2688 for (mode = 0; mode < MPOL_MAX; mode++) {
2689 if (!strcmp(str, policy_modes[mode])) {
2690 break;
2691 }
2692 }
2693 if (mode >= MPOL_MAX)
2694 goto out;
2695
2696 switch (mode) {
2697 case MPOL_PREFERRED:
2698 /*
2699 * Insist on a nodelist of one node only
2700 */
2701 if (nodelist) {
2702 char *rest = nodelist;
2703 while (isdigit(*rest))
2704 rest++;
2705 if (*rest)
2706 goto out;
2707 }
2708 break;
2709 case MPOL_INTERLEAVE:
2710 /*
2711 * Default to online nodes with memory if no nodelist
2712 */
2713 if (!nodelist)
2714 nodes = node_states[N_MEMORY];
2715 break;
2716 case MPOL_LOCAL:
2717 /*
2718 * Don't allow a nodelist; mpol_new() checks flags
2719 */
2720 if (nodelist)
2721 goto out;
2722 mode = MPOL_PREFERRED;
2723 break;
2724 case MPOL_DEFAULT:
2725 /*
2726 * Insist on an empty nodelist
2727 */
2728 if (!nodelist)
2729 err = 0;
2730 goto out;
2731 case MPOL_BIND:
2732 /*
2733 * Insist on a nodelist
2734 */
2735 if (!nodelist)
2736 goto out;
2737 }
2738
2739 mode_flags = 0;
2740 if (flags) {
2741 /*
2742 * Currently, we only support two mutually exclusive
2743 * mode flags.
2744 */
2745 if (!strcmp(flags, "static"))
2746 mode_flags |= MPOL_F_STATIC_NODES;
2747 else if (!strcmp(flags, "relative"))
2748 mode_flags |= MPOL_F_RELATIVE_NODES;
2749 else
2750 goto out;
2751 }
2752
2753 new = mpol_new(mode, mode_flags, &nodes);
2754 if (IS_ERR(new))
2755 goto out;
2756
2757 /*
2758 * Save nodes for mpol_to_str() to show the tmpfs mount options
2759 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2760 */
2761 if (mode != MPOL_PREFERRED)
2762 new->v.nodes = nodes;
2763 else if (nodelist)
2764 new->v.preferred_node = first_node(nodes);
2765 else
2766 new->flags |= MPOL_F_LOCAL;
2767
2768 /*
2769 * Save nodes for contextualization: this will be used to "clone"
2770 * the mempolicy in a specific context [cpuset] at a later time.
2771 */
2772 new->w.user_nodemask = nodes;
2773
2774 err = 0;
2775
2776 out:
2777 /* Restore string for error message */
2778 if (nodelist)
2779 *--nodelist = ':';
2780 if (flags)
2781 *--flags = '=';
2782 if (!err)
2783 *mpol = new;
2784 return err;
2785 }
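/*
 * Illustrative sketch (error handling elided): parsing a tmpfs "mpol="
 * mount option string such as "interleave=relative:0-3". Note the string
 * must be writable, since the parser temporarily NUL-terminates fields:
 *
 *	struct mempolicy *mpol;
 *	char str[] = "interleave=relative:0-3";
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		... use mpol ...
 *		mpol_put(mpol);
 *	}
 */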
2786 #endif /* CONFIG_TMPFS */
2787
2788 /**
2789 * mpol_to_str - format a mempolicy structure for printing
2790 * @buffer: to contain formatted mempolicy string
2791 * @maxlen: length of @buffer
2792 * @pol: pointer to mempolicy to be formatted
2793 *
2794 * Convert @pol into a string. If @buffer is too short, truncate the string.
2795 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2796 * longest flag, "relative", and to display at least a few node ids.
2797 */
2798 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2799 {
2800 char *p = buffer;
2801 nodemask_t nodes = NODE_MASK_NONE;
2802 unsigned short mode = MPOL_DEFAULT;
2803 unsigned short flags = 0;
2804
2805 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2806 mode = pol->mode;
2807 flags = pol->flags;
2808 }
2809
2810 switch (mode) {
2811 case MPOL_DEFAULT:
2812 break;
2813 case MPOL_PREFERRED:
2814 if (flags & MPOL_F_LOCAL)
2815 mode = MPOL_LOCAL;
2816 else
2817 node_set(pol->v.preferred_node, nodes);
2818 break;
2819 case MPOL_BIND:
2820 case MPOL_INTERLEAVE:
2821 nodes = pol->v.nodes;
2822 break;
2823 default:
2824 WARN_ON_ONCE(1);
2825 snprintf(p, maxlen, "unknown");
2826 return;
2827 }
2828
2829 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2830
2831 if (flags & MPOL_MODE_FLAGS) {
2832 p += snprintf(p, buffer + maxlen - p, "=");
2833
2834 /*
2835 * Currently, the only defined flags are mutually exclusive
2836 */
2837 if (flags & MPOL_F_STATIC_NODES)
2838 p += snprintf(p, buffer + maxlen - p, "static");
2839 else if (flags & MPOL_F_RELATIVE_NODES)
2840 p += snprintf(p, buffer + maxlen - p, "relative");
2841 }
2842
2843 if (!nodes_empty(nodes)) {
2844 p += snprintf(p, buffer + maxlen - p, ":");
2845 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2846 }
2847 }
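/*
 * Illustrative examples of the strings produced above (assuming a large
 * enough buffer): "default", "local", "prefer:1", "bind=static:0-3" and
 * "interleave:0,2,4".
 */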
2848