1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/mmdebug.h>
23 #include <linux/sched/signal.h>
24 #include <linux/rmap.h>
25 #include <linux/string_helpers.h>
26 #include <linux/swap.h>
27 #include <linux/swapops.h>
28 #include <linux/jhash.h>
29 #include <linux/numa.h>
30 #include <linux/llist.h>
31 
32 #include <asm/page.h>
33 #include <asm/pgtable.h>
34 #include <asm/tlb.h>
35 
36 #include <linux/io.h>
37 #include <linux/hugetlb.h>
38 #include <linux/hugetlb_cgroup.h>
39 #include <linux/node.h>
40 #include <linux/userfaultfd_k.h>
41 #include <linux/page_owner.h>
42 #include "internal.h"
43 
44 int hugetlb_max_hstate __read_mostly;
45 unsigned int default_hstate_idx;
46 struct hstate hstates[HUGE_MAX_HSTATE];
47 /*
48  * Minimum page order among possible hugepage sizes, set to a proper value
49  * at boot time.
50  */
51 static unsigned int minimum_order __read_mostly = UINT_MAX;
52 
53 __initdata LIST_HEAD(huge_boot_pages);
54 
55 /* for command line parsing */
56 static struct hstate * __initdata parsed_hstate;
57 static unsigned long __initdata default_hstate_max_huge_pages;
58 static unsigned long __initdata default_hstate_size;
59 static bool __initdata parsed_valid_hugepagesz = true;
60 
61 /*
62  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
63  * free_huge_pages, and surplus_huge_pages.
64  */
65 DEFINE_SPINLOCK(hugetlb_lock);
66 
67 /*
68  * Serializes faults on the same logical page.  This is used to
69  * prevent spurious OOMs when the hugepage pool is fully utilized.
70  */
71 static int num_fault_mutexes;
72 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
73 
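/*
 * Track whether a hugetlb page sits on a free list.  The state is kept in
 * the page_private field of a tail page (head + 4) of the compound page so
 * it does not conflict with other users of the head page.
 */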
74 static inline bool PageHugeFreed(struct page *head)
75 {
76 	return page_private(head + 4) == -1UL;
77 }
78 
79 static inline void SetPageHugeFreed(struct page *head)
80 {
81 	set_page_private(head + 4, -1UL);
82 }
83 
84 static inline void ClearPageHugeFreed(struct page *head)
85 {
86 	set_page_private(head + 4, 0);
87 }
88 
89 /* Forward declaration */
90 static int hugetlb_acct_memory(struct hstate *h, long delta);
91 
92 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
93 {
94 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
95 
96 	spin_unlock(&spool->lock);
97 
98 	/* If no pages are used, and no other handles to the subpool
99 	 * remain, give up any reservations based on minimum size and
100 	 * free the subpool */
101 	if (free) {
102 		if (spool->min_hpages != -1)
103 			hugetlb_acct_memory(spool->hstate,
104 						-spool->min_hpages);
105 		kfree(spool);
106 	}
107 }
108 
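/*
 * Allocate a new subpool for a hugetlbfs mount.  If a minimum size is
 * requested (min_hpages != -1), that many pages are reserved from the
 * global pool up front; the subpool is not created if the reservation
 * cannot be made.
 */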
109 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
110 						long min_hpages)
111 {
112 	struct hugepage_subpool *spool;
113 
114 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
115 	if (!spool)
116 		return NULL;
117 
118 	spin_lock_init(&spool->lock);
119 	spool->count = 1;
120 	spool->max_hpages = max_hpages;
121 	spool->hstate = h;
122 	spool->min_hpages = min_hpages;
123 
124 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
125 		kfree(spool);
126 		return NULL;
127 	}
128 	spool->rsv_hpages = min_hpages;
129 
130 	return spool;
131 }
132 
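/*
 * Drop a reference on a subpool.  Once the last reference is gone and no
 * pages remain allocated against it, unlock_or_release_subpool() returns
 * any minimum-size reservation to the global pool and frees the subpool.
 */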
133 void hugepage_put_subpool(struct hugepage_subpool *spool)
134 {
135 	spin_lock(&spool->lock);
136 	BUG_ON(!spool->count);
137 	spool->count--;
138 	unlock_or_release_subpool(spool);
139 }
140 
141 /*
142  * Subpool accounting for allocating and reserving pages.
143  * Return -ENOMEM if there are not enough resources to satisfy
144  * the request.  Otherwise, return the number of pages by which the
145  * global pools must be adjusted (upward).  The returned value may
146  * only be different than the passed value (delta) in the case where
147  * a subpool minimum size must be maintained.
148  */
149 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
150 				      long delta)
151 {
152 	long ret = delta;
153 
154 	if (!spool)
155 		return ret;
156 
157 	spin_lock(&spool->lock);
158 
159 	if (spool->max_hpages != -1) {		/* maximum size accounting */
160 		if ((spool->used_hpages + delta) <= spool->max_hpages)
161 			spool->used_hpages += delta;
162 		else {
163 			ret = -ENOMEM;
164 			goto unlock_ret;
165 		}
166 	}
167 
168 	/* minimum size accounting */
169 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
170 		if (delta > spool->rsv_hpages) {
171 			/*
172 			 * Asking for more reserves than those already taken on
173 			 * behalf of subpool.  Return difference.
174 			 */
175 			ret = delta - spool->rsv_hpages;
176 			spool->rsv_hpages = 0;
177 		} else {
178 			ret = 0;	/* reserves already accounted for */
179 			spool->rsv_hpages -= delta;
180 		}
181 	}
182 
183 unlock_ret:
184 	spin_unlock(&spool->lock);
185 	return ret;
186 }
187 
188 /*
189  * Subpool accounting for freeing and unreserving pages.
190  * Return the number of global page reservations that must be dropped.
191  * The return value may only be different than the passed value (delta)
192  * in the case where a subpool minimum size must be maintained.
193  */
194 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
195 				       long delta)
196 {
197 	long ret = delta;
198 
199 	if (!spool)
200 		return delta;
201 
202 	spin_lock(&spool->lock);
203 
204 	if (spool->max_hpages != -1)		/* maximum size accounting */
205 		spool->used_hpages -= delta;
206 
207 	 /* minimum size accounting */
208 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
209 		if (spool->rsv_hpages + delta <= spool->min_hpages)
210 			ret = 0;
211 		else
212 			ret = spool->rsv_hpages + delta - spool->min_hpages;
213 
214 		spool->rsv_hpages += delta;
215 		if (spool->rsv_hpages > spool->min_hpages)
216 			spool->rsv_hpages = spool->min_hpages;
217 	}
218 
219 	/*
220 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
221 	 * quota reference, free it now.
222 	 */
223 	unlock_or_release_subpool(spool);
224 
225 	return ret;
226 }
227 
228 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
229 {
230 	return HUGETLBFS_SB(inode->i_sb)->spool;
231 }
232 
233 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
234 {
235 	return subpool_inode(file_inode(vma->vm_file));
236 }
237 
238 /*
239  * Region tracking -- allows tracking of reservations and instantiated pages
240  *                    across the pages in a mapping.
241  *
242  * The region data structures are embedded into a resv_map and protected
243  * by a resv_map's lock.  The set of regions within the resv_map represent
244  * reservations for huge pages, or huge pages that have already been
245  * instantiated within the map.  The from and to elements are huge page
246  * indices into the associated mapping.  from indicates the starting index
247  * of the region.  to represents the first index past the end of the region.
248  *
249  * For example, a file region structure with from == 0 and to == 4 represents
250  * four huge pages in a mapping.  It is important to note that the to element
251  * represents the first element past the end of the region. This is used in
252  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
253  *
254  * Interval notation of the form [from, to) will be used to indicate that
255  * the endpoint from is inclusive and to is exclusive.
256  */
257 struct file_region {
258 	struct list_head link;
259 	long from;
260 	long to;
261 };
262 
263 /*
264  * Add the huge page range represented by [f, t) to the reserve
265  * map.  In the normal case, existing regions will be expanded
266  * to accommodate the specified range.  Sufficient regions should
267  * exist for expansion due to the previous call to region_chg
268  * with the same range.  However, it is possible that region_del
269  * could have been called after region_chg and modified the map
270  * in such a way that no region exists to be expanded.  In this
271  * case, pull a region descriptor from the cache associated with
272  * the map and use that for the new range.
273  *
274  * Return the number of new huge pages added to the map.  This
275  * number is greater than or equal to zero.
276  */
277 static long region_add(struct resv_map *resv, long f, long t)
278 {
279 	struct list_head *head = &resv->regions;
280 	struct file_region *rg, *nrg, *trg;
281 	long add = 0;
282 
283 	spin_lock(&resv->lock);
284 	/* Locate the region we are either in or before. */
285 	list_for_each_entry(rg, head, link)
286 		if (f <= rg->to)
287 			break;
288 
289 	/*
290 	 * If no region exists which can be expanded to include the
291 	 * specified range, the list must have been modified by an
292 	 * interleaving call to region_del().  Pull a region descriptor
293 	 * from the cache and use it for this range.
294 	 */
295 	if (&rg->link == head || t < rg->from) {
296 		VM_BUG_ON(resv->region_cache_count <= 0);
297 
298 		resv->region_cache_count--;
299 		nrg = list_first_entry(&resv->region_cache, struct file_region,
300 					link);
301 		list_del(&nrg->link);
302 
303 		nrg->from = f;
304 		nrg->to = t;
305 		list_add(&nrg->link, rg->link.prev);
306 
307 		add += t - f;
308 		goto out_locked;
309 	}
310 
311 	/* Round our left edge to the current segment if it encloses us. */
312 	if (f > rg->from)
313 		f = rg->from;
314 
315 	/* Check for and consume any regions we now overlap with. */
316 	nrg = rg;
317 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
318 		if (&rg->link == head)
319 			break;
320 		if (rg->from > t)
321 			break;
322 
323 		/* If this area reaches higher, extend our area to
324 		 * include it completely.  If this is not the first area
325 		 * which we intend to reuse, free it. */
326 		if (rg->to > t)
327 			t = rg->to;
328 		if (rg != nrg) {
329 			/* Decrement return value by the deleted range.
330 			 * Another range will span this area so that by
331 			 * end of routine add will be >= zero
332 			 */
333 			add -= (rg->to - rg->from);
334 			list_del(&rg->link);
335 			kfree(rg);
336 		}
337 	}
338 
339 	add += (nrg->from - f);		/* Added to beginning of region */
340 	nrg->from = f;
341 	add += t - nrg->to;		/* Added to end of region */
342 	nrg->to = t;
343 
344 out_locked:
345 	resv->adds_in_progress--;
346 	spin_unlock(&resv->lock);
347 	VM_BUG_ON(add < 0);
348 	return add;
349 }
350 
351 /*
352  * Examine the existing reserve map and determine how many
353  * huge pages in the specified range [f, t) are NOT currently
354  * represented.  This routine is called before a subsequent
355  * call to region_add that will actually modify the reserve
356  * map to add the specified range [f, t).  region_chg does
357  * not change the number of huge pages represented by the
358  * map.  However, if the existing regions in the map can not
359  * be expanded to represent the new range, a new file_region
360  * structure is added to the map as a placeholder.  This is
361  * so that the subsequent region_add call will have all the
362  * regions it needs and will not fail.
363  *
364  * Upon entry, region_chg will also examine the cache of region descriptors
365  * associated with the map.  If there are not enough descriptors cached, one
366  * will be allocated for the in progress add operation.
367  *
368  * Returns the number of huge pages that need to be added to the existing
369  * reservation map for the range [f, t).  This number is greater or equal to
370  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
371  * is needed and can not be allocated.
372  */
373 static long region_chg(struct resv_map *resv, long f, long t)
374 {
375 	struct list_head *head = &resv->regions;
376 	struct file_region *rg, *nrg = NULL;
377 	long chg = 0;
378 
379 retry:
380 	spin_lock(&resv->lock);
381 retry_locked:
382 	resv->adds_in_progress++;
383 
384 	/*
385 	 * Check for sufficient descriptors in the cache to accommodate
386 	 * the number of in progress add operations.
387 	 */
388 	if (resv->adds_in_progress > resv->region_cache_count) {
389 		struct file_region *trg;
390 
391 		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
392 		/* Must drop lock to allocate a new descriptor. */
393 		resv->adds_in_progress--;
394 		spin_unlock(&resv->lock);
395 
396 		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
397 		if (!trg) {
398 			kfree(nrg);
399 			return -ENOMEM;
400 		}
401 
402 		spin_lock(&resv->lock);
403 		list_add(&trg->link, &resv->region_cache);
404 		resv->region_cache_count++;
405 		goto retry_locked;
406 	}
407 
408 	/* Locate the region we are before or in. */
409 	list_for_each_entry(rg, head, link)
410 		if (f <= rg->to)
411 			break;
412 
413 	/* If we are below the current region then a new region is required.
414 	 * Subtle, allocate a new region at the position but make it zero
415 	 * size such that we can guarantee to record the reservation. */
416 	if (&rg->link == head || t < rg->from) {
417 		if (!nrg) {
418 			resv->adds_in_progress--;
419 			spin_unlock(&resv->lock);
420 			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
421 			if (!nrg)
422 				return -ENOMEM;
423 
424 			nrg->from = f;
425 			nrg->to   = f;
426 			INIT_LIST_HEAD(&nrg->link);
427 			goto retry;
428 		}
429 
430 		list_add(&nrg->link, rg->link.prev);
431 		chg = t - f;
432 		goto out_nrg;
433 	}
434 
435 	/* Round our left edge to the current segment if it encloses us. */
436 	if (f > rg->from)
437 		f = rg->from;
438 	chg = t - f;
439 
440 	/* Check for and consume any regions we now overlap with. */
441 	list_for_each_entry(rg, rg->link.prev, link) {
442 		if (&rg->link == head)
443 			break;
444 		if (rg->from > t)
445 			goto out;
446 
447 		/* We overlap with this area, if it extends further than
448 		 * us then we must extend ourselves.  Account for its
449 		 * existing reservation. */
450 		if (rg->to > t) {
451 			chg += rg->to - t;
452 			t = rg->to;
453 		}
454 		chg -= rg->to - rg->from;
455 	}
456 
457 out:
458 	spin_unlock(&resv->lock);
459 	/*  We already know we raced and no longer need the new region */
460 	kfree(nrg);
461 	return chg;
462 out_nrg:
463 	spin_unlock(&resv->lock);
464 	return chg;
465 }
466 
467 /*
468  * Abort the in progress add operation.  The adds_in_progress field
469  * of the resv_map keeps track of the operations in progress between
470  * calls to region_chg and region_add.  Operations are sometimes
471  * aborted after the call to region_chg.  In such cases, region_abort
472  * is called to decrement the adds_in_progress counter.
473  *
474  * NOTE: The range arguments [f, t) are not needed or used in this
475  * routine.  They are kept to make reading the calling code easier as
476  * arguments will match the associated region_chg call.
477  */
478 static void region_abort(struct resv_map *resv, long f, long t)
479 {
480 	spin_lock(&resv->lock);
481 	VM_BUG_ON(!resv->region_cache_count);
482 	resv->adds_in_progress--;
483 	spin_unlock(&resv->lock);
484 }
485 
486 /*
487  * Delete the specified range [f, t) from the reserve map.  If the
488  * t parameter is LONG_MAX, this indicates that ALL regions after f
489  * should be deleted.  Locate the regions which intersect [f, t)
490  * and either trim, delete or split the existing regions.
491  *
492  * Returns the number of huge pages deleted from the reserve map.
493  * In the normal case, the return value is zero or more.  In the
494  * case where a region must be split, a new region descriptor must
495  * be allocated.  If the allocation fails, -ENOMEM will be returned.
496  * NOTE: If the parameter t == LONG_MAX, then we will never split
497  * a region and possibly return -ENOMEM.  Callers specifying
498  * t == LONG_MAX do not need to check for -ENOMEM error.
499  */
500 static long region_del(struct resv_map *resv, long f, long t)
501 {
502 	struct list_head *head = &resv->regions;
503 	struct file_region *rg, *trg;
504 	struct file_region *nrg = NULL;
505 	long del = 0;
506 
507 retry:
508 	spin_lock(&resv->lock);
509 	list_for_each_entry_safe(rg, trg, head, link) {
510 		/*
511 		 * Skip regions before the range to be deleted.  file_region
512 		 * ranges are normally of the form [from, to).  However, there
513 		 * may be a "placeholder" entry in the map which is of the form
514 		 * (from, to) with from == to.  Check for placeholder entries
515 		 * at the beginning of the range to be deleted.
516 		 */
517 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
518 			continue;
519 
520 		if (rg->from >= t)
521 			break;
522 
523 		if (f > rg->from && t < rg->to) { /* Must split region */
524 			/*
525 			 * Check for an entry in the cache before dropping
526 			 * lock and attempting allocation.
527 			 */
528 			if (!nrg &&
529 			    resv->region_cache_count > resv->adds_in_progress) {
530 				nrg = list_first_entry(&resv->region_cache,
531 							struct file_region,
532 							link);
533 				list_del(&nrg->link);
534 				resv->region_cache_count--;
535 			}
536 
537 			if (!nrg) {
538 				spin_unlock(&resv->lock);
539 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
540 				if (!nrg)
541 					return -ENOMEM;
542 				goto retry;
543 			}
544 
545 			del += t - f;
546 
547 			/* New entry for end of split region */
548 			nrg->from = t;
549 			nrg->to = rg->to;
550 			INIT_LIST_HEAD(&nrg->link);
551 
552 			/* Original entry is trimmed */
553 			rg->to = f;
554 
555 			list_add(&nrg->link, &rg->link);
556 			nrg = NULL;
557 			break;
558 		}
559 
560 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
561 			del += rg->to - rg->from;
562 			list_del(&rg->link);
563 			kfree(rg);
564 			continue;
565 		}
566 
567 		if (f <= rg->from) {	/* Trim beginning of region */
568 			del += t - rg->from;
569 			rg->from = t;
570 		} else {		/* Trim end of region */
571 			del += rg->to - f;
572 			rg->to = f;
573 		}
574 	}
575 
576 	spin_unlock(&resv->lock);
577 	kfree(nrg);
578 	return del;
579 }
580 
581 /*
582  * A rare out of memory error was encountered which prevented removal of
583  * the reserve map region for a page.  The huge page itself was freed
584  * and removed from the page cache.  This routine will adjust the subpool
585  * usage count, and the global reserve count if needed.  By incrementing
586  * these counts, the reserve map entry which could not be deleted will
587  * appear as a "reserved" entry instead of simply dangling with incorrect
588  * counts.
589  */
590 void hugetlb_fix_reserve_counts(struct inode *inode)
591 {
592 	struct hugepage_subpool *spool = subpool_inode(inode);
593 	long rsv_adjust;
594 	bool reserved = false;
595 
596 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
597 	if (rsv_adjust > 0) {
598 		struct hstate *h = hstate_inode(inode);
599 
600 		if (!hugetlb_acct_memory(h, 1))
601 			reserved = true;
602 	} else if (!rsv_adjust) {
603 		reserved = true;
604 	}
605 
606 	if (!reserved)
607 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
608 }
609 
610 /*
611  * Count and return the number of huge pages in the reserve map
612  * that intersect with the range [f, t).
613  */
614 static long region_count(struct resv_map *resv, long f, long t)
615 {
616 	struct list_head *head = &resv->regions;
617 	struct file_region *rg;
618 	long chg = 0;
619 
620 	spin_lock(&resv->lock);
621 	/* Locate each segment we overlap with, and count that overlap. */
622 	list_for_each_entry(rg, head, link) {
623 		long seg_from;
624 		long seg_to;
625 
626 		if (rg->to <= f)
627 			continue;
628 		if (rg->from >= t)
629 			break;
630 
631 		seg_from = max(rg->from, f);
632 		seg_to = min(rg->to, t);
633 
634 		chg += seg_to - seg_from;
635 	}
636 	spin_unlock(&resv->lock);
637 
638 	return chg;
639 }
640 
641 /*
642  * Convert the address within this vma to the page offset within
643  * the mapping, in pagecache page units; huge pages here.
644  */
645 static pgoff_t vma_hugecache_offset(struct hstate *h,
646 			struct vm_area_struct *vma, unsigned long address)
647 {
648 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
649 			(vma->vm_pgoff >> huge_page_order(h));
650 }
651 
652 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
653 				     unsigned long address)
654 {
655 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
656 }
657 EXPORT_SYMBOL_GPL(linear_hugepage_index);
658 
659 /*
660  * Return the size of the pages allocated when backing a VMA. In the majority
661  * of cases this will be the same size as that used by the page table entries.
662  */
663 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
664 {
665 	if (vma->vm_ops && vma->vm_ops->pagesize)
666 		return vma->vm_ops->pagesize(vma);
667 	return PAGE_SIZE;
668 }
669 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
670 
671 /*
672  * Return the page size being used by the MMU to back a VMA. In the majority
673  * of cases, the page size used by the kernel matches the MMU size. On
674  * architectures where it differs, an architecture-specific 'strong'
675  * version of this symbol is required.
676  */
677 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
678 {
679 	return vma_kernel_pagesize(vma);
680 }
681 
682 /*
683  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
684  * bits of the reservation map pointer, which are always clear due to
685  * alignment.
686  */
687 #define HPAGE_RESV_OWNER    (1UL << 0)
688 #define HPAGE_RESV_UNMAPPED (1UL << 1)
689 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
690 
691 /*
692  * These helpers are used to track how many pages are reserved for
693  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
694  * is guaranteed to have its future faults succeed.
695  *
696  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
697  * the reserve counters are updated with the hugetlb_lock held. It is safe
698  * to reset the VMA at fork() time as it is not in use yet and there is no
699  * chance of the global counters getting corrupted as a result of the values.
700  *
701  * The private mapping reservation is represented in a subtly different
702  * manner to a shared mapping.  A shared mapping has a region map associated
703  * with the underlying file; this region map represents the backing file
704  * pages which have ever had a reservation assigned, and this persists even
705  * after the page is instantiated.  A private mapping has a region map
706  * associated with the original mmap which is attached to all VMAs which
707  * reference it; this region map represents those offsets which have consumed
708  * a reservation, i.e. where pages have been instantiated.
709  */
710 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
711 {
712 	return (unsigned long)vma->vm_private_data;
713 }
714 
715 static void set_vma_private_data(struct vm_area_struct *vma,
716 							unsigned long value)
717 {
718 	vma->vm_private_data = (void *)value;
719 }
720 
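/*
 * Allocate and initialize a reservation map, pre-loading its region cache
 * with a single descriptor so that a later region_add() has an entry
 * available even if the map was modified between region_chg() and
 * region_add().
 */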
721 struct resv_map *resv_map_alloc(void)
722 {
723 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
724 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
725 
726 	if (!resv_map || !rg) {
727 		kfree(resv_map);
728 		kfree(rg);
729 		return NULL;
730 	}
731 
732 	kref_init(&resv_map->refs);
733 	spin_lock_init(&resv_map->lock);
734 	INIT_LIST_HEAD(&resv_map->regions);
735 
736 	resv_map->adds_in_progress = 0;
737 
738 	INIT_LIST_HEAD(&resv_map->region_cache);
739 	list_add(&rg->link, &resv_map->region_cache);
740 	resv_map->region_cache_count = 1;
741 
742 	return resv_map;
743 }
744 
745 void resv_map_release(struct kref *ref)
746 {
747 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
748 	struct list_head *head = &resv_map->region_cache;
749 	struct file_region *rg, *trg;
750 
751 	/* Clear out any active regions before we release the map. */
752 	region_del(resv_map, 0, LONG_MAX);
753 
754 	/* ... and any entries left in the cache */
755 	list_for_each_entry_safe(rg, trg, head, link) {
756 		list_del(&rg->link);
757 		kfree(rg);
758 	}
759 
760 	VM_BUG_ON(resv_map->adds_in_progress);
761 
762 	kfree(resv_map);
763 }
764 
765 static inline struct resv_map *inode_resv_map(struct inode *inode)
766 {
767 	/*
768 	 * At inode evict time, i_mapping may not point to the original
769 	 * address space within the inode.  This original address space
770 	 * contains the pointer to the resv_map.  So, always use the
771 	 * address space embedded within the inode.
772 	 * The VERY common case is inode->mapping == &inode->i_data but,
773 	 * this may not be true for device special inodes.
774 	 */
775 	return (struct resv_map *)(&inode->i_data)->private_data;
776 }
777 
778 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
779 {
780 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
781 	if (vma->vm_flags & VM_MAYSHARE) {
782 		struct address_space *mapping = vma->vm_file->f_mapping;
783 		struct inode *inode = mapping->host;
784 
785 		return inode_resv_map(inode);
786 
787 	} else {
788 		return (struct resv_map *)(get_vma_private_data(vma) &
789 							~HPAGE_RESV_MASK);
790 	}
791 }
792 
793 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
794 {
795 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
796 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
797 
798 	set_vma_private_data(vma, (get_vma_private_data(vma) &
799 				HPAGE_RESV_MASK) | (unsigned long)map);
800 }
801 
802 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
803 {
804 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
805 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
806 
807 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
808 }
809 
810 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
811 {
812 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
813 
814 	return (get_vma_private_data(vma) & flag) != 0;
815 }
816 
817 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
818 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
819 {
820 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
821 	if (!(vma->vm_flags & VM_MAYSHARE))
822 		vma->vm_private_data = (void *)0;
823 }
824 
825 /* Returns true if the VMA has associated reserve pages */
826 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
827 {
828 	if (vma->vm_flags & VM_NORESERVE) {
829 		/*
830 		 * This address is already reserved by another process (chg == 0),
831 		 * so, we should decrement reserved count. Without decrementing,
832 		 * reserve count remains after releasing inode, because this
833 		 * allocated page will go into page cache and is regarded as
834 		 * coming from reserved pool in releasing step.  Currently, we
835 		 * don't have any other solution to deal with this situation
836 		 * properly, so add work-around here.
837 		 */
838 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
839 			return true;
840 		else
841 			return false;
842 	}
843 
844 	/* Shared mappings always use reserves */
845 	if (vma->vm_flags & VM_MAYSHARE) {
846 		/*
847 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
848 		 * be a region map for all pages.  The only situation where
849 		 * there is no region map is if a hole was punched via
850 		 * fallocate.  In this case, there really are no reserves to
851 		 * use.  This situation is indicated if chg != 0.
852 		 */
853 		if (chg)
854 			return false;
855 		else
856 			return true;
857 	}
858 
859 	/*
860 	 * Only the process that called mmap() has reserves for
861 	 * private mappings.
862 	 */
863 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
864 		/*
865 		 * Like the shared case above, a hole punch or truncate
866 		 * could have been performed on the private mapping.
867 		 * Examine the value of chg to determine if reserves
868 		 * actually exist or were previously consumed.
869 		 * Very Subtle - The value of chg comes from a previous
870 		 * call to vma_needs_reserves().  The reserve map for
871 		 * private mappings has different (opposite) semantics
872 		 * than that of shared mappings.  vma_needs_reserves()
873 		 * has already taken this difference in semantics into
874 		 * account.  Therefore, the meaning of chg is the same
875 		 * as in the shared case above.  Code could easily be
876 		 * combined, but keeping it separate draws attention to
877 		 * subtle differences.
878 		 */
879 		if (chg)
880 			return false;
881 		else
882 			return true;
883 	}
884 
885 	return false;
886 }
887 
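/*
 * Place a huge page on its node's free list, update the free-page counters
 * and mark it as freed.  Called with hugetlb_lock held.
 */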
888 static void enqueue_huge_page(struct hstate *h, struct page *page)
889 {
890 	int nid = page_to_nid(page);
891 	list_move(&page->lru, &h->hugepage_freelists[nid]);
892 	h->free_huge_pages++;
893 	h->free_huge_pages_node[nid]++;
894 	SetPageHugeFreed(page);
895 }
896 
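/*
 * Remove a free, non-HWPoisoned huge page from the given node's free list
 * and move it to the active list.  Returns NULL if the node has no usable
 * free huge pages.  Called with hugetlb_lock held.
 */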
897 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
898 {
899 	struct page *page;
900 
901 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
902 		if (!PageHWPoison(page))
903 			break;
904 	/*
905 	 * If no usable (non-HWPoisoned) free hugepage was found on the list,
906 	 * the allocation fails.
907 	 */
908 	if (&h->hugepage_freelists[nid] == &page->lru)
909 		return NULL;
910 	list_move(&page->lru, &h->hugepage_activelist);
911 	set_page_refcounted(page);
912 	ClearPageHugeFreed(page);
913 	h->free_huge_pages--;
914 	h->free_huge_pages_node[nid]--;
915 	return page;
916 }
917 
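/*
 * Walk the zonelist for the preferred node and dequeue a free huge page
 * from the first allowed node that has one, honouring cpuset and nodemask
 * constraints.
 */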
918 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
919 		nodemask_t *nmask)
920 {
921 	unsigned int cpuset_mems_cookie;
922 	struct zonelist *zonelist;
923 	struct zone *zone;
924 	struct zoneref *z;
925 	int node = NUMA_NO_NODE;
926 
927 	zonelist = node_zonelist(nid, gfp_mask);
928 
929 retry_cpuset:
930 	cpuset_mems_cookie = read_mems_allowed_begin();
931 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
932 		struct page *page;
933 
934 		if (!cpuset_zone_allowed(zone, gfp_mask))
935 			continue;
936 		/*
937 		 * no need to ask again on the same node. Pool is node rather than
938 		 * zone aware
939 		 */
940 		if (zone_to_nid(zone) == node)
941 			continue;
942 		node = zone_to_nid(zone);
943 
944 		page = dequeue_huge_page_node_exact(h, node);
945 		if (page)
946 			return page;
947 	}
948 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
949 		goto retry_cpuset;
950 
951 	return NULL;
952 }
953 
954 /* Movability of hugepages depends on migration support. */
955 static inline gfp_t htlb_alloc_mask(struct hstate *h)
956 {
957 	if (hugepage_movable_supported(h))
958 		return GFP_HIGHUSER_MOVABLE;
959 	else
960 		return GFP_HIGHUSER;
961 }
962 
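/*
 * Dequeue a free huge page to satisfy a fault in the given VMA, using the
 * task's NUMA mempolicy to select nodes.  When the VMA holds a reservation
 * and avoid_reserve is not set, the page is marked as consuming a reserve
 * and resv_huge_pages is decremented.
 */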
963 static struct page *dequeue_huge_page_vma(struct hstate *h,
964 				struct vm_area_struct *vma,
965 				unsigned long address, int avoid_reserve,
966 				long chg)
967 {
968 	struct page *page;
969 	struct mempolicy *mpol;
970 	gfp_t gfp_mask;
971 	nodemask_t *nodemask;
972 	int nid;
973 
974 	/*
975 	 * A child process with MAP_PRIVATE mappings created by its parent
976 	 * has no page reserves. This check ensures that reservations are
977 	 * not "stolen". The child may still get SIGKILLed
978 	 */
979 	if (!vma_has_reserves(vma, chg) &&
980 			h->free_huge_pages - h->resv_huge_pages == 0)
981 		goto err;
982 
983 	/* If reserves cannot be used, ensure enough pages are in the pool */
984 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
985 		goto err;
986 
987 	gfp_mask = htlb_alloc_mask(h);
988 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
989 	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
990 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
991 		SetPagePrivate(page);
992 		h->resv_huge_pages--;
993 	}
994 
995 	mpol_cond_put(mpol);
996 	return page;
997 
998 err:
999 	return NULL;
1000 }
1001 
1002 /*
1003  * common helper functions for hstate_next_node_to_{alloc|free}.
1004  * We may have allocated or freed a huge page based on a different
1005  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1006  * be outside of *nodes_allowed.  Ensure that we use an allowed
1007  * node for alloc or free.
1008  */
1009 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1010 {
1011 	nid = next_node_in(nid, *nodes_allowed);
1012 	VM_BUG_ON(nid >= MAX_NUMNODES);
1013 
1014 	return nid;
1015 }
1016 
1017 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1018 {
1019 	if (!node_isset(nid, *nodes_allowed))
1020 		nid = next_node_allowed(nid, nodes_allowed);
1021 	return nid;
1022 }
1023 
1024 /*
1025  * returns the previously saved node ["this node"] from which to
1026  * allocate a persistent huge page for the pool and advance the
1027  * next node from which to allocate, handling wrap at end of node
1028  * mask.
1029  */
1030 static int hstate_next_node_to_alloc(struct hstate *h,
1031 					nodemask_t *nodes_allowed)
1032 {
1033 	int nid;
1034 
1035 	VM_BUG_ON(!nodes_allowed);
1036 
1037 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1038 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1039 
1040 	return nid;
1041 }
1042 
1043 /*
1044  * helper for free_pool_huge_page() - return the previously saved
1045  * node ["this node"] from which to free a huge page.  Advance the
1046  * next node id whether or not we find a free huge page to free so
1047  * that the next attempt to free addresses the next node.
1048  */
1049 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1050 {
1051 	int nid;
1052 
1053 	VM_BUG_ON(!nodes_allowed);
1054 
1055 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1056 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1057 
1058 	return nid;
1059 }
1060 
1061 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
1062 	for (nr_nodes = nodes_weight(*mask);				\
1063 		nr_nodes > 0 &&						\
1064 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
1065 		nr_nodes--)
1066 
1067 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
1068 	for (nr_nodes = nodes_weight(*mask);				\
1069 		nr_nodes > 0 &&						\
1070 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
1071 		nr_nodes--)
1072 
1073 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1074 static void destroy_compound_gigantic_page(struct page *page,
1075 					unsigned int order)
1076 {
1077 	int i;
1078 	int nr_pages = 1 << order;
1079 	struct page *p = page + 1;
1080 
1081 	atomic_set(compound_mapcount_ptr(page), 0);
1082 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1083 		clear_compound_head(p);
1084 		set_page_refcounted(p);
1085 	}
1086 
1087 	set_compound_order(page, 0);
1088 	__ClearPageHead(page);
1089 }
1090 
1091 static void free_gigantic_page(struct page *page, unsigned int order)
1092 {
1093 	free_contig_range(page_to_pfn(page), 1 << order);
1094 }
1095 
1096 #ifdef CONFIG_CONTIG_ALLOC
1097 static int __alloc_gigantic_page(unsigned long start_pfn,
1098 				unsigned long nr_pages, gfp_t gfp_mask)
1099 {
1100 	unsigned long end_pfn = start_pfn + nr_pages;
1101 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1102 				  gfp_mask);
1103 }
1104 
1105 static bool pfn_range_valid_gigantic(struct zone *z,
1106 			unsigned long start_pfn, unsigned long nr_pages)
1107 {
1108 	unsigned long i, end_pfn = start_pfn + nr_pages;
1109 	struct page *page;
1110 
1111 	for (i = start_pfn; i < end_pfn; i++) {
1112 		page = pfn_to_online_page(i);
1113 		if (!page)
1114 			return false;
1115 
1116 		if (page_zone(page) != z)
1117 			return false;
1118 
1119 		if (PageReserved(page))
1120 			return false;
1121 
1122 		if (page_count(page) > 0)
1123 			return false;
1124 
1125 		if (PageHuge(page))
1126 			return false;
1127 	}
1128 
1129 	return true;
1130 }
1131 
1132 static bool zone_spans_last_pfn(const struct zone *zone,
1133 			unsigned long start_pfn, unsigned long nr_pages)
1134 {
1135 	unsigned long last_pfn = start_pfn + nr_pages - 1;
1136 	return zone_spans_pfn(zone, last_pfn);
1137 }
1138 
1139 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1140 		int nid, nodemask_t *nodemask)
1141 {
1142 	unsigned int order = huge_page_order(h);
1143 	unsigned long nr_pages = 1 << order;
1144 	unsigned long ret, pfn, flags;
1145 	struct zonelist *zonelist;
1146 	struct zone *zone;
1147 	struct zoneref *z;
1148 
1149 	zonelist = node_zonelist(nid, gfp_mask);
1150 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1151 		spin_lock_irqsave(&zone->lock, flags);
1152 
1153 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1154 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1155 			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1156 				/*
1157 				 * We release the zone lock here because
1158 				 * alloc_contig_range() will also lock the zone
1159 				 * at some point. If there's an allocation
1160 				 * spinning on this lock, it may win the race
1161 				 * and cause alloc_contig_range() to fail...
1162 				 */
1163 				spin_unlock_irqrestore(&zone->lock, flags);
1164 				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1165 				if (!ret)
1166 					return pfn_to_page(pfn);
1167 				spin_lock_irqsave(&zone->lock, flags);
1168 			}
1169 			pfn += nr_pages;
1170 		}
1171 
1172 		spin_unlock_irqrestore(&zone->lock, flags);
1173 	}
1174 
1175 	return NULL;
1176 }
1177 
1178 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1179 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1180 #else /* !CONFIG_CONTIG_ALLOC */
1181 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1182 					int nid, nodemask_t *nodemask)
1183 {
1184 	return NULL;
1185 }
1186 #endif /* CONFIG_CONTIG_ALLOC */
1187 
1188 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1189 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1190 					int nid, nodemask_t *nodemask)
1191 {
1192 	return NULL;
1193 }
1194 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1195 static inline void destroy_compound_gigantic_page(struct page *page,
1196 						unsigned int order) { }
1197 #endif
1198 
1199 static void update_and_free_page(struct hstate *h, struct page *page)
1200 {
1201 	int i;
1202 	struct page *subpage = page;
1203 
1204 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1205 		return;
1206 
1207 	h->nr_huge_pages--;
1208 	h->nr_huge_pages_node[page_to_nid(page)]--;
1209 	for (i = 0; i < pages_per_huge_page(h);
1210 	     i++, subpage = mem_map_next(subpage, page, i)) {
1211 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1212 				1 << PG_referenced | 1 << PG_dirty |
1213 				1 << PG_active | 1 << PG_private |
1214 				1 << PG_writeback);
1215 	}
1216 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1217 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1218 	set_page_refcounted(page);
1219 	if (hstate_is_gigantic(h)) {
1220 		destroy_compound_gigantic_page(page, huge_page_order(h));
1221 		free_gigantic_page(page, huge_page_order(h));
1222 	} else {
1223 		__free_pages(page, huge_page_order(h));
1224 	}
1225 }
1226 
1227 struct hstate *size_to_hstate(unsigned long size)
1228 {
1229 	struct hstate *h;
1230 
1231 	for_each_hstate(h) {
1232 		if (huge_page_size(h) == size)
1233 			return h;
1234 	}
1235 	return NULL;
1236 }
1237 
1238 /*
1239  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1240  * to hstate->hugepage_activelist.)
1241  *
1242  * This function can be called for tail pages, but never returns true for them.
1243  */
1244 bool page_huge_active(struct page *page)
1245 {
1246 	return PageHeadHuge(page) && PagePrivate(&page[1]);
1247 }
1248 
1249 /* never called for tail page */
1250 void set_page_huge_active(struct page *page)
1251 {
1252 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1253 	SetPagePrivate(&page[1]);
1254 }
1255 
1256 static void clear_page_huge_active(struct page *page)
1257 {
1258 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1259 	ClearPagePrivate(&page[1]);
1260 }
1261 
1262 /*
1263  * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1264  * code
1265  */
1266 static inline bool PageHugeTemporary(struct page *page)
1267 {
1268 	if (!PageHuge(page))
1269 		return false;
1270 
1271 	return (unsigned long)page[2].mapping == -1U;
1272 }
1273 
1274 static inline void SetPageHugeTemporary(struct page *page)
1275 {
1276 	page[2].mapping = (void *)-1U;
1277 }
1278 
1279 static inline void ClearPageHugeTemporary(struct page *page)
1280 {
1281 	page[2].mapping = NULL;
1282 }
1283 
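/*
 * Actually free a huge page: restore any reservation it consumed, uncharge
 * the hugetlb cgroup, and either return the page to the free lists or, for
 * surplus and temporary pages, release it back to the page allocator.
 */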
1284 static void __free_huge_page(struct page *page)
1285 {
1286 	/*
1287 	 * Can't pass hstate in here because it is called from the
1288 	 * compound page destructor.
1289 	 */
1290 	struct hstate *h = page_hstate(page);
1291 	int nid = page_to_nid(page);
1292 	struct hugepage_subpool *spool =
1293 		(struct hugepage_subpool *)page_private(page);
1294 	bool restore_reserve;
1295 
1296 	VM_BUG_ON_PAGE(page_count(page), page);
1297 	VM_BUG_ON_PAGE(page_mapcount(page), page);
1298 
1299 	set_page_private(page, 0);
1300 	page->mapping = NULL;
1301 	restore_reserve = PagePrivate(page);
1302 	ClearPagePrivate(page);
1303 
1304 	/*
1305 	 * If PagePrivate() was set on page, page allocation consumed a
1306 	 * reservation.  If the page was associated with a subpool, there
1307 	 * would have been a page reserved in the subpool before allocation
1308 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1309 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1310 	 * remove the reserved page from the subpool.
1311 	 */
1312 	if (!restore_reserve) {
1313 		/*
1314 		 * A return code of zero implies that the subpool will be
1315 		 * under its minimum size if the reservation is not restored
1316 		 * after page is free.  Therefore, force restore_reserve
1317 		 * operation.
1318 		 */
1319 		if (hugepage_subpool_put_pages(spool, 1) == 0)
1320 			restore_reserve = true;
1321 	}
1322 
1323 	spin_lock(&hugetlb_lock);
1324 	clear_page_huge_active(page);
1325 	hugetlb_cgroup_uncharge_page(hstate_index(h),
1326 				     pages_per_huge_page(h), page);
1327 	if (restore_reserve)
1328 		h->resv_huge_pages++;
1329 
1330 	if (PageHugeTemporary(page)) {
1331 		list_del(&page->lru);
1332 		ClearPageHugeTemporary(page);
1333 		update_and_free_page(h, page);
1334 	} else if (h->surplus_huge_pages_node[nid]) {
1335 		/* remove the page from active list */
1336 		list_del(&page->lru);
1337 		update_and_free_page(h, page);
1338 		h->surplus_huge_pages--;
1339 		h->surplus_huge_pages_node[nid]--;
1340 	} else {
1341 		arch_clear_hugepage_flags(page);
1342 		enqueue_huge_page(h, page);
1343 	}
1344 	spin_unlock(&hugetlb_lock);
1345 }
1346 
1347 /*
1348  * As free_huge_page() can be called from a non-task context, we have
1349  * to defer the actual freeing in a workqueue to prevent potential
1350  * hugetlb_lock deadlock.
1351  *
1352  * free_hpage_workfn() locklessly retrieves the linked list of pages to
1353  * be freed and frees them one-by-one. As the page->mapping pointer is
1354  * going to be cleared in __free_huge_page() anyway, it is reused as the
1355  * llist_node structure of a lockless linked list of huge pages to be freed.
1356  */
1357 static LLIST_HEAD(hpage_freelist);
1358 
1359 static void free_hpage_workfn(struct work_struct *work)
1360 {
1361 	struct llist_node *node;
1362 	struct page *page;
1363 
1364 	node = llist_del_all(&hpage_freelist);
1365 
1366 	while (node) {
1367 		page = container_of((struct address_space **)node,
1368 				     struct page, mapping);
1369 		node = node->next;
1370 		__free_huge_page(page);
1371 	}
1372 }
1373 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1374 
1375 void free_huge_page(struct page *page)
1376 {
1377 	/*
1378 	 * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
1379 	 */
1380 	if (!in_task()) {
1381 		/*
1382 		 * Only call schedule_work() if hpage_freelist is previously
1383 		 * empty. Otherwise, schedule_work() had been called but the
1384 		 * workfn hasn't retrieved the list yet.
1385 		 */
1386 		if (llist_add((struct llist_node *)&page->mapping,
1387 			      &hpage_freelist))
1388 			schedule_work(&free_hpage_work);
1389 		return;
1390 	}
1391 
1392 	__free_huge_page(page);
1393 }
1394 
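/*
 * Initialize a freshly allocated compound page as a huge page: set the
 * hugetlb destructor, clear the "freed" flag and account the page in the
 * global and per-node counters.
 */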
1395 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1396 {
1397 	INIT_LIST_HEAD(&page->lru);
1398 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1399 	spin_lock(&hugetlb_lock);
1400 	set_hugetlb_cgroup(page, NULL);
1401 	h->nr_huge_pages++;
1402 	h->nr_huge_pages_node[nid]++;
1403 	ClearPageHugeFreed(page);
1404 	spin_unlock(&hugetlb_lock);
1405 }
1406 
1407 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1408 {
1409 	int i;
1410 	int nr_pages = 1 << order;
1411 	struct page *p = page + 1;
1412 
1413 	/* we rely on prep_new_huge_page to set the destructor */
1414 	set_compound_order(page, order);
1415 	__ClearPageReserved(page);
1416 	__SetPageHead(page);
1417 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1418 		/*
1419 		 * For gigantic hugepages allocated through bootmem at
1420 		 * boot, it's safer to be consistent with the not-gigantic
1421 		 * hugepages and clear the PG_reserved bit from all tail pages
1422 		 * too.  Otherwise drivers using get_user_pages() to access tail
1423 		 * pages may get the reference counting wrong if they see
1424 		 * PG_reserved set on a tail page (despite the head page not
1425 		 * having PG_reserved set).  Enforcing this consistency between
1426 		 * head and tail pages allows drivers to optimize away a check
1427 		 * on the head page when they need to know if put_page() is needed
1428 		 * after get_user_pages().
1429 		 */
1430 		__ClearPageReserved(p);
1431 		set_page_count(p, 0);
1432 		set_compound_head(p, page);
1433 	}
1434 	atomic_set(compound_mapcount_ptr(page), -1);
1435 }
1436 
1437 /*
1438  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1439  * transparent huge pages.  See the PageTransHuge() documentation for more
1440  * details.
1441  */
1442 int PageHuge(struct page *page)
1443 {
1444 	if (!PageCompound(page))
1445 		return 0;
1446 
1447 	page = compound_head(page);
1448 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1449 }
1450 EXPORT_SYMBOL_GPL(PageHuge);
1451 
1452 /*
1453  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1454  * normal or transparent huge pages.
1455  */
1456 int PageHeadHuge(struct page *page_head)
1457 {
1458 	if (!PageHead(page_head))
1459 		return 0;
1460 
1461 	return get_compound_page_dtor(page_head) == free_huge_page;
1462 }
1463 
1464 pgoff_t hugetlb_basepage_index(struct page *page)
1465 {
1466 	struct page *page_head = compound_head(page);
1467 	pgoff_t index = page_index(page_head);
1468 	unsigned long compound_idx;
1469 
1470 	if (compound_order(page_head) >= MAX_ORDER)
1471 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1472 	else
1473 		compound_idx = page - page_head;
1474 
1475 	return (index << compound_order(page_head)) + compound_idx;
1476 }
1477 
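/*
 * Allocate a huge page of the hstate's order directly from the buddy
 * allocator for the given node/nodemask.  The comment below describes how
 * node_alloc_noretry limits repeated costly attempts on nodes where
 * allocation recently failed.
 */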
1478 static struct page *alloc_buddy_huge_page(struct hstate *h,
1479 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1480 		nodemask_t *node_alloc_noretry)
1481 {
1482 	int order = huge_page_order(h);
1483 	struct page *page;
1484 	bool alloc_try_hard = true;
1485 
1486 	/*
1487 	 * By default we always try hard to allocate the page with
1488 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1489 	 * a loop (to adjust global huge page counts) and previous allocation
1490 	 * failed, do not continue to try hard on the same node.  Use the
1491 	 * node_alloc_noretry bitmap to manage this state information.
1492 	 */
1493 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1494 		alloc_try_hard = false;
1495 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1496 	if (alloc_try_hard)
1497 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1498 	if (nid == NUMA_NO_NODE)
1499 		nid = numa_mem_id();
1500 	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1501 	if (page)
1502 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1503 	else
1504 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1505 
1506 	/*
1507 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
1508 	 * indicates an overall state change.  Clear bit so that we resume
1509 	 * normal 'try hard' allocations.
1510 	 */
1511 	if (node_alloc_noretry && page && !alloc_try_hard)
1512 		node_clear(nid, *node_alloc_noretry);
1513 
1514 	/*
1515 	 * If we tried hard to get a page but failed, set bit so that
1516 	 * subsequent attempts will not try as hard until there is an
1517 	 * overall state change.
1518 	 */
1519 	if (node_alloc_noretry && !page && alloc_try_hard)
1520 		node_set(nid, *node_alloc_noretry);
1521 
1522 	return page;
1523 }
1524 
1525 /*
1526  * Common helper to allocate a fresh hugetlb page. All specific allocators
1527  * should use this function to get new hugetlb pages
1528  */
1529 static struct page *alloc_fresh_huge_page(struct hstate *h,
1530 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1531 		nodemask_t *node_alloc_noretry)
1532 {
1533 	struct page *page;
1534 
1535 	if (hstate_is_gigantic(h))
1536 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1537 	else
1538 		page = alloc_buddy_huge_page(h, gfp_mask,
1539 				nid, nmask, node_alloc_noretry);
1540 	if (!page)
1541 		return NULL;
1542 
1543 	if (hstate_is_gigantic(h))
1544 		prep_compound_gigantic_page(page, huge_page_order(h));
1545 	prep_new_huge_page(h, page, page_to_nid(page));
1546 
1547 	return page;
1548 }
1549 
1550 /*
1551  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1552  * manner.
1553  */
1554 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1555 				nodemask_t *node_alloc_noretry)
1556 {
1557 	struct page *page;
1558 	int nr_nodes, node;
1559 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1560 
1561 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1562 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1563 						node_alloc_noretry);
1564 		if (page)
1565 			break;
1566 	}
1567 
1568 	if (!page)
1569 		return 0;
1570 
1571 	put_page(page); /* free it into the hugepage allocator */
1572 
1573 	return 1;
1574 }
1575 
1576 /*
1577  * Free huge page from pool from next node to free.
1578  * Attempt to keep persistent huge pages more or less
1579  * balanced over allowed nodes.
1580  * Called with hugetlb_lock locked.
1581  */
1582 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1583 							 bool acct_surplus)
1584 {
1585 	int nr_nodes, node;
1586 	int ret = 0;
1587 
1588 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1589 		/*
1590 		 * If we're returning unused surplus pages, only examine
1591 		 * nodes with surplus pages.
1592 		 */
1593 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1594 		    !list_empty(&h->hugepage_freelists[node])) {
1595 			struct page *page =
1596 				list_entry(h->hugepage_freelists[node].next,
1597 					  struct page, lru);
1598 			list_del(&page->lru);
1599 			h->free_huge_pages--;
1600 			h->free_huge_pages_node[node]--;
1601 			if (acct_surplus) {
1602 				h->surplus_huge_pages--;
1603 				h->surplus_huge_pages_node[node]--;
1604 			}
1605 			update_and_free_page(h, page);
1606 			ret = 1;
1607 			break;
1608 		}
1609 	}
1610 
1611 	return ret;
1612 }
1613 
1614 /*
1615  * Dissolve a given free hugepage into free buddy pages. This function does
1616  * nothing for in-use hugepages and non-hugepages.
1617  * This function returns values like below:
1618  *
1619  *  -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
1620  *          (allocated or reserved.)
1621  *       0: successfully dissolved free hugepages or the page is not a
1622  *          hugepage (considered as already dissolved)
1623  */
1624 int dissolve_free_huge_page(struct page *page)
1625 {
1626 	int rc = -EBUSY;
1627 
1628 retry:
1629 	/* Avoid disrupting the normal path by needlessly holding hugetlb_lock */
1630 	if (!PageHuge(page))
1631 		return 0;
1632 
1633 	spin_lock(&hugetlb_lock);
1634 	if (!PageHuge(page)) {
1635 		rc = 0;
1636 		goto out;
1637 	}
1638 
1639 	if (!page_count(page)) {
1640 		struct page *head = compound_head(page);
1641 		struct hstate *h = page_hstate(head);
1642 		int nid = page_to_nid(head);
1643 		if (h->free_huge_pages - h->resv_huge_pages == 0)
1644 			goto out;
1645 
1646 		/*
1647 		 * We should make sure that the page is already on the free list
1648 		 * when it is dissolved.
1649 		 */
1650 		if (unlikely(!PageHugeFreed(head))) {
1651 			spin_unlock(&hugetlb_lock);
1652 			cond_resched();
1653 
1654 			/*
1655 			 * Theoretically, we should return -EBUSY when we
1656 			 * encounter this race. In fact, we have a chance
1657 			 * to successfully dissolve the page if we retry,
1658 			 * because the race window is quite small. Seizing
1659 			 * this opportunity improves the success rate of
1660 			 * dissolving the page.
1661 			 */
1662 			goto retry;
1663 		}
1664 
1665 		/*
1666 		 * Move PageHWPoison flag from head page to the raw error page,
1667 		 * which makes all subpages other than the error page reusable.
1668 		 */
1669 		if (PageHWPoison(head) && page != head) {
1670 			SetPageHWPoison(page);
1671 			ClearPageHWPoison(head);
1672 		}
1673 		list_del(&head->lru);
1674 		h->free_huge_pages--;
1675 		h->free_huge_pages_node[nid]--;
1676 		h->max_huge_pages--;
1677 		update_and_free_page(h, head);
1678 		rc = 0;
1679 	}
1680 out:
1681 	spin_unlock(&hugetlb_lock);
1682 	return rc;
1683 }
1684 
1685 /*
1686  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1687  * make specified memory blocks removable from the system.
1688  * Note that this will dissolve a free gigantic hugepage completely, if any
1689  * part of it lies within the given range.
1690  * Also note that if dissolve_free_huge_page() returns with an error, all
1691  * free hugepages that were dissolved before that error are lost.
1692  */
1693 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1694 {
1695 	unsigned long pfn;
1696 	struct page *page;
1697 	int rc = 0;
1698 
1699 	if (!hugepages_supported())
1700 		return rc;
1701 
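	/*
	 * Step the scan by the smallest huge page order so that every
	 * potential hugepage head in [start_pfn, end_pfn) is examined.
	 */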
1702 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1703 		page = pfn_to_page(pfn);
1704 		rc = dissolve_free_huge_page(page);
1705 		if (rc)
1706 			break;
1707 	}
1708 
1709 	return rc;
1710 }
1711 
1712 /*
1713  * Allocates a fresh surplus page from the page allocator.
1714  */
1715 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1716 		int nid, nodemask_t *nmask)
1717 {
1718 	struct page *page = NULL;
1719 
1720 	if (hstate_is_gigantic(h))
1721 		return NULL;
1722 
1723 	spin_lock(&hugetlb_lock);
1724 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1725 		goto out_unlock;
1726 	spin_unlock(&hugetlb_lock);
1727 
1728 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1729 	if (!page)
1730 		return NULL;
1731 
1732 	spin_lock(&hugetlb_lock);
1733 	/*
1734 	 * We could have raced with the pool size change.
1735 	 * Double check that and simply deallocate the new page
1736 	 * if we would end up overcommitting the surpluses. Abuse a
1737 	 * temporary page to work around the nasty free_huge_page
1738 	 * codeflow.
1739 	 */
1740 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1741 		SetPageHugeTemporary(page);
1742 		spin_unlock(&hugetlb_lock);
1743 		put_page(page);
1744 		return NULL;
1745 	} else {
1746 		h->surplus_huge_pages++;
1747 		h->surplus_huge_pages_node[page_to_nid(page)]++;
1748 	}
1749 
1750 out_unlock:
1751 	spin_unlock(&hugetlb_lock);
1752 
1753 	return page;
1754 }
1755 
1756 struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1757 				     int nid, nodemask_t *nmask)
1758 {
1759 	struct page *page;
1760 
1761 	if (hstate_is_gigantic(h))
1762 		return NULL;
1763 
1764 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1765 	if (!page)
1766 		return NULL;
1767 
1768 	/*
1769 	 * We do not account these pages as surplus because they are only
1770 	 * temporary and will be released properly on the last reference
1771 	 */
1772 	SetPageHugeTemporary(page);
1773 
1774 	return page;
1775 }
1776 
1777 /*
1778  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1779  */
1780 static
1781 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1782 		struct vm_area_struct *vma, unsigned long addr)
1783 {
1784 	struct page *page;
1785 	struct mempolicy *mpol;
1786 	gfp_t gfp_mask = htlb_alloc_mask(h);
1787 	int nid;
1788 	nodemask_t *nodemask;
1789 
1790 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1791 	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1792 	mpol_cond_put(mpol);
1793 
1794 	return page;
1795 }
1796 
1797 /* page migration callback function */
1798 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1799 {
1800 	gfp_t gfp_mask = htlb_alloc_mask(h);
1801 	struct page *page = NULL;
1802 
1803 	if (nid != NUMA_NO_NODE)
1804 		gfp_mask |= __GFP_THISNODE;
1805 
1806 	spin_lock(&hugetlb_lock);
1807 	if (h->free_huge_pages - h->resv_huge_pages > 0)
1808 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1809 	spin_unlock(&hugetlb_lock);
1810 
1811 	if (!page)
1812 		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1813 
1814 	return page;
1815 }
1816 
1817 /* page migration callback function */
1818 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1819 		nodemask_t *nmask)
1820 {
1821 	gfp_t gfp_mask = htlb_alloc_mask(h);
1822 
1823 	spin_lock(&hugetlb_lock);
1824 	if (h->free_huge_pages - h->resv_huge_pages > 0) {
1825 		struct page *page;
1826 
1827 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1828 		if (page) {
1829 			spin_unlock(&hugetlb_lock);
1830 			return page;
1831 		}
1832 	}
1833 	spin_unlock(&hugetlb_lock);
1834 
1835 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1836 }
1837 
1838 /* mempolicy aware migration callback */
1839 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1840 		unsigned long address)
1841 {
1842 	struct mempolicy *mpol;
1843 	nodemask_t *nodemask;
1844 	struct page *page;
1845 	gfp_t gfp_mask;
1846 	int node;
1847 
1848 	gfp_mask = htlb_alloc_mask(h);
1849 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1850 	page = alloc_huge_page_nodemask(h, node, nodemask);
1851 	mpol_cond_put(mpol);
1852 
1853 	return page;
1854 }
1855 
1856 /*
1857  * Increase the hugetlb pool such that it can accommodate a reservation
1858  * of size 'delta'.
1859  */
1860 static int gather_surplus_pages(struct hstate *h, int delta)
1861 {
1862 	struct list_head surplus_list;
1863 	struct page *page, *tmp;
1864 	int ret, i;
1865 	int needed, allocated;
1866 	bool alloc_ok = true;
1867 
1868 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1869 	if (needed <= 0) {
1870 		h->resv_huge_pages += delta;
1871 		return 0;
1872 	}
1873 
1874 	allocated = 0;
1875 	INIT_LIST_HEAD(&surplus_list);
1876 
1877 	ret = -ENOMEM;
1878 retry:
1879 	spin_unlock(&hugetlb_lock);
1880 	for (i = 0; i < needed; i++) {
1881 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1882 				NUMA_NO_NODE, NULL);
1883 		if (!page) {
1884 			alloc_ok = false;
1885 			break;
1886 		}
1887 		list_add(&page->lru, &surplus_list);
1888 		cond_resched();
1889 	}
1890 	allocated += i;
1891 
1892 	/*
1893 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
1894 	 * because either resv_huge_pages or free_huge_pages may have changed.
1895 	 */
1896 	spin_lock(&hugetlb_lock);
1897 	needed = (h->resv_huge_pages + delta) -
1898 			(h->free_huge_pages + allocated);
1899 	if (needed > 0) {
1900 		if (alloc_ok)
1901 			goto retry;
1902 		/*
1903 		 * We were not able to allocate enough pages to
1904 		 * satisfy the entire reservation so we free what
1905 		 * we've allocated so far.
1906 		 */
1907 		goto free;
1908 	}
1909 	/*
1910 	 * The surplus_list now contains _at_least_ the number of extra pages
1911 	 * needed to accommodate the reservation.  Add the appropriate number
1912 	 * of pages to the hugetlb pool and free the extras back to the buddy
1913 	 * allocator.  Commit the entire reservation here to prevent another
1914 	 * process from stealing the pages as they are added to the pool but
1915 	 * before they are reserved.
1916 	 */
1917 	needed += allocated;
1918 	h->resv_huge_pages += delta;
1919 	ret = 0;
1920 
1921 	/* Free the needed pages to the hugetlb pool */
1922 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1923 		if ((--needed) < 0)
1924 			break;
1925 		/*
1926 		 * This page is now managed by the hugetlb allocator and has
1927 		 * no users -- drop the buddy allocator's reference.
1928 		 */
1929 		put_page_testzero(page);
1930 		VM_BUG_ON_PAGE(page_count(page), page);
1931 		enqueue_huge_page(h, page);
1932 	}
1933 free:
1934 	spin_unlock(&hugetlb_lock);
1935 
1936 	/* Free unnecessary surplus pages to the buddy allocator */
1937 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1938 		put_page(page);
1939 	spin_lock(&hugetlb_lock);
1940 
1941 	return ret;
1942 }
1943 
1944 /*
1945  * This routine has two main purposes:
1946  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1947  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1948  *    to the associated reservation map.
1949  * 2) Free any unused surplus pages that may have been allocated to satisfy
1950  *    the reservation.  As many as unused_resv_pages may be freed.
1951  *
1952  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1953  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1954  * we must make sure nobody else can claim pages we are in the process of
1955  * freeing.  Do this by ensuring resv_huge_pages is always greater than the
1956  * number of huge pages we plan to free when dropping the lock.
1957  */
1958 static void return_unused_surplus_pages(struct hstate *h,
1959 					unsigned long unused_resv_pages)
1960 {
1961 	unsigned long nr_pages;
1962 
1963 	/* Cannot return gigantic pages currently */
1964 	if (hstate_is_gigantic(h))
1965 		goto out;
1966 
1967 	/*
1968 	 * Part (or even all) of the reservation could have been backed
1969 	 * by pre-allocated pages. Only free surplus pages.
1970 	 */
1971 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1972 
1973 	/*
1974 	 * We want to release as many surplus pages as possible, spread
1975 	 * evenly across all nodes with memory. Iterate across these nodes
1976 	 * until we can no longer free unreserved surplus pages. This occurs
1977 	 * when the nodes with surplus pages have no free pages.
1978 	 * free_pool_huge_page() will balance the freed pages across the
1979 	 * on-line nodes with memory and will handle the hstate accounting.
1980 	 *
1981 	 * Note that we decrement resv_huge_pages as we free the pages.  If
1982 	 * we drop the lock, resv_huge_pages will still be sufficiently large
1983 	 * to cover subsequent pages we may free.
1984 	 */
1985 	while (nr_pages--) {
1986 		h->resv_huge_pages--;
1987 		unused_resv_pages--;
1988 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1989 			goto out;
1990 		cond_resched_lock(&hugetlb_lock);
1991 	}
1992 
1993 out:
1994 	/* Fully uncommit the reservation */
1995 	h->resv_huge_pages -= unused_resv_pages;
1996 }
1997 
1998 
1999 /*
2000  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2001  * are used by the huge page allocation routines to manage reservations.
2002  *
2003  * vma_needs_reservation is called to determine if the huge page at addr
2004  * within the vma has an associated reservation.  If a reservation is
2005  * needed, the value 1 is returned.  The caller is then responsible for
2006  * managing the global reservation and subpool usage counts.  After
2007  * the huge page has been allocated, vma_commit_reservation is called
2008  * to add the page to the reservation map.  If the page allocation fails,
2009  * the reservation must be ended instead of committed.  vma_end_reservation
2010  * is called in such cases.
2011  *
2012  * In the normal case, vma_commit_reservation returns the same value
2013  * as the preceding vma_needs_reservation call.  The only time this
2014  * is not the case is if a reserve map was changed between calls.  It
2015  * is the responsibility of the caller to notice the difference and
2016  * take appropriate action.
2017  *
2018  * vma_add_reservation is used in error paths where a reservation must
2019  * be restored when a newly allocated huge page must be freed.  It is
2020  * to be called after calling vma_needs_reservation to determine if a
2021  * reservation exists.
2022  */
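/*
 * Illustrative call sequence (a simplified sketch of the flow described
 * above; the real handling, including subpool and cgroup charges, lives
 * in alloc_huge_page() below):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate a huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	vma_commit_reservation(h, vma, addr);
 */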
2023 enum vma_resv_mode {
2024 	VMA_NEEDS_RESV,
2025 	VMA_COMMIT_RESV,
2026 	VMA_END_RESV,
2027 	VMA_ADD_RESV,
2028 };
2029 static long __vma_reservation_common(struct hstate *h,
2030 				struct vm_area_struct *vma, unsigned long addr,
2031 				enum vma_resv_mode mode)
2032 {
2033 	struct resv_map *resv;
2034 	pgoff_t idx;
2035 	long ret;
2036 
2037 	resv = vma_resv_map(vma);
2038 	if (!resv)
2039 		return 1;
2040 
2041 	idx = vma_hugecache_offset(h, vma, addr);
2042 	switch (mode) {
2043 	case VMA_NEEDS_RESV:
2044 		ret = region_chg(resv, idx, idx + 1);
2045 		break;
2046 	case VMA_COMMIT_RESV:
2047 		ret = region_add(resv, idx, idx + 1);
2048 		break;
2049 	case VMA_END_RESV:
2050 		region_abort(resv, idx, idx + 1);
2051 		ret = 0;
2052 		break;
2053 	case VMA_ADD_RESV:
2054 		if (vma->vm_flags & VM_MAYSHARE)
2055 			ret = region_add(resv, idx, idx + 1);
2056 		else {
2057 			region_abort(resv, idx, idx + 1);
2058 			ret = region_del(resv, idx, idx + 1);
2059 		}
2060 		break;
2061 	default:
2062 		BUG();
2063 	}
2064 
2065 	if (vma->vm_flags & VM_MAYSHARE)
2066 		return ret;
2067 	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
2068 		/*
2069 		 * In most cases, reserves always exist for private mappings.
2070 		 * However, a file associated with mapping could have been
2071 		 * hole punched or truncated after reserves were consumed.
2072 		 * In that case, a subsequent fault on such a range will not use reserves.
2073 		 * Subtle - The reserve map for private mappings has the
2074 		 * opposite meaning than that of shared mappings.  If NO
2075 		 * entry is in the reserve map, it means a reservation exists.
2076 		 * If an entry exists in the reserve map, it means the
2077 		 * reservation has already been consumed.  As a result, the
2078 		 * return value of this routine is the opposite of the
2079 		 * value returned from reserve map manipulation routines above.
2080 		 */
2081 		if (ret)
2082 			return 0;
2083 		else
2084 			return 1;
2085 	}
2086 	else
2087 		return ret < 0 ? ret : 0;
2088 }
2089 
2090 static long vma_needs_reservation(struct hstate *h,
2091 			struct vm_area_struct *vma, unsigned long addr)
2092 {
2093 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2094 }
2095 
2096 static long vma_commit_reservation(struct hstate *h,
2097 			struct vm_area_struct *vma, unsigned long addr)
2098 {
2099 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2100 }
2101 
2102 static void vma_end_reservation(struct hstate *h,
2103 			struct vm_area_struct *vma, unsigned long addr)
2104 {
2105 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2106 }
2107 
2108 static long vma_add_reservation(struct hstate *h,
2109 			struct vm_area_struct *vma, unsigned long addr)
2110 {
2111 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2112 }
2113 
2114 /*
2115  * This routine is called to restore a reservation on error paths.  In the
2116  * specific error paths, a huge page was allocated (via alloc_huge_page)
2117  * and is about to be freed.  If a reservation for the page existed,
2118  * alloc_huge_page would have consumed the reservation and set PagePrivate
2119  * in the newly allocated page.  When the page is freed via free_huge_page,
2120  * the global reservation count will be incremented if PagePrivate is set.
2121  * However, free_huge_page can not adjust the reserve map.  Adjust the
2122  * reserve map here to be consistent with global reserve count adjustments
2123  * to be made by free_huge_page.
2124  */
2125 static void restore_reserve_on_error(struct hstate *h,
2126 			struct vm_area_struct *vma, unsigned long address,
2127 			struct page *page)
2128 {
2129 	if (unlikely(PagePrivate(page))) {
2130 		long rc = vma_needs_reservation(h, vma, address);
2131 
2132 		if (unlikely(rc < 0)) {
2133 			/*
2134 			 * Rare out of memory condition in reserve map
2135 			 * manipulation.  Clear PagePrivate so that
2136 			 * global reserve count will not be incremented
2137 			 * by free_huge_page.  This will make it appear
2138 			 * as though the reservation for this page was
2139 			 * consumed.  This may prevent the task from
2140 			 * faulting in the page at a later time.  This
2141 			 * is better than inconsistent global huge page
2142 			 * accounting of reserve counts.
2143 			 */
2144 			ClearPagePrivate(page);
2145 		} else if (rc) {
2146 			rc = vma_add_reservation(h, vma, address);
2147 			if (unlikely(rc < 0))
2148 				/*
2149 				 * See above comment about rare out of
2150 				 * memory condition.
2151 				 */
2152 				ClearPagePrivate(page);
2153 		} else
2154 			vma_end_reservation(h, vma, address);
2155 	}
2156 }
2157 
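/*
 * alloc_huge_page() ties the reservation helpers above together: consult
 * the reserve map, charge the subpool and hugetlb cgroup, then try to
 * dequeue a pre-allocated page and fall back to a surplus allocation from
 * the buddy allocator.  Error paths unwind the subpool and reservation
 * changes made along the way.
 */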
2158 struct page *alloc_huge_page(struct vm_area_struct *vma,
2159 				    unsigned long addr, int avoid_reserve)
2160 {
2161 	struct hugepage_subpool *spool = subpool_vma(vma);
2162 	struct hstate *h = hstate_vma(vma);
2163 	struct page *page;
2164 	long map_chg, map_commit;
2165 	long gbl_chg;
2166 	int ret, idx;
2167 	struct hugetlb_cgroup *h_cg;
2168 
2169 	idx = hstate_index(h);
2170 	/*
2171 	 * Examine the region/reserve map to determine if the process
2172 	 * has a reservation for the page to be allocated.  A return
2173 	 * code of zero indicates a reservation exists (no change).
2174 	 */
2175 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2176 	if (map_chg < 0)
2177 		return ERR_PTR(-ENOMEM);
2178 
2179 	/*
2180 	 * Processes that did not create the mapping will have no
2181 	 * reserves as indicated by the region/reserve map. Check
2182 	 * that the allocation will not exceed the subpool limit.
2183 	 * Allocations for MAP_NORESERVE mappings also need to be
2184 	 * checked against any subpool limit.
2185 	 */
2186 	if (map_chg || avoid_reserve) {
2187 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2188 		if (gbl_chg < 0) {
2189 			vma_end_reservation(h, vma, addr);
2190 			return ERR_PTR(-ENOSPC);
2191 		}
2192 
2193 		/*
2194 		 * Even though there was no reservation in the region/reserve
2195 		 * map, there could be reservations associated with the
2196 		 * subpool that can be used.  This would be indicated if the
2197 		 * return value of hugepage_subpool_get_pages() is zero.
2198 		 * However, if avoid_reserve is specified we still avoid even
2199 		 * the subpool reservations.
2200 		 */
2201 		if (avoid_reserve)
2202 			gbl_chg = 1;
2203 	}
2204 
2205 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2206 	if (ret)
2207 		goto out_subpool_put;
2208 
2209 	spin_lock(&hugetlb_lock);
2210 	/*
2211 	 * gbl_chg is passed to indicate whether or not a page must be taken
2212 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2213 	 * a reservation exists for the allocation.
2214 	 */
2215 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2216 	if (!page) {
2217 		spin_unlock(&hugetlb_lock);
2218 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2219 		if (!page)
2220 			goto out_uncharge_cgroup;
2221 		spin_lock(&hugetlb_lock);
2222 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2223 			SetPagePrivate(page);
2224 			h->resv_huge_pages--;
2225 		}
2226 		list_move(&page->lru, &h->hugepage_activelist);
2227 		/* Fall through */
2228 	}
2229 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2230 	spin_unlock(&hugetlb_lock);
2231 
2232 	set_page_private(page, (unsigned long)spool);
2233 
2234 	map_commit = vma_commit_reservation(h, vma, addr);
2235 	if (unlikely(map_chg > map_commit)) {
2236 		/*
2237 		 * The page was added to the reservation map between
2238 		 * vma_needs_reservation and vma_commit_reservation.
2239 		 * This indicates a race with hugetlb_reserve_pages.
2240 		 * Adjust for the subpool count incremented above AND
2241 		 * in hugetlb_reserve_pages for the same page.  Also,
2242 		 * the reservation count added in hugetlb_reserve_pages
2243 		 * no longer applies.
2244 		 */
2245 		long rsv_adjust;
2246 
2247 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2248 		hugetlb_acct_memory(h, -rsv_adjust);
2249 	}
2250 	return page;
2251 
2252 out_uncharge_cgroup:
2253 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2254 out_subpool_put:
2255 	if (map_chg || avoid_reserve)
2256 		hugepage_subpool_put_pages(spool, 1);
2257 	vma_end_reservation(h, vma, addr);
2258 	return ERR_PTR(-ENOSPC);
2259 }
2260 
2261 int alloc_bootmem_huge_page(struct hstate *h)
2262 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2263 int __alloc_bootmem_huge_page(struct hstate *h)
2264 {
2265 	struct huge_bootmem_page *m;
2266 	int nr_nodes, node;
2267 
2268 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2269 		void *addr;
2270 
2271 		addr = memblock_alloc_try_nid_raw(
2272 				huge_page_size(h), huge_page_size(h),
2273 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2274 		if (addr) {
2275 			/*
2276 			 * Use the beginning of the huge page to store the
2277 			 * huge_bootmem_page struct (until gather_bootmem
2278 			 * puts them into the mem_map).
2279 			 */
2280 			m = addr;
2281 			goto found;
2282 		}
2283 	}
2284 	return 0;
2285 
2286 found:
2287 	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2288 	/* Put them into a private list first because mem_map is not up yet */
2289 	INIT_LIST_HEAD(&m->list);
2290 	list_add(&m->list, &huge_boot_pages);
2291 	m->hstate = h;
2292 	return 1;
2293 }
2294 
2295 static void __init prep_compound_huge_page(struct page *page,
2296 		unsigned int order)
2297 {
2298 	if (unlikely(order > (MAX_ORDER - 1)))
2299 		prep_compound_gigantic_page(page, order);
2300 	else
2301 		prep_compound_page(page, order);
2302 }
2303 
2304 /* Put bootmem huge pages into the standard lists after mem_map is up */
2305 static void __init gather_bootmem_prealloc(void)
2306 {
2307 	struct huge_bootmem_page *m;
2308 
2309 	list_for_each_entry(m, &huge_boot_pages, list) {
2310 		struct page *page = virt_to_page(m);
2311 		struct hstate *h = m->hstate;
2312 
2313 		WARN_ON(page_count(page) != 1);
2314 		prep_compound_huge_page(page, h->order);
2315 		WARN_ON(PageReserved(page));
2316 		prep_new_huge_page(h, page, page_to_nid(page));
2317 		put_page(page); /* free it into the hugepage allocator */
2318 
2319 		/*
2320 		 * If we had gigantic hugepages allocated at boot time, we need
2321 		 * to restore the 'stolen' pages to totalram_pages in order to
2322 		 * fix confusing memory reports from free(1) and other side effects,
2323 		 * side-effects, like CommitLimit going negative.
2324 		 */
2325 		if (hstate_is_gigantic(h))
2326 			adjust_managed_page_count(page, 1 << h->order);
2327 		cond_resched();
2328 	}
2329 }
2330 
2331 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2332 {
2333 	unsigned long i;
2334 	nodemask_t *node_alloc_noretry;
2335 
2336 	if (!hstate_is_gigantic(h)) {
2337 		/*
2338 		 * Bit mask controlling how hard we retry per-node allocations.
2339 		 * Ignore errors as lower level routines can deal with
2340 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
2341 		 * time, we are likely in bigger trouble.
2342 		 */
2343 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2344 						GFP_KERNEL);
2345 	} else {
2346 		/* allocations done at boot time */
2347 		node_alloc_noretry = NULL;
2348 	}
2349 
2350 	/* bit mask controlling how hard we retry per-node allocations */
2351 	if (node_alloc_noretry)
2352 		nodes_clear(*node_alloc_noretry);
2353 
2354 	for (i = 0; i < h->max_huge_pages; ++i) {
2355 		if (hstate_is_gigantic(h)) {
2356 			if (!alloc_bootmem_huge_page(h))
2357 				break;
2358 		} else if (!alloc_pool_huge_page(h,
2359 					 &node_states[N_MEMORY],
2360 					 node_alloc_noretry))
2361 			break;
2362 		cond_resched();
2363 	}
2364 	if (i < h->max_huge_pages) {
2365 		char buf[32];
2366 
2367 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2368 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2369 			h->max_huge_pages, buf, i);
2370 		h->max_huge_pages = i;
2371 	}
2372 
2373 	kfree(node_alloc_noretry);
2374 }
2375 
2376 static void __init hugetlb_init_hstates(void)
2377 {
2378 	struct hstate *h;
2379 
2380 	for_each_hstate(h) {
2381 		if (minimum_order > huge_page_order(h))
2382 			minimum_order = huge_page_order(h);
2383 
2384 		/* oversize hugepages were init'ed in early boot */
2385 		if (!hstate_is_gigantic(h))
2386 			hugetlb_hstate_alloc_pages(h);
2387 	}
2388 	VM_BUG_ON(minimum_order == UINT_MAX);
2389 }
2390 
2391 static void __init report_hugepages(void)
2392 {
2393 	struct hstate *h;
2394 
2395 	for_each_hstate(h) {
2396 		char buf[32];
2397 
2398 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2399 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2400 			buf, h->free_huge_pages);
2401 	}
2402 }
2403 
2404 #ifdef CONFIG_HIGHMEM
2405 static void try_to_free_low(struct hstate *h, unsigned long count,
2406 						nodemask_t *nodes_allowed)
2407 {
2408 	int i;
2409 
2410 	if (hstate_is_gigantic(h))
2411 		return;
2412 
2413 	for_each_node_mask(i, *nodes_allowed) {
2414 		struct page *page, *next;
2415 		struct list_head *freel = &h->hugepage_freelists[i];
2416 		list_for_each_entry_safe(page, next, freel, lru) {
2417 			if (count >= h->nr_huge_pages)
2418 				return;
2419 			if (PageHighMem(page))
2420 				continue;
2421 			list_del(&page->lru);
2422 			update_and_free_page(h, page);
2423 			h->free_huge_pages--;
2424 			h->free_huge_pages_node[page_to_nid(page)]--;
2425 		}
2426 	}
2427 }
2428 #else
2429 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2430 						nodemask_t *nodes_allowed)
2431 {
2432 }
2433 #endif
2434 
2435 /*
2436  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2437  * balanced by operating on them in a round-robin fashion.
2438  * Returns 1 if an adjustment was made.
2439  */
2440 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2441 				int delta)
2442 {
2443 	int nr_nodes, node;
2444 
2445 	VM_BUG_ON(delta != -1 && delta != 1);
2446 
2447 	if (delta < 0) {
2448 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2449 			if (h->surplus_huge_pages_node[node])
2450 				goto found;
2451 		}
2452 	} else {
2453 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2454 			if (h->surplus_huge_pages_node[node] <
2455 					h->nr_huge_pages_node[node])
2456 				goto found;
2457 		}
2458 	}
2459 	return 0;
2460 
2461 found:
2462 	h->surplus_huge_pages += delta;
2463 	h->surplus_huge_pages_node[node] += delta;
2464 	return 1;
2465 }
2466 
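/* Pages in the pool excluding surplus pages, i.e. the persistent pool size. */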
2467 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2468 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2469 			      nodemask_t *nodes_allowed)
2470 {
2471 	unsigned long min_count, ret;
2472 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2473 
2474 	/*
2475 	 * Bit mask controlling how hard we retry per-node allocations.
2476 	 * If we can not allocate the bit mask, do not attempt to allocate
2477 	 * the requested huge pages.
2478 	 */
2479 	if (node_alloc_noretry)
2480 		nodes_clear(*node_alloc_noretry);
2481 	else
2482 		return -ENOMEM;
2483 
2484 	spin_lock(&hugetlb_lock);
2485 
2486 	/*
2487 	 * Check for a node specific request.
2488 	 * Changing node specific huge page count may require a corresponding
2489 	 * change to the global count.  In any case, the passed node mask
2490 	 * (nodes_allowed) will restrict alloc/free to the specified node.
2491 	 */
2492 	if (nid != NUMA_NO_NODE) {
2493 		unsigned long old_count = count;
2494 
2495 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2496 		/*
2497 		 * User may have specified a large count value which caused the
2498 		 * above calculation to overflow.  In this case, they wanted
2499 		 * to allocate as many huge pages as possible.  Set count to
2500 		 * largest possible value to align with their intention.
2501 		 */
2502 		if (count < old_count)
2503 			count = ULONG_MAX;
2504 	}
2505 
2506 	/*
2507 	 * Gigantic pages runtime allocation depend on the capability for large
2508 	 * page range allocation.
2509 	 * If the system does not provide this feature, return an error when
2510 	 * the user tries to allocate gigantic pages but let the user free the
2511 	 * boottime allocated gigantic pages.
2512 	 */
2513 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2514 		if (count > persistent_huge_pages(h)) {
2515 			spin_unlock(&hugetlb_lock);
2516 			NODEMASK_FREE(node_alloc_noretry);
2517 			return -EINVAL;
2518 		}
2519 		/* Fall through to decrease pool */
2520 	}
2521 
2522 	/*
2523 	 * Increase the pool size
2524 	 * First take pages out of surplus state.  Then make up the
2525 	 * remaining difference by allocating fresh huge pages.
2526 	 *
2527 	 * We might race with alloc_surplus_huge_page() here and be unable
2528 	 * to convert a surplus huge page to a normal huge page. That is
2529 	 * not critical, though, it just means the overall size of the
2530 	 * pool might be one hugepage larger than it needs to be, but
2531 	 * within all the constraints specified by the sysctls.
2532 	 */
2533 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2534 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
2535 			break;
2536 	}
2537 
2538 	while (count > persistent_huge_pages(h)) {
2539 		/*
2540 		 * If this allocation races such that we no longer need the
2541 		 * page, free_huge_page will handle it by freeing the page
2542 		 * and reducing the surplus.
2543 		 */
2544 		spin_unlock(&hugetlb_lock);
2545 
2546 		/* yield cpu to avoid soft lockup */
2547 		cond_resched();
2548 
2549 		ret = alloc_pool_huge_page(h, nodes_allowed,
2550 						node_alloc_noretry);
2551 		spin_lock(&hugetlb_lock);
2552 		if (!ret)
2553 			goto out;
2554 
2555 		/* Bail for signals. Probably ctrl-c from user */
2556 		if (signal_pending(current))
2557 			goto out;
2558 	}
2559 
2560 	/*
2561 	 * Decrease the pool size
2562 	 * First return free pages to the buddy allocator (being careful
2563 	 * to keep enough around to satisfy reservations).  Then place
2564 	 * pages into surplus state as needed so the pool will shrink
2565 	 * to the desired size as pages become free.
2566 	 *
2567 	 * By placing pages into the surplus state independent of the
2568 	 * overcommit value, we are allowing the surplus pool size to
2569 	 * exceed overcommit. There are few sane options here. Since
2570 	 * alloc_surplus_huge_page() is checking the global counter,
2571 	 * though, we'll note that we're not allowed to exceed surplus
2572 	 * and won't grow the pool anywhere else. Not until one of the
2573 	 * sysctls are changed, or the surplus pages go out of use.
2574 	 */
2575 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2576 	min_count = max(count, min_count);
2577 	try_to_free_low(h, min_count, nodes_allowed);
2578 	while (min_count < persistent_huge_pages(h)) {
2579 		if (!free_pool_huge_page(h, nodes_allowed, 0))
2580 			break;
2581 		cond_resched_lock(&hugetlb_lock);
2582 	}
2583 	while (count < persistent_huge_pages(h)) {
2584 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
2585 			break;
2586 	}
2587 out:
2588 	h->max_huge_pages = persistent_huge_pages(h);
2589 	spin_unlock(&hugetlb_lock);
2590 
2591 	NODEMASK_FREE(node_alloc_noretry);
2592 
2593 	return 0;
2594 }
2595 
2596 #define HSTATE_ATTR_RO(_name) \
2597 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2598 
2599 #define HSTATE_ATTR(_name) \
2600 	static struct kobj_attribute _name##_attr = \
2601 		__ATTR(_name, 0644, _name##_show, _name##_store)
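/*
 * These attributes are exposed through sysfs.  Given the kobjects created
 * by hugetlb_sysfs_init() below, a 2 MB hstate typically appears as
 * /sys/kernel/mm/hugepages/hugepages-2048kB/ (with per-node variants under
 * each node device).  For example:
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */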
2602 
2603 static struct kobject *hugepages_kobj;
2604 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2605 
2606 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2607 
2608 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2609 {
2610 	int i;
2611 
2612 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
2613 		if (hstate_kobjs[i] == kobj) {
2614 			if (nidp)
2615 				*nidp = NUMA_NO_NODE;
2616 			return &hstates[i];
2617 		}
2618 
2619 	return kobj_to_node_hstate(kobj, nidp);
2620 }
2621 
2622 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2623 					struct kobj_attribute *attr, char *buf)
2624 {
2625 	struct hstate *h;
2626 	unsigned long nr_huge_pages;
2627 	int nid;
2628 
2629 	h = kobj_to_hstate(kobj, &nid);
2630 	if (nid == NUMA_NO_NODE)
2631 		nr_huge_pages = h->nr_huge_pages;
2632 	else
2633 		nr_huge_pages = h->nr_huge_pages_node[nid];
2634 
2635 	return sprintf(buf, "%lu\n", nr_huge_pages);
2636 }
2637 
2638 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2639 					   struct hstate *h, int nid,
2640 					   unsigned long count, size_t len)
2641 {
2642 	int err;
2643 	nodemask_t nodes_allowed, *n_mask;
2644 
2645 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2646 		return -EINVAL;
2647 
2648 	if (nid == NUMA_NO_NODE) {
2649 		/*
2650 		 * global hstate attribute
2651 		 */
2652 		if (!(obey_mempolicy &&
2653 				init_nodemask_of_mempolicy(&nodes_allowed)))
2654 			n_mask = &node_states[N_MEMORY];
2655 		else
2656 			n_mask = &nodes_allowed;
2657 	} else {
2658 		/*
2659 		 * Node specific request.  count adjustment happens in
2660 		 * set_max_huge_pages() after acquiring hugetlb_lock.
2661 		 */
2662 		init_nodemask_of_node(&nodes_allowed, nid);
2663 		n_mask = &nodes_allowed;
2664 	}
2665 
2666 	err = set_max_huge_pages(h, count, nid, n_mask);
2667 
2668 	return err ? err : len;
2669 }
2670 
2671 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2672 					 struct kobject *kobj, const char *buf,
2673 					 size_t len)
2674 {
2675 	struct hstate *h;
2676 	unsigned long count;
2677 	int nid;
2678 	int err;
2679 
2680 	err = kstrtoul(buf, 10, &count);
2681 	if (err)
2682 		return err;
2683 
2684 	h = kobj_to_hstate(kobj, &nid);
2685 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2686 }
2687 
2688 static ssize_t nr_hugepages_show(struct kobject *kobj,
2689 				       struct kobj_attribute *attr, char *buf)
2690 {
2691 	return nr_hugepages_show_common(kobj, attr, buf);
2692 }
2693 
2694 static ssize_t nr_hugepages_store(struct kobject *kobj,
2695 	       struct kobj_attribute *attr, const char *buf, size_t len)
2696 {
2697 	return nr_hugepages_store_common(false, kobj, buf, len);
2698 }
2699 HSTATE_ATTR(nr_hugepages);
2700 
2701 #ifdef CONFIG_NUMA
2702 
2703 /*
2704  * hstate attribute for optionally mempolicy-based constraint on persistent
2705  * huge page alloc/free.
2706  */
2707 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2708 				       struct kobj_attribute *attr, char *buf)
2709 {
2710 	return nr_hugepages_show_common(kobj, attr, buf);
2711 }
2712 
2713 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2714 	       struct kobj_attribute *attr, const char *buf, size_t len)
2715 {
2716 	return nr_hugepages_store_common(true, kobj, buf, len);
2717 }
2718 HSTATE_ATTR(nr_hugepages_mempolicy);
2719 #endif
2720 
2721 
2722 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2723 					struct kobj_attribute *attr, char *buf)
2724 {
2725 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2726 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2727 }
2728 
2729 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2730 		struct kobj_attribute *attr, const char *buf, size_t count)
2731 {
2732 	int err;
2733 	unsigned long input;
2734 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2735 
2736 	if (hstate_is_gigantic(h))
2737 		return -EINVAL;
2738 
2739 	err = kstrtoul(buf, 10, &input);
2740 	if (err)
2741 		return err;
2742 
2743 	spin_lock(&hugetlb_lock);
2744 	h->nr_overcommit_huge_pages = input;
2745 	spin_unlock(&hugetlb_lock);
2746 
2747 	return count;
2748 }
2749 HSTATE_ATTR(nr_overcommit_hugepages);
2750 
2751 static ssize_t free_hugepages_show(struct kobject *kobj,
2752 					struct kobj_attribute *attr, char *buf)
2753 {
2754 	struct hstate *h;
2755 	unsigned long free_huge_pages;
2756 	int nid;
2757 
2758 	h = kobj_to_hstate(kobj, &nid);
2759 	if (nid == NUMA_NO_NODE)
2760 		free_huge_pages = h->free_huge_pages;
2761 	else
2762 		free_huge_pages = h->free_huge_pages_node[nid];
2763 
2764 	return sprintf(buf, "%lu\n", free_huge_pages);
2765 }
2766 HSTATE_ATTR_RO(free_hugepages);
2767 
2768 static ssize_t resv_hugepages_show(struct kobject *kobj,
2769 					struct kobj_attribute *attr, char *buf)
2770 {
2771 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2772 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
2773 }
2774 HSTATE_ATTR_RO(resv_hugepages);
2775 
2776 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2777 					struct kobj_attribute *attr, char *buf)
2778 {
2779 	struct hstate *h;
2780 	unsigned long surplus_huge_pages;
2781 	int nid;
2782 
2783 	h = kobj_to_hstate(kobj, &nid);
2784 	if (nid == NUMA_NO_NODE)
2785 		surplus_huge_pages = h->surplus_huge_pages;
2786 	else
2787 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
2788 
2789 	return sprintf(buf, "%lu\n", surplus_huge_pages);
2790 }
2791 HSTATE_ATTR_RO(surplus_hugepages);
2792 
2793 static struct attribute *hstate_attrs[] = {
2794 	&nr_hugepages_attr.attr,
2795 	&nr_overcommit_hugepages_attr.attr,
2796 	&free_hugepages_attr.attr,
2797 	&resv_hugepages_attr.attr,
2798 	&surplus_hugepages_attr.attr,
2799 #ifdef CONFIG_NUMA
2800 	&nr_hugepages_mempolicy_attr.attr,
2801 #endif
2802 	NULL,
2803 };
2804 
2805 static const struct attribute_group hstate_attr_group = {
2806 	.attrs = hstate_attrs,
2807 };
2808 
2809 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2810 				    struct kobject **hstate_kobjs,
2811 				    const struct attribute_group *hstate_attr_group)
2812 {
2813 	int retval;
2814 	int hi = hstate_index(h);
2815 
2816 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2817 	if (!hstate_kobjs[hi])
2818 		return -ENOMEM;
2819 
2820 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2821 	if (retval) {
2822 		kobject_put(hstate_kobjs[hi]);
2823 		hstate_kobjs[hi] = NULL;
2824 	}
2825 
2826 	return retval;
2827 }
2828 
2829 static void __init hugetlb_sysfs_init(void)
2830 {
2831 	struct hstate *h;
2832 	int err;
2833 
2834 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2835 	if (!hugepages_kobj)
2836 		return;
2837 
2838 	for_each_hstate(h) {
2839 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2840 					 hstate_kobjs, &hstate_attr_group);
2841 		if (err)
2842 			pr_err("Hugetlb: Unable to add hstate %s", h->name);
2843 	}
2844 }
2845 
2846 #ifdef CONFIG_NUMA
2847 
2848 /*
2849  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2850  * with node devices in node_devices[] using a parallel array.  The array
2851  * index of a node device or _hstate == node id.
2852  * This is here to avoid any static dependency of the node device driver, in
2853  * the base kernel, on the hugetlb module.
2854  */
2855 struct node_hstate {
2856 	struct kobject		*hugepages_kobj;
2857 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
2858 };
2859 static struct node_hstate node_hstates[MAX_NUMNODES];
2860 
2861 /*
2862  * A subset of global hstate attributes for node devices
2863  */
2864 static struct attribute *per_node_hstate_attrs[] = {
2865 	&nr_hugepages_attr.attr,
2866 	&free_hugepages_attr.attr,
2867 	&surplus_hugepages_attr.attr,
2868 	NULL,
2869 };
2870 
2871 static const struct attribute_group per_node_hstate_attr_group = {
2872 	.attrs = per_node_hstate_attrs,
2873 };
2874 
2875 /*
2876  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2877  * Returns node id via non-NULL nidp.
2878  */
2879 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2880 {
2881 	int nid;
2882 
2883 	for (nid = 0; nid < nr_node_ids; nid++) {
2884 		struct node_hstate *nhs = &node_hstates[nid];
2885 		int i;
2886 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
2887 			if (nhs->hstate_kobjs[i] == kobj) {
2888 				if (nidp)
2889 					*nidp = nid;
2890 				return &hstates[i];
2891 			}
2892 	}
2893 
2894 	BUG();
2895 	return NULL;
2896 }
2897 
2898 /*
2899  * Unregister hstate attributes from a single node device.
2900  * No-op if no hstate attributes attached.
2901  */
2902 static void hugetlb_unregister_node(struct node *node)
2903 {
2904 	struct hstate *h;
2905 	struct node_hstate *nhs = &node_hstates[node->dev.id];
2906 
2907 	if (!nhs->hugepages_kobj)
2908 		return;		/* no hstate attributes */
2909 
2910 	for_each_hstate(h) {
2911 		int idx = hstate_index(h);
2912 		if (nhs->hstate_kobjs[idx]) {
2913 			kobject_put(nhs->hstate_kobjs[idx]);
2914 			nhs->hstate_kobjs[idx] = NULL;
2915 		}
2916 	}
2917 
2918 	kobject_put(nhs->hugepages_kobj);
2919 	nhs->hugepages_kobj = NULL;
2920 }
2921 
2922 
2923 /*
2924  * Register hstate attributes for a single node device.
2925  * No-op if attributes already registered.
2926  */
2927 static void hugetlb_register_node(struct node *node)
2928 {
2929 	struct hstate *h;
2930 	struct node_hstate *nhs = &node_hstates[node->dev.id];
2931 	int err;
2932 
2933 	if (nhs->hugepages_kobj)
2934 		return;		/* already allocated */
2935 
2936 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2937 							&node->dev.kobj);
2938 	if (!nhs->hugepages_kobj)
2939 		return;
2940 
2941 	for_each_hstate(h) {
2942 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2943 						nhs->hstate_kobjs,
2944 						&per_node_hstate_attr_group);
2945 		if (err) {
2946 			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2947 				h->name, node->dev.id);
2948 			hugetlb_unregister_node(node);
2949 			break;
2950 		}
2951 	}
2952 }
2953 
2954 /*
2955  * hugetlb init time:  register hstate attributes for all registered node
2956  * devices of nodes that have memory.  All on-line nodes should have
2957  * registered their associated device by this time.
2958  */
2959 static void __init hugetlb_register_all_nodes(void)
2960 {
2961 	int nid;
2962 
2963 	for_each_node_state(nid, N_MEMORY) {
2964 		struct node *node = node_devices[nid];
2965 		if (node->dev.id == nid)
2966 			hugetlb_register_node(node);
2967 	}
2968 
2969 	/*
2970 	 * Let the node device driver know we're here so it can
2971 	 * [un]register hstate attributes on node hotplug.
2972 	 */
2973 	register_hugetlbfs_with_node(hugetlb_register_node,
2974 				     hugetlb_unregister_node);
2975 }
2976 #else	/* !CONFIG_NUMA */
2977 
2978 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2979 {
2980 	BUG();
2981 	if (nidp)
2982 		*nidp = -1;
2983 	return NULL;
2984 }
2985 
2986 static void hugetlb_register_all_nodes(void) { }
2987 
2988 #endif
2989 
2990 static int __init hugetlb_init(void)
2991 {
2992 	int i;
2993 
2994 	if (!hugepages_supported())
2995 		return 0;
2996 
2997 	if (!size_to_hstate(default_hstate_size)) {
2998 		if (default_hstate_size != 0) {
2999 			pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
3000 			       default_hstate_size, HPAGE_SIZE);
3001 		}
3002 
3003 		default_hstate_size = HPAGE_SIZE;
3004 		if (!size_to_hstate(default_hstate_size))
3005 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
3006 	}
3007 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
3008 	if (default_hstate_max_huge_pages) {
3009 		if (!default_hstate.max_huge_pages)
3010 			default_hstate.max_huge_pages = default_hstate_max_huge_pages;
3011 	}
3012 
3013 	hugetlb_init_hstates();
3014 	gather_bootmem_prealloc();
3015 	report_hugepages();
3016 
3017 	hugetlb_sysfs_init();
3018 	hugetlb_register_all_nodes();
3019 	hugetlb_cgroup_file_init();
3020 
3021 #ifdef CONFIG_SMP
3022 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
3023 #else
3024 	num_fault_mutexes = 1;
3025 #endif
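	/*
	 * Rounding the table size to a power of two lets the fault hash be
	 * reduced with a simple mask (see hugetlb_fault_mutex_hash()), while
	 * scaling with the CPU count keeps contention on the per-logical-page
	 * fault serialization low.
	 */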
3026 	hugetlb_fault_mutex_table =
3027 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
3028 			      GFP_KERNEL);
3029 	BUG_ON(!hugetlb_fault_mutex_table);
3030 
3031 	for (i = 0; i < num_fault_mutexes; i++)
3032 		mutex_init(&hugetlb_fault_mutex_table[i]);
3033 	return 0;
3034 }
3035 subsys_initcall(hugetlb_init);
3036 
3037 /* Should be called on processing a hugepagesz=... option */
3038 void __init hugetlb_bad_size(void)
3039 {
3040 	parsed_valid_hugepagesz = false;
3041 }
3042 
3043 void __init hugetlb_add_hstate(unsigned int order)
3044 {
3045 	struct hstate *h;
3046 	unsigned long i;
3047 
3048 	if (size_to_hstate(PAGE_SIZE << order)) {
3049 		pr_warn("hugepagesz= specified twice, ignoring\n");
3050 		return;
3051 	}
3052 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
3053 	BUG_ON(order == 0);
3054 	h = &hstates[hugetlb_max_hstate++];
3055 	h->order = order;
3056 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
3057 	h->nr_huge_pages = 0;
3058 	h->free_huge_pages = 0;
3059 	for (i = 0; i < MAX_NUMNODES; ++i)
3060 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
3061 	INIT_LIST_HEAD(&h->hugepage_activelist);
3062 	h->next_nid_to_alloc = first_memory_node;
3063 	h->next_nid_to_free = first_memory_node;
3064 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
3065 					huge_page_size(h)/1024);
3066 
3067 	parsed_hstate = h;
3068 }
3069 
3070 static int __init hugetlb_nrpages_setup(char *s)
3071 {
3072 	unsigned long *mhp;
3073 	static unsigned long *last_mhp;
3074 
3075 	if (!parsed_valid_hugepagesz) {
3076 		pr_warn("hugepages = %s preceded by "
3077 			"an unsupported hugepagesz, ignoring\n", s);
3078 		parsed_valid_hugepagesz = true;
3079 		return 1;
3080 	}
3081 	/*
3082 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
3083 	 * so this hugepages= parameter goes to the "default hstate".
3084 	 */
3085 	else if (!hugetlb_max_hstate)
3086 		mhp = &default_hstate_max_huge_pages;
3087 	else
3088 		mhp = &parsed_hstate->max_huge_pages;
3089 
3090 	if (mhp == last_mhp) {
3091 		pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
3092 		return 1;
3093 	}
3094 
3095 	if (sscanf(s, "%lu", mhp) <= 0)
3096 		*mhp = 0;
3097 
3098 	/*
3099 	 * Global state is always initialized later in hugetlb_init.
3100 	 * But we need to allocate >= MAX_ORDER hstates here early to still
3101 	 * use the bootmem allocator.
3102 	 */
3103 	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
3104 		hugetlb_hstate_alloc_pages(parsed_hstate);
3105 
3106 	last_mhp = mhp;
3107 
3108 	return 1;
3109 }
3110 __setup("hugepages=", hugetlb_nrpages_setup);
3111 
3112 static int __init hugetlb_default_setup(char *s)
3113 {
3114 	default_hstate_size = memparse(s, &s);
3115 	return 1;
3116 }
3117 __setup("default_hugepagesz=", hugetlb_default_setup);
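/*
 * Illustrative boot command line exercising the handlers above (sizes are
 * architecture dependent):
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * Each hugepages= applies to the most recently parsed hugepagesz=, or to the
 * default hstate when no hugepagesz= has been seen yet (see
 * hugetlb_nrpages_setup() above).
 */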
3118 
3119 static unsigned int cpuset_mems_nr(unsigned int *array)
3120 {
3121 	int node;
3122 	unsigned int nr = 0;
3123 
3124 	for_each_node_mask(node, cpuset_current_mems_allowed)
3125 		nr += array[node];
3126 
3127 	return nr;
3128 }
3129 
3130 #ifdef CONFIG_SYSCTL
3131 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
3132 					  void *buffer, size_t *length,
3133 					  loff_t *ppos, unsigned long *out)
3134 {
3135 	struct ctl_table dup_table;
3136 
3137 	/*
3138 	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
3139 	 * can duplicate the @table and alter the duplicate of it.
3140 	 */
3141 	dup_table = *table;
3142 	dup_table.data = out;
3143 
3144 	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
3145 }
3146 
3147 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3148 			 struct ctl_table *table, int write,
3149 			 void __user *buffer, size_t *length, loff_t *ppos)
3150 {
3151 	struct hstate *h = &default_hstate;
3152 	unsigned long tmp = h->max_huge_pages;
3153 	int ret;
3154 
3155 	if (!hugepages_supported())
3156 		return -EOPNOTSUPP;
3157 
3158 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3159 					     &tmp);
3160 	if (ret)
3161 		goto out;
3162 
3163 	if (write)
3164 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
3165 						  NUMA_NO_NODE, tmp, *length);
3166 out:
3167 	return ret;
3168 }
3169 
3170 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3171 			  void __user *buffer, size_t *length, loff_t *ppos)
3172 {
3173 
3174 	return hugetlb_sysctl_handler_common(false, table, write,
3175 							buffer, length, ppos);
3176 }
3177 
3178 #ifdef CONFIG_NUMA
3179 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3180 			  void __user *buffer, size_t *length, loff_t *ppos)
3181 {
3182 	return hugetlb_sysctl_handler_common(true, table, write,
3183 							buffer, length, ppos);
3184 }
3185 #endif /* CONFIG_NUMA */
3186 
3187 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3188 			void __user *buffer,
3189 			size_t *length, loff_t *ppos)
3190 {
3191 	struct hstate *h = &default_hstate;
3192 	unsigned long tmp;
3193 	int ret;
3194 
3195 	if (!hugepages_supported())
3196 		return -EOPNOTSUPP;
3197 
3198 	tmp = h->nr_overcommit_huge_pages;
3199 
3200 	if (write && hstate_is_gigantic(h))
3201 		return -EINVAL;
3202 
3203 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3204 					     &tmp);
3205 	if (ret)
3206 		goto out;
3207 
3208 	if (write) {
3209 		spin_lock(&hugetlb_lock);
3210 		h->nr_overcommit_huge_pages = tmp;
3211 		spin_unlock(&hugetlb_lock);
3212 	}
3213 out:
3214 	return ret;
3215 }
3216 
3217 #endif /* CONFIG_SYSCTL */
3218 
3219 void hugetlb_report_meminfo(struct seq_file *m)
3220 {
3221 	struct hstate *h;
3222 	unsigned long total = 0;
3223 
3224 	if (!hugepages_supported())
3225 		return;
3226 
3227 	for_each_hstate(h) {
3228 		unsigned long count = h->nr_huge_pages;
3229 
3230 		total += (PAGE_SIZE << huge_page_order(h)) * count;
3231 
3232 		if (h == &default_hstate)
3233 			seq_printf(m,
3234 				   "HugePages_Total:   %5lu\n"
3235 				   "HugePages_Free:    %5lu\n"
3236 				   "HugePages_Rsvd:    %5lu\n"
3237 				   "HugePages_Surp:    %5lu\n"
3238 				   "Hugepagesize:   %8lu kB\n",
3239 				   count,
3240 				   h->free_huge_pages,
3241 				   h->resv_huge_pages,
3242 				   h->surplus_huge_pages,
3243 				   (PAGE_SIZE << huge_page_order(h)) / 1024);
3244 	}
3245 
3246 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
3247 }
3248 
3249 int hugetlb_report_node_meminfo(int nid, char *buf)
3250 {
3251 	struct hstate *h = &default_hstate;
3252 	if (!hugepages_supported())
3253 		return 0;
3254 	return sprintf(buf,
3255 		"Node %d HugePages_Total: %5u\n"
3256 		"Node %d HugePages_Free:  %5u\n"
3257 		"Node %d HugePages_Surp:  %5u\n",
3258 		nid, h->nr_huge_pages_node[nid],
3259 		nid, h->free_huge_pages_node[nid],
3260 		nid, h->surplus_huge_pages_node[nid]);
3261 }
3262 
3263 void hugetlb_show_meminfo(void)
3264 {
3265 	struct hstate *h;
3266 	int nid;
3267 
3268 	if (!hugepages_supported())
3269 		return;
3270 
3271 	for_each_node_state(nid, N_MEMORY)
3272 		for_each_hstate(h)
3273 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3274 				nid,
3275 				h->nr_huge_pages_node[nid],
3276 				h->free_huge_pages_node[nid],
3277 				h->surplus_huge_pages_node[nid],
3278 				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3279 }
3280 
3281 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3282 {
3283 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3284 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3285 }
3286 
3287 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3288 unsigned long hugetlb_total_pages(void)
3289 {
3290 	struct hstate *h;
3291 	unsigned long nr_total_pages = 0;
3292 
3293 	for_each_hstate(h)
3294 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3295 	return nr_total_pages;
3296 }
3297 
3298 static int hugetlb_acct_memory(struct hstate *h, long delta)
3299 {
3300 	int ret = -ENOMEM;
3301 
3302 	spin_lock(&hugetlb_lock);
3303 	/*
3304 	 * When cpuset is configured, it breaks the strict hugetlb page
3305 	 * reservation as the accounting is done on a global variable. Such
3306 	 * reservation is completely rubbish in the presence of cpuset because
3307 	 * the reservation is not checked against page availability for the
3308 	 * current cpuset. An application can still be OOM-killed by the
3309 	 * kernel for lack of free hugetlb pages in the cpuset the task is in.
3310 	 * Attempting to enforce strict accounting with cpusets is almost
3311 	 * impossible (or too ugly) because cpusets are too fluid: tasks and
3312 	 * memory nodes can be dynamically moved between cpusets.
3313 	 *
3314 	 * Changing the semantics of shared hugetlb mappings with cpusets is
3315 	 * undesirable. However, in order to preserve some of the semantics,
3316 	 * we fall back to checking the current free page availability as
3317 	 * a best effort, hopefully minimizing the impact of the semantics
3318 	 * change that cpusets introduce.
3319 	 */
3320 	if (delta > 0) {
3321 		if (gather_surplus_pages(h, delta) < 0)
3322 			goto out;
3323 
3324 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3325 			return_unused_surplus_pages(h, delta);
3326 			goto out;
3327 		}
3328 	}
3329 
3330 	ret = 0;
3331 	if (delta < 0)
3332 		return_unused_surplus_pages(h, (unsigned long) -delta);
3333 
3334 out:
3335 	spin_unlock(&hugetlb_lock);
3336 	return ret;
3337 }
3338 
3339 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3340 {
3341 	struct resv_map *resv = vma_resv_map(vma);
3342 
3343 	/*
3344 	 * This new VMA should share its sibling's reservation map if present.
3345 	 * The VMA will only ever have a valid reservation map pointer where
3346 	 * it is being copied for another still existing VMA.  As that VMA
3347 	 * has a reference to the reservation map it cannot disappear until
3348 	 * after this open call completes.  It is therefore safe to take a
3349 	 * new reference here without additional locking.
3350 	 */
3351 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3352 		kref_get(&resv->refs);
3353 }
3354 
3355 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3356 {
3357 	struct hstate *h = hstate_vma(vma);
3358 	struct resv_map *resv = vma_resv_map(vma);
3359 	struct hugepage_subpool *spool = subpool_vma(vma);
3360 	unsigned long reserve, start, end;
3361 	long gbl_reserve;
3362 
3363 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3364 		return;
3365 
3366 	start = vma_hugecache_offset(h, vma, vma->vm_start);
3367 	end = vma_hugecache_offset(h, vma, vma->vm_end);
3368 
3369 	reserve = (end - start) - region_count(resv, start, end);
3370 
3371 	kref_put(&resv->refs, resv_map_release);
3372 
3373 	if (reserve) {
3374 		/*
3375 		 * Decrement reserve counts.  The global reserve count may be
3376 		 * adjusted if the subpool has a minimum size.
3377 		 */
3378 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3379 		hugetlb_acct_memory(h, -gbl_reserve);
3380 	}
3381 }
3382 
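/*
 * Splitting a hugetlb VMA is only permitted on a huge page boundary;
 * anything else would leave a partial huge page mapping.
 */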
3383 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3384 {
3385 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
3386 		return -EINVAL;
3387 	return 0;
3388 }
3389 
3390 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3391 {
3392 	struct hstate *hstate = hstate_vma(vma);
3393 
3394 	return 1UL << huge_page_shift(hstate);
3395 }
3396 
3397 /*
3398  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3399  * handle_mm_fault() to try to instantiate regular-sized pages in the
3400  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3401  * this far.
3402  */
3403 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3404 {
3405 	BUG();
3406 	return 0;
3407 }
3408 
3409 /*
3410  * When a new function is introduced to vm_operations_struct and added
3411  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3412  * This is because, under the System V memory model, mappings created via
3413  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3414  * and their original vm_ops are overwritten with shm_vm_ops.
3415  */
3416 const struct vm_operations_struct hugetlb_vm_ops = {
3417 	.fault = hugetlb_vm_op_fault,
3418 	.open = hugetlb_vm_op_open,
3419 	.close = hugetlb_vm_op_close,
3420 	.split = hugetlb_vm_op_split,
3421 	.pagesize = hugetlb_vm_op_pagesize,
3422 };
3423 
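/*
 * Build the huge PTE for @page with the VMA's protections: writable
 * mappings get a dirty, writable entry, otherwise the entry is
 * write-protected.  The entry is always marked young and huge.
 */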
3424 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3425 				int writable)
3426 {
3427 	pte_t entry;
3428 
3429 	if (writable) {
3430 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3431 					 vma->vm_page_prot)));
3432 	} else {
3433 		entry = huge_pte_wrprotect(mk_huge_pte(page,
3434 					   vma->vm_page_prot));
3435 	}
3436 	entry = pte_mkyoung(entry);
3437 	entry = pte_mkhuge(entry);
3438 	entry = arch_make_huge_pte(entry, vma, page, writable);
3439 
3440 	return entry;
3441 }
3442 
3443 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3444 				   unsigned long address, pte_t *ptep)
3445 {
3446 	pte_t entry;
3447 
3448 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3449 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3450 		update_mmu_cache(vma, address, ptep);
3451 }
3452 
3453 bool is_hugetlb_entry_migration(pte_t pte)
3454 {
3455 	swp_entry_t swp;
3456 
3457 	if (huge_pte_none(pte) || pte_present(pte))
3458 		return false;
3459 	swp = pte_to_swp_entry(pte);
3460 	if (non_swap_entry(swp) && is_migration_entry(swp))
3461 		return true;
3462 	else
3463 		return false;
3464 }
3465 
3466 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3467 {
3468 	swp_entry_t swp;
3469 
3470 	if (huge_pte_none(pte) || pte_present(pte))
3471 		return 0;
3472 	swp = pte_to_swp_entry(pte);
3473 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3474 		return 1;
3475 	else
3476 		return 0;
3477 }
3478 
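/*
 * Copy the huge page table entries of @vma from the @src mm to the @dst
 * mm (fork).  Shared page tables are skipped, migration/hwpoison swap
 * entries are propagated, and for private (COW) mappings both parent and
 * child entries are write-protected.
 */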
3479 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3480 			    struct vm_area_struct *vma)
3481 {
3482 	pte_t *src_pte, *dst_pte, entry, dst_entry;
3483 	struct page *ptepage;
3484 	unsigned long addr;
3485 	int cow;
3486 	struct hstate *h = hstate_vma(vma);
3487 	unsigned long sz = huge_page_size(h);
3488 	struct mmu_notifier_range range;
3489 	int ret = 0;
3490 
3491 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3492 
3493 	if (cow) {
3494 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3495 					vma->vm_start,
3496 					vma->vm_end);
3497 		mmu_notifier_invalidate_range_start(&range);
3498 	}
3499 
3500 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3501 		spinlock_t *src_ptl, *dst_ptl;
3502 		src_pte = huge_pte_offset(src, addr, sz);
3503 		if (!src_pte)
3504 			continue;
3505 		dst_pte = huge_pte_alloc(dst, addr, sz);
3506 		if (!dst_pte) {
3507 			ret = -ENOMEM;
3508 			break;
3509 		}
3510 
3511 		/*
3512 		 * If the pagetables are shared don't copy or take references.
3513 		 * dst_pte == src_pte is the common case of src/dest sharing.
3514 		 *
3515 		 * However, src could have 'unshared' and dst shares with
3516 		 * another vma.  If dst_pte !none, this implies sharing.
3517 		 * Check here before taking page table lock, and once again
3518 		 * after taking the lock below.
3519 		 */
3520 		dst_entry = huge_ptep_get(dst_pte);
3521 		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3522 			continue;
3523 
3524 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
3525 		src_ptl = huge_pte_lockptr(h, src, src_pte);
3526 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3527 		entry = huge_ptep_get(src_pte);
3528 		dst_entry = huge_ptep_get(dst_pte);
3529 		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3530 			/*
3531 			 * Skip if src entry none.  Also, skip in the
3532 			 * unlikely case dst entry !none as this implies
3533 			 * sharing with another vma.
3534 			 */
3535 			;
3536 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
3537 				    is_hugetlb_entry_hwpoisoned(entry))) {
3538 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
3539 
3540 			if (is_write_migration_entry(swp_entry) && cow) {
3541 				/*
3542 				 * COW mappings require pages in both
3543 				 * parent and child to be set to read.
3544 				 */
3545 				make_migration_entry_read(&swp_entry);
3546 				entry = swp_entry_to_pte(swp_entry);
3547 				set_huge_swap_pte_at(src, addr, src_pte,
3548 						     entry, sz);
3549 			}
3550 			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3551 		} else {
3552 			if (cow) {
3553 				/*
3554 				 * No need to notify as we are downgrading page
3555 				 * table protection not changing it to point
3556 				 * to a new page.
3557 				 *
3558 				 * See Documentation/vm/mmu_notifier.rst
3559 				 */
3560 				huge_ptep_set_wrprotect(src, addr, src_pte);
3561 			}
3562 			entry = huge_ptep_get(src_pte);
3563 			ptepage = pte_page(entry);
3564 			get_page(ptepage);
3565 			page_dup_rmap(ptepage, true);
3566 			set_huge_pte_at(dst, addr, dst_pte, entry);
3567 			hugetlb_count_add(pages_per_huge_page(h), dst);
3568 		}
3569 		spin_unlock(src_ptl);
3570 		spin_unlock(dst_ptl);
3571 	}
3572 
3573 	if (cow)
3574 		mmu_notifier_invalidate_range_end(&range);
3575 
3576 	return ret;
3577 }
3578 
3579 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3580 			    unsigned long start, unsigned long end,
3581 			    struct page *ref_page)
3582 {
3583 	struct mm_struct *mm = vma->vm_mm;
3584 	unsigned long address;
3585 	pte_t *ptep;
3586 	pte_t pte;
3587 	spinlock_t *ptl;
3588 	struct page *page;
3589 	struct hstate *h = hstate_vma(vma);
3590 	unsigned long sz = huge_page_size(h);
3591 	struct mmu_notifier_range range;
3592 	bool force_flush = false;
3593 
3594 	WARN_ON(!is_vm_hugetlb_page(vma));
3595 	BUG_ON(start & ~huge_page_mask(h));
3596 	BUG_ON(end & ~huge_page_mask(h));
3597 
3598 	/*
3599 	 * This is a hugetlb vma, all the pte entries should point
3600 	 * to huge pages.
3601 	 */
3602 	tlb_change_page_size(tlb, sz);
3603 	tlb_start_vma(tlb, vma);
3604 
3605 	/*
3606 	 * If sharing possible, alert mmu notifiers of worst case.
3607 	 */
3608 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3609 				end);
3610 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3611 	mmu_notifier_invalidate_range_start(&range);
3612 	address = start;
3613 	for (; address < end; address += sz) {
3614 		ptep = huge_pte_offset(mm, address, sz);
3615 		if (!ptep)
3616 			continue;
3617 
3618 		ptl = huge_pte_lock(h, mm, ptep);
3619 		if (huge_pmd_unshare(mm, &address, ptep)) {
3620 			spin_unlock(ptl);
3621 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
3622 			force_flush = true;
3623 			continue;
3624 		}
3625 
3626 		pte = huge_ptep_get(ptep);
3627 		if (huge_pte_none(pte)) {
3628 			spin_unlock(ptl);
3629 			continue;
3630 		}
3631 
3632 		/*
3633 		 * Migrating hugepage or HWPoisoned hugepage is already
3634 		 * unmapped and its refcount is dropped, so just clear pte here.
3635 		 */
3636 		if (unlikely(!pte_present(pte))) {
3637 			huge_pte_clear(mm, address, ptep, sz);
3638 			spin_unlock(ptl);
3639 			continue;
3640 		}
3641 
3642 		page = pte_page(pte);
3643 		/*
3644 		 * If a reference page is supplied, it is because a specific
3645 		 * page is being unmapped, not a range. Ensure the page we
3646 		 * are about to unmap is the actual page of interest.
3647 		 */
3648 		if (ref_page) {
3649 			if (page != ref_page) {
3650 				spin_unlock(ptl);
3651 				continue;
3652 			}
3653 			/*
3654 			 * Mark the VMA as having unmapped its page so that
3655 			 * future faults in this VMA will fail rather than
3656 			 * looking like data was lost
3657 			 */
3658 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3659 		}
3660 
3661 		pte = huge_ptep_get_and_clear(mm, address, ptep);
3662 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3663 		if (huge_pte_dirty(pte))
3664 			set_page_dirty(page);
3665 
3666 		hugetlb_count_sub(pages_per_huge_page(h), mm);
3667 		page_remove_rmap(page, true);
3668 
3669 		spin_unlock(ptl);
3670 		tlb_remove_page_size(tlb, page, huge_page_size(h));
3671 		/*
3672 		 * Bail out after unmapping reference page if supplied
3673 		 */
3674 		if (ref_page)
3675 			break;
3676 	}
3677 	mmu_notifier_invalidate_range_end(&range);
3678 	tlb_end_vma(tlb, vma);
3679 
3680 	/*
3681 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
3682 	 * could defer the flush until now, since by holding i_mmap_rwsem we
3683 	 * guaranteed that the last reference would not be dropped. But we must
3684 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
3685 	 * dropped and the last reference to the shared PMDs page might be
3686 	 * dropped as well.
3687 	 *
3688 	 * In theory we could defer the freeing of the PMD pages as well, but
3689 	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
3690 	 * detect sharing, so we cannot defer the release of the page either.
3691 	 * Instead, do flush now.
3692 	 */
3693 	if (force_flush)
3694 		tlb_flush_mmu_tlbonly(tlb);
3695 }
3696 
3697 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3698 			  struct vm_area_struct *vma, unsigned long start,
3699 			  unsigned long end, struct page *ref_page)
3700 {
3701 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
3702 
3703 	/*
3704 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3705 	 * test will fail on a vma being torn down, and not grab a page table
3706 	 * on its way out.  We're lucky that the flag has such an appropriate
3707 	 * name, and can in fact be safely cleared here. We could clear it
3708 	 * before the __unmap_hugepage_range above, but all that's necessary
3709 	 * is to clear it before releasing the i_mmap_rwsem. This works
3710 	 * because in the context this is called, the VMA is about to be
3711 	 * destroyed and the i_mmap_rwsem is held.
3712 	 */
3713 	vma->vm_flags &= ~VM_MAYSHARE;
3714 }
3715 
3716 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3717 			  unsigned long end, struct page *ref_page)
3718 {
3719 	struct mm_struct *mm;
3720 	struct mmu_gather tlb;
3721 	unsigned long tlb_start = start;
3722 	unsigned long tlb_end = end;
3723 
3724 	/*
3725 	 * If shared PMDs were possibly used within this vma range, adjust
3726 	 * start/end for worst case tlb flushing.
3727 	 * Note that we can not be sure if PMDs are shared until we try to
3728 	 * unmap pages.  However, we want to make sure TLB flushing covers
3729 	 * the largest possible range.
3730 	 */
3731 	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3732 
3733 	mm = vma->vm_mm;
3734 
3735 	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3736 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3737 	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3738 }
3739 
3740 /*
3741  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3742  * mapping it owns the reserve page for. The intention is to unmap the page
3743  * from other VMAs and let the children be SIGKILLed if they are faulting the
3744  * same region.
3745  */
3746 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3747 			      struct page *page, unsigned long address)
3748 {
3749 	struct hstate *h = hstate_vma(vma);
3750 	struct vm_area_struct *iter_vma;
3751 	struct address_space *mapping;
3752 	pgoff_t pgoff;
3753 
3754 	/*
3755 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3756 	 * from page cache lookup which is in HPAGE_SIZE units.
3757 	 */
3758 	address = address & huge_page_mask(h);
3759 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3760 			vma->vm_pgoff;
3761 	mapping = vma->vm_file->f_mapping;
3762 
3763 	/*
3764 	 * Take the mapping lock for the duration of the table walk. As
3765 	 * this mapping should be shared between all the VMAs,
3766 	 * __unmap_hugepage_range() is called as the lock is already held
3767 	 */
3768 	i_mmap_lock_write(mapping);
3769 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3770 		/* Do not unmap the current VMA */
3771 		if (iter_vma == vma)
3772 			continue;
3773 
3774 		/*
3775 		 * Shared VMAs have their own reserves and do not affect
3776 		 * MAP_PRIVATE accounting but it is possible that a shared
3777 		 * VMA is using the same page so check and skip such VMAs.
3778 		 */
3779 		if (iter_vma->vm_flags & VM_MAYSHARE)
3780 			continue;
3781 
3782 		/*
3783 		 * Unmap the page from other VMAs without their own reserves.
3784 		 * They get marked to be SIGKILLed if they fault in these
3785 		 * areas. This is because a future no-page fault on this VMA
3786 		 * could insert a zeroed page instead of the data existing
3787 		 * from the time of fork. This would look like data corruption
3788 		 */
3789 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3790 			unmap_hugepage_range(iter_vma, address,
3791 					     address + huge_page_size(h), page);
3792 	}
3793 	i_mmap_unlock_write(mapping);
3794 }
3795 
3796 /*
3797  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3798  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3799  * cannot race with other handlers or page migration.
3800  * Keep the pte_same checks anyway to make transition from the mutex easier.
3801  */
3802 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3803 		       unsigned long address, pte_t *ptep,
3804 		       struct page *pagecache_page, spinlock_t *ptl)
3805 {
3806 	pte_t pte;
3807 	struct hstate *h = hstate_vma(vma);
3808 	struct page *old_page, *new_page;
3809 	int outside_reserve = 0;
3810 	vm_fault_t ret = 0;
3811 	unsigned long haddr = address & huge_page_mask(h);
3812 	struct mmu_notifier_range range;
3813 
3814 	pte = huge_ptep_get(ptep);
3815 	old_page = pte_page(pte);
3816 
3817 retry_avoidcopy:
3818 	/* If no-one else is actually using this page, avoid the copy
3819 	 * and just make the page writable */
3820 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3821 		page_move_anon_rmap(old_page, vma);
3822 		set_huge_ptep_writable(vma, haddr, ptep);
3823 		return 0;
3824 	}
3825 
3826 	/*
3827 	 * If the process that created a MAP_PRIVATE mapping is about to
3828 	 * perform a COW due to a shared page count, attempt to satisfy
3829 	 * the allocation without using the existing reserves. The pagecache
3830 	 * page is used to determine if the reserve at this address was
3831 	 * consumed or not. If reserves were used, a partial faulted mapping
3832 	 * at the time of fork() could consume its reserves on COW instead
3833 	 * of the full address range.
3834 	 */
3835 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3836 			old_page != pagecache_page)
3837 		outside_reserve = 1;
3838 
3839 	get_page(old_page);
3840 
3841 	/*
3842 	 * Drop page table lock as buddy allocator may be called. It will
3843 	 * be acquired again before returning to the caller, as expected.
3844 	 */
3845 	spin_unlock(ptl);
3846 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
3847 
3848 	if (IS_ERR(new_page)) {
3849 		/*
3850 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
3851 		 * it is due to references held by a child and an insufficient
3852 		 * huge page pool. To guarantee the original mappers
3853 		 * reliability, unmap the page from child processes. The child
3854 		 * may get SIGKILLed if it later faults.
3855 		 */
3856 		if (outside_reserve) {
3857 			put_page(old_page);
3858 			BUG_ON(huge_pte_none(pte));
3859 			unmap_ref_private(mm, vma, old_page, haddr);
3860 			BUG_ON(huge_pte_none(pte));
3861 			spin_lock(ptl);
3862 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3863 			if (likely(ptep &&
3864 				   pte_same(huge_ptep_get(ptep), pte)))
3865 				goto retry_avoidcopy;
3866 			/*
3867 			 * A race occurred while re-acquiring the page
3868 			 * table lock, and our job is done.
3869 			 */
3870 			return 0;
3871 		}
3872 
3873 		ret = vmf_error(PTR_ERR(new_page));
3874 		goto out_release_old;
3875 	}
3876 
3877 	/*
3878 	 * When the original hugepage is shared one, it does not have
3879 	 * anon_vma prepared.
3880 	 */
3881 	if (unlikely(anon_vma_prepare(vma))) {
3882 		ret = VM_FAULT_OOM;
3883 		goto out_release_all;
3884 	}
3885 
3886 	copy_user_huge_page(new_page, old_page, address, vma,
3887 			    pages_per_huge_page(h));
3888 	__SetPageUptodate(new_page);
3889 
3890 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
3891 				haddr + huge_page_size(h));
3892 	mmu_notifier_invalidate_range_start(&range);
3893 
3894 	/*
3895 	 * Retake the page table lock to check for racing updates
3896 	 * before the page tables are altered
3897 	 */
3898 	spin_lock(ptl);
3899 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3900 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3901 		ClearPagePrivate(new_page);
3902 
3903 		/* Break COW */
3904 		huge_ptep_clear_flush(vma, haddr, ptep);
3905 		mmu_notifier_invalidate_range(mm, range.start, range.end);
3906 		set_huge_pte_at(mm, haddr, ptep,
3907 				make_huge_pte(vma, new_page, 1));
3908 		page_remove_rmap(old_page, true);
3909 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
3910 		set_page_huge_active(new_page);
3911 		/* Make the old page be freed below */
3912 		new_page = old_page;
3913 	}
3914 	spin_unlock(ptl);
3915 	mmu_notifier_invalidate_range_end(&range);
3916 out_release_all:
3917 	restore_reserve_on_error(h, vma, haddr, new_page);
3918 	put_page(new_page);
3919 out_release_old:
3920 	put_page(old_page);
3921 
3922 	spin_lock(ptl); /* Caller expects lock to be held */
3923 	return ret;
3924 }
3925 
3926 /* Return the pagecache page at a given address within a VMA */
3927 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3928 			struct vm_area_struct *vma, unsigned long address)
3929 {
3930 	struct address_space *mapping;
3931 	pgoff_t idx;
3932 
3933 	mapping = vma->vm_file->f_mapping;
3934 	idx = vma_hugecache_offset(h, vma, address);
3935 
3936 	return find_lock_page(mapping, idx);
3937 }
3938 
3939 /*
3940  * Return whether there is a pagecache page to back the given address within the VMA.
3941  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3942  */
3943 static bool hugetlbfs_pagecache_present(struct hstate *h,
3944 			struct vm_area_struct *vma, unsigned long address)
3945 {
3946 	struct address_space *mapping;
3947 	pgoff_t idx;
3948 	struct page *page;
3949 
3950 	mapping = vma->vm_file->f_mapping;
3951 	idx = vma_hugecache_offset(h, vma, address);
3952 
3953 	page = find_get_page(mapping, idx);
3954 	if (page)
3955 		put_page(page);
3956 	return page != NULL;
3957 }
3958 
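/*
 * Insert a freshly allocated huge page into the page cache at @idx.
 * On success the page's private (reserve) flag is cleared, the page is
 * marked dirty and the inode's block count is bumped.
 */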
3959 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3960 			   pgoff_t idx)
3961 {
3962 	struct inode *inode = mapping->host;
3963 	struct hstate *h = hstate_inode(inode);
3964 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3965 
3966 	if (err)
3967 		return err;
3968 	ClearPagePrivate(page);
3969 
3970 	/*
3971 	 * set page dirty so that it will not be removed from cache/file
3972 	 * by non-hugetlbfs specific code paths.
3973 	 */
3974 	set_page_dirty(page);
3975 
3976 	spin_lock(&inode->i_lock);
3977 	inode->i_blocks += blocks_per_huge_page(h);
3978 	spin_unlock(&inode->i_lock);
3979 	return 0;
3980 }
3981 
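/*
 * Handle a fault on a hugetlb VMA for which no pte is present: look the
 * page up in the page cache, hand the fault to userfaultfd for missing
 * pages, or allocate and instantiate a new huge page.  Called with the
 * hugetlb fault mutex held.
 */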
3982 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3983 			struct vm_area_struct *vma,
3984 			struct address_space *mapping, pgoff_t idx,
3985 			unsigned long address, pte_t *ptep, unsigned int flags)
3986 {
3987 	struct hstate *h = hstate_vma(vma);
3988 	vm_fault_t ret = VM_FAULT_SIGBUS;
3989 	int anon_rmap = 0;
3990 	unsigned long size;
3991 	struct page *page;
3992 	pte_t new_pte;
3993 	spinlock_t *ptl;
3994 	unsigned long haddr = address & huge_page_mask(h);
3995 	bool new_page = false;
3996 
3997 	/*
3998 	 * Currently, we are forced to kill the process in the event the
3999 	 * original mapper has unmapped pages from the child due to a failed
4000 	 * COW. Warn that such a situation has occurred as it may not be obvious
4001 	 */
4002 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
4003 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
4004 			   current->pid);
4005 		return ret;
4006 	}
4007 
4008 	/*
4009 	 * Use page lock to guard against racing truncation
4010 	 * before we get page_table_lock.
4011 	 */
4012 retry:
4013 	page = find_lock_page(mapping, idx);
4014 	if (!page) {
4015 		size = i_size_read(mapping->host) >> huge_page_shift(h);
4016 		if (idx >= size)
4017 			goto out;
4018 
4019 		/*
4020 		 * Check for page in userfault range
4021 		 */
4022 		if (userfaultfd_missing(vma)) {
4023 			u32 hash;
4024 			struct vm_fault vmf = {
4025 				.vma = vma,
4026 				.address = haddr,
4027 				.flags = flags,
4028 				/*
4029 				 * Hard to debug if it ends up being
4030 				 * used by a callee that assumes
4031 				 * something about the other
4032 				 * uninitialized fields... same as in
4033 				 * memory.c
4034 				 */
4035 			};
4036 
4037 			/*
4038 			 * hugetlb_fault_mutex must be dropped before
4039 			 * handling userfault.  Reacquire after handling
4040 			 * fault to make calling code simpler.
4041 			 */
4042 			hash = hugetlb_fault_mutex_hash(h, mapping, idx);
4043 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4044 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
4045 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
4046 			goto out;
4047 		}
4048 
4049 		page = alloc_huge_page(vma, haddr, 0);
4050 		if (IS_ERR(page)) {
4051 			/*
4052 			 * Returning error will result in faulting task being
4053 			 * sent SIGBUS.  The hugetlb fault mutex prevents two
4054 			 * tasks from racing to fault in the same page which
4055 			 * could result in false unable to allocate errors.
4056 			 * Page migration does not take the fault mutex, but
4057 			 * does a clear then write of pte's under page table
4058 			 * lock.  Page fault code could race with migration,
4059 			 * notice the clear pte and try to allocate a page
4060 			 * here.  Before returning error, get ptl and make
4061 			 * sure there really is no pte entry.
4062 			 */
4063 			ptl = huge_pte_lock(h, mm, ptep);
4064 			if (!huge_pte_none(huge_ptep_get(ptep))) {
4065 				ret = 0;
4066 				spin_unlock(ptl);
4067 				goto out;
4068 			}
4069 			spin_unlock(ptl);
4070 			ret = vmf_error(PTR_ERR(page));
4071 			goto out;
4072 		}
4073 		clear_huge_page(page, address, pages_per_huge_page(h));
4074 		__SetPageUptodate(page);
4075 		new_page = true;
4076 
4077 		if (vma->vm_flags & VM_MAYSHARE) {
4078 			int err = huge_add_to_page_cache(page, mapping, idx);
4079 			if (err) {
4080 				put_page(page);
4081 				if (err == -EEXIST)
4082 					goto retry;
4083 				goto out;
4084 			}
4085 		} else {
4086 			lock_page(page);
4087 			if (unlikely(anon_vma_prepare(vma))) {
4088 				ret = VM_FAULT_OOM;
4089 				goto backout_unlocked;
4090 			}
4091 			anon_rmap = 1;
4092 		}
4093 	} else {
4094 		/*
4095 		 * If a memory error occurs between mmap() and fault, some processes
4096 		 * may not have a hwpoisoned swap entry for the errored virtual address.
4097 		 * So we need to block hugepage fault by PG_hwpoison bit check.
4098 		 */
4099 		if (unlikely(PageHWPoison(page))) {
4100 			ret = VM_FAULT_HWPOISON_LARGE |
4101 				VM_FAULT_SET_HINDEX(hstate_index(h));
4102 			goto backout_unlocked;
4103 		}
4104 	}
4105 
4106 	/*
4107 	 * If we are going to COW a private mapping later, we examine the
4108 	 * pending reservations for this page now. This will ensure that
4109 	 * any allocations necessary to record that reservation occur outside
4110 	 * the spinlock.
4111 	 */
4112 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4113 		if (vma_needs_reservation(h, vma, haddr) < 0) {
4114 			ret = VM_FAULT_OOM;
4115 			goto backout_unlocked;
4116 		}
4117 		/* Just decrements count, does not deallocate */
4118 		vma_end_reservation(h, vma, haddr);
4119 	}
4120 
4121 	ptl = huge_pte_lock(h, mm, ptep);
4122 	size = i_size_read(mapping->host) >> huge_page_shift(h);
4123 	if (idx >= size)
4124 		goto backout;
4125 
4126 	ret = 0;
4127 	if (!huge_pte_none(huge_ptep_get(ptep)))
4128 		goto backout;
4129 
4130 	if (anon_rmap) {
4131 		ClearPagePrivate(page);
4132 		hugepage_add_new_anon_rmap(page, vma, haddr);
4133 	} else
4134 		page_dup_rmap(page, true);
4135 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4136 				&& (vma->vm_flags & VM_SHARED)));
4137 	set_huge_pte_at(mm, haddr, ptep, new_pte);
4138 
4139 	hugetlb_count_add(pages_per_huge_page(h), mm);
4140 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4141 		/* Optimization, do the COW without a second fault */
4142 		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4143 	}
4144 
4145 	spin_unlock(ptl);
4146 
4147 	/*
4148 	 * Only make newly allocated pages active.  Existing pages found
4149 	 * in the pagecache could be !page_huge_active() if they have been
4150 	 * isolated for migration.
4151 	 */
4152 	if (new_page)
4153 		set_page_huge_active(page);
4154 
4155 	unlock_page(page);
4156 out:
4157 	return ret;
4158 
4159 backout:
4160 	spin_unlock(ptl);
4161 backout_unlocked:
4162 	unlock_page(page);
4163 	restore_reserve_on_error(h, vma, haddr, page);
4164 	put_page(page);
4165 	goto out;
4166 }
4167 
4168 #ifdef CONFIG_SMP
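/*
 * Hash (mapping, idx) to pick one of the fault mutexes, so that faults
 * on different pages can proceed in parallel while faults on the same
 * page are serialized.  The mask below assumes num_fault_mutexes is a
 * power of two.
 */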
4169 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
4170 			    pgoff_t idx)
4171 {
4172 	unsigned long key[2];
4173 	u32 hash;
4174 
4175 	key[0] = (unsigned long) mapping;
4176 	key[1] = idx;
4177 
4178 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
4179 
4180 	return hash & (num_fault_mutexes - 1);
4181 }
4182 #else
4183 /*
4184  * For uniprocessor systems we always use a single mutex, so just
4185  * return 0 and avoid the hashing overhead.
4186  */
4187 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
4188 			    pgoff_t idx)
4189 {
4190 	return 0;
4191 }
4192 #endif
4193 
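/*
 * Main hugetlb fault handler.  Serializes against other faults on the
 * same page via the fault mutex, instantiates missing pages through
 * hugetlb_no_page(), and performs COW for write faults on read-only
 * entries.
 */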
4194 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4195 			unsigned long address, unsigned int flags)
4196 {
4197 	pte_t *ptep, entry;
4198 	spinlock_t *ptl;
4199 	vm_fault_t ret;
4200 	u32 hash;
4201 	pgoff_t idx;
4202 	struct page *page = NULL;
4203 	struct page *pagecache_page = NULL;
4204 	struct hstate *h = hstate_vma(vma);
4205 	struct address_space *mapping;
4206 	int need_wait_lock = 0;
4207 	unsigned long haddr = address & huge_page_mask(h);
4208 
4209 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4210 	if (ptep) {
4211 		entry = huge_ptep_get(ptep);
4212 		if (unlikely(is_hugetlb_entry_migration(entry))) {
4213 			migration_entry_wait_huge(vma, mm, ptep);
4214 			return 0;
4215 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4216 			return VM_FAULT_HWPOISON_LARGE |
4217 				VM_FAULT_SET_HINDEX(hstate_index(h));
4218 	} else {
4219 		ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4220 		if (!ptep)
4221 			return VM_FAULT_OOM;
4222 	}
4223 
4224 	mapping = vma->vm_file->f_mapping;
4225 	idx = vma_hugecache_offset(h, vma, haddr);
4226 
4227 	/*
4228 	 * Serialize hugepage allocation and instantiation, so that we don't
4229 	 * get spurious allocation failures if two CPUs race to instantiate
4230 	 * the same page in the page cache.
4231 	 */
4232 	hash = hugetlb_fault_mutex_hash(h, mapping, idx);
4233 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
4234 
4235 	entry = huge_ptep_get(ptep);
4236 	if (huge_pte_none(entry)) {
4237 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4238 		goto out_mutex;
4239 	}
4240 
4241 	ret = 0;
4242 
4243 	/*
4244 	 * entry could be a migration/hwpoison entry at this point, so this
4245 	 * check prevents the kernel from going below assuming that we have
4246 	 * an active hugepage in pagecache. This goto expects the 2nd page fault,
4247 	 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
4248 	 * handle it.
4249 	 */
4250 	if (!pte_present(entry))
4251 		goto out_mutex;
4252 
4253 	/*
4254 	 * If we are going to COW the mapping later, we examine the pending
4255 	 * reservations for this page now. This will ensure that any
4256 	 * allocations necessary to record that reservation occur outside the
4257 	 * spinlock. For private mappings, we also lookup the pagecache
4258 	 * page now as it is used to determine if a reservation has been
4259 	 * consumed.
4260 	 */
4261 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4262 		if (vma_needs_reservation(h, vma, haddr) < 0) {
4263 			ret = VM_FAULT_OOM;
4264 			goto out_mutex;
4265 		}
4266 		/* Just decrements count, does not deallocate */
4267 		vma_end_reservation(h, vma, haddr);
4268 
4269 		if (!(vma->vm_flags & VM_MAYSHARE))
4270 			pagecache_page = hugetlbfs_pagecache_page(h,
4271 								vma, haddr);
4272 	}
4273 
4274 	ptl = huge_pte_lock(h, mm, ptep);
4275 
4276 	/* Check for a racing update before calling hugetlb_cow */
4277 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4278 		goto out_ptl;
4279 
4280 	/*
4281 	 * hugetlb_cow() requires page locks of pte_page(entry) and
4282 	 * pagecache_page, so here we need take the former one
4283 	 * when page != pagecache_page or !pagecache_page.
4284 	 */
4285 	page = pte_page(entry);
4286 	if (page != pagecache_page)
4287 		if (!trylock_page(page)) {
4288 			need_wait_lock = 1;
4289 			goto out_ptl;
4290 		}
4291 
4292 	get_page(page);
4293 
4294 	if (flags & FAULT_FLAG_WRITE) {
4295 		if (!huge_pte_write(entry)) {
4296 			ret = hugetlb_cow(mm, vma, address, ptep,
4297 					  pagecache_page, ptl);
4298 			goto out_put_page;
4299 		}
4300 		entry = huge_pte_mkdirty(entry);
4301 	}
4302 	entry = pte_mkyoung(entry);
4303 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4304 						flags & FAULT_FLAG_WRITE))
4305 		update_mmu_cache(vma, haddr, ptep);
4306 out_put_page:
4307 	if (page != pagecache_page)
4308 		unlock_page(page);
4309 	put_page(page);
4310 out_ptl:
4311 	spin_unlock(ptl);
4312 
4313 	if (pagecache_page) {
4314 		unlock_page(pagecache_page);
4315 		put_page(pagecache_page);
4316 	}
4317 out_mutex:
4318 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4319 	/*
4320 	 * Generally it's safe to hold a refcount while waiting for the page
4321 	 * lock. But here we only wait to defer the next page fault and avoid
4322 	 * a busy loop; the page is not used after being unlocked and before
4323 	 * returning from the current page fault. So we are safe from accessing
4324 	 * a freed page, even if we wait here without taking a refcount.
4325 	 */
4326 	if (need_wait_lock)
4327 		wait_on_page_locked(page);
4328 	return ret;
4329 }
4330 
4331 /*
4332  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4333  * modifications for huge pages.
4334  */
4335 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4336 			    pte_t *dst_pte,
4337 			    struct vm_area_struct *dst_vma,
4338 			    unsigned long dst_addr,
4339 			    unsigned long src_addr,
4340 			    struct page **pagep)
4341 {
4342 	struct address_space *mapping;
4343 	pgoff_t idx;
4344 	unsigned long size;
4345 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
4346 	struct hstate *h = hstate_vma(dst_vma);
4347 	pte_t _dst_pte;
4348 	spinlock_t *ptl;
4349 	int ret;
4350 	struct page *page;
4351 
4352 	if (!*pagep) {
4353 		/* If a page already exists, then it's UFFDIO_COPY for
4354 		 * a non-missing case. Return -EEXIST.
4355 		 */
4356 		if (vm_shared &&
4357 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
4358 			ret = -EEXIST;
4359 			goto out;
4360 		}
4361 
4362 		page = alloc_huge_page(dst_vma, dst_addr, 0);
4363 		if (IS_ERR(page)) {
4364 			ret = -ENOMEM;
4365 			goto out;
4366 		}
4367 
4368 		ret = copy_huge_page_from_user(page,
4369 						(const void __user *) src_addr,
4370 						pages_per_huge_page(h), false);
4371 
4372 		/* fallback to copy_from_user outside mmap_sem */
4373 		if (unlikely(ret)) {
4374 			ret = -ENOENT;
4375 			*pagep = page;
4376 			/* don't free the page */
4377 			goto out;
4378 		}
4379 	} else {
4380 		page = *pagep;
4381 		*pagep = NULL;
4382 	}
4383 
4384 	/*
4385 	 * The memory barrier inside __SetPageUptodate makes sure that
4386 	 * preceding stores to the page contents become visible before
4387 	 * the set_pte_at() write.
4388 	 */
4389 	__SetPageUptodate(page);
4390 
4391 	mapping = dst_vma->vm_file->f_mapping;
4392 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4393 
4394 	/*
4395 	 * If shared, add to page cache
4396 	 */
4397 	if (vm_shared) {
4398 		size = i_size_read(mapping->host) >> huge_page_shift(h);
4399 		ret = -EFAULT;
4400 		if (idx >= size)
4401 			goto out_release_nounlock;
4402 
4403 		/*
4404 		 * Serialization between remove_inode_hugepages() and
4405 		 * huge_add_to_page_cache() below happens through the
4406 		 * hugetlb_fault_mutex_table, which must be held here by
4407 		 * the caller.
4408 		 */
4409 		ret = huge_add_to_page_cache(page, mapping, idx);
4410 		if (ret)
4411 			goto out_release_nounlock;
4412 	}
4413 
4414 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4415 	spin_lock(ptl);
4416 
4417 	/*
4418 	 * Recheck the i_size after holding PT lock to make sure not
4419 	 * to leave any page mapped (as page_mapped()) beyond the end
4420 	 * of the i_size (remove_inode_hugepages() is strict about
4421 	 * enforcing that). If we bail out here, we'll also leave a
4422 	 * page in the radix tree in the vm_shared case beyond the end
4423 	 * of the i_size, but remove_inode_hugepages() will take care
4424 	 * of it as soon as we drop the hugetlb_fault_mutex_table.
4425 	 */
4426 	size = i_size_read(mapping->host) >> huge_page_shift(h);
4427 	ret = -EFAULT;
4428 	if (idx >= size)
4429 		goto out_release_unlock;
4430 
4431 	ret = -EEXIST;
4432 	if (!huge_pte_none(huge_ptep_get(dst_pte)))
4433 		goto out_release_unlock;
4434 
4435 	if (vm_shared) {
4436 		page_dup_rmap(page, true);
4437 	} else {
4438 		ClearPagePrivate(page);
4439 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4440 	}
4441 
4442 	_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4443 	if (dst_vma->vm_flags & VM_WRITE)
4444 		_dst_pte = huge_pte_mkdirty(_dst_pte);
4445 	_dst_pte = pte_mkyoung(_dst_pte);
4446 
4447 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4448 
4449 	(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4450 					dst_vma->vm_flags & VM_WRITE);
4451 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4452 
4453 	/* No need to invalidate - it was non-present before */
4454 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
4455 
4456 	spin_unlock(ptl);
4457 	set_page_huge_active(page);
4458 	if (vm_shared)
4459 		unlock_page(page);
4460 	ret = 0;
4461 out:
4462 	return ret;
4463 out_release_unlock:
4464 	spin_unlock(ptl);
4465 	if (vm_shared)
4466 		unlock_page(page);
4467 out_release_nounlock:
4468 	put_page(page);
4469 	goto out;
4470 }
4471 
4472 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4473 			 struct page **pages, struct vm_area_struct **vmas,
4474 			 unsigned long *position, unsigned long *nr_pages,
4475 			 long i, unsigned int flags, int *nonblocking)
4476 {
4477 	unsigned long pfn_offset;
4478 	unsigned long vaddr = *position;
4479 	unsigned long remainder = *nr_pages;
4480 	struct hstate *h = hstate_vma(vma);
4481 	int err = -EFAULT;
4482 
4483 	while (vaddr < vma->vm_end && remainder) {
4484 		pte_t *pte;
4485 		spinlock_t *ptl = NULL;
4486 		int absent;
4487 		struct page *page;
4488 
4489 		/*
4490 		 * If we have a pending SIGKILL, don't keep faulting pages and
4491 		 * potentially allocating memory.
4492 		 */
4493 		if (fatal_signal_pending(current)) {
4494 			remainder = 0;
4495 			break;
4496 		}
4497 
4498 		/*
4499 		 * Some archs (sparc64, sh*) have multiple pte_t entries for
4500 		 * each hugepage.  We have to make sure we get the
4501 		 * first, for the page indexing below to work.
4502 		 *
4503 		 * Note that page table lock is not held when pte is null.
4504 		 */
4505 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4506 				      huge_page_size(h));
4507 		if (pte)
4508 			ptl = huge_pte_lock(h, mm, pte);
4509 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
4510 
4511 		/*
4512 		 * When coredumping, it suits get_dump_page if we just return
4513 		 * an error where there's an empty slot with no huge pagecache
4514 		 * to back it.  This way, we avoid allocating a hugepage, and
4515 		 * the sparse dumpfile avoids allocating disk blocks, but its
4516 		 * huge holes still show up with zeroes where they need to be.
4517 		 */
4518 		if (absent && (flags & FOLL_DUMP) &&
4519 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4520 			if (pte)
4521 				spin_unlock(ptl);
4522 			remainder = 0;
4523 			break;
4524 		}
4525 
4526 		/*
4527 		 * We need to call hugetlb_fault() both for hugepages under migration
4528 		 * (in which case hugetlb_fault() waits for the migration) and for
4529 		 * hwpoisoned hugepages (in which case we need to prevent the
4530 		 * caller from accessing them). In order to do this, we use
4531 		 * is_swap_pte() here instead of is_hugetlb_entry_migration() and
4532 		 * is_hugetlb_entry_hwpoisoned(), because it simply covers
4533 		 * both cases, and because we can't follow correct pages
4534 		 * directly from any kind of swap entry.
4535 		 */
4536 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4537 		    ((flags & FOLL_WRITE) &&
4538 		      !huge_pte_write(huge_ptep_get(pte)))) {
4539 			vm_fault_t ret;
4540 			unsigned int fault_flags = 0;
4541 
4542 			if (pte)
4543 				spin_unlock(ptl);
4544 			if (flags & FOLL_WRITE)
4545 				fault_flags |= FAULT_FLAG_WRITE;
4546 			if (nonblocking)
4547 				fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4548 			if (flags & FOLL_NOWAIT)
4549 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4550 					FAULT_FLAG_RETRY_NOWAIT;
4551 			if (flags & FOLL_TRIED) {
4552 				VM_WARN_ON_ONCE(fault_flags &
4553 						FAULT_FLAG_ALLOW_RETRY);
4554 				fault_flags |= FAULT_FLAG_TRIED;
4555 			}
4556 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4557 			if (ret & VM_FAULT_ERROR) {
4558 				err = vm_fault_to_errno(ret, flags);
4559 				remainder = 0;
4560 				break;
4561 			}
4562 			if (ret & VM_FAULT_RETRY) {
4563 				if (nonblocking &&
4564 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4565 					*nonblocking = 0;
4566 				*nr_pages = 0;
4567 				/*
4568 				 * VM_FAULT_RETRY must not return an
4569 				 * error, it will return zero
4570 				 * instead.
4571 				 *
4572 				 * No need to update "position" as the
4573 				 * caller will not check it after
4574 				 * *nr_pages is set to 0.
4575 				 */
4576 				return i;
4577 			}
4578 			continue;
4579 		}
4580 
4581 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4582 		page = pte_page(huge_ptep_get(pte));
4583 
4584 		/*
4585 		 * Instead of doing 'try_get_page()' below in the same_page
4586 		 * loop, just check the count once here.
4587 		 */
4588 		if (unlikely(page_count(page) <= 0)) {
4589 			if (pages) {
4590 				spin_unlock(ptl);
4591 				remainder = 0;
4592 				err = -ENOMEM;
4593 				break;
4594 			}
4595 		}
4596 same_page:
4597 		if (pages) {
4598 			pages[i] = mem_map_offset(page, pfn_offset);
4599 			get_page(pages[i]);
4600 		}
4601 
4602 		if (vmas)
4603 			vmas[i] = vma;
4604 
4605 		vaddr += PAGE_SIZE;
4606 		++pfn_offset;
4607 		--remainder;
4608 		++i;
4609 		if (vaddr < vma->vm_end && remainder &&
4610 				pfn_offset < pages_per_huge_page(h)) {
4611 			/*
4612 			 * We use pfn_offset to avoid touching the pageframes
4613 			 * of this compound page.
4614 			 */
4615 			goto same_page;
4616 		}
4617 		spin_unlock(ptl);
4618 	}
4619 	*nr_pages = remainder;
4620 	/*
4621 	 * setting position is actually required only if remainder is
4622 	 * not zero but it's faster not to add a "if (remainder)"
4623 	 * branch.
4624 	 */
4625 	*position = vaddr;
4626 
4627 	return i ? i : err;
4628 }
4629 
4630 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4631 /*
4632  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4633  * implement this.
4634  */
4635 #define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
4636 #endif
4637 
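/*
 * Apply @newprot to every huge pte in [address, end).  Shared pmds are
 * unshared rather than modified in place.  Returns the number of base
 * (PAGE_SIZE) pages whose mapping was changed.
 */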
4638 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4639 		unsigned long address, unsigned long end, pgprot_t newprot)
4640 {
4641 	struct mm_struct *mm = vma->vm_mm;
4642 	unsigned long start = address;
4643 	pte_t *ptep;
4644 	pte_t pte;
4645 	struct hstate *h = hstate_vma(vma);
4646 	unsigned long pages = 0;
4647 	bool shared_pmd = false;
4648 	struct mmu_notifier_range range;
4649 
4650 	/*
4651 	 * In the case of shared PMDs, the area to flush could be beyond
4652 	 * start/end.  Set range.start/range.end to cover the maximum possible
4653 	 * range if PMD sharing is possible.
4654 	 */
4655 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
4656 				0, vma, mm, start, end);
4657 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4658 
4659 	BUG_ON(address >= end);
4660 	flush_cache_range(vma, range.start, range.end);
4661 
4662 	mmu_notifier_invalidate_range_start(&range);
4663 	i_mmap_lock_write(vma->vm_file->f_mapping);
4664 	for (; address < end; address += huge_page_size(h)) {
4665 		spinlock_t *ptl;
4666 		ptep = huge_pte_offset(mm, address, huge_page_size(h));
4667 		if (!ptep)
4668 			continue;
4669 		ptl = huge_pte_lock(h, mm, ptep);
4670 		if (huge_pmd_unshare(mm, &address, ptep)) {
4671 			pages++;
4672 			spin_unlock(ptl);
4673 			shared_pmd = true;
4674 			continue;
4675 		}
4676 		pte = huge_ptep_get(ptep);
4677 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4678 			spin_unlock(ptl);
4679 			continue;
4680 		}
4681 		if (unlikely(is_hugetlb_entry_migration(pte))) {
4682 			swp_entry_t entry = pte_to_swp_entry(pte);
4683 
4684 			if (is_write_migration_entry(entry)) {
4685 				pte_t newpte;
4686 
4687 				make_migration_entry_read(&entry);
4688 				newpte = swp_entry_to_pte(entry);
4689 				set_huge_swap_pte_at(mm, address, ptep,
4690 						     newpte, huge_page_size(h));
4691 				pages++;
4692 			}
4693 			spin_unlock(ptl);
4694 			continue;
4695 		}
4696 		if (!huge_pte_none(pte)) {
4697 			pte_t old_pte;
4698 
4699 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4700 			pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4701 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
4702 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4703 			pages++;
4704 		}
4705 		spin_unlock(ptl);
4706 	}
4707 	/*
4708 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4709 	 * may have cleared our pud entry and done put_page on the page table:
4710 	 * once we release i_mmap_rwsem, another task can do the final put_page
4711 	 * and that page table be reused and filled with junk.  If we actually
4712 	 * did unshare a page of pmds, flush the range corresponding to the pud.
4713 	 */
4714 	if (shared_pmd)
4715 		flush_hugetlb_tlb_range(vma, range.start, range.end);
4716 	else
4717 		flush_hugetlb_tlb_range(vma, start, end);
4718 	/*
4719 	 * No need to call mmu_notifier_invalidate_range() we are downgrading
4720 	 * page table protection not changing it to point to a new page.
4721 	 *
4722 	 * See Documentation/vm/mmu_notifier.rst
4723 	 */
4724 	i_mmap_unlock_write(vma->vm_file->f_mapping);
4725 	mmu_notifier_invalidate_range_end(&range);
4726 
4727 	return pages << h->order;
4728 }
4729 
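/*
 * Reserve huge pages for the range [from, to) of @inode, in huge page
 * units.  Shared mappings charge the inode's reservation map; private
 * mappings get their own map and reserve the full range.  Returns 0 on
 * success or a negative error code.
 */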
4730 int hugetlb_reserve_pages(struct inode *inode,
4731 					long from, long to,
4732 					struct vm_area_struct *vma,
4733 					vm_flags_t vm_flags)
4734 {
4735 	long ret, chg;
4736 	struct hstate *h = hstate_inode(inode);
4737 	struct hugepage_subpool *spool = subpool_inode(inode);
4738 	struct resv_map *resv_map;
4739 	long gbl_reserve;
4740 
4741 	/* This should never happen */
4742 	if (from > to) {
4743 		VM_WARN(1, "%s called with a negative range\n", __func__);
4744 		return -EINVAL;
4745 	}
4746 
4747 	/*
4748 	 * Only apply hugepage reservation if asked. At fault time, an
4749 	 * attempt will be made for VM_NORESERVE to allocate a page
4750 	 * without using reserves
4751 	 */
4752 	if (vm_flags & VM_NORESERVE)
4753 		return 0;
4754 
4755 	/*
4756 	 * Shared mappings base their reservation on the number of pages that
4757 	 * are already allocated on behalf of the file. Private mappings need
4758 	 * to reserve the full area even if read-only as mprotect() may be
4759 	 * called to make the mapping read-write. Assume !vma is a shm mapping
4760 	 */
4761 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4762 		/*
4763 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
4764 		 * called for inodes for which resv_maps were created (see
4765 		 * hugetlbfs_get_inode).
4766 		 */
4767 		resv_map = inode_resv_map(inode);
4768 
4769 		chg = region_chg(resv_map, from, to);
4770 
4771 	} else {
4772 		resv_map = resv_map_alloc();
4773 		if (!resv_map)
4774 			return -ENOMEM;
4775 
4776 		chg = to - from;
4777 
4778 		set_vma_resv_map(vma, resv_map);
4779 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4780 	}
4781 
4782 	if (chg < 0) {
4783 		ret = chg;
4784 		goto out_err;
4785 	}
4786 
4787 	/*
4788 	 * There must be enough pages in the subpool for the mapping. If
4789 	 * the subpool has a minimum size, there may be some global
4790 	 * reservations already in place (gbl_reserve).
4791 	 */
4792 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4793 	if (gbl_reserve < 0) {
4794 		ret = -ENOSPC;
4795 		goto out_err;
4796 	}
4797 
4798 	/*
4799 	 * Check enough hugepages are available for the reservation.
4800 	 * Hand the pages back to the subpool if there are not
4801 	 */
4802 	ret = hugetlb_acct_memory(h, gbl_reserve);
4803 	if (ret < 0) {
4804 		/* put back original number of pages, chg */
4805 		(void)hugepage_subpool_put_pages(spool, chg);
4806 		goto out_err;
4807 	}
4808 
4809 	/*
4810 	 * Account for the reservations made. Shared mappings record regions
4811 	 * that have reservations as they are shared by multiple VMAs.
4812 	 * When the last VMA disappears, the region map says how much
4813 	 * the reservation was and the page cache tells how much of
4814 	 * the reservation was consumed. Private mappings are per-VMA and
4815 	 * only the consumed reservations are tracked. When the VMA
4816 	 * disappears, the original reservation is the VMA size and the
4817 	 * consumed reservations are stored in the map. Hence, nothing
4818 	 * else has to be done for private mappings here
4819 	 */
4820 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4821 		long add = region_add(resv_map, from, to);
4822 
4823 		if (unlikely(chg > add)) {
4824 			/*
4825 			 * pages in this range were added to the reserve
4826 			 * map between region_chg and region_add.  This
4827 			 * indicates a race with alloc_huge_page.  Adjust
4828 			 * the subpool and reserve counts modified above
4829 			 * based on the difference.
4830 			 */
4831 			long rsv_adjust;
4832 
4833 			rsv_adjust = hugepage_subpool_put_pages(spool,
4834 								chg - add);
4835 			hugetlb_acct_memory(h, -rsv_adjust);
4836 		}
4837 	}
4838 	return 0;
4839 out_err:
4840 	if (!vma || vma->vm_flags & VM_MAYSHARE)
4841 		/* Don't call region_abort if region_chg failed */
4842 		if (chg >= 0)
4843 			region_abort(resv_map, from, to);
4844 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4845 		kref_put(&resv_map->refs, resv_map_release);
4846 	return ret;
4847 }
4848 
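/*
 * Release the reservations for the range [start, end) of @inode after
 * @freed huge pages have actually been freed (e.g. on truncate or inode
 * eviction), adjusting the inode block count and the subpool/global
 * reserve counts.
 */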
4849 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4850 								long freed)
4851 {
4852 	struct hstate *h = hstate_inode(inode);
4853 	struct resv_map *resv_map = inode_resv_map(inode);
4854 	long chg = 0;
4855 	struct hugepage_subpool *spool = subpool_inode(inode);
4856 	long gbl_reserve;
4857 
4858 	/*
4859 	 * Since this routine can be called in the evict inode path for all
4860 	 * hugetlbfs inodes, resv_map could be NULL.
4861 	 */
4862 	if (resv_map) {
4863 		chg = region_del(resv_map, start, end);
4864 		/*
4865 		 * region_del() can fail in the rare case where a region
4866 		 * must be split and another region descriptor can not be
4867 		 * allocated.  If end == LONG_MAX, it will not fail.
4868 		 */
4869 		if (chg < 0)
4870 			return chg;
4871 	}
4872 
4873 	spin_lock(&inode->i_lock);
4874 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4875 	spin_unlock(&inode->i_lock);
4876 
4877 	/*
4878 	 * If the subpool has a minimum size, the number of global
4879 	 * reservations to be released may be adjusted.
4880 	 */
4881 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4882 	hugetlb_acct_memory(h, -gbl_reserve);
4883 
4884 	return 0;
4885 }
4886 
4887 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
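/*
 * Check whether @svma maps the same file range with compatible flags
 * and alignment, so that its pmd page table covering @addr could be
 * shared with @vma.  Returns the matching address in @svma, or 0 if
 * sharing is not possible.
 */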
4888 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4889 				struct vm_area_struct *vma,
4890 				unsigned long addr, pgoff_t idx)
4891 {
4892 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4893 				svma->vm_start;
4894 	unsigned long sbase = saddr & PUD_MASK;
4895 	unsigned long s_end = sbase + PUD_SIZE;
4896 
4897 	/* Allow segments to share if only one is marked locked */
4898 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4899 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4900 
4901 	/*
4902 	 * Match the virtual addresses, permissions and the alignment of the
4903 	 * page table page.
4904 	 */
4905 	if (pmd_index(addr) != pmd_index(saddr) ||
4906 	    vm_flags != svm_flags ||
4907 	    sbase < svma->vm_start || svma->vm_end < s_end)
4908 		return 0;
4909 
4910 	return saddr;
4911 }
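
/*
 * Illustrative worked example (not part of the original source), assuming
 * x86-64 with 4K base pages (PAGE_SHIFT = 12, PUD_SIZE = 1G): two tasks
 * share a hugetlbfs file and svma maps it from file offset 0 at
 * svma->vm_start = 0x7f0040000000 (PUD aligned, vm_pgoff = 0).  For a fault
 * in vma at the address backing file page index idx = 0x200 (file offset
 * 2M), the candidate shared address is
 *	saddr = ((0x200 - 0) << 12) + 0x7f0040000000 = 0x7f0040200000,
 * and it is returned only if the PMD index, vm_flags and PUD alignment
 * checks above all pass.
 */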
4912 
4913 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4914 {
4915 	unsigned long base = addr & PUD_MASK;
4916 	unsigned long end = base + PUD_SIZE;
4917 
4918 	/*
4919 	 * check on proper vm_flags and page table alignment
4920 	 */
4921 	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4922 		return true;
4923 	return false;
4924 }
4925 
4926 /*
4927  * Determine if start,end range within vma could be mapped by shared pmd.
4928  * If yes, adjust start and end to cover range associated with possible
4929  * shared pmd mappings.
4930  */
4931 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4932 				unsigned long *start, unsigned long *end)
4933 {
4934 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
4935 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
4936 
4937 	/*
4938 	 * The vma needs to span at least one aligned PUD-sized region, and the
4939 	 * start,end range must lie at least partially within it.
4940 	 */
4941 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
4942 		(*end <= v_start) || (*start >= v_end))
4943 		return;
4944 
4945 	/* Extend the range to be PUD aligned for a worst case scenario */
4946 	if (*start > v_start)
4947 		*start = ALIGN_DOWN(*start, PUD_SIZE);
4948 
4949 	if (*end < v_end)
4950 		*end = ALIGN(*end, PUD_SIZE);
4951 }
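
/*
 * Illustrative worked example (not part of the original source), assuming
 * PUD_SIZE = 1G as on x86-64: for a VM_MAYSHARE vma spanning
 * [0x40000000, 0xc0000000) and an unmap request for
 * [0x60000000, 0x68000000), the range is widened to the enclosing PUD-sized
 * unit, i.e. *start becomes 0x40000000 and *end becomes 0x80000000, so that
 * a shared PMD page covering that 1G region can be safely unshared and
 * unmapped.
 */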
4952 
4953 /*
4954  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4955  * and returns the corresponding pte. While this is not necessary for the
4956  * !shared pmd case because we can allocate the pmd later as well, it makes the
4957  * code much cleaner. pmd allocation is essential for the shared case because
4958  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4959  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4960  * bad pmd for sharing.
4961  */
4962 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4963 {
4964 	struct vm_area_struct *vma = find_vma(mm, addr);
4965 	struct address_space *mapping = vma->vm_file->f_mapping;
4966 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4967 			vma->vm_pgoff;
4968 	struct vm_area_struct *svma;
4969 	unsigned long saddr;
4970 	pte_t *spte = NULL;
4971 	pte_t *pte;
4972 	spinlock_t *ptl;
4973 
4974 	if (!vma_shareable(vma, addr))
4975 		return (pte_t *)pmd_alloc(mm, pud, addr);
4976 
4977 	i_mmap_lock_write(mapping);
4978 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4979 		if (svma == vma)
4980 			continue;
4981 
4982 		saddr = page_table_shareable(svma, vma, addr, idx);
4983 		if (saddr) {
4984 			spte = huge_pte_offset(svma->vm_mm, saddr,
4985 					       vma_mmu_pagesize(svma));
4986 			if (spte) {
4987 				get_page(virt_to_page(spte));
4988 				break;
4989 			}
4990 		}
4991 	}
4992 
4993 	if (!spte)
4994 		goto out;
4995 
4996 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4997 	if (pud_none(*pud)) {
4998 		pud_populate(mm, pud,
4999 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
5000 		mm_inc_nr_pmds(mm);
5001 	} else {
5002 		put_page(virt_to_page(spte));
5003 	}
5004 	spin_unlock(ptl);
5005 out:
5006 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
5007 	i_mmap_unlock_write(mapping);
5008 	return pte;
5009 }
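
/*
 * Illustrative scenario (not part of the original source): tasks A and B
 * both mmap the same hugetlbfs file MAP_SHARED at PUD-aligned addresses.
 * B faults first and allocates a PMD page for its 1G-aligned region.  When
 * A later faults in the corresponding region, the interval-tree walk above
 * finds B's vma, page_table_shareable() yields the matching address saddr
 * in B, huge_pte_offset() locates B's PMD page (spte), a reference is taken
 * on that page, and A's pud is populated with it.  Both tasks then share a
 * single page of PMD entries for that region.
 */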
5010 
5011 /*
5012  * unmap huge page backed by shared pte.
5013  *
5014  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
5015  * indicated by page_count > 1, unmap is achieved by clearing pud and
5016  * decrementing the ref count. If count == 1, the pte page is not shared.
5017  *
5018  * called with page table lock held.
5019  *
5020  * returns: 1 successfully unmapped a shared pte page
5021  *	    0 the underlying pte page is not shared, or it is the last user
5022  */
5023 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
5024 {
5025 	pgd_t *pgd = pgd_offset(mm, *addr);
5026 	p4d_t *p4d = p4d_offset(pgd, *addr);
5027 	pud_t *pud = pud_offset(p4d, *addr);
5028 
5029 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
5030 	if (page_count(virt_to_page(ptep)) == 1)
5031 		return 0;
5032 
5033 	pud_clear(pud);
5034 	put_page(virt_to_page(ptep));
5035 	mm_dec_nr_pmds(mm);
5036 	/*
5037 	 * This update of the passed address optimizes loops that sequentially
5038 	 * process addresses in increments of the huge page size (PMD_SIZE
5039 	 * in this case).  By clearing the pud, a PUD_SIZE area is unmapped.
5040 	 * Update the address to the 'last page' in the cleared area so that
5041 	 * the calling loop can move to the first page past this area.
5042 	 */
5043 	*addr |= PUD_SIZE - PMD_SIZE;
5044 	return 1;
5045 }
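
/*
 * Illustrative worked example (not part of the original source), assuming
 * x86-64 where PUD_SIZE = 1G and PMD_SIZE = 2M: if *addr is 0x40000000 when
 * the shared PMD page is unshared, then
 *	*addr |= PUD_SIZE - PMD_SIZE
 * sets *addr to 0x7fe00000, the last 2M slot of the cleared 1G region.
 * A caller iterating in PMD_SIZE steps then advances to 0x80000000, the
 * first address past the region that was just unmapped.
 */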
5046 #define want_pmd_share()	(1)
5047 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5048 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5049 {
5050 	return NULL;
5051 }
5052 
5053 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
5054 {
5055 	return 0;
5056 }
5057 
5058 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5059 				unsigned long *start, unsigned long *end)
5060 {
5061 }
5062 #define want_pmd_share()	(0)
5063 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5064 
5065 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
5066 pte_t *huge_pte_alloc(struct mm_struct *mm,
5067 			unsigned long addr, unsigned long sz)
5068 {
5069 	pgd_t *pgd;
5070 	p4d_t *p4d;
5071 	pud_t *pud;
5072 	pte_t *pte = NULL;
5073 
5074 	pgd = pgd_offset(mm, addr);
5075 	p4d = p4d_alloc(mm, pgd, addr);
5076 	if (!p4d)
5077 		return NULL;
5078 	pud = pud_alloc(mm, p4d, addr);
5079 	if (pud) {
5080 		if (sz == PUD_SIZE) {
5081 			pte = (pte_t *)pud;
5082 		} else {
5083 			BUG_ON(sz != PMD_SIZE);
5084 			if (want_pmd_share() && pud_none(*pud))
5085 				pte = huge_pmd_share(mm, addr, pud);
5086 			else
5087 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
5088 		}
5089 	}
5090 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
5091 
5092 	return pte;
5093 }
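
/*
 * Illustrative usage sketch (not part of the original source): a fault
 * handler typically obtains the huge pte slot for the faulting address
 * roughly like this, with haddr assumed to be the huge-page-aligned
 * address:
 *
 *	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
 *	if (!ptep)
 *		return VM_FAULT_OOM;
 *
 * For sz == PUD_SIZE the returned pointer is the pud slot itself; for
 * sz == PMD_SIZE it is a pmd slot, possibly shared with another mapping of
 * the same file via huge_pmd_share() above.
 */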
5094 
5095 /*
5096  * huge_pte_offset() - Walk the page table to resolve the hugepage
5097  * entry at address @addr
5098  *
5099  * Return: Pointer to page table or swap entry (PUD or PMD) for
5100  * address @addr, or NULL if a p*d_none() entry is encountered and the
5101  * size @sz doesn't match the hugepage size at this level of the page
5102  * table.
5103  */
5104 pte_t *huge_pte_offset(struct mm_struct *mm,
5105 		       unsigned long addr, unsigned long sz)
5106 {
5107 	pgd_t *pgd;
5108 	p4d_t *p4d;
5109 	pud_t *pud, pud_entry;
5110 	pmd_t *pmd, pmd_entry;
5111 
5112 	pgd = pgd_offset(mm, addr);
5113 	if (!pgd_present(*pgd))
5114 		return NULL;
5115 	p4d = p4d_offset(pgd, addr);
5116 	if (!p4d_present(*p4d))
5117 		return NULL;
5118 
5119 	pud = pud_offset(p4d, addr);
5120 	pud_entry = READ_ONCE(*pud);
5121 	if (sz != PUD_SIZE && pud_none(pud_entry))
5122 		return NULL;
5123 	/* hugepage or swap? */
5124 	if (pud_huge(pud_entry) || !pud_present(pud_entry))
5125 		return (pte_t *)pud;
5126 
5127 	pmd = pmd_offset(pud, addr);
5128 	pmd_entry = READ_ONCE(*pmd);
5129 	if (sz != PMD_SIZE && pmd_none(pmd_entry))
5130 		return NULL;
5131 	/* hugepage or swap? */
5132 	if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
5133 		return (pte_t *)pmd;
5134 
5135 	return NULL;
5136 }
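
/*
 * Illustrative usage sketch (not part of the original source): callers such
 * as follow_huge_pmd_pte() below look up the entry for an address with
 *
 *	ptep = huge_pte_offset(mm, address, huge_page_size(h));
 *
 * and, if the result is non-NULL, take the pte lock and re-read the entry
 * with huge_ptep_get() before using it, since the returned slot may hold a
 * present hugepage entry or a swap/migration entry, and may change
 * concurrently.
 */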
5137 
5138 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
5139 
5140 /*
5141  * These functions can be overridden if your architecture needs its own
5142  * behavior.
5143  */
5144 struct page * __weak
5145 follow_huge_addr(struct mm_struct *mm, unsigned long address,
5146 			      int write)
5147 {
5148 	return ERR_PTR(-EINVAL);
5149 }
5150 
5151 struct page * __weak
5152 follow_huge_pd(struct vm_area_struct *vma,
5153 	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
5154 {
5155 	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5156 	return NULL;
5157 }
5158 
5159 struct page * __weak
5160 follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
5161 {
5162 	struct hstate *h = hstate_vma(vma);
5163 	struct mm_struct *mm = vma->vm_mm;
5164 	struct page *page = NULL;
5165 	spinlock_t *ptl;
5166 	pte_t *ptep, pte;
5167 
5168 retry:
5169 	ptep = huge_pte_offset(mm, address, huge_page_size(h));
5170 	if (!ptep)
5171 		return NULL;
5172 
5173 	ptl = huge_pte_lock(h, mm, ptep);
5174 	pte = huge_ptep_get(ptep);
5175 	if (pte_present(pte)) {
5176 		page = pte_page(pte) +
5177 			((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
5178 		if (flags & FOLL_GET)
5179 			get_page(page);
5180 	} else {
5181 		if (is_hugetlb_entry_migration(pte)) {
5182 			spin_unlock(ptl);
5183 			__migration_entry_wait(mm, ptep, ptl);
5184 			goto retry;
5185 		}
5186 		/*
5187 		 * hwpoisoned entry is treated as no_page_table in
5188 		 * follow_page_mask().
5189 		 */
5190 	}
5191 
5192 	spin_unlock(ptl);
5193 	return page;
5194 }
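
/*
 * Illustrative worked example (not part of the original source), assuming a
 * 2M huge page with 4K base pages: for address 0x40123000 inside the huge
 * page mapped at 0x40000000, the offset within the huge page is 0x123000,
 * so the function returns pte_page(pte) + (0x123000 >> 12), i.e. subpage
 * index 0x123 (291) of the compound page.
 */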
5195 
5196 struct page * __weak
5197 follow_huge_pud(struct mm_struct *mm, unsigned long address,
5198 		pud_t *pud, int flags)
5199 {
5200 	if (flags & FOLL_GET)
5201 		return NULL;
5202 
5203 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5204 }
5205 
5206 struct page * __weak
5207 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5208 {
5209 	if (flags & FOLL_GET)
5210 		return NULL;
5211 
5212 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5213 }
5214 
5215 bool isolate_huge_page(struct page *page, struct list_head *list)
5216 {
5217 	bool ret = true;
5218 
5219 	spin_lock(&hugetlb_lock);
5220 	if (!PageHeadHuge(page) || !page_huge_active(page) ||
5221 	    !get_page_unless_zero(page)) {
5222 		ret = false;
5223 		goto unlock;
5224 	}
5225 	clear_page_huge_active(page);
5226 	list_move_tail(&page->lru, list);
5227 unlock:
5228 	spin_unlock(&hugetlb_lock);
5229 	return ret;
5230 }
5231 
5232 void putback_active_hugepage(struct page *page)
5233 {
5234 	VM_BUG_ON_PAGE(!PageHead(page), page);
5235 	spin_lock(&hugetlb_lock);
5236 	set_page_huge_active(page);
5237 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5238 	spin_unlock(&hugetlb_lock);
5239 	put_page(page);
5240 }
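
/*
 * Illustrative usage sketch (not part of the original source): migration
 * paths isolate hugetlb pages onto a private list and rely on the putback
 * helpers if migration fails.  The callback new_page below is a
 * hypothetical new_page_t allocator supplied by the caller.
 */
#if 0
	LIST_HEAD(pagelist);

	if (isolate_huge_page(page, &pagelist)) {
		/* page is off the active list, with an extra reference held */
		if (migrate_pages(&pagelist, new_page, NULL, 0,
				  MIGRATE_SYNC, MR_MEMORY_HOTPLUG))
			/*
			 * Failure: return any unmigrated pages, including
			 * hugetlb pages via putback_active_hugepage().
			 */
			putback_movable_pages(&pagelist);
	}
#endif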
5241 
5242 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5243 {
5244 	struct hstate *h = page_hstate(oldpage);
5245 
5246 	hugetlb_cgroup_migrate(oldpage, newpage);
5247 	set_page_owner_migrate_reason(newpage, reason);
5248 
5249 	/*
5250 	 * Transfer the temporary state of the new huge page. This is the
5251 	 * reverse of other transitions because the newpage is the one that
5252 	 * will persist while the old one will be freed, so newpage takes
5253 	 * over the temporary status.
5254 	 *
5255 	 * Also note that we have to transfer the per-node surplus state
5256 	 * here as well, otherwise the global surplus count will not match
5257 	 * the per-node counts.
5258 	 */
5259 	if (PageHugeTemporary(newpage)) {
5260 		int old_nid = page_to_nid(oldpage);
5261 		int new_nid = page_to_nid(newpage);
5262 
5263 		SetPageHugeTemporary(oldpage);
5264 		ClearPageHugeTemporary(newpage);
5265 
5266 		spin_lock(&hugetlb_lock);
5267 		if (h->surplus_huge_pages_node[old_nid]) {
5268 			h->surplus_huge_pages_node[old_nid]--;
5269 			h->surplus_huge_pages_node[new_nid]++;
5270 		}
5271 		spin_unlock(&hugetlb_lock);
5272 	}
5273 }
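
/*
 * Illustrative worked example (not part of the original source): if the old
 * huge page being migrated away sits on node 0 and the temporary newpage
 * was allocated on node 1, the block above moves one unit of surplus
 * accounting from node 0 to node 1 (when node 0 has any surplus pages), so
 * that the sum of surplus_huge_pages_node[] stays consistent with the
 * global surplus count once the old page is freed.
 */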
5274