1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
21 
22 #include <asm/tlb.h>
23 #include <asm/pgalloc.h>
24 #include "internal.h"
25 
26 enum scan_result {
27 	SCAN_FAIL,
28 	SCAN_SUCCEED,
29 	SCAN_PMD_NULL,
30 	SCAN_EXCEED_NONE_PTE,
31 	SCAN_EXCEED_SWAP_PTE,
32 	SCAN_EXCEED_SHARED_PTE,
33 	SCAN_PTE_NON_PRESENT,
34 	SCAN_PTE_UFFD_WP,
35 	SCAN_PAGE_RO,
36 	SCAN_LACK_REFERENCED_PAGE,
37 	SCAN_PAGE_NULL,
38 	SCAN_SCAN_ABORT,
39 	SCAN_PAGE_COUNT,
40 	SCAN_PAGE_LRU,
41 	SCAN_PAGE_LOCK,
42 	SCAN_PAGE_ANON,
43 	SCAN_PAGE_COMPOUND,
44 	SCAN_ANY_PROCESS,
45 	SCAN_VMA_NULL,
46 	SCAN_VMA_CHECK,
47 	SCAN_ADDRESS_RANGE,
48 	SCAN_SWAP_CACHE_PAGE,
49 	SCAN_DEL_PAGE_LRU,
50 	SCAN_ALLOC_HUGE_PAGE_FAIL,
51 	SCAN_CGROUP_CHARGE_FAIL,
52 	SCAN_TRUNCATED,
53 	SCAN_PAGE_HAS_PRIVATE,
54 };
55 
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/huge_memory.h>
58 
59 static struct task_struct *khugepaged_thread __read_mostly;
60 static DEFINE_MUTEX(khugepaged_mutex);
61 
62 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
63 static unsigned int khugepaged_pages_to_scan __read_mostly;
64 static unsigned int khugepaged_pages_collapsed;
65 static unsigned int khugepaged_full_scans;
66 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67 /* during fragmentation, poll the hugepage allocator once every minute */
68 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69 static unsigned long khugepaged_sleep_expire;
70 static DEFINE_SPINLOCK(khugepaged_mm_lock);
71 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
72 /*
73  * by default, collapse hugepages if there is at least one pte mapped as
74  * it would have been mapped had the vma been large enough at the time of
75  * the page fault.
76  */
77 static unsigned int khugepaged_max_ptes_none __read_mostly;
78 static unsigned int khugepaged_max_ptes_swap __read_mostly;
79 static unsigned int khugepaged_max_ptes_shared __read_mostly;
80 
81 #define MM_SLOTS_HASH_BITS 10
82 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
83 
84 static struct kmem_cache *mm_slot_cache __read_mostly;
85 
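/* Max pte-mapped THP addresses remembered per mm for deferred collapse. */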
86 #define MAX_PTE_MAPPED_THP 8
87 
88 /**
89  * struct mm_slot - hash lookup from mm to mm_slot
90  * @hash: hash collision list
91  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92  * @mm: the mm that this information is valid for
93  */
94 struct mm_slot {
95 	struct hlist_node hash;
96 	struct list_head mm_node;
97 	struct mm_struct *mm;
98 
99 	/* pte-mapped THP in this mm */
100 	int nr_pte_mapped_thp;
101 	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
102 };
103 
104 /**
105  * struct khugepaged_scan - cursor for scanning
106  * @mm_head: the head of the mm list to scan
107  * @mm_slot: the current mm_slot we are scanning
108  * @address: the next address inside that to be scanned
109  *
110  * There is only one khugepaged_scan instance of this cursor structure.
111  */
112 struct khugepaged_scan {
113 	struct list_head mm_head;
114 	struct mm_slot *mm_slot;
115 	unsigned long address;
116 };
117 
118 static struct khugepaged_scan khugepaged_scan = {
119 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
120 };
121 
122 #ifdef CONFIG_SYSFS
123 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
124 					 struct kobj_attribute *attr,
125 					 char *buf)
126 {
127 	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
128 }
129 
130 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
131 					  struct kobj_attribute *attr,
132 					  const char *buf, size_t count)
133 {
134 	unsigned long msecs;
135 	int err;
136 
137 	err = kstrtoul(buf, 10, &msecs);
138 	if (err || msecs > UINT_MAX)
139 		return -EINVAL;
140 
141 	khugepaged_scan_sleep_millisecs = msecs;
142 	khugepaged_sleep_expire = 0;
143 	wake_up_interruptible(&khugepaged_wait);
144 
145 	return count;
146 }
147 static struct kobj_attribute scan_sleep_millisecs_attr =
148 	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
149 	       scan_sleep_millisecs_store);
150 
151 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
152 					  struct kobj_attribute *attr,
153 					  char *buf)
154 {
155 	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
156 }
157 
158 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
159 					   struct kobj_attribute *attr,
160 					   const char *buf, size_t count)
161 {
162 	unsigned long msecs;
163 	int err;
164 
165 	err = kstrtoul(buf, 10, &msecs);
166 	if (err || msecs > UINT_MAX)
167 		return -EINVAL;
168 
169 	khugepaged_alloc_sleep_millisecs = msecs;
170 	khugepaged_sleep_expire = 0;
171 	wake_up_interruptible(&khugepaged_wait);
172 
173 	return count;
174 }
175 static struct kobj_attribute alloc_sleep_millisecs_attr =
176 	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
177 	       alloc_sleep_millisecs_store);
178 
179 static ssize_t pages_to_scan_show(struct kobject *kobj,
180 				  struct kobj_attribute *attr,
181 				  char *buf)
182 {
183 	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
184 }
185 static ssize_t pages_to_scan_store(struct kobject *kobj,
186 				   struct kobj_attribute *attr,
187 				   const char *buf, size_t count)
188 {
189 	int err;
190 	unsigned long pages;
191 
192 	err = kstrtoul(buf, 10, &pages);
193 	if (err || !pages || pages > UINT_MAX)
194 		return -EINVAL;
195 
196 	khugepaged_pages_to_scan = pages;
197 
198 	return count;
199 }
200 static struct kobj_attribute pages_to_scan_attr =
201 	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
202 	       pages_to_scan_store);
203 
204 static ssize_t pages_collapsed_show(struct kobject *kobj,
205 				    struct kobj_attribute *attr,
206 				    char *buf)
207 {
208 	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
209 }
210 static struct kobj_attribute pages_collapsed_attr =
211 	__ATTR_RO(pages_collapsed);
212 
213 static ssize_t full_scans_show(struct kobject *kobj,
214 			       struct kobj_attribute *attr,
215 			       char *buf)
216 {
217 	return sprintf(buf, "%u\n", khugepaged_full_scans);
218 }
219 static struct kobj_attribute full_scans_attr =
220 	__ATTR_RO(full_scans);
221 
222 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
223 				      struct kobj_attribute *attr, char *buf)
224 {
225 	return single_hugepage_flag_show(kobj, attr, buf,
226 				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
227 }
228 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
229 				       struct kobj_attribute *attr,
230 				       const char *buf, size_t count)
231 {
232 	return single_hugepage_flag_store(kobj, attr, buf, count,
233 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
234 }
235 static struct kobj_attribute khugepaged_defrag_attr =
236 	__ATTR(defrag, 0644, khugepaged_defrag_show,
237 	       khugepaged_defrag_store);
238 
239 /*
240  * max_ptes_none controls whether khugepaged should collapse hugepages
241  * over unmapped ptes, which can increase the memory footprint of the
242  * vmas. When max_ptes_none is 0, khugepaged will not reduce the
243  * available free memory in the system as it runs. Increasing
244  * max_ptes_none will instead potentially reduce the free memory in the
245  * system during the khugepaged scan.
246  */
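/*
 * Illustrative usage (assuming the standard sysfs location for THP tunables,
 * and HPAGE_PMD_NR == 512, e.g. x86-64 with 4K pages):
 *   echo 511 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 */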
247 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
248 					     struct kobj_attribute *attr,
249 					     char *buf)
250 {
251 	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
252 }
253 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
254 					      struct kobj_attribute *attr,
255 					      const char *buf, size_t count)
256 {
257 	int err;
258 	unsigned long max_ptes_none;
259 
260 	err = kstrtoul(buf, 10, &max_ptes_none);
261 	if (err || max_ptes_none > HPAGE_PMD_NR-1)
262 		return -EINVAL;
263 
264 	khugepaged_max_ptes_none = max_ptes_none;
265 
266 	return count;
267 }
268 static struct kobj_attribute khugepaged_max_ptes_none_attr =
269 	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
270 	       khugepaged_max_ptes_none_store);
271 
272 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
273 					     struct kobj_attribute *attr,
274 					     char *buf)
275 {
276 	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
277 }
278 
279 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
280 					      struct kobj_attribute *attr,
281 					      const char *buf, size_t count)
282 {
283 	int err;
284 	unsigned long max_ptes_swap;
285 
286 	err  = kstrtoul(buf, 10, &max_ptes_swap);
287 	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
288 		return -EINVAL;
289 
290 	khugepaged_max_ptes_swap = max_ptes_swap;
291 
292 	return count;
293 }
294 
295 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
296 	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
297 	       khugepaged_max_ptes_swap_store);
298 
299 static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
300 					     struct kobj_attribute *attr,
301 					     char *buf)
302 {
303 	return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
304 }
305 
306 static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
307 					      struct kobj_attribute *attr,
308 					      const char *buf, size_t count)
309 {
310 	int err;
311 	unsigned long max_ptes_shared;
312 
313 	err  = kstrtoul(buf, 10, &max_ptes_shared);
314 	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
315 		return -EINVAL;
316 
317 	khugepaged_max_ptes_shared = max_ptes_shared;
318 
319 	return count;
320 }
321 
322 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
323 	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
324 	       khugepaged_max_ptes_shared_store);
325 
326 static struct attribute *khugepaged_attr[] = {
327 	&khugepaged_defrag_attr.attr,
328 	&khugepaged_max_ptes_none_attr.attr,
329 	&khugepaged_max_ptes_swap_attr.attr,
330 	&khugepaged_max_ptes_shared_attr.attr,
331 	&pages_to_scan_attr.attr,
332 	&pages_collapsed_attr.attr,
333 	&full_scans_attr.attr,
334 	&scan_sleep_millisecs_attr.attr,
335 	&alloc_sleep_millisecs_attr.attr,
336 	NULL,
337 };
338 
339 struct attribute_group khugepaged_attr_group = {
340 	.attrs = khugepaged_attr,
341 	.name = "khugepaged",
342 };
343 #endif /* CONFIG_SYSFS */
344 
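/*
 * Reached via madvise(2), e.g. (illustrative call only):
 *   madvise(addr, length, MADV_HUGEPAGE);
 * MADV_HUGEPAGE clears VM_NOHUGEPAGE, sets VM_HUGEPAGE and registers the mm
 * with khugepaged; MADV_NOHUGEPAGE performs the reverse flag update.
 */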
345 int hugepage_madvise(struct vm_area_struct *vma,
346 		     unsigned long *vm_flags, int advice)
347 {
348 	switch (advice) {
349 	case MADV_HUGEPAGE:
350 #ifdef CONFIG_S390
351 		/*
352 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
353 		 * can't handle this properly after s390_enable_sie, so we simply
354 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
355 		 */
356 		if (mm_has_pgste(vma->vm_mm))
357 			return 0;
358 #endif
359 		*vm_flags &= ~VM_NOHUGEPAGE;
360 		*vm_flags |= VM_HUGEPAGE;
361 		/*
362 		 * If the vma becomes good for khugepaged to scan,
363 		 * register it here without waiting for a page fault that
364 		 * may not happen any time soon.
365 		 */
366 		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
367 				khugepaged_enter_vma_merge(vma, *vm_flags))
368 			return -ENOMEM;
369 		break;
370 	case MADV_NOHUGEPAGE:
371 		*vm_flags &= ~VM_HUGEPAGE;
372 		*vm_flags |= VM_NOHUGEPAGE;
373 		/*
374 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
375 		 * this vma even if we leave the mm registered in khugepaged if
376 		 * it got registered before VM_NOHUGEPAGE was set.
377 		 */
378 		break;
379 	}
380 
381 	return 0;
382 }
383 
384 int __init khugepaged_init(void)
385 {
386 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
387 					  sizeof(struct mm_slot),
388 					  __alignof__(struct mm_slot), 0, NULL);
389 	if (!mm_slot_cache)
390 		return -ENOMEM;
391 
392 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
393 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
394 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
395 	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
396 
397 	return 0;
398 }
399 
400 void __init khugepaged_destroy(void)
401 {
402 	kmem_cache_destroy(mm_slot_cache);
403 }
404 
405 static inline struct mm_slot *alloc_mm_slot(void)
406 {
407 	if (!mm_slot_cache)	/* initialization failed */
408 		return NULL;
409 	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
410 }
411 
412 static inline void free_mm_slot(struct mm_slot *mm_slot)
413 {
414 	kmem_cache_free(mm_slot_cache, mm_slot);
415 }
416 
417 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
418 {
419 	struct mm_slot *mm_slot;
420 
421 	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
422 		if (mm == mm_slot->mm)
423 			return mm_slot;
424 
425 	return NULL;
426 }
427 
428 static void insert_to_mm_slots_hash(struct mm_struct *mm,
429 				    struct mm_slot *mm_slot)
430 {
431 	mm_slot->mm = mm;
432 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
433 }
434 
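/* True once the mm has no remaining users, i.e. the owning process has exited. */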
435 static inline int khugepaged_test_exit(struct mm_struct *mm)
436 {
437 	return atomic_read(&mm->mm_users) == 0;
438 }
439 
440 static bool hugepage_vma_check(struct vm_area_struct *vma,
441 			       unsigned long vm_flags)
442 {
443 	if (!transhuge_vma_enabled(vma, vm_flags))
444 		return false;
445 
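	/* File-backed vmas must keep file offset and address PMD-aligned to each other. */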
446 	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
447 				vma->vm_pgoff, HPAGE_PMD_NR))
448 		return false;
449 
450 	/* Enabled via shmem mount options or sysfs settings. */
451 	if (shmem_file(vma->vm_file))
452 		return shmem_huge_enabled(vma);
453 
454 	/* THP settings require madvise. */
455 	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
456 		return false;
457 
458 	/* Only regular files are valid */
459 	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
460 	    !inode_is_open_for_write(vma->vm_file->f_inode) &&
461 	    (vm_flags & VM_EXEC)) {
462 		struct inode *inode = vma->vm_file->f_inode;
463 
464 		return S_ISREG(inode->i_mode);
465 	}
466 
467 	if (!vma->anon_vma || vma->vm_ops)
468 		return false;
469 	if (vma_is_temporary_stack(vma))
470 		return false;
471 	return !(vm_flags & VM_NO_KHUGEPAGED);
472 }
473 
474 int __khugepaged_enter(struct mm_struct *mm)
475 {
476 	struct mm_slot *mm_slot;
477 	int wakeup;
478 
479 	mm_slot = alloc_mm_slot();
480 	if (!mm_slot)
481 		return -ENOMEM;
482 
483 	/* __khugepaged_exit() must not run from under us */
484 	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
485 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
486 		free_mm_slot(mm_slot);
487 		return 0;
488 	}
489 
490 	spin_lock(&khugepaged_mm_lock);
491 	insert_to_mm_slots_hash(mm, mm_slot);
492 	/*
493 	 * Insert just behind the scanning cursor, to let the area settle
494 	 * down a little.
495 	 */
496 	wakeup = list_empty(&khugepaged_scan.mm_head);
497 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
498 	spin_unlock(&khugepaged_mm_lock);
499 
500 	mmgrab(mm);
501 	if (wakeup)
502 		wake_up_interruptible(&khugepaged_wait);
503 
504 	return 0;
505 }
506 
507 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
508 			       unsigned long vm_flags)
509 {
510 	unsigned long hstart, hend;
511 
512 	/*
513 	 * khugepaged only supports read-only files for non-shmem files.
514 	 * khugepaged does not yet work on special mappings. And
515 	 * file-private shmem THP is not supported.
516 	 */
517 	if (!hugepage_vma_check(vma, vm_flags))
518 		return 0;
519 
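	/* Only consider the huge-page aligned portion of the vma, if any. */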
520 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
521 	hend = vma->vm_end & HPAGE_PMD_MASK;
522 	if (hstart < hend)
523 		return khugepaged_enter(vma, vm_flags);
524 	return 0;
525 }
526 
527 void __khugepaged_exit(struct mm_struct *mm)
528 {
529 	struct mm_slot *mm_slot;
530 	int free = 0;
531 
532 	spin_lock(&khugepaged_mm_lock);
533 	mm_slot = get_mm_slot(mm);
534 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
535 		hash_del(&mm_slot->hash);
536 		list_del(&mm_slot->mm_node);
537 		free = 1;
538 	}
539 	spin_unlock(&khugepaged_mm_lock);
540 
541 	if (free) {
542 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
543 		free_mm_slot(mm_slot);
544 		mmdrop(mm);
545 	} else if (mm_slot) {
546 		/*
547 		 * This is required to serialize against
548 		 * khugepaged_test_exit() (which is guaranteed to run
549 		 * under mmap_lock read mode). Stop here (after we
550 		 * return, all pagetables will be destroyed) until
551 		 * khugepaged has finished working on the pagetables
552 		 * under the mmap_lock.
553 		 */
554 		mmap_write_lock(mm);
555 		mmap_write_unlock(mm);
556 	}
557 }
558 
559 static void release_pte_page(struct page *page)
560 {
561 	mod_node_page_state(page_pgdat(page),
562 			NR_ISOLATED_ANON + page_is_file_lru(page),
563 			-compound_nr(page));
564 	unlock_page(page);
565 	putback_lru_page(page);
566 }
567 
568 static void release_pte_pages(pte_t *pte, pte_t *_pte,
569 		struct list_head *compound_pagelist)
570 {
571 	struct page *page, *tmp;
572 
573 	while (--_pte >= pte) {
574 		pte_t pteval = *_pte;
575 
576 		page = pte_page(pteval);
577 		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
578 				!PageCompound(page))
579 			release_pte_page(page);
580 	}
581 
582 	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
583 		list_del(&page->lru);
584 		release_pte_page(page);
585 	}
586 }
587 
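/*
 * A page is only safe to collapse if its refcount equals what we can account
 * for: its total mapcount, plus one swap-cache reference per subpage. Any
 * surplus indicates a GUP or other external pin.
 */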
588 static bool is_refcount_suitable(struct page *page)
589 {
590 	int expected_refcount;
591 
592 	expected_refcount = total_mapcount(page);
593 	if (PageSwapCache(page))
594 		expected_refcount += compound_nr(page);
595 
596 	return page_count(page) == expected_refcount;
597 }
598 
599 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
600 					unsigned long address,
601 					pte_t *pte,
602 					struct list_head *compound_pagelist)
603 {
604 	struct page *page = NULL;
605 	pte_t *_pte;
606 	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
607 	bool writable = false;
608 
609 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
610 	     _pte++, address += PAGE_SIZE) {
611 		pte_t pteval = *_pte;
612 		if (pte_none(pteval) || (pte_present(pteval) &&
613 				is_zero_pfn(pte_pfn(pteval)))) {
614 			if (!userfaultfd_armed(vma) &&
615 			    ++none_or_zero <= khugepaged_max_ptes_none) {
616 				continue;
617 			} else {
618 				result = SCAN_EXCEED_NONE_PTE;
619 				goto out;
620 			}
621 		}
622 		if (!pte_present(pteval)) {
623 			result = SCAN_PTE_NON_PRESENT;
624 			goto out;
625 		}
626 		if (pte_uffd_wp(pteval)) {
627 			result = SCAN_PTE_UFFD_WP;
628 			goto out;
629 		}
630 		page = vm_normal_page(vma, address, pteval);
631 		if (unlikely(!page)) {
632 			result = SCAN_PAGE_NULL;
633 			goto out;
634 		}
635 
636 		VM_BUG_ON_PAGE(!PageAnon(page), page);
637 
638 		if (page_mapcount(page) > 1 &&
639 				++shared > khugepaged_max_ptes_shared) {
640 			result = SCAN_EXCEED_SHARED_PTE;
641 			goto out;
642 		}
643 
644 		if (PageCompound(page)) {
645 			struct page *p;
646 			page = compound_head(page);
647 
648 			/*
649 			 * Check if we have dealt with the compound page
650 			 * already
651 			 */
652 			list_for_each_entry(p, compound_pagelist, lru) {
653 				if (page == p)
654 					goto next;
655 			}
656 		}
657 
658 		/*
659 		 * We can do it before isolate_lru_page because the
660 		 * page can't be freed from under us. NOTE: PG_lock
661 		 * is needed to serialize against split_huge_page
662 		 * when invoked from the VM.
663 		 */
664 		if (!trylock_page(page)) {
665 			result = SCAN_PAGE_LOCK;
666 			goto out;
667 		}
668 
669 		/*
670 		 * Check if the page has any GUP (or other external) pins.
671 		 *
672 		 * The page table that maps the page has already been unlinked
673 		 * from the page table tree and this process cannot get
674 		 * an additional pin on the page.
675 		 *
676 		 * New pins can come later if the page is shared across fork,
677 		 * but not from this process. The other process cannot write to
678 		 * the page, only trigger CoW.
679 		 */
680 		if (!is_refcount_suitable(page)) {
681 			unlock_page(page);
682 			result = SCAN_PAGE_COUNT;
683 			goto out;
684 		}
685 		if (!pte_write(pteval) && PageSwapCache(page) &&
686 				!reuse_swap_page(page, NULL)) {
687 			/*
688 			 * Page is in the swap cache and cannot be re-used.
689 			 * It cannot be collapsed into a THP.
690 			 */
691 			unlock_page(page);
692 			result = SCAN_SWAP_CACHE_PAGE;
693 			goto out;
694 		}
695 
696 		/*
697 		 * Isolate the page to avoid collapsing a hugepage
698 		 * currently in use by the VM.
699 		 */
700 		if (isolate_lru_page(page)) {
701 			unlock_page(page);
702 			result = SCAN_DEL_PAGE_LRU;
703 			goto out;
704 		}
705 		mod_node_page_state(page_pgdat(page),
706 				NR_ISOLATED_ANON + page_is_file_lru(page),
707 				compound_nr(page));
708 		VM_BUG_ON_PAGE(!PageLocked(page), page);
709 		VM_BUG_ON_PAGE(PageLRU(page), page);
710 
711 		if (PageCompound(page))
712 			list_add_tail(&page->lru, compound_pagelist);
713 next:
714 		/* There should be enough young ptes to collapse the page */
715 		if (pte_young(pteval) ||
716 		    page_is_young(page) || PageReferenced(page) ||
717 		    mmu_notifier_test_young(vma->vm_mm, address))
718 			referenced++;
719 
720 		if (pte_write(pteval))
721 			writable = true;
722 	}
723 
724 	if (unlikely(!writable)) {
725 		result = SCAN_PAGE_RO;
726 	} else if (unlikely(!referenced)) {
727 		result = SCAN_LACK_REFERENCED_PAGE;
728 	} else {
729 		result = SCAN_SUCCEED;
730 		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
731 						    referenced, writable, result);
732 		return 1;
733 	}
734 out:
735 	release_pte_pages(pte, _pte, compound_pagelist);
736 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
737 					    referenced, writable, result);
738 	return 0;
739 }
740 
741 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
742 				      struct vm_area_struct *vma,
743 				      unsigned long address,
744 				      spinlock_t *ptl,
745 				      struct list_head *compound_pagelist)
746 {
747 	struct page *src_page, *tmp;
748 	pte_t *_pte;
749 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
750 				_pte++, page++, address += PAGE_SIZE) {
751 		pte_t pteval = *_pte;
752 
753 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
754 			clear_user_highpage(page, address);
755 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
756 			if (is_zero_pfn(pte_pfn(pteval))) {
757 				/*
758 				 * ptl mostly unnecessary.
759 				 */
760 				spin_lock(ptl);
761 				/*
762 				 * paravirt calls inside pte_clear here are
763 				 * superfluous.
764 				 */
765 				pte_clear(vma->vm_mm, address, _pte);
766 				spin_unlock(ptl);
767 			}
768 		} else {
769 			src_page = pte_page(pteval);
770 			copy_user_highpage(page, src_page, address, vma);
771 			if (!PageCompound(src_page))
772 				release_pte_page(src_page);
773 			/*
774 			 * ptl mostly unnecessary, but preempt has to
775 			 * be disabled to update the per-cpu stats
776 			 * inside page_remove_rmap().
777 			 */
778 			spin_lock(ptl);
779 			/*
780 			 * paravirt calls inside pte_clear here are
781 			 * superfluous.
782 			 */
783 			pte_clear(vma->vm_mm, address, _pte);
784 			page_remove_rmap(src_page, false);
785 			spin_unlock(ptl);
786 			free_page_and_swap_cache(src_page);
787 		}
788 	}
789 
790 	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
791 		list_del(&src_page->lru);
792 		release_pte_page(src_page);
793 	}
794 }
795 
796 static void khugepaged_alloc_sleep(void)
797 {
798 	DEFINE_WAIT(wait);
799 
800 	add_wait_queue(&khugepaged_wait, &wait);
801 	freezable_schedule_timeout_interruptible(
802 		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
803 	remove_wait_queue(&khugepaged_wait, &wait);
804 }
805 
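/*
 * Per-node counts of pages seen while scanning a single PMD range; reset at
 * the start of each scan and used to pick the NUMA node to allocate from.
 */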
806 static int khugepaged_node_load[MAX_NUMNODES];
807 
808 static bool khugepaged_scan_abort(int nid)
809 {
810 	int i;
811 
812 	/*
813 	 * If node_reclaim_mode is disabled, then no extra effort is made to
814 	 * allocate memory locally.
815 	 */
816 	if (!node_reclaim_mode)
817 		return false;
818 
819 	/* If there is a count for this node already, it must be acceptable */
820 	if (khugepaged_node_load[nid])
821 		return false;
822 
823 	for (i = 0; i < MAX_NUMNODES; i++) {
824 		if (!khugepaged_node_load[i])
825 			continue;
826 		if (node_distance(nid, i) > node_reclaim_distance)
827 			return true;
828 	}
829 	return false;
830 }
831 
832 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
833 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
834 {
835 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
836 }
837 
838 #ifdef CONFIG_NUMA
839 static int khugepaged_find_target_node(void)
840 {
841 	static int last_khugepaged_target_node = NUMA_NO_NODE;
842 	int nid, target_node = 0, max_value = 0;
843 
844 	/* find first node with max normal pages hit */
845 	for (nid = 0; nid < MAX_NUMNODES; nid++)
846 		if (khugepaged_node_load[nid] > max_value) {
847 			max_value = khugepaged_node_load[nid];
848 			target_node = nid;
849 		}
850 
851 	/* do some balancing if several nodes have the same hit record */
852 	if (target_node <= last_khugepaged_target_node)
853 		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
854 				nid++)
855 			if (max_value == khugepaged_node_load[nid]) {
856 				target_node = nid;
857 				break;
858 			}
859 
860 	last_khugepaged_target_node = target_node;
861 	return target_node;
862 }
863 
864 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
865 {
866 	if (IS_ERR(*hpage)) {
867 		if (!*wait)
868 			return false;
869 
870 		*wait = false;
871 		*hpage = NULL;
872 		khugepaged_alloc_sleep();
873 	} else if (*hpage) {
874 		put_page(*hpage);
875 		*hpage = NULL;
876 	}
877 
878 	return true;
879 }
880 
881 static struct page *
882 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
883 {
884 	VM_BUG_ON_PAGE(*hpage, *hpage);
885 
886 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
887 	if (unlikely(!*hpage)) {
888 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
889 		*hpage = ERR_PTR(-ENOMEM);
890 		return NULL;
891 	}
892 
893 	prep_transhuge_page(*hpage);
894 	count_vm_event(THP_COLLAPSE_ALLOC);
895 	return *hpage;
896 }
897 #else
898 static int khugepaged_find_target_node(void)
899 {
900 	return 0;
901 }
902 
903 static inline struct page *alloc_khugepaged_hugepage(void)
904 {
905 	struct page *page;
906 
907 	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
908 			   HPAGE_PMD_ORDER);
909 	if (page)
910 		prep_transhuge_page(page);
911 	return page;
912 }
913 
914 static struct page *khugepaged_alloc_hugepage(bool *wait)
915 {
916 	struct page *hpage;
917 
918 	do {
919 		hpage = alloc_khugepaged_hugepage();
920 		if (!hpage) {
921 			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
922 			if (!*wait)
923 				return NULL;
924 
925 			*wait = false;
926 			khugepaged_alloc_sleep();
927 		} else
928 			count_vm_event(THP_COLLAPSE_ALLOC);
929 	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
930 
931 	return hpage;
932 }
933 
934 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
935 {
936 	/*
937 	 * If the hpage allocated earlier was briefly exposed in page cache
938 	 * before collapse_file() failed, it is possible that racing lookups
939 	 * have not yet completed, and would then be unpleasantly surprised by
940 	 * finding the hpage reused for the same mapping at a different offset.
941 	 * Just release the previous allocation if there is any danger of that.
942 	 */
943 	if (*hpage && page_count(*hpage) > 1) {
944 		put_page(*hpage);
945 		*hpage = NULL;
946 	}
947 
948 	if (!*hpage)
949 		*hpage = khugepaged_alloc_hugepage(wait);
950 
951 	if (unlikely(!*hpage))
952 		return false;
953 
954 	return true;
955 }
956 
957 static struct page *
958 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
959 {
960 	VM_BUG_ON(!*hpage);
961 
962 	return  *hpage;
963 }
964 #endif
965 
966 /*
967  * If mmap_lock was temporarily dropped, revalidate the vma
968  * before taking mmap_lock again.
969  * Return 0 on success, otherwise return a non-zero
970  * value (scan code).
971  */
972 
973 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
974 		struct vm_area_struct **vmap)
975 {
976 	struct vm_area_struct *vma;
977 	unsigned long hstart, hend;
978 
979 	if (unlikely(khugepaged_test_exit(mm)))
980 		return SCAN_ANY_PROCESS;
981 
982 	*vmap = vma = find_vma(mm, address);
983 	if (!vma)
984 		return SCAN_VMA_NULL;
985 
986 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
987 	hend = vma->vm_end & HPAGE_PMD_MASK;
988 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
989 		return SCAN_ADDRESS_RANGE;
990 	if (!hugepage_vma_check(vma, vma->vm_flags))
991 		return SCAN_VMA_CHECK;
992 	/* Anon VMA expected */
993 	if (!vma->anon_vma || vma->vm_ops)
994 		return SCAN_VMA_CHECK;
995 	return 0;
996 }
997 
998 /*
999  * Bring missing pages in from swap, to complete THP collapse.
1000  * Only done if khugepaged_scan_pmd believes it is worthwhile.
1001  *
1002  * Called and returns without pte mapped or spinlocks held,
1003  * but with mmap_lock held to protect against vma changes.
1004  */
1005 
1006 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1007 					struct vm_area_struct *vma,
1008 					unsigned long haddr, pmd_t *pmd,
1009 					int referenced)
1010 {
1011 	int swapped_in = 0;
1012 	vm_fault_t ret = 0;
1013 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
1014 
1015 	for (address = haddr; address < end; address += PAGE_SIZE) {
1016 		struct vm_fault vmf = {
1017 			.vma = vma,
1018 			.address = address,
1019 			.pgoff = linear_page_index(vma, haddr),
1020 			.flags = FAULT_FLAG_ALLOW_RETRY,
1021 			.pmd = pmd,
1022 			.vma_flags = vma->vm_flags,
1023 			.vma_page_prot = vma->vm_page_prot,
1024 		};
1025 
1026 		vmf.pte = pte_offset_map(pmd, address);
1027 		vmf.orig_pte = *vmf.pte;
1028 		if (!is_swap_pte(vmf.orig_pte)) {
1029 			pte_unmap(vmf.pte);
1030 			continue;
1031 		}
1032 		swapped_in++;
1033 		ret = do_swap_page(&vmf);
1034 
1035 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1036 		if (ret & VM_FAULT_RETRY) {
1037 			mmap_read_lock(mm);
1038 			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
1039 				/* vma is no longer available, don't continue to swapin */
1040 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1041 				return false;
1042 			}
1043 			/* check if the pmd is still valid */
1044 			if (mm_find_pmd(mm, haddr) != pmd) {
1045 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1046 				return false;
1047 			}
1048 		}
1049 		if (ret & VM_FAULT_ERROR) {
1050 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1051 			return false;
1052 		}
1053 	}
1054 
1055 	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1056 	if (swapped_in)
1057 		lru_add_drain();
1058 
1059 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1060 	return true;
1061 }
1062 
1063 static void collapse_huge_page(struct mm_struct *mm,
1064 				   unsigned long address,
1065 				   struct page **hpage,
1066 				   int node, int referenced, int unmapped)
1067 {
1068 	LIST_HEAD(compound_pagelist);
1069 	pmd_t *pmd, _pmd;
1070 	pte_t *pte;
1071 	pgtable_t pgtable;
1072 	struct page *new_page;
1073 	spinlock_t *pmd_ptl, *pte_ptl;
1074 	int isolated = 0, result = 0;
1075 	struct vm_area_struct *vma;
1076 	struct mmu_notifier_range range;
1077 	gfp_t gfp;
1078 
1079 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1080 
1081 	/* Only allocate from the target node */
1082 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1083 
1084 	/*
1085 	 * Before allocating the hugepage, release the mmap_lock read lock.
1086 	 * The allocation can take potentially a long time if it involves
1087 	 * sync compaction, and we do not need to hold the mmap_lock during
1088 	 * that. We will recheck the vma after taking it again in write mode.
1089 	 */
1090 	mmap_read_unlock(mm);
1091 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1092 	if (!new_page) {
1093 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1094 		goto out_nolock;
1095 	}
1096 
1097 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1098 		result = SCAN_CGROUP_CHARGE_FAIL;
1099 		goto out_nolock;
1100 	}
1101 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1102 
1103 	mmap_read_lock(mm);
1104 	result = hugepage_vma_revalidate(mm, address, &vma);
1105 	if (result) {
1106 		mmap_read_unlock(mm);
1107 		goto out_nolock;
1108 	}
1109 
1110 	pmd = mm_find_pmd(mm, address);
1111 	if (!pmd) {
1112 		result = SCAN_PMD_NULL;
1113 		mmap_read_unlock(mm);
1114 		goto out_nolock;
1115 	}
1116 
1117 	/*
1118 	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1119 	 * If it fails, we release mmap_lock and jump to out_nolock.
1120 	 * Continuing to collapse would cause inconsistency.
1121 	 */
1122 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1123 						     pmd, referenced)) {
1124 		mmap_read_unlock(mm);
1125 		goto out_nolock;
1126 	}
1127 
1128 	mmap_read_unlock(mm);
1129 	/*
1130 	 * Prevent all access to the pagetables, with the exception of
1131 	 * gup_fast (handled later by the ptep_clear_flush) and the VM
1132 	 * (handled by the anon_vma lock + PG_lock).
1133 	 */
1134 	mmap_write_lock(mm);
1135 	result = hugepage_vma_revalidate(mm, address, &vma);
1136 	if (result)
1137 		goto out;
1138 	/* check if the pmd is still valid */
1139 	if (mm_find_pmd(mm, address) != pmd)
1140 		goto out;
1141 
1142 	vm_write_begin(vma);
1143 	anon_vma_lock_write(vma->anon_vma);
1144 
1145 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1146 				address, address + HPAGE_PMD_SIZE);
1147 	mmu_notifier_invalidate_range_start(&range);
1148 
1149 	pte = pte_offset_map(pmd, address);
1150 	pte_ptl = pte_lockptr(mm, pmd);
1151 
1152 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1153 	/*
1154 	 * This removes any huge TLB entry from the CPU so we won't allow
1155 	 * huge and small TLB entries for the same virtual address to
1156 	 * avoid the risk of CPU bugs in that area.
1157 	 *
1158 	 * Parallel fast GUP is fine since fast GUP will back off when
1159 	 * it detects PMD is changed.
1160 	 */
1161 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1162 	spin_unlock(pmd_ptl);
1163 	mmu_notifier_invalidate_range_end(&range);
1164 	tlb_remove_table_sync_one();
1165 
1166 	spin_lock(pte_ptl);
1167 	isolated = __collapse_huge_page_isolate(vma, address, pte,
1168 			&compound_pagelist);
1169 	spin_unlock(pte_ptl);
1170 
1171 	if (unlikely(!isolated)) {
1172 		pte_unmap(pte);
1173 		spin_lock(pmd_ptl);
1174 		BUG_ON(!pmd_none(*pmd));
1175 		/*
1176 		 * We can only use set_pmd_at when establishing
1177 		 * hugepmds and never for establishing regular pmds that
1178 		 * point to regular pagetables. Use pmd_populate for that.
1179 		 */
1180 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1181 		spin_unlock(pmd_ptl);
1182 		anon_vma_unlock_write(vma->anon_vma);
1183 		vm_write_end(vma);
1184 		result = SCAN_FAIL;
1185 		goto out;
1186 	}
1187 
1188 	/*
1189 	 * All pages are isolated and locked so anon_vma rmap
1190 	 * can't run anymore.
1191 	 */
1192 	anon_vma_unlock_write(vma->anon_vma);
1193 
1194 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1195 			&compound_pagelist);
1196 	pte_unmap(pte);
1197 	__SetPageUptodate(new_page);
1198 	pgtable = pmd_pgtable(_pmd);
1199 
1200 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1201 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1202 
1203 	/*
1204 	 * spin_lock() below is not the equivalent of smp_wmb(), so
1205 	 * this is needed to avoid the copy_huge_page writes becoming
1206 	 * visible after the set_pmd_at() write.
1207 	 */
1208 	smp_wmb();
1209 
1210 	spin_lock(pmd_ptl);
1211 	BUG_ON(!pmd_none(*pmd));
1212 	page_add_new_anon_rmap(new_page, vma, address, true);
1213 	lru_cache_add_inactive_or_unevictable(new_page, vma);
1214 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1215 	set_pmd_at(mm, address, pmd, _pmd);
1216 	update_mmu_cache_pmd(vma, address, pmd);
1217 	spin_unlock(pmd_ptl);
1218 	vm_write_end(vma);
1219 
1220 	*hpage = NULL;
1221 
1222 	khugepaged_pages_collapsed++;
1223 	result = SCAN_SUCCEED;
1224 out_up_write:
1225 	mmap_write_unlock(mm);
1226 out_nolock:
1227 	if (!IS_ERR_OR_NULL(*hpage))
1228 		mem_cgroup_uncharge(*hpage);
1229 	trace_mm_collapse_huge_page(mm, isolated, result);
1230 	return;
1231 out:
1232 	goto out_up_write;
1233 }
1234 
1235 static int khugepaged_scan_pmd(struct mm_struct *mm,
1236 			       struct vm_area_struct *vma,
1237 			       unsigned long address,
1238 			       struct page **hpage)
1239 {
1240 	pmd_t *pmd;
1241 	pte_t *pte, *_pte;
1242 	int ret = 0, result = 0, referenced = 0;
1243 	int none_or_zero = 0, shared = 0;
1244 	struct page *page = NULL;
1245 	unsigned long _address;
1246 	spinlock_t *ptl;
1247 	int node = NUMA_NO_NODE, unmapped = 0;
1248 	bool writable = false;
1249 
1250 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1251 
1252 	pmd = mm_find_pmd(mm, address);
1253 	if (!pmd) {
1254 		result = SCAN_PMD_NULL;
1255 		goto out;
1256 	}
1257 
1258 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1259 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1260 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1261 	     _pte++, _address += PAGE_SIZE) {
1262 		pte_t pteval = *_pte;
1263 		if (is_swap_pte(pteval)) {
1264 			if (++unmapped <= khugepaged_max_ptes_swap) {
1265 				/*
1266 				 * Always be strict with uffd-wp
1267 				 * enabled swap entries.  Please see
1268 				 * comment below for pte_uffd_wp().
1269 				 */
1270 				if (pte_swp_uffd_wp(pteval)) {
1271 					result = SCAN_PTE_UFFD_WP;
1272 					goto out_unmap;
1273 				}
1274 				continue;
1275 			} else {
1276 				result = SCAN_EXCEED_SWAP_PTE;
1277 				goto out_unmap;
1278 			}
1279 		}
1280 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1281 			if (!userfaultfd_armed(vma) &&
1282 			    ++none_or_zero <= khugepaged_max_ptes_none) {
1283 				continue;
1284 			} else {
1285 				result = SCAN_EXCEED_NONE_PTE;
1286 				goto out_unmap;
1287 			}
1288 		}
1289 		if (!pte_present(pteval)) {
1290 			result = SCAN_PTE_NON_PRESENT;
1291 			goto out_unmap;
1292 		}
1293 		if (pte_uffd_wp(pteval)) {
1294 			/*
1295 			 * Don't collapse the page if any of the small
1296 			 * PTEs are armed with uffd write protection.
1297 			 * Here we can also mark the new huge pmd as
1298 			 * write protected if any of the small ones is
1299 			 * marked, but that could bring unknown
1300 			 * userfault messages that fall outside of
1301 			 * the registered range.  So, just be simple.
1302 			 */
1303 			result = SCAN_PTE_UFFD_WP;
1304 			goto out_unmap;
1305 		}
1306 		if (pte_write(pteval))
1307 			writable = true;
1308 
1309 		page = vm_normal_page(vma, _address, pteval);
1310 		if (unlikely(!page)) {
1311 			result = SCAN_PAGE_NULL;
1312 			goto out_unmap;
1313 		}
1314 
1315 		if (page_mapcount(page) > 1 &&
1316 				++shared > khugepaged_max_ptes_shared) {
1317 			result = SCAN_EXCEED_SHARED_PTE;
1318 			goto out_unmap;
1319 		}
1320 
1321 		page = compound_head(page);
1322 
1323 		/*
1324 		 * Record which node the original page is from and save this
1325 		 * information to khugepaged_node_load[].
1326 		 * Khugepaged will allocate the hugepage from the node that has
1327 		 * the max hit record.
1328 		 */
1329 		node = page_to_nid(page);
1330 		if (khugepaged_scan_abort(node)) {
1331 			result = SCAN_SCAN_ABORT;
1332 			goto out_unmap;
1333 		}
1334 		khugepaged_node_load[node]++;
1335 		if (!PageLRU(page)) {
1336 			result = SCAN_PAGE_LRU;
1337 			goto out_unmap;
1338 		}
1339 		if (PageLocked(page)) {
1340 			result = SCAN_PAGE_LOCK;
1341 			goto out_unmap;
1342 		}
1343 		if (!PageAnon(page)) {
1344 			result = SCAN_PAGE_ANON;
1345 			goto out_unmap;
1346 		}
1347 
1348 		/*
1349 		 * Check if the page has any GUP (or other external) pins.
1350 		 *
1351 		 * Here the check is racy; it may see total_mapcount > refcount
1352 		 * in some cases.
1353 		 * For example, one process with one forked child process.
1354 		 * The parent has the PMD split due to MADV_DONTNEED, then
1355 		 * the child is trying to unmap the whole PMD, but khugepaged
1356 		 * may be scanning the parent between the child clearing the
1357 		 * PageDoubleMap flag and decrementing the mapcount.  So
1358 		 * khugepaged may see total_mapcount > refcount.
1359 		 *
1360 		 * But such a case is ephemeral; we could always retry the
1361 		 * collapse later.  However it may report a false positive if
1362 		 * the page has excessive GUP pins (e.g. 512).  Anyway, the same
1363 		 * check will be done again later, so the risk seems low.
1364 		 */
1365 		if (!is_refcount_suitable(page)) {
1366 			result = SCAN_PAGE_COUNT;
1367 			goto out_unmap;
1368 		}
1369 		if (pte_young(pteval) ||
1370 		    page_is_young(page) || PageReferenced(page) ||
1371 		    mmu_notifier_test_young(vma->vm_mm, address))
1372 			referenced++;
1373 	}
1374 	if (!writable) {
1375 		result = SCAN_PAGE_RO;
1376 	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1377 		result = SCAN_LACK_REFERENCED_PAGE;
1378 	} else {
1379 		result = SCAN_SUCCEED;
1380 		ret = 1;
1381 	}
1382 out_unmap:
1383 	pte_unmap_unlock(pte, ptl);
1384 	if (ret) {
1385 		node = khugepaged_find_target_node();
1386 		/* collapse_huge_page will return with the mmap_lock released */
1387 		collapse_huge_page(mm, address, hpage, node,
1388 				referenced, unmapped);
1389 	}
1390 out:
1391 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1392 				     none_or_zero, result, unmapped);
1393 	return ret;
1394 }
1395 
1396 static void collect_mm_slot(struct mm_slot *mm_slot)
1397 {
1398 	struct mm_struct *mm = mm_slot->mm;
1399 
1400 	lockdep_assert_held(&khugepaged_mm_lock);
1401 
1402 	if (khugepaged_test_exit(mm)) {
1403 		/* free mm_slot */
1404 		hash_del(&mm_slot->hash);
1405 		list_del(&mm_slot->mm_node);
1406 
1407 		/*
1408 		 * Not strictly needed because the mm exited already.
1409 		 *
1410 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1411 		 */
1412 
1413 		/* khugepaged_mm_lock is actually not necessary for the below */
1414 		free_mm_slot(mm_slot);
1415 		mmdrop(mm);
1416 	}
1417 }
1418 
1419 #ifdef CONFIG_SHMEM
1420 /*
1421  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1422  * khugepaged should try to collapse the page table.
1423  */
1424 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1425 					 unsigned long addr)
1426 {
1427 	struct mm_slot *mm_slot;
1428 
1429 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1430 
1431 	spin_lock(&khugepaged_mm_lock);
1432 	mm_slot = get_mm_slot(mm);
1433 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1434 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1435 	spin_unlock(&khugepaged_mm_lock);
1436 	return 0;
1437 }
1438 
1439 /**
1440  * collapse_pte_mapped_thp - try to collapse a pte-mapped THP for mm at address haddr.
1441  *
1442  * This function checks whether all the PTEs in the PMD are pointing to the
1443  * right THP. If so, retract the page table so the THP can refault in
1444  * as pmd-mapped.
1445  */
1446 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1447 {
1448 	unsigned long haddr = addr & HPAGE_PMD_MASK;
1449 	struct vm_area_struct *vma = find_vma(mm, haddr);
1450 	struct page *hpage;
1451 	pte_t *start_pte, *pte;
1452 	pmd_t *pmd, _pmd;
1453 	spinlock_t *ptl;
1454 	int count = 0;
1455 	int i;
1456 	struct mmu_notifier_range range;
1457 
1458 	if (!vma || !vma->vm_file ||
1459 	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1460 		return;
1461 
1462 	/*
1463 	 * The vma's vm_flags may not have VM_HUGEPAGE if the page was not
1464 	 * collapsed by this mm. But we can still collapse if the page is
1465 	 * a valid THP. Add the extra VM_HUGEPAGE so hugepage_vma_check()
1466 	 * will not fail the vma for missing VM_HUGEPAGE.
1467 	 */
1468 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1469 		return;
1470 
1471 	hpage = find_lock_page(vma->vm_file->f_mapping,
1472 			       linear_page_index(vma, haddr));
1473 	if (!hpage)
1474 		return;
1475 
1476 	if (!PageHead(hpage))
1477 		goto drop_hpage;
1478 
1479 	pmd = mm_find_pmd(mm, haddr);
1480 	if (!pmd)
1481 		goto drop_hpage;
1482 
1483 	vm_write_begin(vma);
1484 
1485 	/*
1486 	 * We need to lock the mapping so that from here on, only GUP-fast and
1487 	 * hardware page walks can access the parts of the page tables that
1488 	 * we're operating on.
1489 	 */
1490 	i_mmap_lock_write(vma->vm_file->f_mapping);
1491 
1492 	/*
1493 	 * This spinlock should be unnecessary: Nobody else should be accessing
1494 	 * the page tables under spinlock protection here, only
1495 	 * lockless_pages_from_mm() and the hardware page walker can access page
1496 	 * tables while all the high-level locks are held in write mode.
1497 	 */
1498 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1499 
1500 	/* step 1: check all mapped PTEs are to the right huge page */
1501 	for (i = 0, addr = haddr, pte = start_pte;
1502 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1503 		struct page *page;
1504 
1505 		/* empty pte, skip */
1506 		if (pte_none(*pte))
1507 			continue;
1508 
1509 		/* page swapped out, abort */
1510 		if (!pte_present(*pte))
1511 			goto abort;
1512 
1513 		page = vm_normal_page(vma, addr, *pte);
1514 
1515 		/*
1516 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1517 		 * page table, but the new page will not be a subpage of hpage.
1518 		 */
1519 		if (hpage + i != page)
1520 			goto abort;
1521 		count++;
1522 	}
1523 
1524 	/* step 2: adjust rmap */
1525 	for (i = 0, addr = haddr, pte = start_pte;
1526 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1527 		struct page *page;
1528 
1529 		if (pte_none(*pte))
1530 			continue;
1531 		page = vm_normal_page(vma, addr, *pte);
1532 		page_remove_rmap(page, false);
1533 	}
1534 
1535 	pte_unmap_unlock(start_pte, ptl);
1536 
1537 	/* step 3: set proper refcount and mm_counters. */
1538 	if (count) {
1539 		page_ref_sub(hpage, count);
1540 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1541 	}
1542 
1543 	/* step 4: collapse pmd */
1544 	/* we make no change to anon, but protect concurrent anon page lookup */
1545 	if (vma->anon_vma)
1546 		anon_vma_lock_write(vma->anon_vma);
1547 
1548 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
1549 				haddr + HPAGE_PMD_SIZE);
1550 	mmu_notifier_invalidate_range_start(&range);
1551 	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1552 	vm_write_end(vma);
1553 	mm_dec_nr_ptes(mm);
1554 	tlb_remove_table_sync_one();
1555 	mmu_notifier_invalidate_range_end(&range);
1556 	pte_free(mm, pmd_pgtable(_pmd));
1557 
1558 	if (vma->anon_vma)
1559 		anon_vma_unlock_write(vma->anon_vma);
1560 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1561 
1562 drop_hpage:
1563 	unlock_page(hpage);
1564 	put_page(hpage);
1565 	return;
1566 
1567 abort:
1568 	pte_unmap_unlock(start_pte, ptl);
1569 	vm_write_end(vma);
1570 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1571 	goto drop_hpage;
1572 }
1573 
1574 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1575 {
1576 	struct mm_struct *mm = mm_slot->mm;
1577 	int i;
1578 
1579 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1580 		return 0;
1581 
1582 	if (!mmap_write_trylock(mm))
1583 		return -EBUSY;
1584 
1585 	if (unlikely(khugepaged_test_exit(mm)))
1586 		goto out;
1587 
1588 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1589 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1590 
1591 out:
1592 	mm_slot->nr_pte_mapped_thp = 0;
1593 	mmap_write_unlock(mm);
1594 	return 0;
1595 }
1596 
1597 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1598 {
1599 	struct vm_area_struct *vma;
1600 	struct mm_struct *mm;
1601 	unsigned long addr;
1602 	pmd_t *pmd, _pmd;
1603 
1604 	i_mmap_lock_write(mapping);
1605 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1606 		/*
1607 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1608 		 * got written to. These VMAs are likely not worth taking
1609 		 * mmap_write_lock(mm) for, as the PMD-mapping is likely to be split
1610 		 * later.
1611 		 *
1612 		 * Note that the vma->anon_vma check is racy: it can be set up after
1613 		 * the check but before we take mmap_lock by the fault path.
1614 		 * But the page lock would prevent establishing any new ptes of the
1615 		 * page, so we are safe.
1616 		 *
1617 		 * An alternative would be to drop the check, but check that the page
1618 		 * table is clear before calling pmdp_collapse_flush() under
1619 		 * ptl. It has higher chance to recover THP for the VMA, but
1620 		 * has higher cost too. It would also probably require locking
1621 		 * the anon_vma.
1622 		 */
1623 		if (vma->anon_vma)
1624 			continue;
1625 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1626 		if (addr & ~HPAGE_PMD_MASK)
1627 			continue;
1628 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1629 			continue;
1630 		mm = vma->vm_mm;
1631 		pmd = mm_find_pmd(mm, addr);
1632 		if (!pmd)
1633 			continue;
1634 		/*
1635 		 * We need exclusive mmap_lock to retract page table.
1636 		 *
1637 		 * We use trylock due to lock inversion: we need to acquire
1638 		 * mmap_lock while holding page lock. Fault path does it in
1639 		 * reverse order. Trylock is a way to avoid deadlock.
1640 		 */
1641 		if (mmap_write_trylock(mm)) {
1642 			if (!khugepaged_test_exit(mm)) {
1643 				struct mmu_notifier_range range;
1644 
1645 				vm_write_begin(vma);
1646 				mmu_notifier_range_init(&range,
1647 							MMU_NOTIFY_CLEAR, 0,
1648 							NULL, mm, addr,
1649 							addr + HPAGE_PMD_SIZE);
1650 				mmu_notifier_invalidate_range_start(&range);
1651 				/* assume page table is clear */
1652 				_pmd = pmdp_collapse_flush(vma, addr, pmd);
1653 				vm_write_end(vma);
1654 				mm_dec_nr_ptes(mm);
1655 				tlb_remove_table_sync_one();
1656 				pte_free(mm, pmd_pgtable(_pmd));
1657 				mmu_notifier_invalidate_range_end(&range);
1658 			}
1659 			mmap_write_unlock(mm);
1660 		} else {
1661 			/* Try again later */
1662 			khugepaged_add_pte_mapped_thp(mm, addr);
1663 		}
1664 	}
1665 	i_mmap_unlock_write(mapping);
1666 }
1667 
1668 /**
1669  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1670  *
1671  * Basic scheme is simple, details are more complex:
1672  *  - allocate and lock a new huge page;
1673  *  - scan page cache replacing old pages with the new one
1674  *    + swap/gup in pages if necessary;
1675  *    + fill in gaps;
1676  *    + keep old pages around in case rollback is required;
1677  *  - if replacing succeeds:
1678  *    + copy data over;
1679  *    + free old pages;
1680  *    + unlock huge page;
1681  *  - if replacing fails:
1682  *    + put all pages back and unfreeze them;
1683  *    + restore gaps in the page cache;
1684  *    + unlock and free huge page;
1685  */
1686 static void collapse_file(struct mm_struct *mm,
1687 		struct file *file, pgoff_t start,
1688 		struct page **hpage, int node)
1689 {
1690 	struct address_space *mapping = file->f_mapping;
1691 	gfp_t gfp;
1692 	struct page *new_page;
1693 	pgoff_t index, end = start + HPAGE_PMD_NR;
1694 	LIST_HEAD(pagelist);
1695 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1696 	int nr_none = 0, result = SCAN_SUCCEED;
1697 	bool is_shmem = shmem_file(file);
1698 
1699 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1700 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1701 
1702 	/* Only allocate from the target node */
1703 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1704 
1705 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1706 	if (!new_page) {
1707 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1708 		goto out;
1709 	}
1710 
1711 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1712 		result = SCAN_CGROUP_CHARGE_FAIL;
1713 		goto out;
1714 	}
1715 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1716 
1717 	/* This will be less messy when we use multi-index entries */
1718 	do {
1719 		xas_lock_irq(&xas);
1720 		xas_create_range(&xas);
1721 		if (!xas_error(&xas))
1722 			break;
1723 		xas_unlock_irq(&xas);
1724 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1725 			result = SCAN_FAIL;
1726 			goto out;
1727 		}
1728 	} while (1);
1729 
1730 	__SetPageLocked(new_page);
1731 	if (is_shmem)
1732 		__SetPageSwapBacked(new_page);
1733 	new_page->index = start;
1734 	new_page->mapping = mapping;
1735 
1736 	/*
1737 	 * At this point the new_page is locked and not up-to-date.
1738 	 * It's safe to insert it into the page cache, because nobody would
1739 	 * be able to map it or use it in another way until we unlock it.
1740 	 */
1741 
1742 	xas_set(&xas, start);
1743 	for (index = start; index < end; index++) {
1744 		struct page *page = xas_next(&xas);
1745 
1746 		VM_BUG_ON(index != xas.xa_index);
1747 		if (is_shmem) {
1748 			if (!page) {
1749 				/*
1750 				 * Stop if extent has been truncated or
1751 				 * hole-punched, and is now completely
1752 				 * empty.
1753 				 */
1754 				if (index == start) {
1755 					if (!xas_next_entry(&xas, end - 1)) {
1756 						result = SCAN_TRUNCATED;
1757 						goto xa_locked;
1758 					}
1759 					xas_set(&xas, index);
1760 				}
1761 				if (!shmem_charge(mapping->host, 1)) {
1762 					result = SCAN_FAIL;
1763 					goto xa_locked;
1764 				}
1765 				xas_store(&xas, new_page);
1766 				nr_none++;
1767 				continue;
1768 			}
1769 
1770 			if (xa_is_value(page) || !PageUptodate(page)) {
1771 				xas_unlock_irq(&xas);
1772 				/* swap in or instantiate fallocated page */
1773 				if (shmem_getpage(mapping->host, index, &page,
1774 						  SGP_NOHUGE)) {
1775 					result = SCAN_FAIL;
1776 					goto xa_unlocked;
1777 				}
1778 			} else if (trylock_page(page)) {
1779 				get_page(page);
1780 				xas_unlock_irq(&xas);
1781 			} else {
1782 				result = SCAN_PAGE_LOCK;
1783 				goto xa_locked;
1784 			}
1785 		} else {	/* !is_shmem */
1786 			if (!page || xa_is_value(page)) {
1787 				xas_unlock_irq(&xas);
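				/*
				 * Page not resident (hole or shadow entry):
				 * start readahead, then look the page up
				 * again below with the page lock held.
				 */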
1788 				page_cache_sync_readahead(mapping, &file->f_ra,
1789 							  file, index,
1790 							  end - index);
1791 				/* drain pagevecs to help isolate_lru_page() */
1792 				lru_add_drain();
1793 				page = find_lock_page(mapping, index);
1794 				if (unlikely(page == NULL)) {
1795 					result = SCAN_FAIL;
1796 					goto xa_unlocked;
1797 				}
1798 			} else if (PageDirty(page)) {
1799 				/*
1800 				 * khugepaged only works on read-only fd,
1801 				 * so this page is dirty because it hasn't
1802 				 * been flushed since first write. There
1803 				 * won't be new dirty pages.
1804 				 *
1805 				 * Trigger async flush here and hope the
1806 				 * writeback is done when khugepaged
1807 				 * revisits this page.
1808 				 *
1809 				 * This is a one-off situation. We are not
1810 			 * forcing writeback in a loop.
1811 				 */
1812 				xas_unlock_irq(&xas);
1813 				filemap_flush(mapping);
1814 				result = SCAN_FAIL;
1815 				goto xa_unlocked;
1816 			} else if (PageWriteback(page)) {
1817 				xas_unlock_irq(&xas);
1818 				result = SCAN_FAIL;
1819 				goto xa_unlocked;
1820 			} else if (trylock_page(page)) {
1821 				get_page(page);
1822 				xas_unlock_irq(&xas);
1823 			} else {
1824 				result = SCAN_PAGE_LOCK;
1825 				goto xa_locked;
1826 			}
1827 		}
1828 
1829 		/*
1830 		 * The page must be locked, so we can drop the i_pages lock
1831 		 * without racing with truncate.
1832 		 */
1833 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1834 
1835 		/* make sure the page is up to date */
1836 		if (unlikely(!PageUptodate(page))) {
1837 			result = SCAN_FAIL;
1838 			goto out_unlock;
1839 		}
1840 
1841 		/*
1842 		 * If file was truncated then extended, or hole-punched, before
1843 		 * we locked the first page, then a THP might be there already.
1844 		 */
1845 		if (PageTransCompound(page)) {
1846 			result = SCAN_PAGE_COMPOUND;
1847 			goto out_unlock;
1848 		}
1849 
1850 		if (page_mapping(page) != mapping) {
1851 			result = SCAN_TRUNCATED;
1852 			goto out_unlock;
1853 		}
1854 
1855 		if (!is_shmem && (PageDirty(page) ||
1856 				  PageWriteback(page))) {
1857 			/*
1858 			 * khugepaged only works on read-only fd, so this
1859 			 * page is dirty because it hasn't been flushed
1860 			 * since first write.
1861 			 */
1862 			result = SCAN_FAIL;
1863 			goto out_unlock;
1864 		}
1865 
1866 		if (isolate_lru_page(page)) {
1867 			result = SCAN_DEL_PAGE_LRU;
1868 			goto out_unlock;
1869 		}
1870 
1871 		if (page_has_private(page) &&
1872 		    !try_to_release_page(page, GFP_KERNEL)) {
1873 			result = SCAN_PAGE_HAS_PRIVATE;
1874 			putback_lru_page(page);
1875 			goto out_unlock;
1876 		}
1877 
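		/*
		 * Each PTE mapping holds a page reference; unmap the page
		 * everywhere so the refcount freeze below can succeed.
		 */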
1878 		if (page_mapped(page))
1879 			unmap_mapping_pages(mapping, index, 1, false);
1880 
1881 		xas_lock_irq(&xas);
1882 		xas_set(&xas, index);
1883 
1884 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1885 		VM_BUG_ON_PAGE(page_mapped(page), page);
1886 
1887 		/*
1888 		 * The page is expected to have page_count() == 3:
1889 		 *  - we hold a pin on it;
1890 		 *  - one reference from page cache;
1891 		 *  - one from isolate_lru_page;
1892 		 */
1893 		if (!page_ref_freeze(page, 3)) {
1894 			result = SCAN_PAGE_COUNT;
1895 			xas_unlock_irq(&xas);
1896 			putback_lru_page(page);
1897 			goto out_unlock;
1898 		}
1899 
1900 		/*
1901 		 * Add the page to the list to be able to undo the collapse if
1902 		 * something goes wrong.
1903 		 */
1904 		list_add_tail(&page->lru, &pagelist);
1905 
1906 		/* Finally, replace with the new page. */
1907 		xas_store(&xas, new_page);
1908 		continue;
1909 out_unlock:
1910 		unlock_page(page);
1911 		put_page(page);
1912 		goto xa_unlocked;
1913 	}
1914 
1915 	if (is_shmem)
1916 		__inc_node_page_state(new_page, NR_SHMEM_THPS);
1917 	else {
1918 		__inc_node_page_state(new_page, NR_FILE_THPS);
1919 		filemap_nr_thps_inc(mapping);
1920 		/*
1921 		 * Paired with smp_mb() in do_dentry_open() to ensure
1922 		 * i_writecount is up to date and the update to nr_thps is
1923 		 * visible. Ensures the page cache will be truncated if the
1924 		 * file is opened writable.
1925 		*/
1926 		smp_mb();
1927 		if (inode_is_open_for_write(mapping->host)) {
1928 			result = SCAN_FAIL;
1929 			__dec_node_page_state(new_page, NR_FILE_THPS);
1930 			filemap_nr_thps_dec(mapping);
1931 			goto xa_locked;
1932 		}
1933 	}
1934 
1935 	if (nr_none) {
1936 		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1937 		if (is_shmem)
1938 			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1939 	}
1940 
1941 xa_locked:
1942 	xas_unlock_irq(&xas);
1943 xa_unlocked:
1944 
1945 	if (result == SCAN_SUCCEED) {
1946 		struct page *page, *tmp;
1947 
1948 		/*
1949 		 * Replacing old pages with new one has succeeded, now we
1950 		 * need to copy the content and free the old pages.
1951 		 */
1952 		index = start;
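		/*
		 * Subpages that had no old page backing them (the nr_none
		 * holes filled above) are cleared so they read back as
		 * zeroes, matching what the holes would have returned.
		 */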
1953 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1954 			while (index < page->index) {
1955 				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1956 				index++;
1957 			}
1958 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1959 					page);
1960 			list_del(&page->lru);
1961 			page->mapping = NULL;
1962 			page_ref_unfreeze(page, 1);
1963 			ClearPageActive(page);
1964 			ClearPageUnevictable(page);
1965 			unlock_page(page);
1966 			put_page(page);
1967 			index++;
1968 		}
1969 		while (index < end) {
1970 			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1971 			index++;
1972 		}
1973 
1974 		SetPageUptodate(new_page);
1975 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1976 		if (is_shmem)
1977 			set_page_dirty(new_page);
1978 		lru_cache_add(new_page);
1979 
1980 		/*
1981 		 * Remove pte page tables, so we can re-fault the page as huge.
1982 		 */
1983 		retract_page_tables(mapping, start);
1984 		*hpage = NULL;
1985 
1986 		khugepaged_pages_collapsed++;
1987 	} else {
1988 		struct page *page;
1989 
1990 		/* Something went wrong: roll back page cache changes */
1991 		xas_lock_irq(&xas);
1992 		mapping->nrpages -= nr_none;
1993 
1994 		if (is_shmem)
1995 			shmem_uncharge(mapping->host, nr_none);
1996 
1997 		xas_set(&xas, start);
1998 		xas_for_each(&xas, page, end - 1) {
1999 			page = list_first_entry_or_null(&pagelist,
2000 					struct page, lru);
2001 			if (!page || xas.xa_index < page->index) {
2002 				if (!nr_none)
2003 					break;
2004 				nr_none--;
2005 				/* Put holes back where they were */
2006 				xas_store(&xas, NULL);
2007 				continue;
2008 			}
2009 
2010 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2011 
2012 			/* Unfreeze the page. */
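			/*
			 * Two references remain: one for the restored page
			 * cache entry, plus one that putback_lru_page()
			 * below will drop.
			 */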
2013 			list_del(&page->lru);
2014 			page_ref_unfreeze(page, 2);
2015 			xas_store(&xas, page);
2016 			xas_pause(&xas);
2017 			xas_unlock_irq(&xas);
2018 			unlock_page(page);
2019 			putback_lru_page(page);
2020 			xas_lock_irq(&xas);
2021 		}
2022 		VM_BUG_ON(nr_none);
2023 		xas_unlock_irq(&xas);
2024 
2025 		new_page->mapping = NULL;
2026 	}
2027 
2028 	unlock_page(new_page);
2029 out:
2030 	VM_BUG_ON(!list_empty(&pagelist));
2031 	if (!IS_ERR_OR_NULL(*hpage))
2032 		mem_cgroup_uncharge(*hpage);
2033 	/* TODO: tracepoints */
2034 }
2035 
2036 static void khugepaged_scan_file(struct mm_struct *mm,
2037 		struct file *file, pgoff_t start, struct page **hpage)
2038 {
2039 	struct page *page = NULL;
2040 	struct address_space *mapping = file->f_mapping;
2041 	XA_STATE(xas, &mapping->i_pages, start);
2042 	int present, swap;
2043 	int node = NUMA_NO_NODE;
2044 	int result = SCAN_SUCCEED;
2045 
2046 	present = 0;
2047 	swap = 0;
2048 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2049 	rcu_read_lock();
2050 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2051 		if (xas_retry(&xas, page))
2052 			continue;
2053 
2054 		if (xa_is_value(page)) {
2055 			if (++swap > khugepaged_max_ptes_swap) {
2056 				result = SCAN_EXCEED_SWAP_PTE;
2057 				break;
2058 			}
2059 			continue;
2060 		}
2061 
2062 		if (PageTransCompound(page)) {
2063 			result = SCAN_PAGE_COMPOUND;
2064 			break;
2065 		}
2066 
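		/*
		 * Record which node each resident page sits on;
		 * khugepaged_find_target_node() later picks the most
		 * loaded node as the allocation target.
		 */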
2067 		node = page_to_nid(page);
2068 		if (khugepaged_scan_abort(node)) {
2069 			result = SCAN_SCAN_ABORT;
2070 			break;
2071 		}
2072 		khugepaged_node_load[node]++;
2073 
2074 		if (!PageLRU(page)) {
2075 			result = SCAN_PAGE_LRU;
2076 			break;
2077 		}
2078 
2079 		if (page_count(page) !=
2080 		    1 + page_mapcount(page) + page_has_private(page)) {
2081 			result = SCAN_PAGE_COUNT;
2082 			break;
2083 		}
2084 
2085 		/*
2086 		 * We probably should check if the page is referenced here, but
2087 		 * nobody would transfer pte_young() to PageReferenced() for us.
2088 		 * And rmap walk here is just too costly...
2089 		 */
2090 
2091 		present++;
2092 
2093 		if (need_resched()) {
2094 			xas_pause(&xas);
2095 			cond_resched_rcu();
2096 		}
2097 	}
2098 	rcu_read_unlock();
2099 
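	/*
	 * Only collapse if enough of the range is already present:
	 * more than khugepaged_max_ptes_none missing pages would
	 * inflate memory use too much.
	 */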
2100 	if (result == SCAN_SUCCEED) {
2101 		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2102 			result = SCAN_EXCEED_NONE_PTE;
2103 		} else {
2104 			node = khugepaged_find_target_node();
2105 			collapse_file(mm, file, start, hpage, node);
2106 		}
2107 	}
2108 
2109 	/* TODO: tracepoints */
2110 }
2111 #else
2112 static void khugepaged_scan_file(struct mm_struct *mm,
2113 		struct file *file, pgoff_t start, struct page **hpage)
2114 {
2115 	BUILD_BUG();
2116 }
2117 
2118 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2119 {
2120 	return 0;
2121 }
2122 #endif
2123 
2124 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2125 					    struct page **hpage)
2126 	__releases(&khugepaged_mm_lock)
2127 	__acquires(&khugepaged_mm_lock)
2128 {
2129 	struct mm_slot *mm_slot;
2130 	struct mm_struct *mm;
2131 	struct vm_area_struct *vma;
2132 	int progress = 0;
2133 
2134 	VM_BUG_ON(!pages);
2135 	lockdep_assert_held(&khugepaged_mm_lock);
2136 
2137 	if (khugepaged_scan.mm_slot)
2138 		mm_slot = khugepaged_scan.mm_slot;
2139 	else {
2140 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2141 				     struct mm_slot, mm_node);
2142 		khugepaged_scan.address = 0;
2143 		khugepaged_scan.mm_slot = mm_slot;
2144 	}
2145 	spin_unlock(&khugepaged_mm_lock);
2146 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2147 
2148 	mm = mm_slot->mm;
2149 	/*
2150 	 * Don't wait for semaphore (to avoid long wait times).  Just move to
2151 	 * the next mm on the list.
2152 	 */
2153 	vma = NULL;
2154 	if (unlikely(!mmap_read_trylock(mm)))
2155 		goto breakouterloop_mmap_lock;
2156 	if (likely(!khugepaged_test_exit(mm)))
2157 		vma = find_vma(mm, khugepaged_scan.address);
2158 
2159 	progress++;
2160 	for (; vma; vma = vma->vm_next) {
2161 		unsigned long hstart, hend;
2162 
2163 		cond_resched();
2164 		if (unlikely(khugepaged_test_exit(mm))) {
2165 			progress++;
2166 			break;
2167 		}
2168 		if (!hugepage_vma_check(vma, vma->vm_flags)) {
2169 skip:
2170 			progress++;
2171 			continue;
2172 		}
2173 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2174 		hend = vma->vm_end & HPAGE_PMD_MASK;
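		/*
		 * Illustrative example, assuming 2MiB huge pages: a VMA with
		 * vm_start 0x1234000 and vm_end 0x2345000 yields
		 * hstart 0x1400000 (rounded up) and hend 0x2200000 (rounded
		 * down), so only [0x1400000, 0x2200000) is scanned.
		 */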
2175 		if (hstart >= hend)
2176 			goto skip;
2177 		if (khugepaged_scan.address > hend)
2178 			goto skip;
2179 		if (khugepaged_scan.address < hstart)
2180 			khugepaged_scan.address = hstart;
2181 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2182 		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2183 			goto skip;
2184 
2185 		while (khugepaged_scan.address < hend) {
2186 			int ret;
2187 			cond_resched();
2188 			if (unlikely(khugepaged_test_exit(mm)))
2189 				goto breakouterloop;
2190 
2191 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2192 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2193 				  hend);
2194 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2195 				struct file *file = get_file(vma->vm_file);
2196 				pgoff_t pgoff = linear_page_index(vma,
2197 						khugepaged_scan.address);
2198 
2199 				mmap_read_unlock(mm);
2200 				ret = 1;
2201 				khugepaged_scan_file(mm, file, pgoff, hpage);
2202 				fput(file);
2203 			} else {
2204 				ret = khugepaged_scan_pmd(mm, vma,
2205 						khugepaged_scan.address,
2206 						hpage);
2207 			}
2208 			/* move to next address */
2209 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2210 			progress += HPAGE_PMD_NR;
2211 			if (ret)
2212 				/* we released mmap_lock so break loop */
2213 				goto breakouterloop_mmap_lock;
2214 			if (progress >= pages)
2215 				goto breakouterloop;
2216 		}
2217 	}
2218 breakouterloop:
2219 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2220 breakouterloop_mmap_lock:
2221 
2222 	spin_lock(&khugepaged_mm_lock);
2223 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2224 	/*
2225 	 * Release the current mm_slot if this mm is about to die, or
2226 	 * if we scanned all vmas of this mm.
2227 	 */
2228 	if (khugepaged_test_exit(mm) || !vma) {
2229 		/*
2230 		 * Make sure that if mm_users is reaching zero while
2231 		 * khugepaged runs here, khugepaged_exit will find
2232 		 * mm_slot not pointing to the exiting mm.
2233 		 */
2234 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2235 			khugepaged_scan.mm_slot = list_entry(
2236 				mm_slot->mm_node.next,
2237 				struct mm_slot, mm_node);
2238 			khugepaged_scan.address = 0;
2239 		} else {
2240 			khugepaged_scan.mm_slot = NULL;
2241 			khugepaged_full_scans++;
2242 		}
2243 
2244 		collect_mm_slot(mm_slot);
2245 	}
2246 
2247 	return progress;
2248 }
2249 
2250 static int khugepaged_has_work(void)
2251 {
2252 	return !list_empty(&khugepaged_scan.mm_head) &&
2253 		khugepaged_enabled();
2254 }
2255 
2256 static int khugepaged_wait_event(void)
2257 {
2258 	return !list_empty(&khugepaged_scan.mm_head) ||
2259 		kthread_should_stop();
2260 }
2261 
2262 static void khugepaged_do_scan(void)
2263 {
2264 	struct page *hpage = NULL;
2265 	unsigned int progress = 0, pass_through_head = 0;
2266 	unsigned int pages = khugepaged_pages_to_scan;
2267 	bool wait = true;
2268 
2269 	barrier(); /* write khugepaged_pages_to_scan to local stack */
2270 
2271 	lru_add_drain_all();
2272 
2273 	while (progress < pages) {
2274 		if (!khugepaged_prealloc_page(&hpage, &wait))
2275 			break;
2276 
2277 		cond_resched();
2278 
2279 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2280 			break;
2281 
2282 		spin_lock(&khugepaged_mm_lock);
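		/*
		 * mm_slot == NULL means the scan is (re)starting from the
		 * head of the mm list; reaching the head a second time in
		 * this invocation means a full pass has completed, so stop
		 * instead of looping over the list again.
		 */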
2283 		if (!khugepaged_scan.mm_slot)
2284 			pass_through_head++;
2285 		if (khugepaged_has_work() &&
2286 		    pass_through_head < 2)
2287 			progress += khugepaged_scan_mm_slot(pages - progress,
2288 							    &hpage);
2289 		else
2290 			progress = pages;
2291 		spin_unlock(&khugepaged_mm_lock);
2292 	}
2293 
2294 	if (!IS_ERR_OR_NULL(hpage))
2295 		put_page(hpage);
2296 }
2297 
2298 static bool khugepaged_should_wakeup(void)
2299 {
2300 	return kthread_should_stop() ||
2301 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2302 }
2303 
2304 static void khugepaged_wait_work(void)
2305 {
2306 	if (khugepaged_has_work()) {
2307 		const unsigned long scan_sleep_jiffies =
2308 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2309 
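		/* scan_sleep_millisecs == 0: never sleep between scan batches */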
2310 		if (!scan_sleep_jiffies)
2311 			return;
2312 
2313 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2314 		wait_event_freezable_timeout(khugepaged_wait,
2315 					     khugepaged_should_wakeup(),
2316 					     scan_sleep_jiffies);
2317 		return;
2318 	}
2319 
2320 	if (khugepaged_enabled())
2321 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2322 }
2323 
2324 static int khugepaged(void *none)
2325 {
2326 	struct mm_slot *mm_slot;
2327 
2328 	set_freezable();
2329 	set_user_nice(current, MAX_NICE);
2330 
2331 	while (!kthread_should_stop()) {
2332 		khugepaged_do_scan();
2333 		khugepaged_wait_work();
2334 	}
2335 
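	/*
	 * Thread is stopping: detach the scan cursor; collect_mm_slot()
	 * frees the slot if its mm has already exited.
	 */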
2336 	spin_lock(&khugepaged_mm_lock);
2337 	mm_slot = khugepaged_scan.mm_slot;
2338 	khugepaged_scan.mm_slot = NULL;
2339 	if (mm_slot)
2340 		collect_mm_slot(mm_slot);
2341 	spin_unlock(&khugepaged_mm_lock);
2342 	return 0;
2343 }
2344 
2345 static void set_recommended_min_free_kbytes(void)
2346 {
2347 	struct zone *zone;
2348 	int nr_zones = 0;
2349 	unsigned long recommended_min;
2350 
2351 	for_each_populated_zone(zone) {
2352 		/*
2353 		 * We don't need to worry about fragmentation of
2354 		 * ZONE_MOVABLE since it only has movable pages.
2355 		 */
2356 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2357 			continue;
2358 
2359 		nr_zones++;
2360 	}
2361 
2362 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2363 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2364 
2365 	/*
2366 	 * Make sure that on average at least two pageblocks are almost free
2367 	 * of another type, one for a migratetype to fall back to and a
2368 		 * second to avoid subsequent fallbacks of other types. There are 3
2369 	 * MIGRATE_TYPES we care about.
2370 	 */
2371 	recommended_min += pageblock_nr_pages * nr_zones *
2372 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2373 
2374 	/* don't ever allow to reserve more than 5% of the lowmem */
2375 	recommended_min = min(recommended_min,
2376 			      (unsigned long) nr_free_buffer_pages() / 20);
2377 	recommended_min <<= (PAGE_SHIFT-10);
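	/*
	 * Illustrative example, assuming 4KiB pages, 512-page pageblocks,
	 * two relevant zones and the 5% lowmem cap not applying:
	 *   512 * 2 * 2       =  2048 pages
	 * + 512 * 2 * 3 * 3   =  9216 pages   (MIGRATE_PCPTYPES == 3)
	 *                     = 11264 pages -> 11264 << 2 = 45056 kB (~44 MiB)
	 */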
2378 
2379 	if (recommended_min > min_free_kbytes) {
2380 		if (user_min_free_kbytes >= 0)
2381 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2382 				min_free_kbytes, recommended_min);
2383 
2384 		min_free_kbytes = recommended_min;
2385 	}
2386 	setup_per_zone_wmarks();
2387 }
2388 
2389 int start_stop_khugepaged(void)
2390 {
2391 	int err = 0;
2392 
2393 	mutex_lock(&khugepaged_mutex);
2394 	if (khugepaged_enabled()) {
2395 		if (!khugepaged_thread)
2396 			khugepaged_thread = kthread_run(khugepaged, NULL,
2397 							"khugepaged");
2398 		if (IS_ERR(khugepaged_thread)) {
2399 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2400 			err = PTR_ERR(khugepaged_thread);
2401 			khugepaged_thread = NULL;
2402 			goto fail;
2403 		}
2404 
2405 		if (!list_empty(&khugepaged_scan.mm_head))
2406 			wake_up_interruptible(&khugepaged_wait);
2407 
2408 		set_recommended_min_free_kbytes();
2409 	} else if (khugepaged_thread) {
2410 		kthread_stop(khugepaged_thread);
2411 		khugepaged_thread = NULL;
2412 	}
2413 fail:
2414 	mutex_unlock(&khugepaged_mutex);
2415 	return err;
2416 }
2417 
2418 void khugepaged_min_free_kbytes_update(void)
2419 {
2420 	mutex_lock(&khugepaged_mutex);
2421 	if (khugepaged_enabled() && khugepaged_thread)
2422 		set_recommended_min_free_kbytes();
2423 	mutex_unlock(&khugepaged_mutex);
2424 }
2425