1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
21 
22 #include <asm/tlb.h>
23 #include <asm/pgalloc.h>
24 #include "internal.h"
25 
26 enum scan_result {
27 	SCAN_FAIL,
28 	SCAN_SUCCEED,
29 	SCAN_PMD_NULL,
30 	SCAN_EXCEED_NONE_PTE,
31 	SCAN_EXCEED_SWAP_PTE,
32 	SCAN_EXCEED_SHARED_PTE,
33 	SCAN_PTE_NON_PRESENT,
34 	SCAN_PTE_UFFD_WP,
35 	SCAN_PAGE_RO,
36 	SCAN_LACK_REFERENCED_PAGE,
37 	SCAN_PAGE_NULL,
38 	SCAN_SCAN_ABORT,
39 	SCAN_PAGE_COUNT,
40 	SCAN_PAGE_LRU,
41 	SCAN_PAGE_LOCK,
42 	SCAN_PAGE_ANON,
43 	SCAN_PAGE_COMPOUND,
44 	SCAN_ANY_PROCESS,
45 	SCAN_VMA_NULL,
46 	SCAN_VMA_CHECK,
47 	SCAN_ADDRESS_RANGE,
48 	SCAN_SWAP_CACHE_PAGE,
49 	SCAN_DEL_PAGE_LRU,
50 	SCAN_ALLOC_HUGE_PAGE_FAIL,
51 	SCAN_CGROUP_CHARGE_FAIL,
52 	SCAN_TRUNCATED,
53 	SCAN_PAGE_HAS_PRIVATE,
54 };
55 
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/huge_memory.h>
58 
59 static struct task_struct *khugepaged_thread __read_mostly;
60 static DEFINE_MUTEX(khugepaged_mutex);
61 
62 /* default scan 8*512 ptes (or vmas) every 30 seconds */
63 static unsigned int khugepaged_pages_to_scan __read_mostly;
64 static unsigned int khugepaged_pages_collapsed;
65 static unsigned int khugepaged_full_scans;
66 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67 /* during fragmentation poll the hugepage allocator once every minute */
68 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69 static unsigned long khugepaged_sleep_expire;
70 static DEFINE_SPINLOCK(khugepaged_mm_lock);
71 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
72 /*
73  * By default, collapse hugepages if there is at least one pte mapped the
74  * way it would have been mapped had the vma been large enough during the
75  * page fault.
76  */
77 static unsigned int khugepaged_max_ptes_none __read_mostly;
78 static unsigned int khugepaged_max_ptes_swap __read_mostly;
79 static unsigned int khugepaged_max_ptes_shared __read_mostly;
80 
81 #define MM_SLOTS_HASH_BITS 10
82 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
83 
84 static struct kmem_cache *mm_slot_cache __read_mostly;
85 
86 #define MAX_PTE_MAPPED_THP 8
87 
88 /**
89  * struct mm_slot - hash lookup from mm to mm_slot
90  * @hash: hash collision list
91  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92  * @mm: the mm that this information is valid for
93  * @nr_pte_mapped_thp: number of pte-mapped THPs
94  * @pte_mapped_thp: array of addresses of the pte-mapped THPs
95  */
96 struct mm_slot {
97 	struct hlist_node hash;
98 	struct list_head mm_node;
99 	struct mm_struct *mm;
100 
101 	/* pte-mapped THP in this mm */
102 	int nr_pte_mapped_thp;
103 	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
104 };
105 
106 /**
107  * struct khugepaged_scan - cursor for scanning
108  * @mm_head: the head of the mm list to scan
109  * @mm_slot: the current mm_slot we are scanning
110  * @address: the next address inside that mm to be scanned
111  *
112  * There is only one khugepaged_scan instance of this cursor structure.
113  */
114 struct khugepaged_scan {
115 	struct list_head mm_head;
116 	struct mm_slot *mm_slot;
117 	unsigned long address;
118 };
119 
120 static struct khugepaged_scan khugepaged_scan = {
121 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
122 };
123 
124 #ifdef CONFIG_SYSFS
125 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
126 					 struct kobj_attribute *attr,
127 					 char *buf)
128 {
129 	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
130 }
131 
132 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
133 					  struct kobj_attribute *attr,
134 					  const char *buf, size_t count)
135 {
136 	unsigned int msecs;
137 	int err;
138 
139 	err = kstrtouint(buf, 10, &msecs);
140 	if (err)
141 		return -EINVAL;
142 
143 	khugepaged_scan_sleep_millisecs = msecs;
144 	khugepaged_sleep_expire = 0;
145 	wake_up_interruptible(&khugepaged_wait);
146 
147 	return count;
148 }
149 static struct kobj_attribute scan_sleep_millisecs_attr =
150 	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
151 	       scan_sleep_millisecs_store);
152 
153 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
154 					  struct kobj_attribute *attr,
155 					  char *buf)
156 {
157 	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
158 }
159 
160 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
161 					   struct kobj_attribute *attr,
162 					   const char *buf, size_t count)
163 {
164 	unsigned int msecs;
165 	int err;
166 
167 	err = kstrtouint(buf, 10, &msecs);
168 	if (err)
169 		return -EINVAL;
170 
171 	khugepaged_alloc_sleep_millisecs = msecs;
172 	khugepaged_sleep_expire = 0;
173 	wake_up_interruptible(&khugepaged_wait);
174 
175 	return count;
176 }
177 static struct kobj_attribute alloc_sleep_millisecs_attr =
178 	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
179 	       alloc_sleep_millisecs_store);
180 
181 static ssize_t pages_to_scan_show(struct kobject *kobj,
182 				  struct kobj_attribute *attr,
183 				  char *buf)
184 {
185 	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
186 }
187 static ssize_t pages_to_scan_store(struct kobject *kobj,
188 				   struct kobj_attribute *attr,
189 				   const char *buf, size_t count)
190 {
191 	unsigned int pages;
192 	int err;
193 
194 	err = kstrtouint(buf, 10, &pages);
195 	if (err || !pages)
196 		return -EINVAL;
197 
198 	khugepaged_pages_to_scan = pages;
199 
200 	return count;
201 }
202 static struct kobj_attribute pages_to_scan_attr =
203 	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
204 	       pages_to_scan_store);
205 
206 static ssize_t pages_collapsed_show(struct kobject *kobj,
207 				    struct kobj_attribute *attr,
208 				    char *buf)
209 {
210 	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
211 }
212 static struct kobj_attribute pages_collapsed_attr =
213 	__ATTR_RO(pages_collapsed);
214 
215 static ssize_t full_scans_show(struct kobject *kobj,
216 			       struct kobj_attribute *attr,
217 			       char *buf)
218 {
219 	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
220 }
221 static struct kobj_attribute full_scans_attr =
222 	__ATTR_RO(full_scans);
223 
224 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
225 				      struct kobj_attribute *attr, char *buf)
226 {
227 	return single_hugepage_flag_show(kobj, attr, buf,
228 					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
229 }
230 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
231 				       struct kobj_attribute *attr,
232 				       const char *buf, size_t count)
233 {
234 	return single_hugepage_flag_store(kobj, attr, buf, count,
235 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
236 }
237 static struct kobj_attribute khugepaged_defrag_attr =
238 	__ATTR(defrag, 0644, khugepaged_defrag_show,
239 	       khugepaged_defrag_store);
240 
241 /*
242  * max_ptes_none controls whether khugepaged should collapse hugepages
243  * over unmapped ptes, in turn potentially increasing the memory
244  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
245  * reduce the available free memory in the system as it
246  * runs. Increasing max_ptes_none will instead potentially reduce the
247  * free memory in the system during the khugepaged scan.
248  */
249 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
250 					     struct kobj_attribute *attr,
251 					     char *buf)
252 {
253 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
254 }
255 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
256 					      struct kobj_attribute *attr,
257 					      const char *buf, size_t count)
258 {
259 	int err;
260 	unsigned long max_ptes_none;
261 
262 	err = kstrtoul(buf, 10, &max_ptes_none);
263 	if (err || max_ptes_none > HPAGE_PMD_NR-1)
264 		return -EINVAL;
265 
266 	khugepaged_max_ptes_none = max_ptes_none;
267 
268 	return count;
269 }
270 static struct kobj_attribute khugepaged_max_ptes_none_attr =
271 	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
272 	       khugepaged_max_ptes_none_store);
273 
274 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
275 					     struct kobj_attribute *attr,
276 					     char *buf)
277 {
278 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
279 }
280 
281 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
282 					      struct kobj_attribute *attr,
283 					      const char *buf, size_t count)
284 {
285 	int err;
286 	unsigned long max_ptes_swap;
287 
288 	err  = kstrtoul(buf, 10, &max_ptes_swap);
289 	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
290 		return -EINVAL;
291 
292 	khugepaged_max_ptes_swap = max_ptes_swap;
293 
294 	return count;
295 }
296 
297 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
298 	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
299 	       khugepaged_max_ptes_swap_store);
300 
301 static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
302 					       struct kobj_attribute *attr,
303 					       char *buf)
304 {
305 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
306 }
307 
308 static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
309 					      struct kobj_attribute *attr,
310 					      const char *buf, size_t count)
311 {
312 	int err;
313 	unsigned long max_ptes_shared;
314 
315 	err  = kstrtoul(buf, 10, &max_ptes_shared);
316 	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
317 		return -EINVAL;
318 
319 	khugepaged_max_ptes_shared = max_ptes_shared;
320 
321 	return count;
322 }
323 
324 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
325 	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
326 	       khugepaged_max_ptes_shared_store);
327 
328 static struct attribute *khugepaged_attr[] = {
329 	&khugepaged_defrag_attr.attr,
330 	&khugepaged_max_ptes_none_attr.attr,
331 	&khugepaged_max_ptes_swap_attr.attr,
332 	&khugepaged_max_ptes_shared_attr.attr,
333 	&pages_to_scan_attr.attr,
334 	&pages_collapsed_attr.attr,
335 	&full_scans_attr.attr,
336 	&scan_sleep_millisecs_attr.attr,
337 	&alloc_sleep_millisecs_attr.attr,
338 	NULL,
339 };
340 
341 struct attribute_group khugepaged_attr_group = {
342 	.attrs = khugepaged_attr,
343 	.name = "khugepaged",
344 };
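/*
 * Editor's note (illustration, not part of the original source): assuming
 * this group is registered under the transparent_hugepage kobject, as the
 * kernel normally does, the attributes above show up as files under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.:
 *
 *   echo 100 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *
 * Writes are parsed by the *_store() handlers above; malformed or
 * out-of-range values are rejected with -EINVAL.
 */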
345 #endif /* CONFIG_SYSFS */
346 
347 int hugepage_madvise(struct vm_area_struct *vma,
348 		     unsigned long *vm_flags, int advice)
349 {
350 	switch (advice) {
351 	case MADV_HUGEPAGE:
352 #ifdef CONFIG_S390
353 		/*
354 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
355 		 * can't handle this properly after s390_enable_sie, so we simply
356 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
357 		 */
358 		if (mm_has_pgste(vma->vm_mm))
359 			return 0;
360 #endif
361 		*vm_flags &= ~VM_NOHUGEPAGE;
362 		*vm_flags |= VM_HUGEPAGE;
363 		/*
364 		 * If the vma becomes suitable for khugepaged to scan,
365 		 * register it here without waiting for a page fault that
366 		 * may not happen any time soon.
367 		 */
368 		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
369 				khugepaged_enter_vma_merge(vma, *vm_flags))
370 			return -ENOMEM;
371 		break;
372 	case MADV_NOHUGEPAGE:
373 		*vm_flags &= ~VM_HUGEPAGE;
374 		*vm_flags |= VM_NOHUGEPAGE;
375 		/*
376 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
377 		 * this vma, even if the mm stays registered in khugepaged
378 		 * because it got registered before VM_NOHUGEPAGE was set.
379 		 */
380 		break;
381 	}
382 
383 	return 0;
384 }
385 
386 int __init khugepaged_init(void)
387 {
388 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
389 					  sizeof(struct mm_slot),
390 					  __alignof__(struct mm_slot), 0, NULL);
391 	if (!mm_slot_cache)
392 		return -ENOMEM;
393 
394 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
395 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
396 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
397 	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
398 
399 	return 0;
400 }
401 
402 void __init khugepaged_destroy(void)
403 {
404 	kmem_cache_destroy(mm_slot_cache);
405 }
406 
407 static inline struct mm_slot *alloc_mm_slot(void)
408 {
409 	if (!mm_slot_cache)	/* initialization failed */
410 		return NULL;
411 	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
412 }
413 
414 static inline void free_mm_slot(struct mm_slot *mm_slot)
415 {
416 	kmem_cache_free(mm_slot_cache, mm_slot);
417 }
418 
419 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
420 {
421 	struct mm_slot *mm_slot;
422 
423 	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
424 		if (mm == mm_slot->mm)
425 			return mm_slot;
426 
427 	return NULL;
428 }
429 
430 static void insert_to_mm_slots_hash(struct mm_struct *mm,
431 				    struct mm_slot *mm_slot)
432 {
433 	mm_slot->mm = mm;
434 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
435 }
436 
437 static inline int khugepaged_test_exit(struct mm_struct *mm)
438 {
439 	return atomic_read(&mm->mm_users) == 0;
440 }
441 
442 static bool hugepage_vma_check(struct vm_area_struct *vma,
443 			       unsigned long vm_flags)
444 {
445 	if (!transhuge_vma_enabled(vma, vm_flags))
446 		return false;
447 
448 	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
449 				vma->vm_pgoff, HPAGE_PMD_NR))
450 		return false;
451 
452 	/* Enabled via shmem mount options or sysfs settings. */
453 	if (shmem_file(vma->vm_file))
454 		return shmem_huge_enabled(vma);
455 
456 	/* THP settings require madvise. */
457 	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
458 		return false;
459 
460 	/* Only regular files are valid */
461 	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
462 	    (vm_flags & VM_EXEC)) {
463 		struct inode *inode = vma->vm_file->f_inode;
464 
465 		return !inode_is_open_for_write(inode) &&
466 			S_ISREG(inode->i_mode);
467 	}
468 
469 	if (!vma->anon_vma || vma->vm_ops)
470 		return false;
471 	if (vma_is_temporary_stack(vma))
472 		return false;
473 	return !(vm_flags & VM_NO_KHUGEPAGED);
474 }
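/*
 * Editor's note (summary of the checks above, not part of the original
 * source): file-backed VMAs must keep vm_pgoff and vm_start aligned with
 * each other modulo HPAGE_PMD_NR; shmem then defers to
 * shmem_huge_enabled(). Unless THP is set to "always", other VMAs also
 * need VM_HUGEPAGE (madvise). Non-shmem files qualify only as read-only
 * executables with CONFIG_READ_ONLY_THP_FOR_FS; anonymous VMAs must have
 * an anon_vma, no vm_ops, not be a temporary stack, and have
 * VM_NO_KHUGEPAGED clear.
 */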
475 
476 int __khugepaged_enter(struct mm_struct *mm)
477 {
478 	struct mm_slot *mm_slot;
479 	int wakeup;
480 
481 	mm_slot = alloc_mm_slot();
482 	if (!mm_slot)
483 		return -ENOMEM;
484 
485 	/* __khugepaged_exit() must not run from under us */
486 	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
487 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
488 		free_mm_slot(mm_slot);
489 		return 0;
490 	}
491 
492 	spin_lock(&khugepaged_mm_lock);
493 	insert_to_mm_slots_hash(mm, mm_slot);
494 	/*
495 	 * Insert just behind the scanning cursor, to let the area settle
496 	 * down a little.
497 	 */
498 	wakeup = list_empty(&khugepaged_scan.mm_head);
499 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
500 	spin_unlock(&khugepaged_mm_lock);
501 
502 	mmgrab(mm);
503 	if (wakeup)
504 		wake_up_interruptible(&khugepaged_wait);
505 
506 	return 0;
507 }
508 
509 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
510 			       unsigned long vm_flags)
511 {
512 	unsigned long hstart, hend;
513 
514 	/*
515 	 * For non-shmem files, khugepaged only supports read-only files.
516 	 * khugepaged does not yet work on special mappings. And
517 	 * file-private shmem THP is not supported.
518 	 */
519 	if (!hugepage_vma_check(vma, vm_flags))
520 		return 0;
521 
522 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
523 	hend = vma->vm_end & HPAGE_PMD_MASK;
524 	if (hstart < hend)
525 		return khugepaged_enter(vma, vm_flags);
526 	return 0;
527 }
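/*
 * Editor's note, a worked example of the alignment above (illustrative
 * values): with 2MB huge pages, HPAGE_PMD_MASK == ~0x1fffffUL. For a VMA
 * spanning [0x1ff000, 0x601000):
 *   hstart = (0x1ff000 + 0x1fffff) & ~0x1fffff = 0x200000
 *   hend   =  0x601000             & ~0x1fffff = 0x600000
 * hstart < hend, so the mm is registered. A VMA too small to cover one
 * PMD-aligned range ends up with hstart >= hend and is skipped.
 */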
528 
529 void __khugepaged_exit(struct mm_struct *mm)
530 {
531 	struct mm_slot *mm_slot;
532 	int free = 0;
533 
534 	spin_lock(&khugepaged_mm_lock);
535 	mm_slot = get_mm_slot(mm);
536 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
537 		hash_del(&mm_slot->hash);
538 		list_del(&mm_slot->mm_node);
539 		free = 1;
540 	}
541 	spin_unlock(&khugepaged_mm_lock);
542 
543 	if (free) {
544 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
545 		free_mm_slot(mm_slot);
546 		mmdrop(mm);
547 	} else if (mm_slot) {
548 		/*
549 		 * This is required to serialize against
550 		 * khugepaged_test_exit() (which is guaranteed to run
551 		 * under the mmap_lock in read mode). Stop here (after we
552 		 * return, all pagetables will be destroyed) until
553 		 * khugepaged has finished working on the pagetables
554 		 * under the mmap_lock.
555 		 */
556 		mmap_write_lock(mm);
557 		mmap_write_unlock(mm);
558 	}
559 }
560 
561 static void release_pte_page(struct page *page)
562 {
563 	mod_node_page_state(page_pgdat(page),
564 			NR_ISOLATED_ANON + page_is_file_lru(page),
565 			-compound_nr(page));
566 	unlock_page(page);
567 	putback_lru_page(page);
568 }
569 
570 static void release_pte_pages(pte_t *pte, pte_t *_pte,
571 		struct list_head *compound_pagelist)
572 {
573 	struct page *page, *tmp;
574 
575 	while (--_pte >= pte) {
576 		pte_t pteval = *_pte;
577 
578 		page = pte_page(pteval);
579 		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
580 				!PageCompound(page))
581 			release_pte_page(page);
582 	}
583 
584 	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
585 		list_del(&page->lru);
586 		release_pte_page(page);
587 	}
588 }
589 
590 static bool is_refcount_suitable(struct page *page)
591 {
592 	int expected_refcount;
593 
594 	expected_refcount = total_mapcount(page);
595 	if (PageSwapCache(page))
596 		expected_refcount += compound_nr(page);
597 
598 	return page_count(page) == expected_refcount;
599 }
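/*
 * Editor's note, an illustrative example of the accounting above: a base
 * page mapped by a parent and one forked child has total_mapcount == 2;
 * if it also sits in the swap cache, that adds one more reference
 * (compound_nr == 1 for a base page), so expected_refcount == 3. Any
 * page_count() above that implies an extra pin, e.g. from GUP, and the
 * page is deemed unsuitable for collapse.
 */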
600 
601 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
602 					unsigned long address,
603 					pte_t *pte,
604 					struct list_head *compound_pagelist)
605 {
606 	struct page *page = NULL;
607 	pte_t *_pte;
608 	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
609 	bool writable = false;
610 
611 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
612 	     _pte++, address += PAGE_SIZE) {
613 		pte_t pteval = *_pte;
614 		if (pte_none(pteval) || (pte_present(pteval) &&
615 				is_zero_pfn(pte_pfn(pteval)))) {
616 			if (!userfaultfd_armed(vma) &&
617 			    ++none_or_zero <= khugepaged_max_ptes_none) {
618 				continue;
619 			} else {
620 				result = SCAN_EXCEED_NONE_PTE;
621 				goto out;
622 			}
623 		}
624 		if (!pte_present(pteval)) {
625 			result = SCAN_PTE_NON_PRESENT;
626 			goto out;
627 		}
628 		if (pte_uffd_wp(pteval)) {
629 			result = SCAN_PTE_UFFD_WP;
630 			goto out;
631 		}
632 		page = vm_normal_page(vma, address, pteval);
633 		if (unlikely(!page)) {
634 			result = SCAN_PAGE_NULL;
635 			goto out;
636 		}
637 
638 		VM_BUG_ON_PAGE(!PageAnon(page), page);
639 
640 		if (page_mapcount(page) > 1 &&
641 				++shared > khugepaged_max_ptes_shared) {
642 			result = SCAN_EXCEED_SHARED_PTE;
643 			goto out;
644 		}
645 
646 		if (PageCompound(page)) {
647 			struct page *p;
648 			page = compound_head(page);
649 
650 			/*
651 			 * Check if we have dealt with the compound page
652 			 * already
653 			 */
654 			list_for_each_entry(p, compound_pagelist, lru) {
655 				if (page == p)
656 					goto next;
657 			}
658 		}
659 
660 		/*
661 		 * We can do it before isolate_lru_page because the
662 		 * page can't be freed from under us. NOTE: PG_lock
663 		 * is needed to serialize against split_huge_page
664 		 * when invoked from the VM.
665 		 */
666 		if (!trylock_page(page)) {
667 			result = SCAN_PAGE_LOCK;
668 			goto out;
669 		}
670 
671 		/*
672 		 * Check if the page has any GUP (or other external) pins.
673 		 *
674 		 * The page table that maps the page has been already unlinked
675 		 * from the page table tree and this process cannot get
676 		 * an additional pin on the page.
677 		 *
678 		 * New pins can come later if the page is shared across fork,
679 		 * but not from this process. The other process cannot write to
680 		 * the page, only trigger CoW.
681 		 */
682 		if (!is_refcount_suitable(page)) {
683 			unlock_page(page);
684 			result = SCAN_PAGE_COUNT;
685 			goto out;
686 		}
687 		if (!pte_write(pteval) && PageSwapCache(page) &&
688 				!reuse_swap_page(page, NULL)) {
689 			/*
690 			 * Page is in the swap cache and cannot be re-used.
691 			 * It cannot be collapsed into a THP.
692 			 */
693 			unlock_page(page);
694 			result = SCAN_SWAP_CACHE_PAGE;
695 			goto out;
696 		}
697 
698 		/*
699 		 * Isolate the page to avoid collapsing a hugepage
700 		 * currently in use by the VM.
701 		 */
702 		if (isolate_lru_page(page)) {
703 			unlock_page(page);
704 			result = SCAN_DEL_PAGE_LRU;
705 			goto out;
706 		}
707 		mod_node_page_state(page_pgdat(page),
708 				NR_ISOLATED_ANON + page_is_file_lru(page),
709 				compound_nr(page));
710 		VM_BUG_ON_PAGE(!PageLocked(page), page);
711 		VM_BUG_ON_PAGE(PageLRU(page), page);
712 
713 		if (PageCompound(page))
714 			list_add_tail(&page->lru, compound_pagelist);
715 next:
716 		/* There should be enough young ptes to collapse the page */
717 		if (pte_young(pteval) ||
718 		    page_is_young(page) || PageReferenced(page) ||
719 		    mmu_notifier_test_young(vma->vm_mm, address))
720 			referenced++;
721 
722 		if (pte_write(pteval))
723 			writable = true;
724 	}
725 
726 	if (unlikely(!writable)) {
727 		result = SCAN_PAGE_RO;
728 	} else if (unlikely(!referenced)) {
729 		result = SCAN_LACK_REFERENCED_PAGE;
730 	} else {
731 		result = SCAN_SUCCEED;
732 		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
733 						    referenced, writable, result);
734 		return 1;
735 	}
736 out:
737 	release_pte_pages(pte, _pte, compound_pagelist);
738 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
739 					    referenced, writable, result);
740 	return 0;
741 }
742 
743 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
744 				      struct vm_area_struct *vma,
745 				      unsigned long address,
746 				      spinlock_t *ptl,
747 				      struct list_head *compound_pagelist)
748 {
749 	struct page *src_page, *tmp;
750 	pte_t *_pte;
751 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
752 				_pte++, page++, address += PAGE_SIZE) {
753 		pte_t pteval = *_pte;
754 
755 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
756 			clear_user_highpage(page, address);
757 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
758 			if (is_zero_pfn(pte_pfn(pteval))) {
759 				/*
760 				 * ptl mostly unnecessary.
761 				 */
762 				spin_lock(ptl);
763 				/*
764 				 * paravirt calls inside pte_clear here are
765 				 * superfluous.
766 				 */
767 				pte_clear(vma->vm_mm, address, _pte);
768 				spin_unlock(ptl);
769 			}
770 		} else {
771 			src_page = pte_page(pteval);
772 			copy_user_highpage(page, src_page, address, vma);
773 			if (!PageCompound(src_page))
774 				release_pte_page(src_page);
775 			/*
776 			 * ptl mostly unnecessary, but preempt has to
777 			 * be disabled to update the per-cpu stats
778 			 * inside page_remove_rmap().
779 			 */
780 			spin_lock(ptl);
781 			/*
782 			 * paravirt calls inside pte_clear here are
783 			 * superfluous.
784 			 */
785 			pte_clear(vma->vm_mm, address, _pte);
786 			page_remove_rmap(src_page, false);
787 			spin_unlock(ptl);
788 			free_page_and_swap_cache(src_page);
789 		}
790 	}
791 
792 	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
793 		list_del(&src_page->lru);
794 		release_pte_page(src_page);
795 	}
796 }
797 
798 static void khugepaged_alloc_sleep(void)
799 {
800 	DEFINE_WAIT(wait);
801 
802 	add_wait_queue(&khugepaged_wait, &wait);
803 	freezable_schedule_timeout_interruptible(
804 		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
805 	remove_wait_queue(&khugepaged_wait, &wait);
806 }
807 
808 static int khugepaged_node_load[MAX_NUMNODES];
809 
810 static bool khugepaged_scan_abort(int nid)
811 {
812 	int i;
813 
814 	/*
815 	 * If node_reclaim_mode is disabled, then no extra effort is made to
816 	 * allocate memory locally.
817 	 */
818 	if (!node_reclaim_enabled())
819 		return false;
820 
821 	/* If there is a count for this node already, it must be acceptable */
822 	if (khugepaged_node_load[nid])
823 		return false;
824 
825 	for (i = 0; i < MAX_NUMNODES; i++) {
826 		if (!khugepaged_node_load[i])
827 			continue;
828 		if (node_distance(nid, i) > node_reclaim_distance)
829 			return true;
830 	}
831 	return false;
832 }
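/*
 * Editor's note (illustrative): suppose all pages seen so far in this
 * scan are on node 0 and the next page is on node 2. If
 * node_distance(2, 0) > node_reclaim_distance, the scan aborts with
 * SCAN_SCAN_ABORT instead of collapsing pages scattered across distant
 * nodes, which would defeat node_reclaim_mode's preference for local
 * memory.
 */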
833 
834 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
835 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
836 {
837 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
838 }
839 
840 #ifdef CONFIG_NUMA
841 static int khugepaged_find_target_node(void)
842 {
843 	static int last_khugepaged_target_node = NUMA_NO_NODE;
844 	int nid, target_node = 0, max_value = 0;
845 
846 	/* find first node with max normal pages hit */
847 	for (nid = 0; nid < MAX_NUMNODES; nid++)
848 		if (khugepaged_node_load[nid] > max_value) {
849 			max_value = khugepaged_node_load[nid];
850 			target_node = nid;
851 		}
852 
853 	/* do some balancing if several nodes have the same hit count */
854 	if (target_node <= last_khugepaged_target_node)
855 		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
856 				nid++)
857 			if (max_value == khugepaged_node_load[nid]) {
858 				target_node = nid;
859 				break;
860 			}
861 
862 	last_khugepaged_target_node = target_node;
863 	return target_node;
864 }
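/*
 * Editor's note (illustrative): the static last_khugepaged_target_node
 * round-robins between tied nodes. If nodes 1 and 3 both hold the max
 * hit count and node 1 was picked on the previous collapse, the second
 * loop advances the target to node 3, spreading hugepage allocations
 * across equally loaded nodes.
 */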
865 
866 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
867 {
868 	if (IS_ERR(*hpage)) {
869 		if (!*wait)
870 			return false;
871 
872 		*wait = false;
873 		*hpage = NULL;
874 		khugepaged_alloc_sleep();
875 	} else if (*hpage) {
876 		put_page(*hpage);
877 		*hpage = NULL;
878 	}
879 
880 	return true;
881 }
882 
883 static struct page *
884 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
885 {
886 	VM_BUG_ON_PAGE(*hpage, *hpage);
887 
888 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
889 	if (unlikely(!*hpage)) {
890 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
891 		*hpage = ERR_PTR(-ENOMEM);
892 		return NULL;
893 	}
894 
895 	prep_transhuge_page(*hpage);
896 	count_vm_event(THP_COLLAPSE_ALLOC);
897 	return *hpage;
898 }
899 #else
900 static int khugepaged_find_target_node(void)
901 {
902 	return 0;
903 }
904 
905 static inline struct page *alloc_khugepaged_hugepage(void)
906 {
907 	struct page *page;
908 
909 	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
910 			   HPAGE_PMD_ORDER);
911 	if (page)
912 		prep_transhuge_page(page);
913 	return page;
914 }
915 
916 static struct page *khugepaged_alloc_hugepage(bool *wait)
917 {
918 	struct page *hpage;
919 
920 	do {
921 		hpage = alloc_khugepaged_hugepage();
922 		if (!hpage) {
923 			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
924 			if (!*wait)
925 				return NULL;
926 
927 			*wait = false;
928 			khugepaged_alloc_sleep();
929 		} else
930 			count_vm_event(THP_COLLAPSE_ALLOC);
931 	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
932 
933 	return hpage;
934 }
935 
936 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
937 {
938 	/*
939 	 * If the hpage allocated earlier was briefly exposed in page cache
940 	 * before collapse_file() failed, it is possible that racing lookups
941 	 * have not yet completed, and would then be unpleasantly surprised by
942 	 * finding the hpage reused for the same mapping at a different offset.
943 	 * Just release the previous allocation if there is any danger of that.
944 	 */
945 	if (*hpage && page_count(*hpage) > 1) {
946 		put_page(*hpage);
947 		*hpage = NULL;
948 	}
949 
950 	if (!*hpage)
951 		*hpage = khugepaged_alloc_hugepage(wait);
952 
953 	if (unlikely(!*hpage))
954 		return false;
955 
956 	return true;
957 }
958 
959 static struct page *
960 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
961 {
962 	VM_BUG_ON(!*hpage);
963 
964 	return  *hpage;
965 }
966 #endif
967 
968 /*
969  * If the mmap_lock was temporarily dropped, revalidate the vma
970  * before retaking the mmap_lock.
971  * Return 0 on success; otherwise return a non-zero
972  * value (scan code).
973  */
974 
975 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
976 		struct vm_area_struct **vmap)
977 {
978 	struct vm_area_struct *vma;
979 	unsigned long hstart, hend;
980 
981 	if (unlikely(khugepaged_test_exit(mm)))
982 		return SCAN_ANY_PROCESS;
983 
984 	*vmap = vma = find_vma(mm, address);
985 	if (!vma)
986 		return SCAN_VMA_NULL;
987 
988 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
989 	hend = vma->vm_end & HPAGE_PMD_MASK;
990 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
991 		return SCAN_ADDRESS_RANGE;
992 	if (!hugepage_vma_check(vma, vma->vm_flags))
993 		return SCAN_VMA_CHECK;
994 	/* Anon VMA expected */
995 	if (!vma->anon_vma || vma->vm_ops)
996 		return SCAN_VMA_CHECK;
997 	return 0;
998 }
999 
1000 /*
1001  * Bring missing pages in from swap, to complete THP collapse.
1002  * Only done if khugepaged_scan_pmd believes it is worthwhile.
1003  *
1004  * Called and returns without pte mapped or spinlocks held,
1005  * but with mmap_lock held to protect against vma changes.
1006  */
1007 
1008 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1009 					struct vm_area_struct *vma,
1010 					unsigned long haddr, pmd_t *pmd,
1011 					int referenced)
1012 {
1013 	int swapped_in = 0;
1014 	vm_fault_t ret = 0;
1015 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
1016 
1017 	for (address = haddr; address < end; address += PAGE_SIZE) {
1018 		struct vm_fault vmf = {
1019 			.vma = vma,
1020 			.address = address,
1021 			.pgoff = linear_page_index(vma, haddr),
1022 			.flags = FAULT_FLAG_ALLOW_RETRY,
1023 			.pmd = pmd,
1024 		};
1025 
1026 		vmf.pte = pte_offset_map(pmd, address);
1027 		vmf.orig_pte = *vmf.pte;
1028 		if (!is_swap_pte(vmf.orig_pte)) {
1029 			pte_unmap(vmf.pte);
1030 			continue;
1031 		}
1032 		swapped_in++;
1033 		ret = do_swap_page(&vmf);
1034 
1035 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1036 		if (ret & VM_FAULT_RETRY) {
1037 			mmap_read_lock(mm);
1038 			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
1039 				/* vma is no longer available, don't continue to swapin */
1040 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1041 				return false;
1042 			}
1043 			/* check if the pmd is still valid */
1044 			if (mm_find_pmd(mm, haddr) != pmd) {
1045 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1046 				return false;
1047 			}
1048 		}
1049 		if (ret & VM_FAULT_ERROR) {
1050 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1051 			return false;
1052 		}
1053 	}
1054 
1055 	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1056 	if (swapped_in)
1057 		lru_add_drain();
1058 
1059 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1060 	return true;
1061 }
1062 
1063 static void collapse_huge_page(struct mm_struct *mm,
1064 				   unsigned long address,
1065 				   struct page **hpage,
1066 				   int node, int referenced, int unmapped)
1067 {
1068 	LIST_HEAD(compound_pagelist);
1069 	pmd_t *pmd, _pmd;
1070 	pte_t *pte;
1071 	pgtable_t pgtable;
1072 	struct page *new_page;
1073 	spinlock_t *pmd_ptl, *pte_ptl;
1074 	int isolated = 0, result = 0;
1075 	struct vm_area_struct *vma;
1076 	struct mmu_notifier_range range;
1077 	gfp_t gfp;
1078 
1079 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1080 
1081 	/* Only allocate from the target node */
1082 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1083 
1084 	/*
1085 	 * Before allocating the hugepage, release the mmap_lock read lock.
1086 	 * The allocation can take potentially a long time if it involves
1087 	 * sync compaction, and we do not need to hold the mmap_lock during
1088 	 * that. We will recheck the vma after taking it again in write mode.
1089 	 */
1090 	mmap_read_unlock(mm);
1091 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1092 	if (!new_page) {
1093 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1094 		goto out_nolock;
1095 	}
1096 
1097 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1098 		result = SCAN_CGROUP_CHARGE_FAIL;
1099 		goto out_nolock;
1100 	}
1101 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1102 
1103 	mmap_read_lock(mm);
1104 	result = hugepage_vma_revalidate(mm, address, &vma);
1105 	if (result) {
1106 		mmap_read_unlock(mm);
1107 		goto out_nolock;
1108 	}
1109 
1110 	pmd = mm_find_pmd(mm, address);
1111 	if (!pmd) {
1112 		result = SCAN_PMD_NULL;
1113 		mmap_read_unlock(mm);
1114 		goto out_nolock;
1115 	}
1116 
1117 	/*
1118 	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1119 	 * If it fails, we release mmap_lock and jump out_nolock.
1120 	 * Continuing to collapse causes inconsistency.
1121 	 */
1122 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1123 						     pmd, referenced)) {
1124 		mmap_read_unlock(mm);
1125 		goto out_nolock;
1126 	}
1127 
1128 	mmap_read_unlock(mm);
1129 	/*
1130 	 * Prevent all access to pagetables with the exception of
1131 	 * gup_fast later handled by the ptep_clear_flush and the VM
1132 	 * handled by the anon_vma lock + PG_lock.
1133 	 */
1134 	mmap_write_lock(mm);
1135 	result = hugepage_vma_revalidate(mm, address, &vma);
1136 	if (result)
1137 		goto out_up_write;
1138 	/* check if the pmd is still valid */
1139 	if (mm_find_pmd(mm, address) != pmd)
1140 		goto out_up_write;
1141 
1142 	anon_vma_lock_write(vma->anon_vma);
1143 
1144 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1145 				address, address + HPAGE_PMD_SIZE);
1146 	mmu_notifier_invalidate_range_start(&range);
1147 
1148 	pte = pte_offset_map(pmd, address);
1149 	pte_ptl = pte_lockptr(mm, pmd);
1150 
1151 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1152 	/*
1153 	 * This removes any huge TLB entry from the CPU so we won't allow
1154 	 * huge and small TLB entries for the same virtual address to
1155 	 * avoid the risk of CPU bugs in that area.
1156 	 *
1157 	 * Parallel fast GUP is fine since fast GUP will back off when
1158 	 * it detects PMD is changed.
1159 	 */
1160 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1161 	spin_unlock(pmd_ptl);
1162 	mmu_notifier_invalidate_range_end(&range);
1163 	tlb_remove_table_sync_one();
1164 
1165 	spin_lock(pte_ptl);
1166 	isolated = __collapse_huge_page_isolate(vma, address, pte,
1167 			&compound_pagelist);
1168 	spin_unlock(pte_ptl);
1169 
1170 	if (unlikely(!isolated)) {
1171 		pte_unmap(pte);
1172 		spin_lock(pmd_ptl);
1173 		BUG_ON(!pmd_none(*pmd));
1174 		/*
1175 		 * We can only use set_pmd_at when establishing
1176 		 * hugepmds and never for establishing regular pmds that
1177 		 * point to regular pagetables. Use pmd_populate for that.
1178 		 */
1179 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1180 		spin_unlock(pmd_ptl);
1181 		anon_vma_unlock_write(vma->anon_vma);
1182 		result = SCAN_FAIL;
1183 		goto out_up_write;
1184 	}
1185 
1186 	/*
1187 	 * All pages are isolated and locked so anon_vma rmap
1188 	 * can't run anymore.
1189 	 */
1190 	anon_vma_unlock_write(vma->anon_vma);
1191 
1192 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1193 			&compound_pagelist);
1194 	pte_unmap(pte);
1195 	/*
1196 	 * spin_lock() below is not the equivalent of smp_wmb(), but
1197 	 * the smp_wmb() inside __SetPageUptodate() can be reused to
1198 	 * keep the copy_huge_page writes from becoming visible after
1199 	 * the set_pmd_at() write.
1200 	 */
1201 	__SetPageUptodate(new_page);
1202 	pgtable = pmd_pgtable(_pmd);
1203 
1204 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1205 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1206 
1207 	spin_lock(pmd_ptl);
1208 	BUG_ON(!pmd_none(*pmd));
1209 	page_add_new_anon_rmap(new_page, vma, address, true);
1210 	lru_cache_add_inactive_or_unevictable(new_page, vma);
1211 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1212 	set_pmd_at(mm, address, pmd, _pmd);
1213 	update_mmu_cache_pmd(vma, address, pmd);
1214 	spin_unlock(pmd_ptl);
1215 
1216 	*hpage = NULL;
1217 
1218 	khugepaged_pages_collapsed++;
1219 	result = SCAN_SUCCEED;
1220 out_up_write:
1221 	mmap_write_unlock(mm);
1222 out_nolock:
1223 	if (!IS_ERR_OR_NULL(*hpage))
1224 		mem_cgroup_uncharge(*hpage);
1225 	trace_mm_collapse_huge_page(mm, isolated, result);
1226 	return;
1227 }
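/*
 * Editor's note (summary of the locking above, not part of the original
 * source): collapse_huge_page() is entered with the mmap_lock held for
 * read, drops it for the potentially slow hugepage allocation, retakes
 * it for read to revalidate the vma and swap in pages, then drops it
 * again and takes it for write (plus the anon_vma lock, the PMD/PTE
 * page-table locks and an mmu_notifier invalidation range) for the
 * actual PMD switch, so no page fault can race with the collapse.
 */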
1228 
1229 static int khugepaged_scan_pmd(struct mm_struct *mm,
1230 			       struct vm_area_struct *vma,
1231 			       unsigned long address,
1232 			       struct page **hpage)
1233 {
1234 	pmd_t *pmd;
1235 	pte_t *pte, *_pte;
1236 	int ret = 0, result = 0, referenced = 0;
1237 	int none_or_zero = 0, shared = 0;
1238 	struct page *page = NULL;
1239 	unsigned long _address;
1240 	spinlock_t *ptl;
1241 	int node = NUMA_NO_NODE, unmapped = 0;
1242 	bool writable = false;
1243 
1244 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1245 
1246 	pmd = mm_find_pmd(mm, address);
1247 	if (!pmd) {
1248 		result = SCAN_PMD_NULL;
1249 		goto out;
1250 	}
1251 
1252 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1253 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1254 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1255 	     _pte++, _address += PAGE_SIZE) {
1256 		pte_t pteval = *_pte;
1257 		if (is_swap_pte(pteval)) {
1258 			if (++unmapped <= khugepaged_max_ptes_swap) {
1259 				/*
1260 				 * Always be strict with uffd-wp
1261 				 * enabled swap entries.  Please see
1262 				 * comment below for pte_uffd_wp().
1263 				 */
1264 				if (pte_swp_uffd_wp(pteval)) {
1265 					result = SCAN_PTE_UFFD_WP;
1266 					goto out_unmap;
1267 				}
1268 				continue;
1269 			} else {
1270 				result = SCAN_EXCEED_SWAP_PTE;
1271 				goto out_unmap;
1272 			}
1273 		}
1274 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1275 			if (!userfaultfd_armed(vma) &&
1276 			    ++none_or_zero <= khugepaged_max_ptes_none) {
1277 				continue;
1278 			} else {
1279 				result = SCAN_EXCEED_NONE_PTE;
1280 				goto out_unmap;
1281 			}
1282 		}
1283 		if (pte_uffd_wp(pteval)) {
1284 			/*
1285 			 * Don't collapse the page if any of the small
1286 			 * PTEs are armed with uffd write protection.
1287 			 * We could also mark the new huge pmd as
1288 			 * write-protected if any of the small ones is
1289 			 * marked, but that could bring unexpected
1290 			 * userfault messages that fall outside of
1291 			 * the registered range.  So, just keep it simple.
1292 			 */
1293 			result = SCAN_PTE_UFFD_WP;
1294 			goto out_unmap;
1295 		}
1296 		if (pte_write(pteval))
1297 			writable = true;
1298 
1299 		page = vm_normal_page(vma, _address, pteval);
1300 		if (unlikely(!page)) {
1301 			result = SCAN_PAGE_NULL;
1302 			goto out_unmap;
1303 		}
1304 
1305 		if (page_mapcount(page) > 1 &&
1306 				++shared > khugepaged_max_ptes_shared) {
1307 			result = SCAN_EXCEED_SHARED_PTE;
1308 			goto out_unmap;
1309 		}
1310 
1311 		page = compound_head(page);
1312 
1313 		/*
1314 		 * Record which node the original page is from and save this
1315 		 * information to khugepaged_node_load[].
1316 		 * Khugepaged will allocate the hugepage from the node with
1317 		 * the max hit count.
1318 		 */
1319 		node = page_to_nid(page);
1320 		if (khugepaged_scan_abort(node)) {
1321 			result = SCAN_SCAN_ABORT;
1322 			goto out_unmap;
1323 		}
1324 		khugepaged_node_load[node]++;
1325 		if (!PageLRU(page)) {
1326 			result = SCAN_PAGE_LRU;
1327 			goto out_unmap;
1328 		}
1329 		if (PageLocked(page)) {
1330 			result = SCAN_PAGE_LOCK;
1331 			goto out_unmap;
1332 		}
1333 		if (!PageAnon(page)) {
1334 			result = SCAN_PAGE_ANON;
1335 			goto out_unmap;
1336 		}
1337 
1338 		/*
1339 		 * Check if the page has any GUP (or other external) pins.
1340 		 *
1341 		 * Here the check is racy: it may see total_mapcount > refcount
1342 		 * in some cases.
1343 		 * For example, take one process with one forked child process.
1344 		 * The parent has the PMD split due to MADV_DONTNEED, then
1345 		 * the child tries to unmap the whole PMD, but khugepaged
1346 		 * may be scanning the parent between the child clearing the
1347 		 * PageDoubleMap flag and decrementing the mapcount.  So
1348 		 * khugepaged may see total_mapcount > refcount.
1349 		 *
1350 		 * But such a case is ephemeral; we can always retry the
1351 		 * collapse later.  However, it may report a false positive if
1352 		 * the page has excessive GUP pins (i.e. 512).  Anyway, the same
1353 		 * check will be done again later, so the risk seems low.
1354 		 */
1355 		if (!is_refcount_suitable(page)) {
1356 			result = SCAN_PAGE_COUNT;
1357 			goto out_unmap;
1358 		}
1359 		if (pte_young(pteval) ||
1360 		    page_is_young(page) || PageReferenced(page) ||
1361 		    mmu_notifier_test_young(vma->vm_mm, address))
1362 			referenced++;
1363 	}
1364 	if (!writable) {
1365 		result = SCAN_PAGE_RO;
1366 	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1367 		result = SCAN_LACK_REFERENCED_PAGE;
1368 	} else {
1369 		result = SCAN_SUCCEED;
1370 		ret = 1;
1371 	}
1372 out_unmap:
1373 	pte_unmap_unlock(pte, ptl);
1374 	if (ret) {
1375 		node = khugepaged_find_target_node();
1376 		/* collapse_huge_page will return with the mmap_lock released */
1377 		collapse_huge_page(mm, address, hpage, node,
1378 				referenced, unmapped);
1379 	}
1380 out:
1381 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1382 				     none_or_zero, result, unmapped);
1383 	return ret;
1384 }
1385 
1386 static void collect_mm_slot(struct mm_slot *mm_slot)
1387 {
1388 	struct mm_struct *mm = mm_slot->mm;
1389 
1390 	lockdep_assert_held(&khugepaged_mm_lock);
1391 
1392 	if (khugepaged_test_exit(mm)) {
1393 		/* free mm_slot */
1394 		hash_del(&mm_slot->hash);
1395 		list_del(&mm_slot->mm_node);
1396 
1397 		/*
1398 		 * Not strictly needed because the mm exited already.
1399 		 *
1400 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1401 		 */
1402 
1403 		/* khugepaged_mm_lock actually not necessary for the below */
1404 		free_mm_slot(mm_slot);
1405 		mmdrop(mm);
1406 	}
1407 }
1408 
1409 #ifdef CONFIG_SHMEM
1410 /*
1411  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1412  * khugepaged should try to collapse the page table.
1413  */
1414 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1415 					 unsigned long addr)
1416 {
1417 	struct mm_slot *mm_slot;
1418 
1419 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1420 
1421 	spin_lock(&khugepaged_mm_lock);
1422 	mm_slot = get_mm_slot(mm);
1423 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1424 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1425 	spin_unlock(&khugepaged_mm_lock);
1426 	return 0;
1427 }
1428 
1429 /**
1430  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1431  * address haddr.
1432  *
1433  * @mm: process address space where collapse happens
1434  * @addr: THP collapse address
1435  *
1436  * This function checks whether all the PTEs in the PMD are pointing to the
1437  * right THP. If so, retract the page table so the THP can refault in
1438  * as pmd-mapped.
1439  */
1440 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1441 {
1442 	unsigned long haddr = addr & HPAGE_PMD_MASK;
1443 	struct vm_area_struct *vma = find_vma(mm, haddr);
1444 	struct page *hpage;
1445 	pte_t *start_pte, *pte;
1446 	pmd_t *pmd, _pmd;
1447 	spinlock_t *ptl;
1448 	int count = 0;
1449 	int i;
1450 	struct mmu_notifier_range range;
1451 
1452 	if (!vma || !vma->vm_file ||
1453 	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1454 		return;
1455 
1456 	/*
1457 	 * The vm_flags may not have VM_HUGEPAGE if the page was not
1458 	 * collapsed by this mm. But we can still collapse if the page is
1459 	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1460 	 * will not fail the vma for missing VM_HUGEPAGE.
1461 	 */
1462 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1463 		return;
1464 
1465 	hpage = find_lock_page(vma->vm_file->f_mapping,
1466 			       linear_page_index(vma, haddr));
1467 	if (!hpage)
1468 		return;
1469 
1470 	if (!PageHead(hpage))
1471 		goto drop_hpage;
1472 
1473 	pmd = mm_find_pmd(mm, haddr);
1474 	if (!pmd)
1475 		goto drop_hpage;
1476 
1477 	/*
1478 	 * We need to lock the mapping so that from here on, only GUP-fast and
1479 	 * hardware page walks can access the parts of the page tables that
1480 	 * we're operating on.
1481 	 */
1482 	i_mmap_lock_write(vma->vm_file->f_mapping);
1483 
1484 	/*
1485 	 * This spinlock should be unnecessary: Nobody else should be accessing
1486 	 * the page tables under spinlock protection here, only
1487 	 * lockless_pages_from_mm() and the hardware page walker can access page
1488 	 * tables while all the high-level locks are held in write mode.
1489 	 */
1490 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1491 
1492 	/* step 1: check all mapped PTEs are to the right huge page */
1493 	for (i = 0, addr = haddr, pte = start_pte;
1494 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1495 		struct page *page;
1496 
1497 		/* empty pte, skip */
1498 		if (pte_none(*pte))
1499 			continue;
1500 
1501 		/* page swapped out, abort */
1502 		if (!pte_present(*pte))
1503 			goto abort;
1504 
1505 		page = vm_normal_page(vma, addr, *pte);
1506 
1507 		/*
1508 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1509 		 * page table, but the new page will not be a subpage of hpage.
1510 		 */
1511 		if (hpage + i != page)
1512 			goto abort;
1513 		count++;
1514 	}
1515 
1516 	/* step 2: adjust rmap */
1517 	for (i = 0, addr = haddr, pte = start_pte;
1518 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1519 		struct page *page;
1520 
1521 		if (pte_none(*pte))
1522 			continue;
1523 		page = vm_normal_page(vma, addr, *pte);
1524 		page_remove_rmap(page, false);
1525 	}
1526 
1527 	pte_unmap_unlock(start_pte, ptl);
1528 
1529 	/* step 3: set proper refcount and mm_counters. */
1530 	if (count) {
1531 		page_ref_sub(hpage, count);
1532 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1533 	}
1534 
1535 	/* step 4: collapse pmd */
1536 	/* we make no change to anon, but protect concurrent anon page lookup */
1537 	if (vma->anon_vma)
1538 		anon_vma_lock_write(vma->anon_vma);
1539 
1540 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
1541 				haddr + HPAGE_PMD_SIZE);
1542 	mmu_notifier_invalidate_range_start(&range);
1543 	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1544 	mm_dec_nr_ptes(mm);
1545 	tlb_remove_table_sync_one();
1546 	mmu_notifier_invalidate_range_end(&range);
1547 	pte_free(mm, pmd_pgtable(_pmd));
1548 
1549 	if (vma->anon_vma)
1550 		anon_vma_unlock_write(vma->anon_vma);
1551 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1552 
1553 drop_hpage:
1554 	unlock_page(hpage);
1555 	put_page(hpage);
1556 	return;
1557 
1558 abort:
1559 	pte_unmap_unlock(start_pte, ptl);
1560 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1561 	goto drop_hpage;
1562 }
1563 
1564 static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1565 {
1566 	struct mm_struct *mm = mm_slot->mm;
1567 	int i;
1568 
1569 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1570 		return;
1571 
1572 	if (!mmap_write_trylock(mm))
1573 		return;
1574 
1575 	if (unlikely(khugepaged_test_exit(mm)))
1576 		goto out;
1577 
1578 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1579 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1580 
1581 out:
1582 	mm_slot->nr_pte_mapped_thp = 0;
1583 	mmap_write_unlock(mm);
1584 }
1585 
1586 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1587 {
1588 	struct vm_area_struct *vma;
1589 	struct mm_struct *mm;
1590 	unsigned long addr;
1591 	pmd_t *pmd, _pmd;
1592 
1593 	i_mmap_lock_write(mapping);
1594 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1595 		/*
1596 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1597 		 * got written to. These VMAs are likely not worth investing
1598 		 * mmap_write_lock(mm) in, as the PMD-mapping is likely to be
1599 		 * split later.
1600 		 *
1601 		 * Note that the vma->anon_vma check is racy: it can be set up
1602 		 * after the check but before we take the mmap_lock in the fault
1603 		 * path. But the page lock would prevent establishing any new
1604 		 * ptes of the page, so we are safe.
1605 		 *
1606 		 * An alternative would be to drop the check, but check that the
1607 		 * page table is clear before calling pmdp_collapse_flush() under
1608 		 * the ptl. That has a higher chance of recovering a THP for the
1609 		 * VMA, but also a higher cost. It would probably also require
1610 		 * locking the anon_vma.
1611 		 */
1612 		if (vma->anon_vma)
1613 			continue;
1614 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1615 		if (addr & ~HPAGE_PMD_MASK)
1616 			continue;
1617 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1618 			continue;
1619 		mm = vma->vm_mm;
1620 		pmd = mm_find_pmd(mm, addr);
1621 		if (!pmd)
1622 			continue;
1623 		/*
1624 		 * We need exclusive mmap_lock to retract page table.
1625 		 *
1626 		 * We use trylock due to lock inversion: we need to acquire
1627 		 * mmap_lock while holding page lock. Fault path does it in
1628 		 * reverse order. Trylock is a way to avoid deadlock.
1629 		 */
1630 		if (mmap_write_trylock(mm)) {
1631 			if (!khugepaged_test_exit(mm)) {
1632 				struct mmu_notifier_range range;
1633 
1634 				mmu_notifier_range_init(&range,
1635 							MMU_NOTIFY_CLEAR, 0,
1636 							NULL, mm, addr,
1637 							addr + HPAGE_PMD_SIZE);
1638 				mmu_notifier_invalidate_range_start(&range);
1639 				/* assume page table is clear */
1640 				_pmd = pmdp_collapse_flush(vma, addr, pmd);
1641 				mm_dec_nr_ptes(mm);
1642 				tlb_remove_table_sync_one();
1643 				pte_free(mm, pmd_pgtable(_pmd));
1644 				mmu_notifier_invalidate_range_end(&range);
1645 			}
1646 			mmap_write_unlock(mm);
1647 		} else {
1648 			/* Try again later */
1649 			khugepaged_add_pte_mapped_thp(mm, addr);
1650 		}
1651 	}
1652 	i_mmap_unlock_write(mapping);
1653 }
1654 
1655 /**
1656  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1657  *
1658  * @mm: process address space where the collapse happens
1659  * @file: file that the collapse operates on
1660  * @start: collapse start offset in the file
1661  * @hpage: newly allocated huge page for the collapse
1662  * @node: node the new huge page is allocated from
1663  *
1664  * Basic scheme is simple, details are more complex:
1665  *  - allocate and lock a new huge page;
1666  *  - scan page cache replacing old pages with the new one
1667  *    + swap/gup in pages if necessary;
1668  *    + fill in gaps;
1669  *    + keep old pages around in case rollback is required;
1670  *  - if replacing succeeds:
1671  *    + copy data over;
1672  *    + free old pages;
1673  *    + unlock huge page;
1674  *  - if replacing failed:
1675  *    + put all pages back and unfreeze them;
1676  *    + restore gaps in the page cache;
1677  *    + unlock and free huge page;
1678  */
1679 static void collapse_file(struct mm_struct *mm,
1680 		struct file *file, pgoff_t start,
1681 		struct page **hpage, int node)
1682 {
1683 	struct address_space *mapping = file->f_mapping;
1684 	gfp_t gfp;
1685 	struct page *new_page;
1686 	pgoff_t index, end = start + HPAGE_PMD_NR;
1687 	LIST_HEAD(pagelist);
1688 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1689 	int nr_none = 0, result = SCAN_SUCCEED;
1690 	bool is_shmem = shmem_file(file);
1691 	int nr;
1692 
1693 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1694 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1695 
1696 	/* Only allocate from the target node */
1697 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1698 
1699 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1700 	if (!new_page) {
1701 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1702 		goto out;
1703 	}
1704 
1705 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1706 		result = SCAN_CGROUP_CHARGE_FAIL;
1707 		goto out;
1708 	}
1709 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1710 
1711 	/* This will be less messy when we use multi-index entries */
1712 	do {
1713 		xas_lock_irq(&xas);
1714 		xas_create_range(&xas);
1715 		if (!xas_error(&xas))
1716 			break;
1717 		xas_unlock_irq(&xas);
1718 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1719 			result = SCAN_FAIL;
1720 			goto out;
1721 		}
1722 	} while (1);
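
	/*
	 * The do/while above is the usual XArray allocation idiom:
	 * xas_create_range() may set an -ENOMEM error while the lock is
	 * held, in which case xas_nomem() performs the allocation
	 * outside the lock and the operation is retried.
	 */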

	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fds,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in a loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (PageDirty(page) ||
				  PageWriteback(page))) {
			/*
			 * khugepaged only works on read-only fds, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			putback_lru_page(page);
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse
		 * if something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}
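
	/*
	 * Falling out of the loop (rather than branching to a label)
	 * means every slot in [start, end) now holds new_page: former
	 * holes were charged and counted in nr_none, and the old pages
	 * sit on pagelist with refcounts frozen at zero, ready to be
	 * copied from or rolled back.
	 */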
	nr = thp_nr_pages(new_page);

	if (is_shmem)
		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
	else {
		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
			goto xa_locked;
		}
	}

	if (nr_none) {
		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with the new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		if (is_shmem)
			set_page_dirty(new_page);
		lru_cache_add(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	/* TODO: tracepoints */
}
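
/*
 * Reference-count bookkeeping in collapse_file(), spelled out: a small
 * page enters the replacement loop with page_count() == 3 (our pin,
 * the page cache, the LRU isolation) and is frozen to 0.  On success,
 * page_ref_unfreeze(page, 1) leaves only our pin, which the following
 * put_page() drops, freeing the page.  On rollback,
 * page_ref_unfreeze(page, 2) leaves the page-cache reference plus one
 * more, which putback_lru_page() drops when it returns the page to
 * the LRU.
 */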

static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And an rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
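
/*
 * Worked example for the acceptance check above: with HPAGE_PMD_NR ==
 * 512 and khugepaged_max_ptes_none at its maximum of 511, a single
 * present page in the window is enough to try collapse_file(); with
 * max_ptes_none == 0, all 512 slots must already be populated.
 */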
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
}
#endif
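
/*
 * With CONFIG_SHMEM disabled, the stubs above are never executed:
 * callers only reach khugepaged_scan_file() under
 * IS_ENABLED(CONFIG_SHMEM), so the call is compiled out and
 * BUILD_BUG() enforces that it stays dead code.
 */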

static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times).  Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
			goto skip;

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_lock so break loop */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;

	lru_add_drain_all();

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
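
/*
 * Budget example: with the default pages_to_scan of 8 * HPAGE_PMD_NR
 * noted at the top of this file (4096 pte slots, 512 per pmd), each
 * khugepaged_do_scan() pass covers roughly eight pmd-sized ranges
 * before the daemon goes back to sleep.
 */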

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
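
/*
 * Timing example: with the default scan_sleep_millisecs of 10000, the
 * daemon wakes every 10 seconds while work is queued.  A value of 0
 * makes khugepaged_wait_work() return immediately, i.e. continuous
 * scanning; with no work at all, the thread sleeps until
 * khugepaged_wait is kicked or the kthread is asked to stop.
 */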

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!khugepaged_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}
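
/*
 * Worked example (illustrative, assuming 4KiB pages, MIGRATE_PCPTYPES
 * == 3, pageblock_nr_pages == 512 and two populated non-movable
 * zones):
 *
 *	recommended_min  = 512 * 2 * 2		=  2048 pages
 *	recommended_min += 512 * 2 * 3 * 3	= 11264 pages total
 *	recommended_min <<= (PAGE_SHIFT - 10)	-> 45056 KiB
 *
 * i.e. min_free_kbytes is raised to ~44MiB, unless that exceeds 5% of
 * lowmem or the current value is already higher.
 */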

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}
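
/*
 * start_stop_khugepaged() is the only place the daemon thread is
 * created or stopped; toggling /sys/kernel/mm/transparent_hugepage/
 * enabled ends up here under khugepaged_mutex, which also serializes
 * against khugepaged_min_free_kbytes_update() below.
 */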

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}