/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
				  struct vm_area_struct *vma,
				  pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, unsigned long old_end,
			  pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot,
			   int prot_numa);
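
/*
 * vmf_insert_pfn_pmd()/vmf_insert_pfn_pud() install a huge pfn mapping at
 * vmf->address on behalf of a driver's ->huge_fault() handler. A minimal,
 * illustrative sketch (the mydrv_* names are hypothetical, not part of
 * this API):
 *
 *	static vm_fault_t mydrv_huge_fault(struct vm_fault *vmf,
 *					   enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn = mydrv_addr_to_pfn(vmf->vma, vmf->address);
 *
 *		if (pe_size != PE_SIZE_PMD)
 *			return VM_FAULT_FALLBACK;
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */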
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
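
/*
 * Bit indices into transparent_hugepage_flags, toggled at run time
 * through the sysfs files under /sys/kernel/mm/transparent_hugepage/.
 */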
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count,
					  enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf,
					 enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;
/*
 * To be used on vmas which are known to support THP.
 * Use transparent_hugepage_enabled otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;
	/*
	 * For dax vmas, try to always use hugepage mappings. If the kernel
	 * does not support hugepages, fsdax mappings will fall back to
	 * PAGE_SIZE mappings, and device-dax namespaces, which try to
	 * guarantee a given mapping size, will fail to enable.
	 */
	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

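/*
 * A huge page may be mapped at @haddr only if, for file mappings, a
 * PMD-aligned virtual address corresponds to a PMD-aligned file offset,
 * and the whole HPAGE_PMD_SIZE range fits inside the vma.
 */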
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
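
/*
 * Queue a partially unmapped compound page for deferred splitting; the
 * deferred split shrinker splits queued pages under memory pressure.
 */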
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

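/*
 * Split a huge pmd back to normal pte mappings, but only bother calling
 * __split_huge_pmd() when the pmd is actually huge: transparent huge,
 * devmap, or a pmd swap/migration entry.
 */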
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

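/* A pmd that is neither none nor present holds a swap or migration entry. */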
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/*
 * mmap_sem must be held on entry. Returns the page table lock, held, if
 * the pmd is a huge, devmap or swap entry; returns NULL otherwise.
 */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
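
/*
 * Typical pattern (sketch): a non-NULL return means the huge pmd is
 * stable until the returned page table lock is dropped:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */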

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_ORDER;
	return 0;
}

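/* Number of base pages backing @page: HPAGE_PMD_NR for a THP, 1 otherwise. */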
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

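/*
 * The huge zero page is a single PMD-sized page of zeroes shared system
 * wide for read faults on anonymous huge mappings; huge_zero_pfn caches
 * its pfn so that is_huge_zero_pmd() need not dereference the page.
 */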
static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The global or per-memcg deferred list is kept in the second tail
	 * page; its first word is occupied by compound_head (see the tail
	 * page layout in the struct page definition).
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 0;
}

#define hpage_nr_pages(x) 1

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}
static inline void free_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */