/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
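
/*
 * Illustrative note (not part of the interface above): a subpool backs the
 * size accounting of a hugetlbfs mount.  For example, a mount created with
 * "min_size=4M" on a 2MB hstate would typically end up with a subpool whose
 * min_hpages is 2, with rsv_hpages tracking how much of that minimum is
 * currently reserved against the global pool.
 */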

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
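
/*
 * Sketch of the interval notation above (purely illustrative): a resv_map
 * whose region list contains a single file_region with from == 2 and
 * to == 5 describes the huge page indices [2, 5), i.e. indices 2, 3 and 4,
 * for a total of to - from = 3 huge pages.
 */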

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
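
/*
 * Minimal illustrative use of for_each_hstate(), iterating every registered
 * huge page size:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu free pages\n", h->name,
 *			h->free_huge_pages);
 */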

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
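
/*
 * Typical caller pattern for the fault mutex table (illustrative sketch;
 * the real serialization sites live in mm/hugetlb.c and fs/hugetlbfs):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault in or truncate the page at index idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */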

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
				 int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
				unsigned long address, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory.  If an arch supports hugepages
 * at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}
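
/*
 * Illustrative note: page_size_log is the log2 of the requested huge page
 * size; callers on the mmap()/shmget() paths typically extract it from the
 * MAP_HUGE_SHIFT / SHM_HUGE_SHIFT flag encoding.  For example, a request
 * for 2MB pages would pass page_size_log == 21, and hstate_sizelog() maps
 * that to the hstate registered for 1UL << 21 bytes (or NULL if no such
 * hstate exists).
 */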

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}
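
/*
 * Illustrative note: an hstate is "gigantic" when its order is at least
 * MAX_ORDER, i.e. its pages are too large to come from the buddy allocator
 * and must instead be allocated at boot time or from CMA.  With 4K base
 * pages and a typical MAX_ORDER of 11, 2MB pages (order 9) are not gigantic
 * while 1GB pages (order 18) are.
 */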

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}
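
/*
 * Worked example of the helpers above (assuming x86-64 with 4K base pages):
 * the 2MB hstate has order 9, so huge_page_size() is 4K << 9 = 2MB,
 * huge_page_shift() is 9 + 12 = 21, huge_page_mask() is ~(2MB - 1), and
 * pages_per_huge_page() is 1 << 9 = 512 base pages.
 */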

/* Number of 512-byte (sector-sized) blocks covered by one huge page. */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.  It
 * determines whether or not a huge page should be placed in a movable
 * zone.  Movability only matters if the huge page size is supported for
 * migration: there is no reason for a huge page to be movable if it is
 * not migratable to start with.  The huge page size must also be one that
 * can be placed in a movable zone while still being feasible to migrate
 * out of it; merely sitting in a movable zone does not make migration
 * feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
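
/*
 * Illustrative sketch of how the two mask helpers combine: a caller that
 * wants to restrict allocation to one node might do something like
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE);
 *
 * which yields htlb_alloc_mask(h) (GFP_HIGHUSER or GFP_HIGHUSER_MOVABLE,
 * depending on movability) with __GFP_THISNODE carried over, while any
 * other bits in the caller's mask are dropped.
 */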

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time.  Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

void set_page_huge_active(struct page *page);

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
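
/*
 * Typical usage of huge_pte_lock() (illustrative only; the real callers
 * live in mm/hugetlb.c):
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... inspect or update the huge PTE at ptep ...
 *
 *	spin_unlock(ptl);
 */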

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * Architectures with special requirements for evicting HUGETLB backing TLB
 * entries can implement this.  Otherwise we fall back to the generic range
 * flush.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

#endif /* _LINUX_HUGETLB_H */