/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by the resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
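
/*
 * Illustrative sketch (not part of this header): counting the huge pages
 * covered by a resv_map by walking its [from, to) regions.  The "resv"
 * pointer is a hypothetical, already-initialized resv_map; its lock must
 * be held while the region list is traversed.
 *
 *	struct file_region *rg;
 *	long npages = 0;
 *
 *	spin_lock(&resv->lock);
 *	list_for_each_entry(rg, &resv->regions, link)
 *		npages += rg->to - rg->from;
 *	spin_unlock(&resv->lock);
 */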

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
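
/*
 * Example use of for_each_hstate() (a sketch, not part of this header):
 * walking every registered huge page size, e.g. to report pool state.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */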

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
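
/*
 * Typical subpool lifetime, sketched after the hugetlbfs mount path
 * (error handling abbreviated; "h", "max_hpages" and "min_hpages" are
 * assumed to come from the caller):
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */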

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
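
/*
 * Sketch of how the fault mutex table is used to serialize faults on a
 * single mapping offset (as in hugetlb_fault() and hugetlbfs fallocate;
 * "mapping" and "idx" are assumed to be provided by the caller):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */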

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
				 int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
				unsigned long address, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **pagep)
{
	BUG();
	return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
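
/*
 * Sketch of a hugetlb_file_setup() call, modelled on the MAP_HUGETLB
 * path in mm/mmap.c (argument details may differ; treat this as
 * illustrative only):
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */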

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
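
/*
 * For example, on x86-64 hstate_sizelog(21) looks up the 2 MB hstate and
 * hstate_sizelog(30) the 1 GB one, while 0 selects the default huge page
 * size.  (Illustrative; which sizes exist depends on the architecture
 * and boot-time configuration.)
 */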

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}
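
/* Size of a huge page in 512-byte blocks, as used for inode->i_blocks. */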
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it
 * determines whether or not a huge page should be placed in a movable
 * zone. Movability of a huge page only needs to be considered if its
 * size is supported for migration, since there is no reason for the
 * page to be movable if it is not migratable to start with. The huge
 * page must also be large enough to be placed in a movable zone while
 * still being feasible to migrate; mere presence in a movable zone does
 * not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
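
/*
 * Typical guard at an init or reporting entry point (a sketch; real
 * callers such as hugetlb_init() bail out the same way):
 *
 *	if (!hugepages_supported())
 *		return 0;
 */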

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

void set_page_huge_active(struct page *page);

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
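
/*
 * Typical use of huge_pte_lock() (a sketch; "h", "mm" and "ptep" are
 * assumed to be set up by the caller):
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	... inspect or update the entry at ptep ...
 *	spin_unlock(ptl);
 */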

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

#endif /* _LINUX_HUGETLB_H */