/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
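
/*
 * Illustrative sketch (not part of this header's contract): walking
 * every registered huge page size once hstates has been populated:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: order %u\n", h->name, huge_page_order(h));
 */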

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
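
/*
 * Hedged sketch of the subpool lifecycle as a hugetlbfs mount would use
 * it (error paths elided; max_hpages/min_hpages are caller-supplied
 * limits).  A subpool caps and/or guarantees huge pages for one mount:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */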

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
				struct vm_area_struct *vma,
				vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
				long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
				pgoff_t idx);
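
/*
 * Hedged usage sketch: callers serialize hugetlb faults on a given
 * (mapping, index) pair by hashing into hugetlb_fault_mutex_table,
 * roughly:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault in or instantiate the page ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */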

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
				 int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd_pte(vma, addr, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
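
/*
 * Hedged sketch of a hugetlb_file_setup() call, modeled on how anonymous
 * MAP_HUGETLB mappings obtain their backing file (flags/len here are
 * illustrative caller state):
 *
 *	struct user_struct *user = NULL;
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */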

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}
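
/*
 * Illustrative example (hedged): page_size_log is the log2 of the
 * requested page size, as encoded in mmap()'s MAP_HUGE_SHIFT field.
 * hstate_sizelog(0) selects the default huge page size, while e.g.
 * hstate_sizelog(21) asks for the 2 MB hstate (1UL << 21) and yields
 * NULL if no such size is registered:
 *
 *	struct hstate *h;
 *
 *	h = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (!h)
 *		return -EINVAL;
 */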

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
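
/*
 * Worked example (assuming a 4 KB base page, i.e. PAGE_SHIFT == 12):
 * a 2 MB hstate has order 9, so huge_page_size() = 4 KB << 9 = 2 MB,
 * pages_per_huge_page() = 1 << 9 = 512, and blocks_per_huge_page() =
 * 2 MB / 512-byte sectors = 4096.
 */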

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check differs from the migration check: it decides
 * whether a huge page may be placed in a movable zone.  A huge page
 * only needs to be movable if its size supports migration; there is
 * no reason for it to be movable if it cannot be migrated to begin
 * with.  The page must also be small enough that migrating it out of
 * a movable zone remains feasible; merely residing in a movable zone
 * does not make migration feasible.
 *
 * So even though large sizes such as gigantic pages are migratable,
 * they should not be movable, because migrating them out of a movable
 * zone is not feasible.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time.  Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
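
/*
 * Hedged sketch of the start/commit protocol above, as a protection
 * change would use it (locking, TLB flushing and arch fixups elided):
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */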

void set_page_huge_active(struct page *page);

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
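
/*
 * Hedged usage sketch for huge_pte_lock(): take the per-pmd or per-mm
 * page table lock around a huge PTE update, then release it:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... read or update *ptep under the lock ...
 *	spin_unlock(ptl);
 */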

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

#endif /* _LINUX_HUGETLB_H */