/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * A HugeTLB page needs more metadata stored than fits in its head struct
 * page, so some of it is stashed in the struct pages of the tail pages.
 * To avoid conflicts when more tail struct pages are pressed into service
 * later, the indexes of the tail pages used for metadata are gathered here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
	__NR_USED_SUBPAGE,
};
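
/*
 * Illustrative sketch (not itself part of the API): with the layout above,
 * the subpool pointer lives in the page->private field of the tail page at
 * index SUBPAGE_INDEX_SUBPOOL, i.e. a hypothetical reader would do:
 *
 *	struct hugepage_subpool *spool =
 *		(void *)page_private(head + SUBPAGE_INDEX_SUBPOOL);
 *
 * The real accessors, hugetlb_page_subpool() and hugetlb_set_page_subpool(),
 * are defined further down in this file.
 */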

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
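
/*
 * Worked example (illustrative only): a mapping holding reservations for
 * huge page indices 0-3 and 6-7 is described by two file_regions:
 *
 *	[0, 4)  ->  4 - 0 = 4 huge pages
 *	[6, 8)  ->  8 - 6 = 2 huge pages
 *
 * Both entries live on resv_map->regions and are only walked or modified
 * with resv_map->lock held.
 */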

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
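
/*
 * Example (sketch): iterating over every registered huge page size, e.g.
 * to dump per-pool counters:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */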

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
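
/*
 * Subpool lifecycle sketch (hypothetical caller; the locals h, max_hpages
 * and min_hpages are assumptions): hugetlbfs creates one subpool per mount
 * to enforce its "size=" and "min_size=" options:
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);	(dropped again at unmount)
 */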

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
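
/*
 * Fault serialization sketch (as done by callers in mm/hugetlb.c and
 * fs/hugetlbfs): faults on the same (mapping, index) pair are serialized
 * by hashing into the fault mutex table:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */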

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
				 int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
				unsigned long address, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount, and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct ucounts **ucounts, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}
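
/*
 * Example (sketch of a hypothetical caller): mmap() uses this check to
 * find the hstate backing a mapping and round the length accordingly:
 *
 *	if (is_file_hugepages(file))
 *		len = ALIGN(len, huge_page_size(hstate_file(file)));
 */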

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct ucounts **ucounts, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)		\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
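
/*
 * For each flag, the HPAGEFLAG() invocations above generate a family of
 * accessors of the form (sketch for HPG_migratable):
 *
 *	int HPageMigratable(struct page *page);
 *	void SetHPageMigratable(struct page *page);
 *	void ClearHPageMigratable(struct page *page);
 *
 * e.g. a newly faulted-in page is marked as a migration candidate with
 * SetHPageMigratable(page).
 */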

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
	unsigned int nr_free_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}
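
/*
 * Example (sketch): page_size_log typically comes from the mmap()/shmget()
 * flags, i.e. (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK, so on a 64-bit
 * system with 2 MB huge pages registered:
 *
 *	hstate_sizelog(0)  -> &default_hstate
 *	hstate_sizelog(21) -> the 2 MB hstate (1UL << 21)
 *	hstate_sizelog(64) -> NULL (shift out of range)
 */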

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
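
/*
 * Worked example (assuming 4 KiB base pages): for the 2 MiB hstate,
 * h->order == 9, so:
 *
 *	huge_page_size(h)       = 4 KiB << 9   = 2 MiB
 *	huge_page_shift(h)      = 9 + 12       = 21
 *	pages_per_huge_page(h)  = 1 << 9       = 512
 *	blocks_per_huge_page(h) = 2 MiB / 512  = 4096 (512-byte sectors)
 */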

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it
 * determines whether or not a huge page should be placed in a movable
 * zone. Movability of a huge page only matters if its size is supported
 * for migration in the first place; there is no reason for a huge page
 * to be movable if it is not migratable to begin with. The page must
 * also be large enough to be placed in a movable zone while still being
 * feasible to migrate; mere presence in a movable zone does not make
 * migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide at boot time whether they support huge pages.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
extern bool hugetlb_free_vmemmap_enabled;
#else
#define hugetlb_free_vmemmap_enabled	false
#endif

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
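
/*
 * Typical usage (sketch): take the split page table lock (or
 * mm->page_table_lock) covering a huge PTE, operate on the entry, then
 * release it:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */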

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * Architectures with special requirements for evicting hugetlb-backing
 * TLB entries can implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */