/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_HUGETLB_H
#define _ASM_TILE_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>

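/* No address ranges are reserved exclusively for huge pages on TILE. */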
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

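/* The huge-PTE accessors below simply delegate to the generic PTE helpers. */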
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	ptep_clear_flush(vma, addr, ptep);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

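/* No architecture-specific per-page preparation or cleanup is required. */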
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#ifdef CONFIG_HUGETLB_SUPER_PAGES
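/*
 * For the additional "super" page sizes (anything other than the native
 * PMD- or PUD-level huge page size), mark the PTE with the HV_PTE_SUPER bit.
 */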
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));
	if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
		entry = pte_mksuper(entry);
	return entry;
}
#define arch_make_huge_pte arch_make_huge_pte

/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
enum {
	HUGE_SHIFT_PGDIR = 0,
	HUGE_SHIFT_PMD = 1,
	HUGE_SHIFT_PAGE = 2,
	HUGE_SHIFT_ENTRIES
};
extern int huge_shift[HUGE_SHIFT_ENTRIES];
#endif

#endif /* _ASM_TILE_HUGETLB_H */