/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>


/*
 * MIPS reserves no address region exclusively for huge pages, so no
 * range is ever "hugepage only".  Always reports false (0).
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
22
prepare_hugepage_range(struct file * file,unsigned long addr,unsigned long len)23 static inline int prepare_hugepage_range(struct file *file,
24 unsigned long addr,
25 unsigned long len)
26 {
27 unsigned long task_size = STACK_TOP;
28 struct hstate *h = hstate_file(file);
29
30 if (len & ~huge_page_mask(h))
31 return -EINVAL;
32 if (addr & ~huge_page_mask(h))
33 return -EINVAL;
34 if (len > task_size)
35 return -ENOMEM;
36 if (task_size - len < addr)
37 return -EINVAL;
38 return 0;
39 }
40
/*
 * Tear down the page-table pages backing a huge page region.  MIPS has
 * no special requirements here, so this simply defers to the generic
 * free_pgd_range(), bounded by @floor/@ceiling.
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr,
					  unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
49
/*
 * Install a huge PTE.  Huge PTEs need no extra handling on MIPS, so
 * this is just the ordinary set_pte_at().
 */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
55
/*
 * Atomically fetch the current huge PTE and invalidate the slot.
 *
 * The slot is repointed at invalid_pte_table rather than zeroed, which
 * is how an empty entry is represented on MIPS.  Returns the previous
 * PTE value.
 */
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t old = *ptep;
	pte_t null_pte;

	pte_val(null_pte) = (unsigned long)invalid_pte_table;
	set_pte_at(mm, addr, ptep, null_pte);
	return old;
}
66
/*
 * Invalidate a huge PTE and flush it from the TLB.
 *
 * The PTE is cleared before flush_tlb_page() runs so that another CPU
 * cannot refill its TLB from the stale entry in the window between the
 * flush and the installation of a new huge PTE.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}
78
/*
 * Test whether a huge PTE is empty.  On MIPS an empty entry is either
 * all zero or a pointer at the shared invalid_pte_table; the global
 * bit is masked off first since it is mirrored across PTE pairs.
 */
static inline int huge_pte_none(pte_t pte)
{
	unsigned long v = pte_val(pte) & ~_PAGE_GLOBAL;

	return v == 0 || v == (unsigned long)invalid_pte_table;
}
84
/*
 * Remove write permission from a huge PTE; identical to the base-page
 * pte_wrprotect() on MIPS.
 */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
89
/*
 * Write-protect a huge PTE in place; defers to the base-page
 * ptep_set_wrprotect().
 */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}
95
/*
 * Update the access/dirty flags of a huge PTE.
 *
 * Installs @pte and flushes the TLB only when it actually differs from
 * the current entry.  Returns nonzero when an update (and flush) was
 * performed, 0 when the entry was already up to date.
 */
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte,
					     int dirty)
{
	if (pte_same(*ptep, pte))
		return 0;

	set_pte_at(vma->vm_mm, addr, ptep, pte);
	/*
	 * The huge region may also be mapped by standard sized TLB
	 * entries; flush the whole huge-page range to catch them all.
	 */
	flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
	return 1;
}
113
/*
 * Read a huge PTE.  A plain dereference suffices on MIPS; no special
 * accessor is required.
 */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
118
/*
 * Hook for clearing arch-specific page flags when a huge page is
 * freed.  MIPS keeps no such state, so this is a no-op.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#endif /* __ASM_HUGETLB_H */