// SPDX-License-Identifier: GPL-2.0
/*
 * arch/metag/mm/hugetlbpage.c
 *
 * METAG HugeTLB page support.
 *
 * Cloned from SuperH
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
int prepare_hugepage_range(struct file *file, unsigned long addr,
			   unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct hstate *h = hstate_file(file);
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	if (TASK_SIZE - len < addr)
		return -EINVAL;

	vma = find_vma(mm, ALIGN_HUGEPT(addr));
	if (vma && !(vma->vm_flags & MAP_HUGETLB))
		return -EINVAL;

	vma = find_vma(mm, addr);
	if (vma) {
		if (addr + len > vma->vm_start)
			return -EINVAL;
		if (!(vma->vm_flags & MAP_HUGETLB) &&
		    (ALIGN_HUGEPT(addr + len) > vma->vm_start))
			return -EINVAL;
	}
	return 0;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, pmd, addr);
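	/*
	 * The page size is recorded in the first-level (pgd) entry on this
	 * architecture, so mark this pgd as covering huge pages.
	 */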
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_offset_kernel(pmd, addr);

	return pte;
}

int pmd_huge(pmd_t pmd)
{
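	/* The pmd maps huge pages when its encoded page size exceeds the base page size. */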
	return pmd_page_shift(pmd) > PAGE_SHIFT;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA

/*
 * Look for an unmapped area starting after another hugetlb vma.
 * There are guaranteed to be no huge pte's spare if all the huge pages are
 * full size (4MB), so in that case compile out this search.
 */
#if HPAGE_SHIFT == HUGEPT_SHIFT
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	return 0;
}
#else
static unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	int after_huge;

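	/*
	 * context.part_huge remembers a second-level page table block that is
	 * only partly filled with huge mappings; resume searching there so new
	 * huge mappings are packed into it first.
	 */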
	if (mm->context.part_huge) {
		start_addr = mm->context.part_huge;
		after_huge = 1;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		after_huge = 0;
	}
new_search:
	addr = start_addr;

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto new_search;
			}
			return 0;
		}
		/* skip ahead if we've aligned right over some vmas */
		if (vma && vma->vm_end <= addr)
			continue;
		/* space before the next vma? */
		if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
			    <= vma->vm_start)) {
			unsigned long end = addr + len;
			if (end & HUGEPT_MASK)
				mm->context.part_huge = end;
			else if (addr == mm->context.part_huge)
				mm->context.part_huge = 0;
			return addr;
		}
		if (vma->vm_flags & MAP_HUGETLB) {
			/* space after a huge vma in 2nd level page table? */
			if (vma->vm_end & HUGEPT_MASK) {
				after_huge = 1;
				/* no need to align to the next PT block */
				addr = vma->vm_end;
				continue;
			}
		}
		after_huge = 0;
		addr = ALIGN_HUGEPT(vma->vm_end);
	}
}
#endif

/* Do a full search to find an area without any nearby normal pages. */
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
	struct vm_unmapped_area_info info;

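	/*
	 * Ask for an address aligned to a whole second-level page table block
	 * so the huge mappings don't share a page table with normal pages.
	 */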
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & HUGEPT_MASK;
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

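	/* Round any hint up to a hugepage boundary and use it if the range is acceptable. */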
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		if (!prepare_hugepage_range(file, addr, len))
			return addr;
	}

	/*
	 * Look for an existing hugetlb vma with space after it (this is to
	 * minimise fragmentation caused by huge pages).
	 */
	addr = hugetlb_get_unmapped_area_existing(len);
	if (addr)
		return addr;

	/*
	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
	 * for huge pages.
	 */
	return hugetlb_get_unmapped_area_new_pmd(len);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/

/* necessary for boot time 4MB huge page allocation */
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
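	/* Only the single compile-time hugepage size (1 << HPAGE_SHIFT) is supported. */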
	if (ps == (1 << HPAGE_SHIFT)) {
		hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);