// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless get_user_pages_fast for s390
 *
 * Copyright IBM Corp. 2010
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance-critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
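
/*
 * Scan the ptes mapping [addr, end) below a pmd entry the caller has already
 * checked.  Take a speculative reference on each page; bail out (return 0,
 * forcing the slow path) if an entry is invalid, special, protnone,
 * write-protected for a write request, or changes while we look at it.
 */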
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	pte_t *ptep, pte;

	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		/* Similar to the PMD case, NUMA hinting must take slow path */
		if (pte_protnone(pte))
			return 0;
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);
		if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
		    || !page_cache_get_speculative(head)))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

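/*
 * Try to pin all pages of a large pmd (segment) mapping.  References are
 * taken on the compound head; if the speculative reference cannot be taken
 * or the pmd entry changed underneath us, the pages recorded so far are
 * backed out and 0 is returned so the caller falls back to the slow path.
 */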
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	int refs;

	mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
	if ((pmd_val(pmd) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
	    || !page_cache_add_speculative(head, refs))) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

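/*
 * Walk the pmd level for [addr, end).  Large pmds are handed to
 * gup_huge_pmd(), normal ones to gup_pte_range().  Empty and NUMA hinting
 * (protnone) entries force a return to the slow path.
 */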
static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	pmdp = (pmd_t *) pudp;
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

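/*
 * Like gup_huge_pmd(), but for a large pud mapping: pin every page of the
 * range via the compound head and back out if the pud entry changed while
 * we walked it.
 */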
static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	int refs;

	mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
	if ((pud_val(pud) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));

	refs = 0;
	head = pud_page(pud);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
	    || !page_cache_add_speculative(head, refs))) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

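/*
 * Walk the pud level for [addr, end).  s390 folds unused table levels:
 * only if the p4d entry is a region-second-table entry does it point to a
 * separate pud (region-third) table, otherwise the p4d slot itself is
 * reused as the pud entry.
 */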
static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	pudp = (pud_t *) p4dp;
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) p4d_deref(p4d);
	pudp += pud_index(addr);
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
					  nr))
				return 0;
		} else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
					  nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

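/*
 * Walk the p4d level for [addr, end).  As at the pud level, table folding
 * applies: only a region-first-table pgd entry points to a separate p4d
 * table, otherwise the pgd slot is reused as the p4d entry.
 */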
static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	p4dp = (p4d_t *) pgdp;
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4dp = (p4d_t *) pgd_deref(pgd);
	p4dp += p4d_index(addr);
	do {
		p4d = *p4dp;
		barrier();
		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if ((end <= start) || (end > mm->context.asce_limit))
		return 0;
	/*
	 * local_irq_save() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr, ret;

	might_sleep();
	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	if (nr == nr_pages)
		return nr;

	/* Try to get the remaining pages with get_user_pages */
	start += nr << PAGE_SHIFT;
	pages += nr;
	ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				      write ? FOLL_WRITE : 0);
	/* Have to be a bit careful with return values */
	if (nr > 0)
		ret = (ret < 0) ? nr : ret + nr;
	return ret;
}
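
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * typically pins a user buffer and drops the references with put_page()
 * once it is done with the pages.  "uaddr" and the page count are made up.
 *
 *	struct page *pages[16];
 *	int i, pinned;
 *
 *	pinned = get_user_pages_fast(uaddr, 16, 1, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	... use the pinned pages ...
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);
 */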