// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
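
/*
 * The sysctl is visible as /proc/sys/vm/allocate_pgste. KVM user space
 * on s390 (e.g. qemu) typically enables it before creating guests, for
 * instance via a sysctl.d drop-in or, as a sketch:
 *
 *	echo 1 > /proc/sys/vm/allocate_pgste
 */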

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	unsigned long *table;

	if (!ptdesc)
		return NULL;
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		get_lowcore()->user_asce.val = mm->context.asce;
		local_ctl_load(7, &get_lowcore()->user_asce);
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that should ever change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}
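
/*
 * Sketch of how the upgrade is reached (simplified from the s390 mmap
 * path; details may differ between kernel versions):
 *
 *	if (addr + len > mm->context.asce_limit &&
 *	    addr + len <= TASK_SIZE)
 *		rc = crst_table_upgrade(mm, addr + len);
 */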

#ifdef CONFIG_PGSTE

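/*
 * Layout of a 4K page table page (a sketch, inferred from the
 * initialization below): the lower half holds the 256 PTEs
 * (PTRS_PER_PTE), the upper half holds the corresponding page status
 * table entries (pgstes) that KVM needs for guests.
 */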
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
	pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	unsigned long *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	/* pt_list is used by gmap only */
	INIT_LIST_HEAD(&ptdesc->pt_list);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}

static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
{
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	pagetable_pte_dtor_free(ptdesc);
}

void __tlb_remove_table(void *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);
	struct page *page = ptdesc_page(ptdesc);

	if (compound_order(page) == CRST_ALLOC_ORDER) {
		/* pmd, pud, or p4d */
		pagetable_free(ptdesc);
		return;
	}
	pagetable_pte_dtor_free(ptdesc);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_pte_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */
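
/*
 * Hierarchy walked by the base_*_walk() functions below, from highest
 * to lowest level (s390 DAT table vs. Linux page table nomenclature):
 *
 *	region first table  (R1) - pgd
 *	region second table (R2) - p4d
 *	region third table  (R3) - pud
 *	segment table            - pmd
 *	page table               - pte
 *
 * With alloc != 0 a walk allocates missing lower level tables and fills
 * in the table entries; with alloc == 0 it only descends in order to
 * free the lower level tables again.
 */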

static struct kmem_cache *base_pgt_cache;

static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}
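
/*
 * Example: base_page_addr_end(addr, end) rounds addr up to the next
 * _PAGE_SIZE boundary, clamped to end. Comparing (next - 1) with
 * (end - 1) keeps the clamping correct even when next or end wraps
 * around to 0 at the very top of the address space.
 */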

BASE_ADDR_END_FUNC(page, _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

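/*
 * LRA (LOAD REAL ADDRESS) translates the given virtual address through
 * the current DAT tables and returns the resulting real address, which
 * is what the base page tables must contain in their entries.
 */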
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features
 * such as large pages. This is required for some I/O functions that pass
 * an asce, such as some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. TLB entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
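
/*
 * Usage sketch (hypothetical caller, not taken from this file): map a
 * buffer for an I/O function that requires a plain asce.
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long)buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	...pass asce to the I/O or service call function...
 *	base_asce_free(asce);
 */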