// SPDX-License-Identifier: GPL-2.0
/*
 * Page table allocation functions
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

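/*
 * If set (sysctl vm.allocate_pgste, i.e. /proc/sys/vm/allocate_pgste),
 * new address spaces get full 4K page tables with PGSTEs instead of the
 * 2K fragments handled below. KVM user space (e.g. qemu) typically
 * enables this, since PGSTEs are needed to run guests.
 */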
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

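/*
 * CRST tables (region and segment tables) have 2048 entries of 8 bytes
 * each, i.e. 16K, hence the order-2 page allocation.
 */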
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);

	if (!page)
		return NULL;
	arch_set_page_dat(page, CRST_ALLOC_ORDER);
	return (unsigned long *) page_to_virt(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, CRST_ALLOC_ORDER);
}

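/*
 * Runs on each CPU: CPUs that currently run with the upgraded mm reload
 * the new user ASCE into control register 7, so that no new TLB entries
 * are created from the old top-level table, then flush their local TLB.
 */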
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
	__tlb_flush_local();
}

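/*
 * Extend the address space to at least "end" by adding page table levels
 * on top: a region-third table (3 levels) covers 4TB (_REGION2_SIZE),
 * a region-second table (4 levels) covers 8PB (_REGION1_SIZE), and a
 * region-first table (5 levels) covers the full 64-bit address space
 * (TASK_SIZE_MAX).
 */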
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that would ever change, the below check will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

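/*
 * Atomically toggle bits in *v and return the new value. Used on
 * page->_refcount, whose upper byte tracks the state of the two 2K
 * page table fragments in a 4K page (see page_table_alloc() below).
 */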
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

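/*
 * A page table with PGSTEs occupies a full 4K page: 256 PTEs in the
 * lower half, followed by 256 page status table entries (PGSTEs) in
 * the upper half. PGSTEs carry per-page guest state and are needed
 * by KVM.
 */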
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_virt(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * Page table entry allocation/free routines.
 *
 * A 2K page table fits twice into a 4K page. The upper byte of
 * page->_refcount encodes the state of the two fragments: after the
 * >> 24 shift, bits 0x01/0x02 mark the lower/upper 2K half as
 * allocated, and bits 0x10/0x20 mark a half as pending a deferred
 * (RCU) free. Pages with at least one free half are kept on
 * mm->context.pgtable_list.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			/* a fragment pending free counts as busy */
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_virt(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_virt(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

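/*
 * The first XOR below clears the "allocated" bit and sets the "pending"
 * bit for the fragment in one atomic operation; the pending bit keeps a
 * concurrent free of the other half from releasing the page too early.
 * After the list handling, the second XOR clears the pending bit again;
 * the page is freed only once no fragment is allocated or pending.
 */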
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = virt_to_page(table);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
		mask >>= 24;
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_pte_page_dtor(page);
	__free_page(page);
}

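/*
 * Deferred free for page tables that may still be reachable by a
 * concurrent lockless page table walk: mark the fragment pending, tag
 * the table pointer in its low bits (1 = lower 2K, 2 = upper 2K,
 * 3 = 4K table with PGSTEs) and hand it to tlb_remove_table(). The
 * actual release happens in __tlb_remove_table() after the grace period.
 */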
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = virt_to_page(table);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) ((unsigned long)table | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) ((unsigned long) table | (1U << bit));
	tlb_remove_table(tlb, table);
}

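/*
 * Called after the TLB flush / grace period; the low two bits of the
 * tagged pointer encode what to free: 0 = a 16K CRST table, 1 or 2 =
 * a 2K page table fragment, 3 = a 4K page table with PGSTEs.
 */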
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = virt_to_page(table);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, CRST_ALLOC_ORDER);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		fallthrough;
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_pte_page_dtor(page);
		__free_page(page);
		break;
	}
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

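/*
 * Analogous to pgd_addr_end() and friends: return the end of the address
 * range covered by the current table entry, clamped to "end". Comparing
 * (next - 1) < (end - 1) keeps the arithmetic correct when "next" wraps
 * to zero at the top of the address space.
 */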
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page, _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

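/*
 * LRA (load real address) translates the given virtual address via the
 * current DAT setup and returns the backing real address. The base page
 * tables are filled with these values, so the generated asce maps the
 * kernel virtual range to the same real storage as the normal kernel
 * mapping.
 */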
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

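/*
 * On the free pass (alloc == 0) there is nothing to do at the lowest
 * level: page tables themselves are freed by base_segment_walk() one
 * level up.
 */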
static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

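/*
 * The segment and region walkers below share one pattern: on the alloc
 * pass, lower level tables are allocated on demand before recursing; on
 * the free pass, the walk recurses first and then frees the lower level
 * table. cond_resched() keeps long walks preemptible.
 */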
static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. Unlike the normal
 * kernel asce, the returned asce does not make use of any enhanced DAT
 * features like e.g. large pages. This is required for some I/O functions
 * that pass an asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}