// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
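
/*
 * The knob registered above appears as /proc/sys/vm/allocate_pgste.
 * Writing 1 to it (e.g. "echo 1 > /proc/sys/vm/allocate_pgste") makes new
 * address spaces allocate full 4K page tables with PGSTEs, which KVM needs
 * to run guests; otherwise 2K page table fragments are handed out.
 */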

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* we must change all active ASCEs to avoid the creation of new TLB entries */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		if (current->thread.mm_segment == USER_DS) {
			__ctl_load(S390_lowcore.user_asce, 1, 1);
			/* Mark user-ASCE present in CR1 */
			clear_cpu_flag(CIF_ASCE_PRIMARY);
		}
		if (current->thread.mm_segment == USER_DS_SACF) {
			__ctl_load(S390_lowcore.user_asce, 7, 7);
			/* enable_sacf_uaccess does all or nothing */
			WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
		}
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that ever changes, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}
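
/*
 * Sketch of a typical caller (the real call site lives in the s390 mmap
 * code, not in this file): when a mapping request would exceed the current
 * ASCE limit, more page table levels are requested first, roughly:
 *
 *	if (addr + len > mm->context.asce_limit &&
 *	    addr + len <= TASK_SIZE)
 *		rc = crst_table_upgrade(mm, addr + len);
 */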

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
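
/*
 * How the fragment code below uses page->_refcount: the upper byte
 * (bits 24..31) of _refcount serves as a bit field for the two 2K halves
 * of a 4K page table page.  As derived from the xor masks used below:
 *
 *	bit 24: lower 2K half is allocated
 *	bit 25: upper 2K half is allocated (both set for a 4K pgste table)
 *	bit 28: lower 2K half is pending a (RCU delayed) free
 *	bit 29: upper 2K half is pending a (RCU delayed) free
 *
 * atomic_xor_bits() toggles these bits atomically and returns the new
 * value, so callers can tell whether the whole page just became free.
 */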

#ifdef CONFIG_PGSTE

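/*
 * A pgste page table occupies a full 4K page: the first 2K hold the 256
 * PTEs (initialized to _PAGE_INVALID below), the second 2K hold the
 * corresponding page status table entries (PGSTEs), zero-initialized.
 */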
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
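/*
 * For orientation (paraphrased from arch/s390/include/asm/pgalloc.h of
 * this era; the exact definitions may differ): the generic pte hooks are
 * thin wrappers around the routines below, roughly
 *
 *	#define pte_alloc_one(mm)  ((pte_t *)page_table_alloc(mm))
 *	#define pte_free(mm, pte)  page_table_free(mm, (unsigned long *)pte)
 */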
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
		mask >>= 24;
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_pte_page_dtor(page);
	__free_page(page);
}
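
/*
 * Note on the xor sequence above: the first xor (0x11U << ...) clears the
 * "allocated" bit and sets the "pending free" bit for this 2K half in one
 * atomic step, mirroring the RCU variant below.  The second xor clears the
 * pending bit again; only if the whole upper byte of _refcount is then
 * zero may the 4K page itself be released.
 */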

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
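
/*
 * The two low bits of the table pointer handed to tlb_remove_table() above
 * encode what __tlb_remove_table() must do once the grace period is over:
 *
 *	0 - an order-2 CRST table (pmd, pud, or p4d)
 *	1 - the lower 2K half of a 4K page table
 *	2 - the upper 2K half of a 4K page table
 *	3 - a full 4K page table with PGSTEs
 */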

void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		fallthrough;
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_pte_page_dtor(page);
		__free_page(page);
		break;
	}
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
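
/*
 * Each generated helper advances addr to the next SIZE boundary but never
 * past end, in the spirit of the generic pmd_addr_end().  For example,
 * BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE) expands to:
 *
 *	static inline unsigned long base_segment_addr_end(unsigned long addr,
 *							  unsigned long end)
 *	{
 *		unsigned long next = (addr + _SEGMENT_SIZE) &
 *				     ~(_SEGMENT_SIZE - 1);
 *
 *		return (next - 1) < (end - 1) ? next : end;
 *	}
 *
 * The "- 1" in the comparison keeps the arithmetic correct when end wraps
 * to 0 at the very top of the address space.
 */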

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
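
/*
 * base_lra() above wraps the LRA ("load real address") instruction: it
 * translates a virtual address through the currently attached DAT tables
 * and returns the real address.  The walkers below use it to fill the base
 * page tables with the real addresses backing the kernel mapping.
 */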

static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}
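
/*
 * All four walkers above share one pattern: with alloc != 0 they build the
 * next lower table level on demand, with alloc == 0 they recurse first and
 * then free the lower level table.  base_asce_free() and base_asce_alloc()
 * below thus reuse the same code for teardown and construction.
 */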

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. It differs from the
 * regular kernel asce in that it does not make use of any enhanced DAT
 * features like large pages. This is required for some I/O functions that
 * pass an asce, e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
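
/*
 * Usage sketch (hypothetical caller, not part of this file): code that
 * must hand an asce to an I/O interface could do
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long) buffer, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce along with the service call / I/O request ...
 *	base_asce_free(asce);
 *
 * keeping in mind the note above: the asce may never be attached to a cpu.
 */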