/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994 Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define CRST_ALLOC_ORDER 2

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;

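/*
 * Fill n bytes at s with the entry value val: store val into the first
 * doubleword of each 256-byte block and let the overlapping MVC
 * propagate it through the rest of the block.
 */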
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	struct addrtype { char _[256]; };
	int i;

	for (i = 0; i < n; i += 256) {
		*s = val;
		asm volatile(
			"mvc	8(248,%[s]),0(%[s])\n"
			: "+m" (*(struct addrtype *) s)
			: [s] "a" (s));
		s += 256 / sizeof(long);
	}
}

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, _CRST_TABLE_SIZE);
}

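/*
 * Return the empty entry type for the topmost translation table of an
 * address space, depending on how many region levels it currently uses.
 */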
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= _REGION3_SIZE)
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= _REGION2_SIZE)
		return _REGION3_ENTRY_EMPTY;
	if (mm->context.asce_limit <= _REGION1_SIZE)
		return _REGION2_ENTRY_EMPTY;
	return _REGION1_ENTRY_EMPTY;
}

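/*
 * Add or remove region table levels when the address space limit of an
 * existing mm changes.
 */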
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
void crst_table_downgrade(struct mm_struct *);

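/*
 * The helpers below manage the 2048-entry region and segment tables
 * (allocation order CRST_ALLOC_ORDER) used for all translation levels
 * above the pte level.
 */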
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!mm_p4d_folded(mm))
		crst_table_free(mm, (unsigned long *) p4d);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (!mm_pud_folded(mm))
		crst_table_free(mm, (unsigned long *) pud);
}

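/*
 * A pmd table may anchor pte tables, so its page also goes through
 * pgtable_pmd_page_ctor() to set up the (split) pmd lock.
 */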
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (mm_pmd_folded(mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

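/*
 * The populate helpers link a lower-level table into the next higher
 * level: the entry is the physical table origin combined with the
 * region/segment entry type.
 */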
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	if (mm->context.asce_limit == _REGION3_SIZE) {
		/* Forking a compat process with 2 page table levels */
		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
			crst_table_free(mm, table);
			return NULL;
		}
	}
	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == _REGION3_SIZE)
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

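/* Mask the status bits off a segment entry to get the pte table origin. */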
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

extern void rcu_table_freelist_finish(void);

void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

#endif /* _S390_PGALLOC_H */