/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>

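/*
 * Region and segment tables (CRSTs) have _CRST_ENTRIES (2048) eight-byte
 * entries, i.e. 16KB, so they are allocated as order-2 (four page) blocks.
 */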
#define CRST_ALLOC_ORDER 2

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;

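/* Fill all entries of a freshly allocated region/segment table with @entry. */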
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	memset64((u64 *)crst, entry, _CRST_ENTRIES);
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);

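/*
 * If a mapping of @len bytes at @addr would cross the current ASCE limit
 * but still fits below TASK_SIZE, upgrade the address space to additional
 * region-table levels. Returns @addr on success or the (negative) error
 * code from crst_table_upgrade() on failure.
 */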
static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
					     unsigned long len)
{
	int rc;

	if (addr + len > mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}
	return addr;
}

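/*
 * The p4d, pud and pmd levels are all backed by CRST tables. Each
 * *_alloc_one() below allocates a CRST table and presets it with the empty
 * entry value for that level; each *_free() releases it again unless the
 * level is folded for this mm. pmd tables additionally go through the
 * pgtable page constructor/destructor for the split page-table lock.
 */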
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!mm_p4d_folded(mm))
		crst_table_free(mm, (unsigned long *) p4d);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (!mm_pud_folded(mm))
		crst_table_free(mm, (unsigned long *) pud);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (mm_pmd_folded(mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

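/*
 * The *_populate() helpers link a newly allocated lower-level table into
 * its parent entry: the parent entry receives the physical address of the
 * child table together with the region/segment type bits for that level.
 */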
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return (pgd_t *) crst_table_alloc(mm);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

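/*
 * A page table is sizeof(pte_t) * PTRS_PER_PTE bytes; masking the segment
 * entry with the negated table size recovers the page-table origin.
 */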
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

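/* Page table setup routines for the kernel address space (vmem). */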
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);

#endif /* _S390_PGALLOC_H */