// SPDX-License-Identifier: GPL-2.0

/*
 * Transitional page tables for kexec and hibernate
 *
 * This file is derived from: arch/arm64/kernel/hibernate.c
 *
 * Copyright (c) 2020, Microsoft Corporation.
 * Pavel Tatashin <pasha.tatashin@soleen.com>
 *
 */

/*
 * Transitional tables are used while the system transfers from one world to
 * another: e.g. during hibernate restore and kexec reboots. During these
 * phases one cannot rely on the current page tables not being overwritten,
 * because hibernate and kexec can overwrite them during the transition.
 */

#include <asm/trans_pgd.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <linux/suspend.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
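
/*
 * Example: a minimal sketch of how a caller might supply the allocator in
 * struct trans_pgd_info, modeled on the way hibernate allocates safe pages
 * with get_safe_page(). The names example_alloc_page/example_info are
 * hypothetical, not part of this file:
 *
 *	static void *example_alloc_page(void *arg)
 *	{
 *		return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
 *	}
 *
 *	static struct trans_pgd_info example_info = {
 *		.trans_alloc_page	= example_alloc_page,
 *		.trans_alloc_arg	= (void *)GFP_ATOMIC,
 *	};
 */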

static void *trans_alloc(struct trans_pgd_info *info)
{
	return info->trans_alloc_page(info->trans_alloc_arg);
}

static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will remove the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Check that the pfn is valid before marking this entry
		 * present again.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
	}
}

static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
		    pmd_t *src_pmdp, unsigned long start, unsigned long end)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;
	unsigned long addr = start;

	dst_ptep = trans_alloc(info);
	if (!dst_ptep)
		return -ENOMEM;
	pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
	dst_ptep = pte_offset_kernel(dst_pmdp, start);

	src_ptep = pte_offset_kernel(src_pmdp, start);
	do {
		_copy_pte(dst_ptep, src_ptep, addr);
	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

	return 0;
}

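/*
 * A source PMD entry is either a table (descend and copy the PTEs it points
 * to) or a section (block) mapping, which is copied directly with its
 * RDONLY bit cleared.
 */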
static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp,
		    pud_t *src_pudp, unsigned long start, unsigned long end)
{
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = trans_alloc(info);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(NULL, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);

	src_pmdp = pmd_offset(src_pudp, start);
	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			continue;
		if (pmd_table(pmd)) {
			if (copy_pte(info, dst_pmdp, src_pmdp, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmdp,
				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

	return 0;
}

static int copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp,
		    p4d_t *src_p4dp, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = trans_alloc(info);
		if (!dst_pudp)
			return -ENOMEM;
		p4d_populate(NULL, dst_p4dp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_p4dp, start);

	src_pudp = pud_offset(src_p4dp, start);
	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;
		if (pud_table(pud)) {
			if (copy_pmd(info, dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pudp,
				__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

	return 0;
}

static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp,
		    pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;
	unsigned long next;
	unsigned long addr = start;

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(READ_ONCE(*src_p4dp)))
			continue;
		if (copy_pud(info, dst_p4dp, src_p4dp, addr, next))
			return -ENOMEM;
	} while (dst_p4dp++, src_p4dp++, addr = next, addr != end);

	return 0;
}

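/*
 * Walk the live kernel's page tables over [start, end) and clone every
 * populated entry into dst_pgdp, descending one level at a time via the
 * copy_p4d/copy_pud/copy_pmd/copy_pte helpers above.
 */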
static int copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp,
			    unsigned long start, unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(READ_ONCE(*src_pgdp)))
			continue;
		if (copy_p4d(info, dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

	return 0;
}

/*
 * Create trans_pgd and copy the linear map.
 * info:	contains the allocator and its argument
 * dst_pgdp:	new page table that is created, and to which the map is copied.
 * start:	Start of the interval (inclusive).
 * end:		End of the interval (exclusive).
 *
 * Returns 0 on success, and -ENOMEM on failure.
 */
int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
			  unsigned long start, unsigned long end)
{
	int rc;
	pgd_t *trans_pgd = trans_alloc(info);

	if (!trans_pgd) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		return -ENOMEM;
	}

	rc = copy_page_tables(info, trans_pgd, start, end);
	if (!rc)
		*dst_pgdp = trans_pgd;

	return rc;
}
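
/*
 * Example: a minimal sketch of a caller copying the kernel's linear map,
 * the way hibernate does before restore. example_info is the hypothetical
 * allocator description sketched near the top of this file:
 *
 *	pgd_t *tmp_pg_dir;
 *	int rc;
 *
 *	rc = trans_pgd_create_copy(&example_info, &tmp_pg_dir,
 *				   PAGE_OFFSET, PAGE_END);
 *	if (rc)
 *		return rc;
 */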

/*
 * Add a map entry to trans_pgd for a base-size page at PTE level.
 * info:	contains the allocator and its argument
 * trans_pgd:	page table in which the new mapping is added.
 * page:	page to be mapped.
 * dst_addr:	new VA for the page.
 * pgprot:	protection for the page.
 *
 * Returns 0 on success, and -ENOMEM on failure.
 */
int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
		       void *page, unsigned long dst_addr, pgprot_t pgprot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
	if (pgd_none(READ_ONCE(*pgdp))) {
		p4dp = trans_alloc(info);
		if (!p4dp)
			return -ENOMEM;
		pgd_populate(NULL, pgdp, p4dp);
	}

	p4dp = p4d_offset(pgdp, dst_addr);
	if (p4d_none(READ_ONCE(*p4dp))) {
		pudp = trans_alloc(info);
		if (!pudp)
			return -ENOMEM;
		p4d_populate(NULL, p4dp, pudp);
	}

	pudp = pud_offset(p4dp, dst_addr);
	if (pud_none(READ_ONCE(*pudp))) {
		pmdp = trans_alloc(info);
		if (!pmdp)
			return -ENOMEM;
		pud_populate(NULL, pudp, pmdp);
	}

	pmdp = pmd_offset(pudp, dst_addr);
	if (pmd_none(READ_ONCE(*pmdp))) {
		ptep = trans_alloc(info);
		if (!ptep)
			return -ENOMEM;
		pmd_populate_kernel(NULL, pmdp, ptep);
	}

	ptep = pte_offset_kernel(pmdp, dst_addr);
	set_pte(ptep, pfn_pte(virt_to_pfn(page), pgprot));

	return 0;
}
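
/*
 * Example (hypothetical names): map a page of relocation code executable
 * at a chosen destination VA in the transitional tables:
 *
 *	rc = trans_pgd_map_page(&example_info, tmp_pg_dir, reloc_code,
 *				(unsigned long)dst_va, PAGE_KERNEL_EXEC);
 *	if (rc)
 *		return rc;
 */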

/*
 * The page we want to idmap may be outside the range covered by VA_BITS that
 * can be built using the kernel's p?d_populate() helpers. As a one off, for a
 * single page, we build these page tables bottom up and just assume that we
 * will need the maximum T0SZ.
 *
 * Returns 0 on success, and -ENOMEM on failure.
 * On success, trans_ttbr0 contains the page table with the idmapped page, and
 * t0sz is set to the maximum T0SZ for this page.
 */
int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
			 unsigned long *t0sz, void *page)
{
	phys_addr_t dst_addr = virt_to_phys(page);
	unsigned long pfn = __phys_to_pfn(dst_addr);
	int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
	int bits_mapped = PAGE_SHIFT - 4;
	unsigned long level_mask, prev_level_entry, *levels[4];
	int this_level, index, level_lsb, level_msb;

	dst_addr &= PAGE_MASK;
	prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC));

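	/*
	 * Walk from the leaf level (3) upwards, linking each new table into
	 * the one above it. Each level indexes PAGE_SHIFT - 3 bits of VA
	 * (512 eight-byte entries per 4K page); level_msb is an inclusive
	 * bit number, hence bits_mapped = PAGE_SHIFT - 4 above.
	 */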
	for (this_level = 3; this_level >= 0; this_level--) {
		levels[this_level] = trans_alloc(info);
		if (!levels[this_level])
			return -ENOMEM;

		level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
		level_msb = min(level_lsb + bits_mapped, max_msb);
		level_mask = GENMASK_ULL(level_msb, level_lsb);

		index = (dst_addr & level_mask) >> level_lsb;
		*(levels[this_level] + index) = prev_level_entry;

		pfn = virt_to_pfn(levels[this_level]);
		prev_level_entry = pte_val(pfn_pte(pfn,
						   __pgprot(PMD_TYPE_TABLE)));

		if (level_msb == max_msb)
			break;
	}

	*trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
	*t0sz = TCR_T0SZ(max_msb + 1);

	return 0;
}
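
/*
 * Example (hypothetical caller): idmap one page of relocation code, then
 * program TTBR0_EL1 with the returned trans_ttbr0 and TCR_EL1.T0SZ with the
 * returned t0sz before branching to that code:
 *
 *	phys_addr_t trans_ttbr0;
 *	unsigned long t0sz;
 *
 *	rc = trans_pgd_idmap_page(&example_info, &trans_ttbr0, &t0sz,
 *				  reloc_code);
 *	if (rc)
 *		return rc;
 */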