// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 * https://lkml.org/lkml/2010/6/18/4
 * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 * https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/trans_pgd.h>
#include <asm/virt.h>
/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR, since the variable's location can differ
 * between the boot kernel and the hibernated kernel.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
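/*
 * This is true when we booted at EL2 but run the kernel at EL1 (i.e.
 * non-VHE): the EL2 vectors then still point into the hibernated kernel's
 * image and must be re-initialised during resume.
 */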

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;

	u64		sleep_cpu_mpidr;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}

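/*
 * On arm64 the CPU register state is captured later, in
 * swsusp_arch_suspend() via __cpu_suspend_enter(); here we just sanity
 * check that the nonboot CPUs have been offlined.
 */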
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel = _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

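/*
 * trans_pgd page allocator callback: hands out hibernate-safe pages, with
 * the gfp flags smuggled through the opaque void * argument.
 */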
static void *hibernate_page_alloc(void *arg)
{
	return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}

/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps it at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 phys_addr_t *phys_dst_addr)
{
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (__force void *)GFP_ATOMIC,
	};

	void *page = (void *)get_safe_page(GFP_ATOMIC);
	phys_addr_t trans_ttbr0;
	unsigned long t0sz;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
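	/*
	 * Clean the copy to the PoU so the I-side will fetch up-to-date
	 * instructions once we branch to it.
	 */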
	caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
	rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
	if (rc)
		return rc;

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves).
	 *
	 * We change T0SZ as part of installing the idmap. This is undone by
	 * cpu_uninstall_idmap() in __cpu_suspend_exit().
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	__cpu_set_tcr_t0sz(t0sz);
	write_sysreg(trans_ttbr0, ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys(page);

	return 0;
}

#ifdef CONFIG_ARM64_MTE

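/*
 * pfn-indexed store for the tag buffers allocated by save_tags(); the
 * entries are freed again once the tags have been restored (or on error).
 */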
static DEFINE_XARRAY(mte_pages);

static int save_tags(struct page *page, unsigned long pfn)
{
	void *tag_storage, *ret;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
		mte_free_tag_storage(ret);
	}

	return 0;
}

static void swsusp_mte_free_storage(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);

	xa_destroy(&mte_pages);
}

static int swsusp_mte_save_tags(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	int ret = 0;
	int n = 0;

	if (!system_supports_mte())
		return 0;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			if (!page_mte_tagged(page))
				continue;

			ret = save_tags(page, pfn);
			if (ret) {
				swsusp_mte_free_storage();
				goto out;
			}

			n++;
		}
	}
	pr_info("Saved %d MTE pages\n", n);

out:
	return ret;
}

static void swsusp_mte_restore_tags(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	int n = 0;
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		unsigned long pfn = xa_state.xa_index;
		struct page *page = pfn_to_online_page(pfn);

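		/*
		 * Only online, tagged pages had their tags saved, so the
		 * page lookup cannot fail here.
		 */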
		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
		n++;
	}
	xa_unlock(&mte_pages);

	pr_info("Restored %d MTE pages\n", n);

	xa_destroy(&mte_pages);
}

#else /* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
	return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif /* CONFIG_ARM64_MTE */

int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

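	/*
	 * __cpu_suspend_enter() returns non-zero now, on the way into
	 * hibernation, when we go on to snapshot memory with swsusp_save().
	 * It returns zero the second time around, when the restored image
	 * comes back through cpu_resume(), and we run the cleanup below.
	 */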
	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		ret = swsusp_mte_save_tags();
		if (ret)
			return ret;

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
				       (unsigned long)__mmuoff_data_end);
		dcache_clean_inval_poc((unsigned long)__idmap_text_start,
				       (unsigned long)__idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_inval_poc(
				(unsigned long)__hyp_idmap_text_start,
				(unsigned long)__hyp_idmap_text_end);
			dcache_clean_inval_poc((unsigned long)__hyp_text_start,
					       (unsigned long)__hyp_text_end);
		}

		swsusp_mte_restore_tags();

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}

/*
 * Set up, then resume from the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * code; we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (void *)GFP_ATOMIC,
	};

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
				   PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break-before-make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (phys_addr_t *)&hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	dcache_clean_inval_poc((unsigned long)hibernate_exit,
			       (unsigned long)hibernate_exit + exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = (phys_addr_t)hibernate_exit;
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

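	/* hibernate_exit() is __noreturn, so we never get back here. */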
	return 0;
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}