// SPDX-License-Identifier: GPL-2.0-only
/*
 * sleep.c - ACPI sleep support.
 *
 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2000-2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#define pr_fmt(fmt) "ACPI: PM: " fmt

#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>

#include "internal.h"
#include "sleep.h"

/*
 * Some HW-full platforms do not have _S5, so they may need to fall back to
 * EFI power off for shutdown.
 */
bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];

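/*
 * Evaluate the _TTS (Transition To State) control method, if present, to let
 * the platform firmware know which sleep state the system is transitioning to.
 */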
static void acpi_sleep_tts_switch(u32 acpi_state)
{
        acpi_status status;

        status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                /*
                 * The OS can't evaluate the _TTS object correctly. Warn about
                 * it, but don't let that break anything.
                 */
                pr_notice("Failure in evaluating _TTS object\n");
        }
}

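/*
 * Reboot notifier: evaluate _TTS with the S5 argument so that the transition
 * to soft-off is also reported to the platform firmware on reboot or shutdown.
 */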
static int tts_notify_reboot(struct notifier_block *this,
                        unsigned long code, void *x)
{
        acpi_sleep_tts_switch(ACPI_STATE_S5);
        return NOTIFY_DONE;
}

static struct notifier_block tts_notifier = {
        .notifier_call = tts_notify_reboot,
        .next = NULL,
        .priority = 0,
};

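/*
 * Prepare the platform for entering the given sleep state: set the firmware
 * waking vector for S3, flush CPU caches, enable wakeup devices and run the
 * firmware's sleep preparation methods (such as _PTS) via
 * acpi_enter_sleep_state_prep().
 */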
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
        unsigned long acpi_wakeup_address;

        /* Do we have a wakeup address for S3? */
        if (acpi_state == ACPI_STATE_S3) {
                acpi_wakeup_address = acpi_get_wakeup_address();
                if (!acpi_wakeup_address)
                        return -EFAULT;
                acpi_set_waking_vector(acpi_wakeup_address);
        }
        ACPI_FLUSH_CPU_CACHE();
#endif
        pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
        acpi_enable_wakeup_devices(acpi_state);
        acpi_enter_sleep_state_prep(acpi_state);
        return 0;
}

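/*
 * A sleep state is supported if the firmware provides the corresponding _Sx
 * data and, on reduced-hardware platforms, the FADT also declares the sleep
 * control and sleep status registers.
 */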
bool acpi_sleep_state_supported(u8 sleep_state)
{
        acpi_status status;
        u8 type_a, type_b;

        status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
        return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
                || (acpi_gbl_FADT.sleep_control.address
                        && acpi_gbl_FADT.sleep_status.address));
}

#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

u32 acpi_target_system_state(void)
{
        return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);

static bool pwr_btn_event_pending;

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * and to restore them during the subsequent resume. Windows does that also for
 * suspend to RAM. However, it is known that this mechanism does not work on
 * all machines, so we allow the user to disable it with the help of the
 * 'acpi_sleep=nonvs' kernel command line option.
 */
static bool nvs_nosave;

void __init acpi_nvs_nosave(void)
{
        nvs_nosave = true;
}

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * but says nothing about saving NVS during S3. Not all versions of Windows
 * save NVS on S3 suspend either, and it is clear that not all systems need
 * NVS to be saved at S3 time. To improve suspend/resume time, allow the
 * user to disable saving NVS on S3 if their system does not require it, but
 * continue to save/restore NVS for S4 as specified.
 */
static bool nvs_nosave_s3;

void __init acpi_nvs_nosave_s3(void)
{
        nvs_nosave_s3 = true;
}

static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
        nvs_nosave_s3 = false;
        return 0;
}

/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
 */
static bool old_suspend_ordering;

void __init acpi_old_suspend_ordering(void)
{
        old_suspend_ordering = true;
}

static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
        acpi_old_suspend_ordering();
        return 0;
}

static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
        acpi_nvs_nosave();
        return 0;
}

bool acpi_sleep_default_s3;

static int __init init_default_s3(const struct dmi_system_id *d)
{
        acpi_sleep_default_s3 = true;
        return 0;
}

static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
        {
                .callback = init_old_suspend_ordering,
                .ident = "Abit KN9 (nForce4 variant)",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
                        DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "HP xw4600 Workstation",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "Panasonic CF51-2L",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR,
                                  "Matsushita Electric Industrial Co.,Ltd."),
                        DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-FW41E_H",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-FW21E",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-FW21M",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VPCEB17FX",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-SR11M",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Everex StepNote Series",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VPCEB1Z1E",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-NW130D",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VPCCW29FX",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Averatec AV1020-ED2",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "Asus A8N-SLI DELUXE",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "Asus A8N-SLI Premium",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-SR26GN_P",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VPCEB1S1E",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-FW520F",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Asus K54C",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Asus K54HR",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
                },
        },
        {
                .callback = init_nvs_save_s3,
                .ident = "Asus 1025C",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
                },
        },
        /*
         * https://bugzilla.kernel.org/show_bug.cgi?id=189431
         * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
         * saving during S3.
         */
        {
                .callback = init_nvs_save_s3,
                .ident = "Lenovo G50-45",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
                },
        },
        {
                .callback = init_nvs_save_s3,
                .ident = "Lenovo G40-45",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
                },
        },
        /*
         * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
         * the Low Power S0 Idle firmware interface (see
         * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
         */
        {
                .callback = init_default_s3,
                .ident = "ThinkPad X1 Tablet(2016)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
                },
        },
        /*
         * ASUS B1400CEAE hangs on resume from suspend (see
         * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
         */
        {
                .callback = init_default_s3,
                .ident = "ASUS B1400CEAE",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
                },
        },
        {},
};

static bool ignore_blacklist;

void __init acpi_sleep_no_blacklist(void)
{
        ignore_blacklist = true;
}

static void __init acpi_sleep_dmi_check(void)
{
        if (ignore_blacklist)
                return;

        if (dmi_get_bios_year() >= 2012)
                acpi_nvs_nosave_s3();

        dmi_check_system(acpisleep_dmi_table);
}

/**
 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 */
static int acpi_pm_freeze(void)
{
        acpi_disable_all_gpes();
        acpi_os_wait_events_complete();
        acpi_ec_block_transactions();
        return 0;
}

/**
 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 */
static int acpi_pm_pre_suspend(void)
{
        acpi_pm_freeze();
        return suspend_nvs_save();
}

/**
 * __acpi_pm_prepare - Prepare the platform to enter the target state.
 *
 * If necessary, set the firmware waking vector and do arch-specific
 * nastiness to get the wakeup code to the waking vector.
 */
static int __acpi_pm_prepare(void)
{
        int error = acpi_sleep_prepare(acpi_target_sleep_state);

        if (error)
                acpi_target_sleep_state = ACPI_STATE_S0;

        return error;
}

/**
 * acpi_pm_prepare - Prepare the platform to enter the target sleep
 *	state and disable the GPEs.
 */
static int acpi_pm_prepare(void)
{
        int error = __acpi_pm_prepare();

        if (!error)
                error = acpi_pm_pre_suspend();

        return error;
}

/**
 * acpi_pm_finish - Instruct the platform to leave a sleep state.
 *
 * This is called after we wake back up (or if entering the sleep state
 * failed).
 */
static void acpi_pm_finish(void)
{
        struct acpi_device *pwr_btn_adev;
        u32 acpi_state = acpi_target_sleep_state;

        acpi_ec_unblock_transactions();
        suspend_nvs_free();

        if (acpi_state == ACPI_STATE_S0)
                return;

        pr_info("Waking up from system sleep state S%d\n", acpi_state);
        acpi_disable_wakeup_devices(acpi_state);
        acpi_leave_sleep_state(acpi_state);

        /* Reset the firmware waking vector. */
        acpi_set_waking_vector(0);

        acpi_target_sleep_state = ACPI_STATE_S0;

        acpi_resume_power_resources();

        /*
         * If we were woken with the fixed power button, provide a small
         * hint to userspace in the form of a wakeup event on the fixed power
         * button device (if it can be found).
         *
         * We delay the event generation until now, as the PM layer requires
         * timekeeping to be running before we generate events.
         */
        if (!pwr_btn_event_pending)
                return;

        pwr_btn_event_pending = false;
        pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
                                                    NULL, -1);
        if (pwr_btn_adev) {
                pm_wakeup_event(&pwr_btn_adev->dev, 0);
                acpi_dev_put(pwr_btn_adev);
        }
}

/**
 * acpi_pm_start - Start system PM transition.
 */
static void acpi_pm_start(u32 acpi_state)
{
        acpi_target_sleep_state = acpi_state;
        acpi_sleep_tts_switch(acpi_target_sleep_state);
        acpi_scan_lock_acquire();
}

/**
 * acpi_pm_end - Finish up system PM transition.
 */
static void acpi_pm_end(void)
{
        acpi_turn_off_unused_power_resources();
        acpi_scan_lock_release();
        /*
         * This is necessary in case acpi_pm_finish() is not called during a
         * failing transition to a sleep state.
         */
        acpi_target_sleep_state = ACPI_STATE_S0;
        acpi_sleep_tts_switch(acpi_target_sleep_state);
}
#else /* !CONFIG_ACPI_SLEEP */
#define sleep_no_lps0 (1)
#define acpi_target_sleep_state ACPI_STATE_S0
#define acpi_sleep_default_s3 (1)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */

#ifdef CONFIG_SUSPEND
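/* Map suspend_state_t values used by the PM core to ACPI sleep states. */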
static u32 acpi_suspend_states[] = {
        [PM_SUSPEND_ON] = ACPI_STATE_S0,
        [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
        [PM_SUSPEND_MEM] = ACPI_STATE_S3,
        [PM_SUSPEND_MAX] = ACPI_STATE_S5
};

/**
 * acpi_suspend_begin - Set the target system sleep state to the state
 *	associated with given @pm_state, if supported.
 */
static int acpi_suspend_begin(suspend_state_t pm_state)
{
        u32 acpi_state = acpi_suspend_states[pm_state];
        int error;

        error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
        if (error)
                return error;

        if (!sleep_states[acpi_state]) {
                pr_err("ACPI does not support sleep state S%u\n", acpi_state);
                return -ENOSYS;
        }
        if (acpi_state > ACPI_STATE_S1)
                pm_set_suspend_via_firmware();

        acpi_pm_start(acpi_state);
        return 0;
}

/**
 * acpi_suspend_enter - Actually enter a sleep state.
 * @pm_state: ignored
 *
 * Flush caches and go to sleep. For STR we have to call arch-specific
 * assembly, which in turn calls acpi_enter_sleep_state().
 * It's unfortunate, but it works. Please fix if you're feeling frisky.
 */
static int acpi_suspend_enter(suspend_state_t pm_state)
{
        acpi_status status = AE_OK;
        u32 acpi_state = acpi_target_sleep_state;
        int error;

        ACPI_FLUSH_CPU_CACHE();

        trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
        switch (acpi_state) {
        case ACPI_STATE_S1:
                barrier();
                status = acpi_enter_sleep_state(acpi_state);
                break;

        case ACPI_STATE_S3:
                if (!acpi_suspend_lowlevel)
                        return -ENOSYS;
                error = acpi_suspend_lowlevel();
                if (error)
                        return error;
                pr_info("Low-level resume complete\n");
                pm_set_resume_via_firmware();
                break;
        }
        trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);

        /* This violates the spec but is required for bug compatibility. */
        acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);

        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(acpi_state);

        /*
         * The ACPI 3.0 spec (p. 62) says that it's the responsibility of the
         * OSPM to clear the status bit [implying that the POWER_BUTTON event
         * should not reach userspace].
         *
         * However, we do generate a small hint for userspace in the form of
         * a wakeup event. We flag this condition for now and generate the
         * event later, as we're currently too early in resume to be able to
         * generate wakeup events.
         */
        if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
                acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;

                acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);

                if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
                        acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
                        /* Flag for later */
                        pwr_btn_event_pending = true;
                }
        }

        /*
         * Disable all GPEs and clear their status bits before interrupts are
         * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
         * prevent them from producing spurious interrupts.
         *
         * acpi_leave_sleep_state() will reenable specific GPEs later.
         *
         * Because this code runs on one CPU with disabled interrupts (all of
         * the other CPUs are offline at this time), it need not acquire any
         * sleeping locks which may trigger an implicit preemption point even
         * if there is no contention, so avoid doing that by using a low-level
         * library routine here.
         */
        acpi_hw_disable_all_gpes();
        /* Allow EC transactions to happen. */
        acpi_ec_unblock_transactions();

        suspend_nvs_restore();

        return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

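/*
 * Report whether the platform supports the ACPI sleep state that backs the
 * given suspend_state_t value (S1 for standby, S3 for suspend-to-RAM).
 */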
static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
        u32 acpi_state;

        switch (pm_state) {
        case PM_SUSPEND_ON:
        case PM_SUSPEND_STANDBY:
        case PM_SUSPEND_MEM:
                acpi_state = acpi_suspend_states[pm_state];

                return sleep_states[acpi_state];
        default:
                return 0;
        }
}

static const struct platform_suspend_ops acpi_suspend_ops = {
        .valid = acpi_suspend_state_valid,
        .begin = acpi_suspend_begin,
        .prepare_late = acpi_pm_prepare,
        .enter = acpi_suspend_enter,
        .wake = acpi_pm_finish,
        .end = acpi_pm_end,
};

/**
 * acpi_suspend_begin_old - Set the target system sleep state to the
 *	state associated with given @pm_state, if supported, and
 *	execute the _PTS control method. This function is used if the
 *	pre-ACPI 2.0 suspend ordering has been requested.
 */
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
        int error = acpi_suspend_begin(pm_state);

        if (!error)
                error = __acpi_pm_prepare();

        return error;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_suspend_ops acpi_suspend_ops_old = {
        .valid = acpi_suspend_state_valid,
        .begin = acpi_suspend_begin_old,
        .prepare_late = acpi_pm_pre_suspend,
        .enter = acpi_suspend_enter,
        .wake = acpi_pm_finish,
        .end = acpi_pm_end,
        .recover = acpi_pm_finish,
};

static bool s2idle_wakeup;

int acpi_s2idle_begin(void)
{
        acpi_scan_lock_acquire();
        return 0;
}

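/*
 * Before suspend-to-idle: arm the SCI for system wakeup, add the EC GPE to
 * the wake mask, enable wakeup devices and switch the GPEs to their
 * wakeup-only configuration.
 */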
int acpi_s2idle_prepare(void)
{
        if (acpi_sci_irq_valid()) {
                enable_irq_wake(acpi_sci_irq);
                acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
        }

        acpi_enable_wakeup_devices(ACPI_STATE_S0);

        /* Change the configuration of GPEs to avoid spurious wakeup. */
        acpi_enable_all_wakeup_gpes();
        acpi_os_wait_events_complete();

        s2idle_wakeup = true;
        return 0;
}

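/*
 * Decide whether the system was woken up by a genuine wakeup event. Spurious
 * SCIs (for example, EC events that do not translate into a wakeup) are
 * drained in the loop below and the SCI is rearmed, so that suspend-to-idle
 * can go back to sleep if nothing relevant has happened.
 */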
bool acpi_s2idle_wake(void)
{
        if (!acpi_sci_irq_valid())
                return pm_wakeup_pending();

        while (pm_wakeup_pending()) {
                /*
                 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
                 * SCI has not triggered while suspended, so bail out (the
                 * wakeup is pending anyway and the SCI is not the source of
                 * it).
                 */
                if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
                        pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
                        return true;
                }

                /*
                 * If the status bit of any enabled fixed event is set, the
                 * wakeup is regarded as valid.
                 */
                if (acpi_any_fixed_event_status_set()) {
                        pm_pr_dbg("ACPI fixed event wakeup\n");
                        return true;
                }

                /* Check wakeups from drivers sharing the SCI. */
                if (acpi_check_wakeup_handlers()) {
                        pm_pr_dbg("ACPI custom handler wakeup\n");
                        return true;
                }

                /* Check non-EC GPE wakeups and dispatch the EC GPE. */
                if (acpi_ec_dispatch_gpe()) {
                        pm_pr_dbg("ACPI non-EC GPE wakeup\n");
                        return true;
                }

                /*
                 * Cancel the SCI wakeup and process all pending events in case
                 * there are any wakeup ones in there.
                 *
                 * Note that if any non-EC GPEs are active at this point, the
                 * SCI will retrigger after the rearming below, so no events
                 * should be missed by canceling the wakeup here.
                 */
                pm_system_cancel_wakeup();
                acpi_os_wait_events_complete();

                /*
                 * The SCI is in the "suspended" state now and it cannot produce
                 * new wakeup events till the rearming below, so if any of them
                 * are pending here, they must be resulting from the processing
                 * of EC events above or coming from somewhere else.
                 */
                if (pm_wakeup_pending()) {
                        pm_pr_dbg("Wakeup after ACPI Notify sync\n");
                        return true;
                }

                pm_wakeup_clear(acpi_sci_irq);
                rearm_wake_irq(acpi_sci_irq);
        }

        return false;
}

void acpi_s2idle_restore(void)
{
        /*
         * Drain pending events before restoring the working-state configuration
         * of GPEs.
         */
        acpi_os_wait_events_complete(); /* synchronize GPE processing */
        acpi_ec_flush_work(); /* flush the EC driver's workqueues */
        acpi_os_wait_events_complete(); /* synchronize Notify handling */

        s2idle_wakeup = false;

        acpi_enable_all_runtime_gpes();

        acpi_disable_wakeup_devices(ACPI_STATE_S0);

        if (acpi_sci_irq_valid()) {
                acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
                disable_irq_wake(acpi_sci_irq);
        }
}

void acpi_s2idle_end(void)
{
        acpi_scan_lock_release();
}

static const struct platform_s2idle_ops acpi_s2idle_ops = {
        .begin = acpi_s2idle_begin,
        .prepare = acpi_s2idle_prepare,
        .wake = acpi_s2idle_wake,
        .restore = acpi_s2idle_restore,
        .end = acpi_s2idle_end,
};

void __weak acpi_s2idle_setup(void)
{
        s2idle_set_ops(&acpi_s2idle_ops);
}

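/*
 * Record which of S1-S3 the platform supports, register the suspend
 * operations (honoring the pre-ACPI 2.0 ordering if it was requested) and
 * set up suspend-to-idle.
 */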
static void acpi_sleep_suspend_setup(void)
{
        int i;

        for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
                if (acpi_sleep_state_supported(i))
                        sleep_states[i] = 1;

        suspend_set_ops(old_suspend_ordering ?
                        &acpi_suspend_ops_old : &acpi_suspend_ops);

        acpi_s2idle_setup();
}

#else /* !CONFIG_SUSPEND */
#define s2idle_wakeup (false)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */

bool acpi_s2idle_wakeup(void)
{
        return s2idle_wakeup;
}

#ifdef CONFIG_PM_SLEEP
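/*
 * The BM_RLD bit of the PM1 control register may not be preserved across
 * system sleep, so save it via a syscore suspend callback and write it back
 * on resume if its value has changed.
 */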
static u32 saved_bm_rld;

static int acpi_save_bm_rld(void)
{
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
        return 0;
}

static void acpi_restore_bm_rld(void)
{
        u32 resumed_bm_rld = 0;

        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
        if (resumed_bm_rld == saved_bm_rld)
                return;

        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_sleep_syscore_ops = {
        .suspend = acpi_save_bm_rld,
        .resume = acpi_restore_bm_rld,
};

static void acpi_sleep_syscore_init(void)
{
        register_syscore_ops(&acpi_sleep_syscore_ops);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
static bool nosigcheck;

void __init acpi_no_s4_hw_signature(void)
{
        nosigcheck = true;
}

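/*
 * Allocate an NVS save area (unless 'acpi_sleep=nonvs' was used), note that
 * the platform firmware will be used to put the system into S4 when actually
 * hibernating, and set S4 as the target sleep state.
 */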
static int acpi_hibernation_begin(pm_message_t stage)
{
        if (!nvs_nosave) {
                int error = suspend_nvs_alloc();

                if (error)
                        return error;
        }

        if (stage.event == PM_EVENT_HIBERNATE)
                pm_set_suspend_via_firmware();

        acpi_pm_start(ACPI_STATE_S4);
        return 0;
}

static int acpi_hibernation_enter(void)
{
        acpi_status status = AE_OK;

        ACPI_FLUSH_CPU_CACHE();

        /* This shouldn't return. If it returns, we have a problem */
        status = acpi_enter_sleep_state(ACPI_STATE_S4);
        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);

        return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static void acpi_hibernation_leave(void)
{
        pm_set_resume_via_firmware();
        /*
         * If ACPI is not enabled by the BIOS and the boot kernel, we need to
         * enable it here.
         */
        acpi_enable();
        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);
        /* Check the hardware signature */
        if (facs && s4_hardware_signature != facs->hardware_signature)
                pr_crit("Hardware changed while hibernated, success doubtful!\n");
        /* Restore the NVS memory area */
        suspend_nvs_restore();
        /* Allow EC transactions to happen. */
        acpi_ec_unblock_transactions();
}

static void acpi_pm_thaw(void)
{
        acpi_ec_unblock_transactions();
        acpi_enable_all_runtime_gpes();
}

static const struct platform_hibernation_ops acpi_hibernation_ops = {
        .begin = acpi_hibernation_begin,
        .end = acpi_pm_end,
        .pre_snapshot = acpi_pm_prepare,
        .finish = acpi_pm_finish,
        .prepare = acpi_pm_prepare,
        .enter = acpi_hibernation_enter,
        .leave = acpi_hibernation_leave,
        .pre_restore = acpi_pm_freeze,
        .restore_cleanup = acpi_pm_thaw,
};

/**
 * acpi_hibernation_begin_old - Set the target system sleep state to
 *	ACPI_STATE_S4 and execute the _PTS control method. This
 *	function is used if the pre-ACPI 2.0 suspend ordering has been
 *	requested.
 */
static int acpi_hibernation_begin_old(pm_message_t stage)
{
        int error;
        /*
         * The _TTS object should always be evaluated before the _PTS object.
         * When old_suspend_ordering is true, the _PTS object is evaluated
         * in acpi_sleep_prepare().
         */
        acpi_sleep_tts_switch(ACPI_STATE_S4);

        error = acpi_sleep_prepare(ACPI_STATE_S4);
        if (error)
                return error;

        if (!nvs_nosave) {
                error = suspend_nvs_alloc();
                if (error)
                        return error;
        }

        if (stage.event == PM_EVENT_HIBERNATE)
                pm_set_suspend_via_firmware();

        acpi_target_sleep_state = ACPI_STATE_S4;
        acpi_scan_lock_acquire();
        return 0;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
        .begin = acpi_hibernation_begin_old,
        .end = acpi_pm_end,
        .pre_snapshot = acpi_pm_pre_suspend,
        .prepare = acpi_pm_freeze,
        .finish = acpi_pm_finish,
        .enter = acpi_hibernation_enter,
        .leave = acpi_hibernation_leave,
        .pre_restore = acpi_pm_freeze,
        .restore_cleanup = acpi_pm_thaw,
        .recover = acpi_pm_finish,
};

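/*
 * If S4 is supported, register the hibernation operations (honoring the
 * pre-ACPI 2.0 ordering if requested) and, unless disabled, cache the FACS
 * hardware signature so that a hardware change can be detected on resume
 * from hibernation.
 */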
static void acpi_sleep_hibernate_setup(void)
{
        if (!acpi_sleep_state_supported(ACPI_STATE_S4))
                return;

        hibernation_set_ops(old_suspend_ordering ?
                        &acpi_hibernation_ops_old : &acpi_hibernation_ops);
        sleep_states[ACPI_STATE_S4] = 1;
        if (nosigcheck)
                return;

        acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
        if (facs)
                s4_hardware_signature = facs->hardware_signature;
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */

static void acpi_power_off_prepare(void)
{
        /* Prepare to power off the system */
        acpi_sleep_prepare(ACPI_STATE_S5);
        acpi_disable_all_gpes();
        acpi_os_wait_events_complete();
}

static void acpi_power_off(void)
{
        /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
        pr_debug("%s called\n", __func__);
        local_irq_disable();
        acpi_enter_sleep_state(ACPI_STATE_S5);
}

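/*
 * Probe the sleep states supported by the platform, wire up the suspend,
 * hibernation and power-off handlers accordingly, print the result and
 * register the reboot notifier used to evaluate _TTS on shutdown.
 */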
int __init acpi_sleep_init(void)
{
        char supported[ACPI_S_STATE_COUNT * 3 + 1];
        char *pos = supported;
        int i;

        acpi_sleep_dmi_check();

        sleep_states[ACPI_STATE_S0] = 1;

        acpi_sleep_syscore_init();
        acpi_sleep_suspend_setup();
        acpi_sleep_hibernate_setup();

        if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
                sleep_states[ACPI_STATE_S5] = 1;
                pm_power_off_prepare = acpi_power_off_prepare;
                pm_power_off = acpi_power_off;
        } else {
                acpi_no_s5 = true;
        }

        supported[0] = 0;
        for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
                if (sleep_states[i])
                        pos += sprintf(pos, " S%d", i);
        }
        pr_info("(supports%s)\n", supported);

        /*
         * Register tts_notifier on the reboot notifier list so that the _TTS
         * object can also be evaluated when the system enters S5.
         */
        register_reboot_notifier(&tts_notifier);
        return 0;
}