// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/power/main.c - PM subsystem core functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/pm-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "power.h"

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */
static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}
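
/*
 * Illustrative sketch of the call pattern described in the comment above;
 * this is an assumption for illustration only (example_enter_sleep() and
 * do_transition() are hypothetical names, not functions in this file):
 *
 *	static int example_enter_sleep(void)
 *	{
 *		int error;
 *
 *		mutex_lock(&system_transition_mutex);
 *		pm_restrict_gfp_mask();	// allocations may not use __GFP_IO/__GFP_FS
 *		error = do_transition();	// hypothetical suspend/hibernate work
 *		pm_restore_gfp_mask();
 *		mutex_unlock(&system_transition_mutex);
 *		return error;
 *	}
 */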

unsigned int lock_system_sleep(void)
{
	unsigned int flags = current->flags;
	current->flags |= PF_NOFREEZE;
	mutex_lock(&system_transition_mutex);
	return flags;
}
EXPORT_SYMBOL_GPL(lock_system_sleep);

void unlock_system_sleep(unsigned int flags)
{
	if (!(flags & PF_NOFREEZE))
		current->flags &= ~PF_NOFREEZE;
	mutex_unlock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(unlock_system_sleep);

void ksys_sync_helper(void)
{
	ktime_t start;
	long elapsed_msecs;

	start = ktime_get();
	ksys_sync();
	elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
	pr_info("Filesystems sync: %ld.%03ld seconds\n",
		elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
}
EXPORT_SYMBOL_GPL(ksys_sync_helper);

/* Routines for PM-transition notifications */

static BLOCKING_NOTIFIER_HEAD(pm_chain_head);

int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);

int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);

void pm_report_hw_sleep_time(u64 t)
{
	suspend_stats.last_hw_sleep = t;
	suspend_stats.total_hw_sleep += t;
}
EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);

void pm_report_max_hw_sleep(u64 t)
{
	suspend_stats.max_hw_sleep = t;
}
EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);

int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
{
	int ret;

	ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);

	return notifier_to_errno(ret);
}

int pm_notifier_call_chain(unsigned long val)
{
	return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
}

/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;

static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, "%d\n", pm_async_enabled);
}

static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_async_enabled = val;
	return n;
}

power_attr(pm_async);

#ifdef CONFIG_SUSPEND
static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	ssize_t count = 0;
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) {
		if (i >= PM_SUSPEND_MEM && cxl_mem_active())
			continue;
		if (mem_sleep_states[i]) {
			const char *label = mem_sleep_states[i];

			if (mem_sleep_current == i)
				count += sysfs_emit_at(buf, count, "[%s] ", label);
			else
				count += sysfs_emit_at(buf, count, "%s ", label);
		}
	}

	/* Convert the last space to a newline if needed. */
	if (count > 0)
		buf[count - 1] = '\n';

	return count;
}

static suspend_state_t decode_suspend_state(const char *buf, size_t n)
{
	suspend_state_t state;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = mem_sleep_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}

	return PM_SUSPEND_ON;
}

static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_suspend_state(buf, n);
	if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)
		mem_sleep_current = state;
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(mem_sleep);

/*
 * sync_on_suspend: invoke ksys_sync_helper() before suspend.
 *
 * show() returns whether ksys_sync_helper() is invoked before suspend.
 * store() accepts 0 or 1. 0 disables ksys_sync_helper() and 1 enables it.
 */
bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);

static ssize_t sync_on_suspend_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", sync_on_suspend_enabled);
}

static ssize_t sync_on_suspend_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	sync_on_suspend_enabled = !!val;
	return n;
}

power_attr(sync_on_suspend);
#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_PM_SLEEP_DEBUG
int pm_test_level = TEST_NONE;

static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};

static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	ssize_t count = 0;
	int level;

	for (level = TEST_FIRST; level <= TEST_MAX; level++)
		if (pm_tests[level]) {
			if (level == pm_test_level)
				count += sysfs_emit_at(buf, count, "[%s] ", pm_tests[level]);
			else
				count += sysfs_emit_at(buf, count, "%s ", pm_tests[level]);
		}

	/* Convert the last space to a newline if needed. */
	if (count > 0)
		buf[count - 1] = '\n';

	return count;
}

static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t n)
{
	unsigned int sleep_flags;
	const char * const *s;
	int error = -EINVAL;
	int level;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	sleep_flags = lock_system_sleep();

	level = TEST_FIRST;
	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
			pm_test_level = level;
			error = 0;
			break;
		}

	unlock_system_sleep(sleep_flags);

	return error ? error : n;
}

power_attr(pm_test);
#endif /* CONFIG_PM_SLEEP_DEBUG */

static const char * const suspend_step_names[] = {
	[SUSPEND_WORKING] = "",
	[SUSPEND_FREEZE] = "freeze",
	[SUSPEND_PREPARE] = "prepare",
	[SUSPEND_SUSPEND] = "suspend",
	[SUSPEND_SUSPEND_LATE] = "suspend_late",
	[SUSPEND_SUSPEND_NOIRQ] = "suspend_noirq",
	[SUSPEND_RESUME_NOIRQ] = "resume_noirq",
	[SUSPEND_RESUME_EARLY] = "resume_early",
	[SUSPEND_RESUME] = "resume",
};

#define suspend_attr(_name, format_str)				\
static ssize_t _name##_show(struct kobject *kobj,		\
		struct kobj_attribute *attr, char *buf)		\
{								\
	return sysfs_emit(buf, format_str, suspend_stats._name);\
}								\
static struct kobj_attribute _name = __ATTR_RO(_name)

suspend_attr(success, "%u\n");
suspend_attr(fail, "%u\n");
suspend_attr(last_hw_sleep, "%llu\n");
suspend_attr(total_hw_sleep, "%llu\n");
suspend_attr(max_hw_sleep, "%llu\n");

#define suspend_step_attr(_name, step)				\
static ssize_t _name##_show(struct kobject *kobj,		\
		struct kobj_attribute *attr, char *buf)		\
{								\
	return sysfs_emit(buf, "%u\n",				\
		       suspend_stats.step_failures[step-1]);	\
}								\
static struct kobj_attribute _name = __ATTR_RO(_name)

suspend_step_attr(failed_freeze, SUSPEND_FREEZE);
suspend_step_attr(failed_prepare, SUSPEND_PREPARE);
suspend_step_attr(failed_suspend, SUSPEND_SUSPEND);
suspend_step_attr(failed_suspend_late, SUSPEND_SUSPEND_LATE);
suspend_step_attr(failed_suspend_noirq, SUSPEND_SUSPEND_NOIRQ);
suspend_step_attr(failed_resume, SUSPEND_RESUME);
suspend_step_attr(failed_resume_early, SUSPEND_RESUME_EARLY);
suspend_step_attr(failed_resume_noirq, SUSPEND_RESUME_NOIRQ);

static ssize_t last_failed_dev_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int index;
	char *last_failed_dev = NULL;

	index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	last_failed_dev = suspend_stats.failed_devs[index];

	return sysfs_emit(buf, "%s\n", last_failed_dev);
}
static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);

static ssize_t last_failed_errno_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	int index;
	int last_failed_errno;

	index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	last_failed_errno = suspend_stats.errno[index];

	return sysfs_emit(buf, "%d\n", last_failed_errno);
}
static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);

static ssize_t last_failed_step_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	enum suspend_stat_step step;
	int index;

	index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	step = suspend_stats.failed_steps[index];

	return sysfs_emit(buf, "%s\n", suspend_step_names[step]);
}
static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);

static struct attribute *suspend_attrs[] = {
	&success.attr,
	&fail.attr,
	&failed_freeze.attr,
	&failed_prepare.attr,
	&failed_suspend.attr,
	&failed_suspend_late.attr,
	&failed_suspend_noirq.attr,
	&failed_resume.attr,
	&failed_resume_early.attr,
	&failed_resume_noirq.attr,
	&last_failed_dev.attr,
	&last_failed_errno.attr,
	&last_failed_step.attr,
	&last_hw_sleep.attr,
	&total_hw_sleep.attr,
	&max_hw_sleep.attr,
	NULL,
};

static umode_t suspend_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	if (attr != &last_hw_sleep.attr &&
	    attr != &total_hw_sleep.attr &&
	    attr != &max_hw_sleep.attr)
		return 0444;

#ifdef CONFIG_ACPI
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		return 0444;
#endif
	return 0;
}

static const struct attribute_group suspend_attr_group = {
	.name = "suspend_stats",
	.attrs = suspend_attrs,
	.is_visible = suspend_attr_is_visible,
};

#ifdef CONFIG_DEBUG_FS
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;
	enum suspend_stat_step step;

	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;

	seq_printf(s, "success: %u\nfail: %u\n",
		   suspend_stats.success, suspend_stats.fail);

	for (step = SUSPEND_FREEZE; step <= SUSPEND_NR_STEPS; step++)
		seq_printf(s, "failed_%s: %u\n", suspend_step_names[step],
			   suspend_stats.step_failures[step-1]);

	seq_printf(s, "failures:\n  last_failed_dev:\t%-s\n",
		   suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n", suspend_stats.failed_devs[index]);
	}
	seq_printf(s, "  last_failed_errno:\t%-d\n",
		   suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n", suspend_stats.errno[index]);
	}
	seq_printf(s, "  last_failed_step:\t%-s\n",
		   suspend_step_names[suspend_stats.failed_steps[last_step]]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			   suspend_step_names[suspend_stats.failed_steps[index]]);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(suspend_stats);

static int __init pm_debugfs_init(void)
{
	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
			NULL, NULL, &suspend_stats_fops);
	return 0;
}

late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
/*
 * pm_print_times: print time taken by devices to suspend and resume.
 *
 * show() returns whether printing of suspend and resume times is enabled.
 * store() accepts 0 or 1. 0 disables printing and 1 enables it.
 */
bool pm_print_times_enabled;

static ssize_t pm_print_times_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", pm_print_times_enabled);
}

static ssize_t pm_print_times_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_print_times_enabled = !!val;
	return n;
}

power_attr(pm_print_times);

static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = !!initcall_debug;
}

static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	if (!pm_wakeup_irq())
		return -ENODATA;

	return sysfs_emit(buf, "%u\n", pm_wakeup_irq());
}

power_attr_ro(pm_wakeup_irq);

bool pm_debug_messages_on __read_mostly;

bool pm_debug_messages_should_print(void)
{
	return pm_debug_messages_on && (hibernation_in_progress() ||
		pm_suspend_target_state != PM_SUSPEND_ON);
}
EXPORT_SYMBOL_GPL(pm_debug_messages_should_print);

static ssize_t pm_debug_messages_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", pm_debug_messages_on);
}

static ssize_t pm_debug_messages_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_debug_messages_on = !!val;
	return n;
}

power_attr(pm_debug_messages);

static int __init pm_debug_messages_setup(char *str)
{
	pm_debug_messages_on = true;
	return 1;
}
__setup("pm_debug_messages", pm_debug_messages_setup);

#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */

struct kobject *power_kobj;

/*
 * state - control system sleep states.
 *
 * show() returns available sleep state labels, which may be "mem", "standby",
 * "freeze" and "disk" (hibernation).
 * See Documentation/admin-guide/pm/sleep-states.rst for a description of
 * what they mean.
 *
 * store() accepts one of those strings, translates it into the proper
 * enumerated value, and initiates a suspend transition.
 */
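
/*
 * Illustrative userspace sketch (an assumption, not part of the kernel):
 * a minimal program requesting suspend-to-RAM through this attribute.  The
 * label written must be one of those reported by state_show(); the function
 * name below is hypothetical.  Needs <fcntl.h> and <unistd.h>.
 *
 *	static int example_suspend_to_ram(void)
 *	{
 *		int fd = open("/sys/power/state", O_WRONLY);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, "mem", 3);	// blocks until resume or failure
 *		close(fd);
 *		return ret == 3 ? 0 : -1;
 *	}
 */
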
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	ssize_t count = 0;
#ifdef CONFIG_SUSPEND
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
		if (pm_states[i])
			count += sysfs_emit_at(buf, count, "%s ", pm_states[i]);

#endif
	if (hibernation_available())
		count += sysfs_emit_at(buf, count, "disk ");

	/* Convert the last space to a newline if needed. */
	if (count > 0)
		buf[count - 1] = '\n';

	return count;
}

static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state;
#endif
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && str_has_prefix(buf, "disk"))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = pm_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}
#endif

	return PM_SUSPEND_ON;
}

static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX) {
		if (state == PM_SUSPEND_MEM)
			state = mem_sleep_current;

		error = pm_suspend(state);
	} else if (state == PM_SUSPEND_MAX) {
		error = hibernate();
	} else {
		error = -EINVAL;
	}

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(state);

#ifdef CONFIG_PM_SLEEP
/*
 * The 'wakeup_count' attribute, along with the functions defined in
 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
 * handled in a non-racy way.
 *
 * If a wakeup event occurs when the system is in a sleep state, it simply is
 * woken up. In turn, if an event that would wake the system up from a sleep
 * state occurs when it is undergoing a transition to that sleep state, the
 * transition should be aborted. Moreover, if such an event occurs when the
 * system is in the working state, an attempt to start a transition to the
 * given sleep state should fail during a certain period after the detection of
 * the event. Using the 'state' attribute alone is not sufficient to satisfy
 * these requirements, because a wakeup event may occur exactly when 'state'
 * is being written to and may be delivered to user space right before it is
 * frozen, so the event will remain only partially processed until the system is
 * woken up by another event. In particular, it won't cause the transition to
 * a sleep state to be aborted.
 *
 * This difficulty may be overcome if user space uses 'wakeup_count' before
 * writing to 'state'. It first should read from 'wakeup_count' and store
 * the read value. Then, after carrying out its own preparations for the system
 * transition to a sleep state, it should write the stored value to
 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
 * is allowed to write to 'state', but the transition will be aborted if there
 * are any wakeup events detected after 'wakeup_count' was written to.
 */
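
/*
 * Illustrative userspace sketch of the sequence described above (an
 * assumption, not part of the kernel; the function name is hypothetical and
 * the snippet needs <fcntl.h> and <unistd.h>):
 *
 *	static int example_suspend_with_wakeup_count(void)
 *	{
 *		char count[32];
 *		ssize_t len;
 *		int fd, sfd, ret = -1;
 *
 *		fd = open("/sys/power/wakeup_count", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *		len = read(fd, count, sizeof(count) - 1);
 *		if (len <= 0)
 *			goto out;
 *
 *		// ... userspace preparations for the transition go here ...
 *
 *		lseek(fd, 0, SEEK_SET);
 *		if (write(fd, count, len) != len)
 *			goto out;	// a wakeup event occurred: do not suspend
 *
 *		sfd = open("/sys/power/state", O_WRONLY);
 *		if (sfd < 0)
 *			goto out;
 *		ret = write(sfd, "mem", 3) == 3 ? 0 : -1;
 *		close(sfd);
 *	out:
 *		close(fd);
 *		return ret;
 *	}
 */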

static ssize_t wakeup_count_show(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 char *buf)
{
	unsigned int val;

	return pm_get_wakeup_count(&val, true) ?
		sysfs_emit(buf, "%u\n", val) : -EINTR;
}

static ssize_t wakeup_count_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		if (pm_save_wakeup_count(val))
			error = n;
		else
			pm_print_active_wakeup_sources();
	}

 out:
	pm_autosleep_unlock();
	return error;
}

power_attr(wakeup_count);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	if (state == PM_SUSPEND_ON)
		return sysfs_emit(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sysfs_emit(buf, "%s\n", pm_states[state] ?
					pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
	return sysfs_emit(buf, "disk\n");
#else
	return sysfs_emit(buf, "error\n");
#endif
}

static ssize_t autosleep_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state = decode_state(buf, n);
	int error;

	if (state == PM_SUSPEND_ON
	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
		return -EINVAL;

	if (state == PM_SUSPEND_MEM)
		state = mem_sleep_current;

	error = pm_autosleep_set_state(state);
	return error ? error : n;
}

power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	return pm_show_wakelocks(buf, true);
}

static ssize_t wake_lock_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	int error = pm_wake_lock(buf);
	return error ? error : n;
}

power_attr(wake_lock);

static ssize_t wake_unlock_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return pm_show_wakelocks(buf, false);
}

static ssize_t wake_unlock_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t n)
{
	int error = pm_wake_unlock(buf);
	return error ? error : n;
}

power_attr(wake_unlock);

#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;

static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, "%d\n", pm_trace_enabled);
}

static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t n)
{
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		pm_trace_enabled = !!val;
		if (pm_trace_enabled) {
			pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
				"PM: Correct system time has to be restored manually after resume.\n");
		}
		return n;
	}
	return -EINVAL;
}

power_attr(pm_trace);

static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}

power_attr_ro(pm_trace_dev_match);

#endif /* CONFIG_PM_TRACE */

#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", freeze_timeout_msecs);
}

static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	freeze_timeout_msecs = val;
	return n;
}

power_attr(pm_freeze_timeout);

#endif /* CONFIG_FREEZER*/

static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_SUSPEND
	&mem_sleep_attr.attr,
	&sync_on_suspend_attr.attr,
#endif
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_test_attr.attr,
	&pm_print_times_attr.attr,
	&pm_wakeup_irq_attr.attr,
	&pm_debug_messages_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
	&pm_freeze_timeout_attr.attr,
#endif
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = g,
};

static const struct attribute_group *attr_groups[] = {
	&attr_group,
#ifdef CONFIG_PM_SLEEP
	&suspend_attr_group,
#endif
	NULL,
};

struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

	return pm_wq ? 0 : -ENOMEM;
}

static int __init pm_init(void)
{
	int error = pm_start_workqueue();
	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();
	pm_states_init();
	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_groups(power_kobj, attr_groups);
	if (error)
		return error;
	pm_print_times_init();
	return pm_autosleep_init();
}

core_initcall(pm_init);