1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/wakeup.c - System wakeup events framework
4  *
5  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <linux/pm_wakeirq.h>
18 #include <linux/irq.h>
19 #include <linux/irqdesc.h>
20 #include <linux/wakeup_reason.h>
21 #include <trace/events/power.h>
22 
23 #include "power.h"
24 
25 #ifndef CONFIG_SUSPEND
26 suspend_state_t pm_suspend_target_state;
27 #define pm_suspend_target_state	(PM_SUSPEND_ON)
28 #endif
29 
30 #define list_for_each_entry_rcu_locked(pos, head, member) \
31 	list_for_each_entry_rcu(pos, head, member, \
32 		srcu_read_lock_held(&wakeup_srcu))
33 /*
34  * If set, the suspend/hibernate code will abort transitions to a sleep state
35  * if wakeup events are registered during or immediately before the transition.
36  */
37 bool events_check_enabled __read_mostly;
38 
39 /* First two wakeup IRQs seen by the kernel in the last cycle. */
40 static unsigned int wakeup_irq[2] __read_mostly;
41 static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
42 
43 /* If greater than 0 and the system is suspending, terminate the suspend. */
44 static atomic_t pm_abort_suspend __read_mostly;
45 
46 /*
47  * Combined counters of registered wakeup events and wakeup events in progress.
48  * They need to be modified together atomically, so it's better to use one
49  * atomic variable to hold them both.
50  */
51 static atomic_t combined_event_count = ATOMIC_INIT(0);
52 
53 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
54 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
55 
56 static void split_counters(unsigned int *cnt, unsigned int *inpr)
57 {
58 	unsigned int comb = atomic_read(&combined_event_count);
59 
60 	*cnt = (comb >> IN_PROGRESS_BITS);
61 	*inpr = comb & MAX_IN_PROGRESS;
62 }
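
/*
 * A minimal worked example of the packing above, assuming a 32-bit int
 * (IN_PROGRESS_BITS is then 16 and MAX_IN_PROGRESS is 0xffff):
 *
 *	atomic_inc(&combined_event_count);
 *		increments only the "in progress" half in the low bits;
 *	atomic_add(MAX_IN_PROGRESS, &combined_event_count);
 *		adds 0x10000 - 1, i.e. increments the "registered" half and
 *		decrements the "in progress" half in one atomic operation.
 *
 * This is the arithmetic that wakeup_source_activate() and
 * wakeup_source_deactivate() below rely on.
 */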
63 
64 /* A preserved old value of the events counter. */
65 static unsigned int saved_count;
66 
67 static DEFINE_RAW_SPINLOCK(events_lock);
68 
69 static void pm_wakeup_timer_fn(struct timer_list *t);
70 
71 static LIST_HEAD(wakeup_sources);
72 
73 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
74 
75 DEFINE_STATIC_SRCU(wakeup_srcu);
76 
77 static struct wakeup_source deleted_ws = {
78 	.name = "deleted",
79 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
80 };
81 
82 static DEFINE_IDA(wakeup_ida);
83 
84 /**
85  * wakeup_source_create - Create a struct wakeup_source object.
86  * @name: Name of the new wakeup source.
87  */
88 struct wakeup_source *wakeup_source_create(const char *name)
89 {
90 	struct wakeup_source *ws;
91 	const char *ws_name;
92 	int id;
93 
94 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
95 	if (!ws)
96 		goto err_ws;
97 
98 	ws_name = kstrdup_const(name, GFP_KERNEL);
99 	if (!ws_name)
100 		goto err_name;
101 	ws->name = ws_name;
102 
103 	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
104 	if (id < 0)
105 		goto err_id;
106 	ws->id = id;
107 
108 	return ws;
109 
110 err_id:
111 	kfree_const(ws->name);
112 err_name:
113 	kfree(ws);
114 err_ws:
115 	return NULL;
116 }
117 EXPORT_SYMBOL_GPL(wakeup_source_create);
118 
119 /*
120  * Record the statistics of a wakeup_source being deleted into the dummy "deleted" source.
121  */
122 static void wakeup_source_record(struct wakeup_source *ws)
123 {
124 	unsigned long flags;
125 
126 	spin_lock_irqsave(&deleted_ws.lock, flags);
127 
128 	if (ws->event_count) {
129 		deleted_ws.total_time =
130 			ktime_add(deleted_ws.total_time, ws->total_time);
131 		deleted_ws.prevent_sleep_time =
132 			ktime_add(deleted_ws.prevent_sleep_time,
133 				  ws->prevent_sleep_time);
134 		deleted_ws.max_time =
135 			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
136 				deleted_ws.max_time : ws->max_time;
137 		deleted_ws.event_count += ws->event_count;
138 		deleted_ws.active_count += ws->active_count;
139 		deleted_ws.relax_count += ws->relax_count;
140 		deleted_ws.expire_count += ws->expire_count;
141 		deleted_ws.wakeup_count += ws->wakeup_count;
142 	}
143 
144 	spin_unlock_irqrestore(&deleted_ws.lock, flags);
145 }
146 
147 static void wakeup_source_free(struct wakeup_source *ws)
148 {
149 	ida_free(&wakeup_ida, ws->id);
150 	kfree_const(ws->name);
151 	kfree(ws);
152 }
153 
154 /**
155  * wakeup_source_destroy - Destroy a struct wakeup_source object.
156  * @ws: Wakeup source to destroy.
157  *
158  * Use only for wakeup source objects created with wakeup_source_create().
159  */
160 void wakeup_source_destroy(struct wakeup_source *ws)
161 {
162 	if (!ws)
163 		return;
164 
165 	__pm_relax(ws);
166 	wakeup_source_record(ws);
167 	wakeup_source_free(ws);
168 }
169 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
170 
171 /**
172  * wakeup_source_add - Add given object to the list of wakeup sources.
173  * @ws: Wakeup source object to add to the list.
174  */
175 void wakeup_source_add(struct wakeup_source *ws)
176 {
177 	unsigned long flags;
178 
179 	if (WARN_ON(!ws))
180 		return;
181 
182 	spin_lock_init(&ws->lock);
183 	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
184 	ws->active = false;
185 
186 	raw_spin_lock_irqsave(&events_lock, flags);
187 	list_add_rcu(&ws->entry, &wakeup_sources);
188 	raw_spin_unlock_irqrestore(&events_lock, flags);
189 }
190 EXPORT_SYMBOL_GPL(wakeup_source_add);
191 
192 /**
193  * wakeup_source_remove - Remove given object from the wakeup sources list.
194  * @ws: Wakeup source object to remove from the list.
195  */
196 void wakeup_source_remove(struct wakeup_source *ws)
197 {
198 	unsigned long flags;
199 
200 	if (WARN_ON(!ws))
201 		return;
202 
203 	raw_spin_lock_irqsave(&events_lock, flags);
204 	list_del_rcu(&ws->entry);
205 	raw_spin_unlock_irqrestore(&events_lock, flags);
206 	synchronize_srcu(&wakeup_srcu);
207 
208 	del_timer_sync(&ws->timer);
209 	/*
210 	 * Clear timer.function to make wakeup_source_not_registered() treat
211 	 * this wakeup source as not registered.
212 	 */
213 	ws->timer.function = NULL;
214 }
215 EXPORT_SYMBOL_GPL(wakeup_source_remove);
216 
217 /**
218  * wakeup_source_register - Create wakeup source and add it to the list.
219  * @dev: Device this wakeup source is associated with (or NULL if virtual).
220  * @name: Name of the wakeup source to register.
221  */
222 struct wakeup_source *wakeup_source_register(struct device *dev,
223 					     const char *name)
224 {
225 	struct wakeup_source *ws;
226 	int ret;
227 
228 	ws = wakeup_source_create(name);
229 	if (ws) {
230 		if (!dev || device_is_registered(dev)) {
231 			ret = wakeup_source_sysfs_add(dev, ws);
232 			if (ret) {
233 				wakeup_source_free(ws);
234 				return NULL;
235 			}
236 		}
237 		wakeup_source_add(ws);
238 	}
239 	return ws;
240 }
241 EXPORT_SYMBOL_GPL(wakeup_source_register);
242 
243 /**
244  * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
245  * @ws: Wakeup source object to unregister.
246  */
247 void wakeup_source_unregister(struct wakeup_source *ws)
248 {
249 	if (ws) {
250 		wakeup_source_remove(ws);
251 		if (ws->dev)
252 			wakeup_source_sysfs_remove(ws);
253 
254 		wakeup_source_destroy(ws);
255 	}
256 }
257 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
258 
259 /**
260  * wakeup_sources_read_lock - Lock wakeup source list for read.
261  *
262  * Returns an SRCU read lock index for wakeup_srcu.
263  * This index must be passed to the matching wakeup_sources_read_unlock().
264  */
265 int wakeup_sources_read_lock(void)
266 {
267 	return srcu_read_lock(&wakeup_srcu);
268 }
269 EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
270 
271 /**
272  * wakeup_sources_read_unlock - Unlock wakeup source list.
273  * @idx: return value from corresponding wakeup_sources_read_lock()
274  */
275 void wakeup_sources_read_unlock(int idx)
276 {
277 	srcu_read_unlock(&wakeup_srcu, idx);
278 }
279 EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
280 
281 /**
282  * wakeup_sources_walk_start - Begin a walk on wakeup source list
283  *
284  * Returns first object of the list of wakeup sources.
285  *
286  * Note that to be safe, the wakeup sources list needs to be locked by
287  * calling wakeup_sources_read_lock() before using this.
288  */
289 struct wakeup_source *wakeup_sources_walk_start(void)
290 {
291 	struct list_head *ws_head = &wakeup_sources;
292 
293 	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
294 }
295 EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
296 
297 /**
298  * wakeup_sources_walk_next - Get next wakeup source from the list
299  * @ws: Previous wakeup source object
300  *
301  * Note that to be safe, the wakeup sources list needs to be locked by
302  * calling wakeup_sources_read_lock() before using this.
303  */
304 struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
305 {
306 	struct list_head *ws_head = &wakeup_sources;
307 
308 	return list_next_or_null_rcu(ws_head, &ws->entry,
309 				struct wakeup_source, entry);
310 }
311 EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
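
/*
 * A minimal sketch of how a caller might combine the helpers above; the
 * index returned by wakeup_sources_read_lock() must be handed back to
 * wakeup_sources_read_unlock() (illustrative only, assuming at least one
 * registered wakeup source):
 *
 *	struct wakeup_source *ws;
 *	int idx;
 *
 *	idx = wakeup_sources_read_lock();
 *	for (ws = wakeup_sources_walk_start(); ws;
 *	     ws = wakeup_sources_walk_next(ws))
 *		pr_info("wakeup source: %s\n", ws->name);
 *	wakeup_sources_read_unlock(idx);
 */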
312 
313 /**
314  * device_wakeup_attach - Attach a wakeup source object to a device object.
315  * @dev: Device to handle.
316  * @ws: Wakeup source object to attach to @dev.
317  *
318  * This causes @dev to be treated as a wakeup device.
319  */
320 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
321 {
322 	spin_lock_irq(&dev->power.lock);
323 	if (dev->power.wakeup) {
324 		spin_unlock_irq(&dev->power.lock);
325 		return -EEXIST;
326 	}
327 	dev->power.wakeup = ws;
328 	if (dev->power.wakeirq)
329 		device_wakeup_attach_irq(dev, dev->power.wakeirq);
330 	spin_unlock_irq(&dev->power.lock);
331 	return 0;
332 }
333 
334 /**
335  * device_wakeup_enable - Enable given device to be a wakeup source.
336  * @dev: Device to handle.
337  *
338  * Create a wakeup source object, register it and attach it to @dev.
339  */
340 int device_wakeup_enable(struct device *dev)
341 {
342 	struct wakeup_source *ws;
343 	int ret;
344 
345 	if (!dev || !dev->power.can_wakeup)
346 		return -EINVAL;
347 
348 	if (pm_suspend_target_state != PM_SUSPEND_ON)
349 		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
350 
351 	ws = wakeup_source_register(dev, dev_name(dev));
352 	if (!ws)
353 		return -ENOMEM;
354 
355 	ret = device_wakeup_attach(dev, ws);
356 	if (ret)
357 		wakeup_source_unregister(ws);
358 
359 	return ret;
360 }
361 EXPORT_SYMBOL_GPL(device_wakeup_enable);
362 
363 /**
364  * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
365  * @dev: Device to handle
366  * @wakeirq: Device specific wakeirq entry
367  *
368  * Attach a device wakeirq to the wakeup source so the device
369  * wake IRQ can be configured automatically for suspend and
370  * resume.
371  *
372  * Call under the device's power.lock lock.
373  */
374 void device_wakeup_attach_irq(struct device *dev,
375 			     struct wake_irq *wakeirq)
376 {
377 	struct wakeup_source *ws;
378 
379 	ws = dev->power.wakeup;
380 	if (!ws)
381 		return;
382 
383 	if (ws->wakeirq)
384 		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
385 
386 	ws->wakeirq = wakeirq;
387 }
388 
389 /**
390  * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
391  * @dev: Device to handle
392  *
393  * Removes a device wakeirq from the wakeup source.
394  *
395  * Call under the device's power.lock lock.
396  */
397 void device_wakeup_detach_irq(struct device *dev)
398 {
399 	struct wakeup_source *ws;
400 
401 	ws = dev->power.wakeup;
402 	if (ws)
403 		ws->wakeirq = NULL;
404 }
405 
406 /**
407  * device_wakeup_arm_wake_irqs - Arm all device wake IRQs.
408  *
409  * Iterates over the list of device wakeirqs to arm them.
410  */
411 void device_wakeup_arm_wake_irqs(void)
412 {
413 	struct wakeup_source *ws;
414 	int srcuidx;
415 
416 	srcuidx = srcu_read_lock(&wakeup_srcu);
417 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
418 		dev_pm_arm_wake_irq(ws->wakeirq);
419 	srcu_read_unlock(&wakeup_srcu, srcuidx);
420 }
421 
422 /**
423  * device_wakeup_disarm_wake_irqs - Disarm all device wake IRQs.
424  *
425  * Iterates over the list of device wakeirqs to disarm them.
426  */
427 void device_wakeup_disarm_wake_irqs(void)
428 {
429 	struct wakeup_source *ws;
430 	int srcuidx;
431 
432 	srcuidx = srcu_read_lock(&wakeup_srcu);
433 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
434 		dev_pm_disarm_wake_irq(ws->wakeirq);
435 	srcu_read_unlock(&wakeup_srcu, srcuidx);
436 }
437 
438 /**
439  * device_wakeup_detach - Detach a device's wakeup source object from it.
440  * @dev: Device to detach the wakeup source object from.
441  *
442  * After it returns, @dev will not be treated as a wakeup device any more.
443  */
444 static struct wakeup_source *device_wakeup_detach(struct device *dev)
445 {
446 	struct wakeup_source *ws;
447 
448 	spin_lock_irq(&dev->power.lock);
449 	ws = dev->power.wakeup;
450 	dev->power.wakeup = NULL;
451 	spin_unlock_irq(&dev->power.lock);
452 	return ws;
453 }
454 
455 /**
456  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
457  * @dev: Device to handle.
458  *
459  * Detach the @dev's wakeup source object from it, unregister this wakeup source
460  * object and destroy it.
461  */
462 int device_wakeup_disable(struct device *dev)
463 {
464 	struct wakeup_source *ws;
465 
466 	if (!dev || !dev->power.can_wakeup)
467 		return -EINVAL;
468 
469 	ws = device_wakeup_detach(dev);
470 	wakeup_source_unregister(ws);
471 	return 0;
472 }
473 EXPORT_SYMBOL_GPL(device_wakeup_disable);
474 
475 /**
476  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
477  * @dev: Device to handle.
478  * @capable: Whether or not @dev is capable of waking up the system from sleep.
479  *
480  * If @capable is set, set the @dev's power.can_wakeup flag and add its
481  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
482  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
483  *
484  * This function may sleep and it can't be called from any context where
485  * sleeping is not allowed.
486  */
487 void device_set_wakeup_capable(struct device *dev, bool capable)
488 {
489 	if (!!dev->power.can_wakeup == !!capable)
490 		return;
491 
492 	dev->power.can_wakeup = capable;
493 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
494 		if (capable) {
495 			int ret = wakeup_sysfs_add(dev);
496 
497 			if (ret)
498 				dev_info(dev, "Wakeup sysfs attributes not added\n");
499 		} else {
500 			wakeup_sysfs_remove(dev);
501 		}
502 	}
503 }
504 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
505 
506 /**
507  * device_init_wakeup - Device wakeup initialization.
508  * @dev: Device to handle.
509  * @enable: Whether or not to enable @dev as a wakeup device.
510  *
511  * By default, most devices should leave wakeup disabled.  The exceptions are
512  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
513  * possibly network interfaces, etc.  Also, devices that don't generate their
514  * own wakeup requests but merely forward requests from one bus to another
515  * (like PCI bridges) should have wakeup enabled by default.
516  */
517 int device_init_wakeup(struct device *dev, bool enable)
518 {
519 	int ret = 0;
520 
521 	if (!dev)
522 		return -EINVAL;
523 
524 	if (enable) {
525 		device_set_wakeup_capable(dev, true);
526 		ret = device_wakeup_enable(dev);
527 	} else {
528 		device_wakeup_disable(dev);
529 		device_set_wakeup_capable(dev, false);
530 	}
531 
532 	return ret;
533 }
534 EXPORT_SYMBOL_GPL(device_init_wakeup);
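
/*
 * A minimal sketch of the usual driver-side usage, assuming a hypothetical
 * driver with a valid struct device *dev (foo_probe and foo_remove are
 * illustrative names only): enable at probe time, disable on removal.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		return device_init_wakeup(dev, true);
 *	}
 *
 *	static void foo_remove(struct device *dev)
 *	{
 *		device_init_wakeup(dev, false);
 *	}
 */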
535 
536 /**
537  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
538  * @dev: Device to handle.
539  * @enable: enable/disable flag
540  */
541 int device_set_wakeup_enable(struct device *dev, bool enable)
542 {
543 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
544 }
545 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
546 
547 /**
548  * wakeup_source_not_registered - Check if the given wakeup source is not registered.
549  * @ws: Wakeup source to be validated.
550  */
551 static bool wakeup_source_not_registered(struct wakeup_source *ws)
552 {
553 	/*
554 	 * Use timer struct to check if the given source is initialized
555 	 * by wakeup_source_add.
556 	 */
557 	return ws->timer.function != pm_wakeup_timer_fn;
558 }
559 
560 /*
561  * The functions below use the observation that each wakeup event starts a
562  * period in which the system should not be suspended.  The moment this period
563  * will end depends on how the wakeup event is going to be processed after being
564  * detected and all of the possible cases can be divided into two distinct
565  * groups.
566  *
567  * First, a wakeup event may be detected by the same functional unit that will
568  * carry out the entire processing of it and possibly will pass it to user space
569  * for further processing.  In that case the functional unit that has detected
570  * the event may later "close" the "no suspend" period associated with it
571  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
572  * pm_relax(), balanced with each other, is supposed to be used in such
573  * situations.
574  *
575  * Second, a wakeup event may be detected by one functional unit and processed
576  * by another one.  In that case the unit that has detected it cannot really
577  * "close" the "no suspend" period associated with it, unless it knows in
578  * advance what's going to happen to the event during processing.  This
579  * knowledge, however, may not be available to it, so it can simply specify time
580  * to wait before the system can be suspended and pass it as the second
581  * argument of pm_wakeup_event().
582  *
583  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
584  * "no suspend" period will be ended either by the pm_relax(), or by the timer
585  * function executed when the timer expires, whichever comes first.
586  */
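
/*
 * A minimal sketch of the two patterns described above, assuming a
 * hypothetical driver with a valid struct device *dev (process_event() is
 * an illustrative placeholder and the 200 ms timeout is arbitrary):
 *
 *	First case - the detecting code also completes the processing:
 *
 *		pm_stay_awake(dev);
 *		process_event(dev);
 *		pm_relax(dev);
 *
 *	Second case - the event is handed off elsewhere, so only an upper
 *	bound on the processing time is specified:
 *
 *		pm_wakeup_event(dev, 200);
 */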
587 
588 /**
589  * wakeup_source_activate - Mark given wakeup source as active.
590  * @ws: Wakeup source to handle.
591  *
592  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
593  * core of the event by incrementing the counter of wakeup events being
594  * processed.
595  */
596 static void wakeup_source_activate(struct wakeup_source *ws)
597 {
598 	unsigned int cec;
599 
600 	if (WARN_ONCE(wakeup_source_not_registered(ws),
601 			"unregistered wakeup source\n"))
602 		return;
603 
604 	ws->active = true;
605 	ws->active_count++;
606 	ws->last_time = ktime_get();
607 	if (ws->autosleep_enabled)
608 		ws->start_prevent_time = ws->last_time;
609 
610 	/* Increment the counter of events in progress. */
611 	cec = atomic_inc_return(&combined_event_count);
612 
613 	trace_wakeup_source_activate(ws->name, cec);
614 }
615 
616 /**
617  * wakeup_source_report_event - Report wakeup event using the given source.
618  * @ws: Wakeup source to report the event for.
619  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
620  */
621 static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
622 {
623 	ws->event_count++;
624 	/* This is racy, but the counter is approximate anyway. */
625 	if (events_check_enabled)
626 		ws->wakeup_count++;
627 
628 	if (!ws->active)
629 		wakeup_source_activate(ws);
630 
631 	if (hard)
632 		pm_system_wakeup();
633 }
634 
635 /**
636  * __pm_stay_awake - Notify the PM core of a wakeup event.
637  * @ws: Wakeup source object associated with the source of the event.
638  *
639  * It is safe to call this function from interrupt context.
640  */
641 void __pm_stay_awake(struct wakeup_source *ws)
642 {
643 	unsigned long flags;
644 
645 	if (!ws)
646 		return;
647 
648 	spin_lock_irqsave(&ws->lock, flags);
649 
650 	wakeup_source_report_event(ws, false);
651 	del_timer(&ws->timer);
652 	ws->timer_expires = 0;
653 
654 	spin_unlock_irqrestore(&ws->lock, flags);
655 }
656 EXPORT_SYMBOL_GPL(__pm_stay_awake);
657 
658 /**
659  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
660  * @dev: Device the wakeup event is related to.
661  *
662  * Notify the PM core of a wakeup event (signaled by @dev) by calling
663  * __pm_stay_awake for the @dev's wakeup source object.
664  *
665  * Call this function after detecting of a wakeup event if pm_relax() is going
666  * to be called directly after processing the event (and possibly passing it to
667  * user space for further processing).
668  */
669 void pm_stay_awake(struct device *dev)
670 {
671 	unsigned long flags;
672 
673 	if (!dev)
674 		return;
675 
676 	spin_lock_irqsave(&dev->power.lock, flags);
677 	__pm_stay_awake(dev->power.wakeup);
678 	spin_unlock_irqrestore(&dev->power.lock, flags);
679 }
680 EXPORT_SYMBOL_GPL(pm_stay_awake);
681 
682 #ifdef CONFIG_PM_AUTOSLEEP
683 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
684 {
685 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
686 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
687 }
688 #else
689 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
690 					     ktime_t now) {}
691 #endif
692 
693 /**
694  * wakeup_source_deactivate - Mark given wakeup source as inactive.
695  * @ws: Wakeup source to handle.
696  *
697  * Update the @ws' statistics and notify the PM core that the wakeup source has
698  * become inactive by decrementing the counter of wakeup events being processed
699  * and incrementing the counter of registered wakeup events.
700  */
701 static void wakeup_source_deactivate(struct wakeup_source *ws)
702 {
703 	unsigned int cnt, inpr, cec;
704 	ktime_t duration;
705 	ktime_t now;
706 
707 	ws->relax_count++;
708 	/*
709 	 * __pm_relax() may be called directly or from a timer function.
710 	 * If it is called directly right after the timer function has been
711 	 * started, but before the timer function calls __pm_relax(), it is
712 	 * possible that __pm_stay_awake() will be called in the meantime and
713 	 * will set ws->active.  Then, ws->active may be cleared immediately
714 	 * by the __pm_relax() called from the timer function, but in such a
715 	 * case ws->relax_count will be different from ws->active_count.
716 	 */
717 	if (ws->relax_count != ws->active_count) {
718 		ws->relax_count--;
719 		return;
720 	}
721 
722 	ws->active = false;
723 
724 	now = ktime_get();
725 	duration = ktime_sub(now, ws->last_time);
726 	ws->total_time = ktime_add(ws->total_time, duration);
727 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
728 		ws->max_time = duration;
729 
730 	ws->last_time = now;
731 	del_timer(&ws->timer);
732 	ws->timer_expires = 0;
733 
734 	if (ws->autosleep_enabled)
735 		update_prevent_sleep_time(ws, now);
736 
737 	/*
738 	 * Increment the counter of registered wakeup events and decrement the
739 	 * counter of wakeup events in progress simultaneously.
740 	 */
741 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
742 	trace_wakeup_source_deactivate(ws->name, cec);
743 
744 	split_counters(&cnt, &inpr);
745 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
746 		wake_up(&wakeup_count_wait_queue);
747 }
748 
749 /**
750  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
751  * @ws: Wakeup source object associated with the source of the event.
752  *
753  * Call this function for wakeup events whose processing started with calling
754  * __pm_stay_awake().
755  *
756  * It is safe to call it from interrupt context.
757  */
758 void __pm_relax(struct wakeup_source *ws)
759 {
760 	unsigned long flags;
761 
762 	if (!ws)
763 		return;
764 
765 	spin_lock_irqsave(&ws->lock, flags);
766 	if (ws->active)
767 		wakeup_source_deactivate(ws);
768 	spin_unlock_irqrestore(&ws->lock, flags);
769 }
770 EXPORT_SYMBOL_GPL(__pm_relax);
771 
772 /**
773  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
774  * @dev: Device that signaled the event.
775  *
776  * Execute __pm_relax() for the @dev's wakeup source object.
777  */
778 void pm_relax(struct device *dev)
779 {
780 	unsigned long flags;
781 
782 	if (!dev)
783 		return;
784 
785 	spin_lock_irqsave(&dev->power.lock, flags);
786 	__pm_relax(dev->power.wakeup);
787 	spin_unlock_irqrestore(&dev->power.lock, flags);
788 }
789 EXPORT_SYMBOL_GPL(pm_relax);
790 
791 /**
792  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
793  * @t: Timer the wakeup source is associated with.
794  *
795  * Call wakeup_source_deactivate() for the wakeup source associated with @t
796  * if it is currently active, its timer has not been canceled and the
797  * expiration time of the timer is not in the future.
798  */
799 static void pm_wakeup_timer_fn(struct timer_list *t)
800 {
801 	struct wakeup_source *ws = from_timer(ws, t, timer);
802 	unsigned long flags;
803 
804 	spin_lock_irqsave(&ws->lock, flags);
805 
806 	if (ws->active && ws->timer_expires
807 	    && time_after_eq(jiffies, ws->timer_expires)) {
808 		wakeup_source_deactivate(ws);
809 		ws->expire_count++;
810 	}
811 
812 	spin_unlock_irqrestore(&ws->lock, flags);
813 }
814 
815 /**
816  * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
817  * @ws: Wakeup source object associated with the event source.
818  * @msec: Anticipated event processing time (in milliseconds).
819  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
820  *
821  * Notify the PM core of a wakeup event whose source is @ws that will take
822  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
823  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
824  * execute pm_wakeup_timer_fn() in the future.
825  *
826  * It is safe to call this function from interrupt context.
827  */
828 void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
829 {
830 	unsigned long flags;
831 	unsigned long expires;
832 
833 	if (!ws)
834 		return;
835 
836 	spin_lock_irqsave(&ws->lock, flags);
837 
838 	wakeup_source_report_event(ws, hard);
839 
840 	if (!msec) {
841 		wakeup_source_deactivate(ws);
842 		goto unlock;
843 	}
844 
845 	expires = jiffies + msecs_to_jiffies(msec);
846 	if (!expires)
847 		expires = 1;
848 
849 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
850 		mod_timer(&ws->timer, expires);
851 		ws->timer_expires = expires;
852 	}
853 
854  unlock:
855 	spin_unlock_irqrestore(&ws->lock, flags);
856 }
857 EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
858 
859 /**
860  * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
861  * @dev: Device the wakeup event is related to.
862  * @msec: Anticipated event processing time (in milliseconds).
863  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
864  *
865  * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
866  */
867 void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
868 {
869 	unsigned long flags;
870 
871 	if (!dev)
872 		return;
873 
874 	spin_lock_irqsave(&dev->power.lock, flags);
875 	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
876 	spin_unlock_irqrestore(&dev->power.lock, flags);
877 }
878 EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
879 
880 void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
881 {
882 	struct wakeup_source *ws, *last_active_ws = NULL;
883 	int len = 0;
884 	bool active = false;
885 
886 	rcu_read_lock();
887 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
888 		if (ws->active && len < max) {
889 			if (!active)
890 				len += scnprintf(pending_wakeup_source, max,
891 						"Pending Wakeup Sources: ");
892 			len += scnprintf(pending_wakeup_source + len, max - len,
893 				"%s ", ws->name);
894 			active = true;
895 		} else if (!active &&
896 			   (!last_active_ws ||
897 			    ktime_to_ns(ws->last_time) >
898 			    ktime_to_ns(last_active_ws->last_time))) {
899 			last_active_ws = ws;
900 		}
901 	}
902 	if (!active && last_active_ws) {
903 		scnprintf(pending_wakeup_source, max,
904 				"Last active Wakeup Source: %s",
905 				last_active_ws->name);
906 	}
907 	rcu_read_unlock();
908 }
909 EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
910 
911 void pm_print_active_wakeup_sources(void)
912 {
913 	struct wakeup_source *ws;
914 	int srcuidx, active = 0;
915 	struct wakeup_source *last_activity_ws = NULL;
916 
917 	srcuidx = srcu_read_lock(&wakeup_srcu);
918 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
919 		if (ws->active) {
920 			pm_pr_dbg("active wakeup source: %s\n", ws->name);
921 			active = 1;
922 		} else if (!active &&
923 			   (!last_activity_ws ||
924 			    ktime_to_ns(ws->last_time) >
925 			    ktime_to_ns(last_activity_ws->last_time))) {
926 			last_activity_ws = ws;
927 		}
928 	}
929 
930 	if (!active && last_activity_ws)
931 		pm_pr_dbg("last active wakeup source: %s\n",
932 			last_activity_ws->name);
933 	srcu_read_unlock(&wakeup_srcu, srcuidx);
934 }
935 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
936 
937 /**
938  * pm_wakeup_pending - Check if power transition in progress should be aborted.
939  *
940  * Compare the current number of registered wakeup events with its preserved
941  * value from the past and return true if new wakeup events have been registered
942  * since the old value was stored.  Also return true if the current number of
943  * wakeup events being processed is different from zero.
944  */
945 bool pm_wakeup_pending(void)
946 {
947 	unsigned long flags;
948 	bool ret = false;
949 	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
950 
951 	raw_spin_lock_irqsave(&events_lock, flags);
952 	if (events_check_enabled) {
953 		unsigned int cnt, inpr;
954 
955 		split_counters(&cnt, &inpr);
956 		ret = (cnt != saved_count || inpr > 0);
957 		events_check_enabled = !ret;
958 	}
959 	raw_spin_unlock_irqrestore(&events_lock, flags);
960 
961 	if (ret) {
962 		pm_pr_dbg("Wakeup pending, aborting suspend\n");
963 		pm_print_active_wakeup_sources();
964 		pm_get_active_wakeup_sources(suspend_abort,
965 					     MAX_SUSPEND_ABORT_LEN);
966 		log_suspend_abort_reason(suspend_abort);
967 		pr_info("PM: %s\n", suspend_abort);
968 	}
969 
970 	return ret || atomic_read(&pm_abort_suspend) > 0;
971 }
972 
973 void pm_system_wakeup(void)
974 {
975 	atomic_inc(&pm_abort_suspend);
976 	s2idle_wake();
977 }
978 EXPORT_SYMBOL_GPL(pm_system_wakeup);
979 
980 void pm_system_cancel_wakeup(void)
981 {
982 	atomic_dec_if_positive(&pm_abort_suspend);
983 }
984 
985 void pm_wakeup_clear(unsigned int irq_number)
986 {
987 	raw_spin_lock_irq(&wakeup_irq_lock);
988 
989 	if (irq_number && wakeup_irq[0] == irq_number)
990 		wakeup_irq[0] = wakeup_irq[1];
991 	else
992 		wakeup_irq[0] = 0;
993 
994 	wakeup_irq[1] = 0;
995 
996 	raw_spin_unlock_irq(&wakeup_irq_lock);
997 
998 	if (!irq_number)
999 		atomic_set(&pm_abort_suspend, 0);
1000 }
1001 
1002 void pm_system_irq_wakeup(unsigned int irq_number)
1003 {
1004 	unsigned long flags;
1005 
1006 	raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
1007 
1008 	if (wakeup_irq[0] == 0)
1009 		wakeup_irq[0] = irq_number;
1010 	else if (wakeup_irq[1] == 0)
1011 		wakeup_irq[1] = irq_number;
1012 	else
1013 		irq_number = 0;
1014 
1015 	raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
1016 
1017 	if (irq_number) {
1018 		struct irq_desc *desc;
1019 		const char *name = "null";
1020 
1021 		desc = irq_to_desc(irq_number);
1022 		if (desc == NULL)
1023 			name = "stray irq";
1024 		else if (desc->action && desc->action->name)
1025 			name = desc->action->name;
1026 
1027 		log_irq_wakeup_reason(irq_number);
1028 		pr_warn("%s: %d triggered %s\n", __func__, irq_number, name);
1029 		pm_system_wakeup();
1030 	}
1031 }
1032 
1033 unsigned int pm_wakeup_irq(void)
1034 {
1035 	return wakeup_irq[0];
1036 }
1037 
1038 /**
1039  * pm_get_wakeup_count - Read the number of registered wakeup events.
1040  * @count: Address to store the value at.
1041  * @block: Whether or not to block.
1042  *
1043  * Store the number of registered wakeup events at the address in @count.  If
1044  * @block is set, block until the current number of wakeup events being
1045  * processed is zero.
1046  *
1047  * Return 'false' if the current number of wakeup events being processed is
1048  * nonzero.  Otherwise return 'true'.
1049  */
1050 bool pm_get_wakeup_count(unsigned int *count, bool block)
1051 {
1052 	unsigned int cnt, inpr;
1053 
1054 	if (block) {
1055 		DEFINE_WAIT(wait);
1056 
1057 		for (;;) {
1058 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
1059 					TASK_INTERRUPTIBLE);
1060 			split_counters(&cnt, &inpr);
1061 			if (inpr == 0 || signal_pending(current))
1062 				break;
1063 			pm_print_active_wakeup_sources();
1064 			schedule();
1065 		}
1066 		finish_wait(&wakeup_count_wait_queue, &wait);
1067 	}
1068 
1069 	split_counters(&cnt, &inpr);
1070 	*count = cnt;
1071 	return !inpr;
1072 }
1073 
1074 /**
1075  * pm_save_wakeup_count - Save the current number of registered wakeup events.
1076  * @count: Value to compare with the current number of registered wakeup events.
1077  *
1078  * If @count is equal to the current number of registered wakeup events and the
1079  * current number of wakeup events being processed is zero, store @count as the
1080  * old number of registered wakeup events for pm_wakeup_pending(), enable
1081  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
1082  * detection and return 'false'.
1083  */
1084 bool pm_save_wakeup_count(unsigned int count)
1085 {
1086 	unsigned int cnt, inpr;
1087 	unsigned long flags;
1088 
1089 	events_check_enabled = false;
1090 	raw_spin_lock_irqsave(&events_lock, flags);
1091 	split_counters(&cnt, &inpr);
1092 	if (cnt == count && inpr == 0) {
1093 		saved_count = count;
1094 		events_check_enabled = true;
1095 	}
1096 	raw_spin_unlock_irqrestore(&events_lock, flags);
1097 	return events_check_enabled;
1098 }
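
/*
 * Together, pm_get_wakeup_count() and pm_save_wakeup_count() back the
 * /sys/power/wakeup_count handshake.  A minimal user space sketch of that
 * handshake (illustrative only; error handling omitted):
 *
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *	char buf[32];
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *		(may block until no wakeup events are in progress)
 *
 *	if (write(fd, buf, n) >= 0) {
 *		(no new wakeup events since the read - safe to suspend)
 *		int sfd = open("/sys/power/state", O_WRONLY);
 *		write(sfd, "mem", 3);
 *	}
 */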
1099 
1100 #ifdef CONFIG_PM_AUTOSLEEP
1101 /**
1102  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
1103  * @set: Whether to set or to clear the autosleep_enabled flags.
1104  */
1105 void pm_wakep_autosleep_enabled(bool set)
1106 {
1107 	struct wakeup_source *ws;
1108 	ktime_t now = ktime_get();
1109 	int srcuidx;
1110 
1111 	srcuidx = srcu_read_lock(&wakeup_srcu);
1112 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1113 		spin_lock_irq(&ws->lock);
1114 		if (ws->autosleep_enabled != set) {
1115 			ws->autosleep_enabled = set;
1116 			if (ws->active) {
1117 				if (set)
1118 					ws->start_prevent_time = now;
1119 				else
1120 					update_prevent_sleep_time(ws, now);
1121 			}
1122 		}
1123 		spin_unlock_irq(&ws->lock);
1124 	}
1125 	srcu_read_unlock(&wakeup_srcu, srcuidx);
1126 }
1127 #endif /* CONFIG_PM_AUTOSLEEP */
1128 
1129 /**
1130  * print_wakeup_source_stats - Print wakeup source statistics information.
1131  * @m: seq_file to print the statistics into.
1132  * @ws: Wakeup source object to print the statistics for.
1133  */
1134 static int print_wakeup_source_stats(struct seq_file *m,
1135 				     struct wakeup_source *ws)
1136 {
1137 	unsigned long flags;
1138 	ktime_t total_time;
1139 	ktime_t max_time;
1140 	unsigned long active_count;
1141 	ktime_t active_time;
1142 	ktime_t prevent_sleep_time;
1143 
1144 	spin_lock_irqsave(&ws->lock, flags);
1145 
1146 	total_time = ws->total_time;
1147 	max_time = ws->max_time;
1148 	prevent_sleep_time = ws->prevent_sleep_time;
1149 	active_count = ws->active_count;
1150 	if (ws->active) {
1151 		ktime_t now = ktime_get();
1152 
1153 		active_time = ktime_sub(now, ws->last_time);
1154 		total_time = ktime_add(total_time, active_time);
1155 		if (active_time > max_time)
1156 			max_time = active_time;
1157 
1158 		if (ws->autosleep_enabled)
1159 			prevent_sleep_time = ktime_add(prevent_sleep_time,
1160 				ktime_sub(now, ws->start_prevent_time));
1161 	} else {
1162 		active_time = 0;
1163 	}
1164 
1165 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1166 		   ws->name, active_count, ws->event_count,
1167 		   ws->wakeup_count, ws->expire_count,
1168 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1169 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1170 		   ktime_to_ms(prevent_sleep_time));
1171 
1172 	spin_unlock_irqrestore(&ws->lock, flags);
1173 
1174 	return 0;
1175 }
1176 
1177 static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1178 					loff_t *pos)
1179 {
1180 	struct wakeup_source *ws;
1181 	loff_t n = *pos;
1182 	int *srcuidx = m->private;
1183 
1184 	if (n == 0) {
1185 		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1186 			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1187 			"last_change\tprevent_suspend_time\n");
1188 	}
1189 
1190 	*srcuidx = srcu_read_lock(&wakeup_srcu);
1191 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1192 		if (n-- <= 0)
1193 			return ws;
1194 	}
1195 
1196 	return NULL;
1197 }
1198 
1199 static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1200 					void *v, loff_t *pos)
1201 {
1202 	struct wakeup_source *ws = v;
1203 	struct wakeup_source *next_ws = NULL;
1204 
1205 	++(*pos);
1206 
1207 	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1208 		next_ws = ws;
1209 		break;
1210 	}
1211 
1212 	if (!next_ws)
1213 		print_wakeup_source_stats(m, &deleted_ws);
1214 
1215 	return next_ws;
1216 }
1217 
1218 static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1219 {
1220 	int *srcuidx = m->private;
1221 
1222 	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1223 }
1224 
1225 /**
1226  * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1227  * @m: seq_file to print the statistics into.
1228  * @v: wakeup_source of each iteration
1229  */
1230 static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1231 {
1232 	struct wakeup_source *ws = v;
1233 
1234 	print_wakeup_source_stats(m, ws);
1235 
1236 	return 0;
1237 }
1238 
1239 static const struct seq_operations wakeup_sources_stats_seq_ops = {
1240 	.start = wakeup_sources_stats_seq_start,
1241 	.next  = wakeup_sources_stats_seq_next,
1242 	.stop  = wakeup_sources_stats_seq_stop,
1243 	.show  = wakeup_sources_stats_seq_show,
1244 };
1245 
1246 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1247 {
1248 	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1249 }
1250 
1251 static const struct file_operations wakeup_sources_stats_fops = {
1252 	.owner = THIS_MODULE,
1253 	.open = wakeup_sources_stats_open,
1254 	.read = seq_read,
1255 	.llseek = seq_lseek,
1256 	.release = seq_release_private,
1257 };
1258 
1259 static int __init wakeup_sources_debugfs_init(void)
1260 {
1261 	debugfs_create_file("wakeup_sources", 0444, NULL, NULL,
1262 			    &wakeup_sources_stats_fops);
1263 	return 0;
1264 }
1265 
1266 postcore_initcall(wakeup_sources_debugfs_init);
1267