/*
 * kernel/power/wakeup_reason.c
 *
 * Logs the reasons which caused the kernel to resume from
 * the suspend mode.
 *
 * Copyright (C) 2020 Google, Inc.
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/wakeup_reason.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/slab.h>

/*
 * struct wakeup_irq_node - stores data and relationships for IRQs logged as
 * either base or nested wakeup reasons during suspend/resume flow.
 * @siblings - for membership on leaf or parent IRQ lists
 * @irq      - the IRQ number
 * @irq_name - the name associated with the IRQ, or a default if none
 */
struct wakeup_irq_node {
	struct list_head siblings;
	int irq;
	const char *irq_name;
};

enum wakeup_reason_flag {
	RESUME_NONE = 0,
	RESUME_IRQ,
	RESUME_ABORT,
	RESUME_ABNORMAL,
};

static DEFINE_SPINLOCK(wakeup_reason_lock);

static LIST_HEAD(leaf_irqs);   /* kept sorted by ascending IRQ number */
static LIST_HEAD(parent_irqs); /* unordered */

static struct kmem_cache *wakeup_irq_nodes_cache;

static const char *default_irq_name = "(unnamed)";

static struct kobject *kobj;

static bool capture_reasons;
static int wakeup_reason;
static char non_irq_wake_reason[MAX_SUSPEND_ABORT_LEN];

static ktime_t last_monotime; /* monotonic time before last suspend */
static ktime_t curr_monotime; /* monotonic time after last suspend */
static ktime_t last_stime; /* monotonic boottime offset before last suspend */
static ktime_t curr_stime; /* monotonic boottime offset after last suspend */

static void init_node(struct wakeup_irq_node *p, int irq)
{
	struct irq_desc *desc;

	INIT_LIST_HEAD(&p->siblings);

	p->irq = irq;
	desc = irq_to_desc(irq);
	if (desc && desc->action && desc->action->name)
		p->irq_name = desc->action->name;
	else
		p->irq_name = default_irq_name;
}

static struct wakeup_irq_node *create_node(int irq)
{
	struct wakeup_irq_node *result;

	result = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
	if (unlikely(!result))
		pr_warn("Failed to log wakeup IRQ %d\n", irq);
	else
		init_node(result, irq);

	return result;
}

static void delete_list(struct list_head *head)
{
	struct wakeup_irq_node *n;

	while (!list_empty(head)) {
		n = list_first_entry(head, struct wakeup_irq_node, siblings);
		list_del(&n->siblings);
		kmem_cache_free(wakeup_irq_nodes_cache, n);
	}
}

static bool add_sibling_node_sorted(struct list_head *head, int irq)
{
	struct wakeup_irq_node *n = NULL;
	struct list_head *predecessor = head;

	if (unlikely(WARN_ON(!head)))
		return false;

	if (!list_empty(head))
		list_for_each_entry(n, head, siblings) {
			if (n->irq < irq)
				predecessor = &n->siblings;
			else if (n->irq == irq)
				return true;
			else
				break;
		}

	n = create_node(irq);
	if (n) {
		list_add(&n->siblings, predecessor);
		return true;
	}

	return false;
}

static struct wakeup_irq_node *find_node_in_list(struct list_head *head,
						 int irq)
{
	struct wakeup_irq_node *n;

	if (unlikely(WARN_ON(!head)))
		return NULL;

	list_for_each_entry(n, head, siblings)
		if (n->irq == irq)
			return n;

	return NULL;
}

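/*
 * Illustrative call site (an assumption, not defined in this file): the IRQ
 * core reports the hardware IRQ it identified as the wakeup source while the
 * system was suspended, roughly like:
 *
 *	log_irq_wakeup_reason(irq_desc_get_irq(desc));
 */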
void log_irq_wakeup_reason(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);
	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (!capture_reasons) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (find_node_in_list(&parent_irqs, irq) == NULL)
		add_sibling_node_sorted(&leaf_irqs, irq);

	wakeup_reason = RESUME_IRQ;
	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}

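/*
 * Illustrative usage (an assumption, not from this file): a demultiplexing
 * interrupt controller driver, e.g. a GPIO expander whose summary IRQ fans
 * out to per-pin child IRQs, can attribute the wakeup to the child so the
 * summary IRQ is demoted to the parent list:
 *
 *	log_threaded_irq_wakeup_reason(child_irq, summary_irq);
 */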
void log_threaded_irq_wakeup_reason(int irq, int parent_irq)
{
	struct wakeup_irq_node *parent;
	unsigned long flags;

	/*
	 * Intentionally unsynchronized.  Calls that come in after we have
	 * resumed should have a fast exit path since there's no work to be
	 * done, and any coherence issue that could cause a wrong value here
	 * is both highly improbable - given the set/clear timing - and very
	 * low impact (parent IRQ gets logged instead of the specific child).
	 */
	if (!capture_reasons)
		return;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (!capture_reasons || (find_node_in_list(&leaf_irqs, irq) != NULL)) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	parent = find_node_in_list(&parent_irqs, parent_irq);
	if (parent != NULL)
		add_sibling_node_sorted(&leaf_irqs, irq);
	else {
		parent = find_node_in_list(&leaf_irqs, parent_irq);
		if (parent != NULL) {
			list_del_init(&parent->siblings);
			list_add_tail(&parent->siblings, &parent_irqs);
			add_sibling_node_sorted(&leaf_irqs, irq);
		}
	}

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}
EXPORT_SYMBOL_GPL(log_threaded_irq_wakeup_reason);

static void __log_abort_or_abnormal_wake(bool abort, const char *fmt,
					 va_list args)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	/* Suspend abort or abnormal wake reason has already been logged. */
	if (wakeup_reason != RESUME_NONE) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (abort)
		wakeup_reason = RESUME_ABORT;
	else
		wakeup_reason = RESUME_ABNORMAL;

	vsnprintf(non_irq_wake_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}

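/*
 * Illustrative usage (an assumption, not from this file): a platform suspend
 * hook that finds a wakeup interrupt already pending while entering suspend
 * might record it before aborting the transition:
 *
 *	if (pending_irq >= 0) {
 *		log_suspend_abort_reason("Pending IRQ %d", pending_irq);
 *		return -EBUSY;
 *	}
 */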
void log_suspend_abort_reason(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__log_abort_or_abnormal_wake(true, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(log_suspend_abort_reason);

void log_abnormal_wakeup_reason(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__log_abort_or_abnormal_wake(false, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(log_abnormal_wakeup_reason);

void clear_wakeup_reasons(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	delete_list(&leaf_irqs);
	delete_list(&parent_irqs);
	wakeup_reason = RESUME_NONE;
	capture_reasons = true;

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}

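/*
 * Illustrative dmesg output on resume, derived from the pr_info() formats
 * below (the IRQ number, name, and abort string are made up):
 *
 *	Resume caused by IRQ 170, gpio_keys
 *
 * or, if suspend was aborted:
 *
 *	Abort: Pending IRQ 170
 */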
static void print_wakeup_sources(void)
{
	struct wakeup_irq_node *n;
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	capture_reasons = false;

	if (wakeup_reason == RESUME_ABORT) {
		pr_info("Abort: %s\n", non_irq_wake_reason);
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
		list_for_each_entry(n, &leaf_irqs, siblings)
			pr_info("Resume caused by IRQ %d, %s\n", n->irq,
				n->irq_name);
	else if (wakeup_reason == RESUME_ABNORMAL)
		pr_info("Resume caused by %s\n", non_irq_wake_reason);
	else
		pr_info("Resume cause unknown\n");

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}

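/*
 * Illustrative output of /sys/kernel/wakeup_reasons/last_resume_reason,
 * derived from the scnprintf() formats below (IRQ numbers and names are
 * made up): one "<irq> <name>" line per leaf wakeup IRQ, or a single
 * "Abort: <reason>" / "-1 <reason>" line for non-IRQ reasons:
 *
 *	$ cat /sys/kernel/wakeup_reasons/last_resume_reason
 *	170 gpio_keys
 *	201 alarmtimer
 */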
static ssize_t last_resume_reason_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	ssize_t buf_offset = 0;
	struct wakeup_irq_node *n;
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	if (wakeup_reason == RESUME_ABORT) {
		buf_offset = scnprintf(buf, PAGE_SIZE, "Abort: %s",
				       non_irq_wake_reason);
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return buf_offset;
	}

	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
		list_for_each_entry(n, &leaf_irqs, siblings)
			buf_offset += scnprintf(buf + buf_offset,
						PAGE_SIZE - buf_offset,
						"%d %s\n", n->irq, n->irq_name);
	else if (wakeup_reason == RESUME_ABNORMAL)
		buf_offset = scnprintf(buf, PAGE_SIZE, "-1 %s",
				       non_irq_wake_reason);

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);

	return buf_offset;
}

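/*
 * Illustrative output of /sys/kernel/wakeup_reasons/last_suspend_time
 * (values are made up): the first number is the time spent executing the
 * suspend/resume machinery itself, the second the time actually slept:
 *
 *	$ cat /sys/kernel/wakeup_reasons/last_suspend_time
 *	0.043219812 12.500381204
 */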
static ssize_t last_suspend_time_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	struct timespec64 sleep_time;
	struct timespec64 total_time;
	struct timespec64 suspend_resume_time;

	/*
	 * total_time is calculated from monotonic boot offsets because,
	 * unlike CLOCK_MONOTONIC, they include the time spent in the
	 * suspend state.
	 */
	total_time = ktime_to_timespec64(ktime_sub(curr_stime, last_stime));

	/*
	 * suspend_resume_time is the monotonic (CLOCK_MONOTONIC) interval
	 * between entering suspend and returning from it; since
	 * CLOCK_MONOTONIC does not advance while suspended, this covers
	 * only the suspend/resume path itself.
	 */
	suspend_resume_time =
		ktime_to_timespec64(ktime_sub(curr_monotime, last_monotime));

	/* sleep_time = total_time - suspend_resume_time */
	sleep_time = timespec64_sub(total_time, suspend_resume_time);

	/* Export suspend_resume_time and sleep_time as a pair. */
	return sprintf(buf, "%llu.%09lu %llu.%09lu\n",
		       (unsigned long long)suspend_resume_time.tv_sec,
		       suspend_resume_time.tv_nsec,
		       (unsigned long long)sleep_time.tv_sec,
		       sleep_time.tv_nsec);
}

static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);

static struct attribute *attrs[] = {
	&resume_reason.attr,
	&suspend_time.attr,
	NULL,
};
static struct attribute_group attr_group = {
	.attrs = attrs,
};

/* Detects a suspend and clears all previous wakeup reasons. */
static int wakeup_reason_pm_event(struct notifier_block *notifier,
		unsigned long pm_event, void *unused)
{
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		/* monotonic time since boot */
		last_monotime = ktime_get();
		/* monotonic time since boot including the time spent in suspend */
		last_stime = ktime_get_boottime();
		clear_wakeup_reasons();
		break;
	case PM_POST_SUSPEND:
		/* monotonic time since boot */
		curr_monotime = ktime_get();
		/* monotonic time since boot including the time spent in suspend */
		curr_stime = ktime_get_boottime();
		print_wakeup_sources();
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block wakeup_reason_pm_notifier_block = {
	.notifier_call = wakeup_reason_pm_event,
};

static int __init wakeup_reason_init(void)
{
	if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
		pr_warn("[%s] failed to register PM notifier\n", __func__);
		goto fail;
	}

	kobj = kobject_create_and_add("wakeup_reasons", kernel_kobj);
	if (!kobj) {
		pr_warn("[%s] failed to create a sysfs kobject\n", __func__);
		goto fail_unregister_pm_notifier;
	}

	if (sysfs_create_group(kobj, &attr_group)) {
		pr_warn("[%s] failed to create a sysfs group\n", __func__);
		goto fail_kobject_put;
	}

	wakeup_irq_nodes_cache =
		kmem_cache_create("wakeup_irq_node_cache",
				  sizeof(struct wakeup_irq_node), 0, 0, NULL);
	if (!wakeup_irq_nodes_cache)
		goto fail_remove_group;

	return 0;

fail_remove_group:
	sysfs_remove_group(kobj, &attr_group);
fail_kobject_put:
	kobject_put(kobj);
fail_unregister_pm_notifier:
	unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
fail:
	return 1;
}

late_initcall(wakeup_reason_init);