• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * kernel/power/wakelock.c
3  *
4  * User space wakeup sources support.
5  *
6  * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
7  *
8  * This code is based on the analogous interface allowing user space to
9  * manipulate wakelocks on Android.
10  */
11 
12 #include <linux/capability.h>
13 #include <linux/ctype.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/hrtimer.h>
17 #include <linux/list.h>
18 #include <linux/rbtree.h>
19 #include <linux/slab.h>
20 #include <linux/workqueue.h>
21 
22 #include "power.h"
23 
24 static DEFINE_MUTEX(wakelocks_lock);
25 
struct wakelock {
	char			*name;	/* Wakelock name; owned by this object, freed on GC. */
	struct rb_node		node;	/* Entry in wakelocks_tree, ordered by name. */
	struct wakeup_source	ws;	/* Backing wakeup source registered with the PM core. */
#ifdef CONFIG_PM_WAKELOCKS_GC
	struct list_head	lru;	/* Entry in wakelocks_lru_list (head = most recent). */
#endif
};
34 
35 static struct rb_root wakelocks_tree = RB_ROOT;
36 
pm_show_wakelocks(char * buf,bool show_active)37 ssize_t pm_show_wakelocks(char *buf, bool show_active)
38 {
39 	struct rb_node *node;
40 	struct wakelock *wl;
41 	int len = 0;
42 
43 	mutex_lock(&wakelocks_lock);
44 
45 	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
46 		wl = rb_entry(node, struct wakelock, node);
47 		if (wl->ws.active == show_active)
48 			len += sysfs_emit_at(buf, len, "%s ", wl->name);
49 	}
50 	len += sysfs_emit_at(buf, len, "\n");
51 
52 	mutex_unlock(&wakelocks_lock);
53 	return len;
54 }
55 
56 #if CONFIG_PM_WAKELOCKS_LIMIT > 0
57 static unsigned int number_of_wakelocks;
58 
wakelocks_limit_exceeded(void)59 static inline bool wakelocks_limit_exceeded(void)
60 {
61 	return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
62 }
63 
increment_wakelocks_number(void)64 static inline void increment_wakelocks_number(void)
65 {
66 	number_of_wakelocks++;
67 }
68 
decrement_wakelocks_number(void)69 static inline void decrement_wakelocks_number(void)
70 {
71 	number_of_wakelocks--;
72 }
73 #else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
wakelocks_limit_exceeded(void)74 static inline bool wakelocks_limit_exceeded(void) { return false; }
increment_wakelocks_number(void)75 static inline void increment_wakelocks_number(void) {}
decrement_wakelocks_number(void)76 static inline void decrement_wakelocks_number(void) {}
77 #endif /* CONFIG_PM_WAKELOCKS_LIMIT */
78 
79 #ifdef CONFIG_PM_WAKELOCKS_GC
80 #define WL_GC_COUNT_MAX	100
81 #define WL_GC_TIME_SEC	300
82 
83 static void __wakelocks_gc(struct work_struct *work);
84 static LIST_HEAD(wakelocks_lru_list);
85 static DECLARE_WORK(wakelock_work, __wakelocks_gc);
86 static unsigned int wakelocks_gc_count;
87 
/* Insert a newly created wakelock at the most-recently-used end of the list. */
static inline void wakelocks_lru_add(struct wakelock *wl)
{
	list_add(&wl->lru, &wakelocks_lru_list);
}
92 
/* Mark @wl as most recently used by moving it to the head of the LRU list. */
static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
	list_move(&wl->lru, &wakelocks_lru_list);
}
97 
/*
 * __wakelocks_gc - Free wakelocks that have been idle for too long.
 * @work: Work item this routine runs from (unused).
 *
 * Walk the LRU list from its least-recently-used end and destroy every
 * inactive wakelock whose wakeup source has not been touched for at
 * least WL_GC_TIME_SEC seconds.  Runs from the system workqueue, queued
 * by wakelocks_gc().
 */
static void __wakelocks_gc(struct work_struct *work)
{
	struct wakelock *wl, *aux;
	ktime_t now;

	mutex_lock(&wakelocks_lock);

	now = ktime_get();
	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
		u64 idle_time_ns;
		bool active;

		/* Snapshot the wakeup source state under its own spinlock. */
		spin_lock_irq(&wl->ws.lock);
		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
		active = wl->ws.active;
		spin_unlock_irq(&wl->ws.lock);

		/*
		 * The list is ordered most-recent-first, so once one entry is
		 * too recent to collect, all the remaining ones are too.
		 */
		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
			break;

		if (!active) {
			/* Unregister the wakeup source, then tear down the
			 * wakelock and its name (both allocated in
			 * wakelock_lookup_add()). */
			wakeup_source_remove(&wl->ws);
			rb_erase(&wl->node, &wakelocks_tree);
			list_del(&wl->lru);
			kfree(wl->name);
			kfree(wl);
			decrement_wakelocks_number();
		}
	}
	/* Restart the operation count that triggers the next GC pass. */
	wakelocks_gc_count = 0;

	mutex_unlock(&wakelocks_lock);
}
131 
wakelocks_gc(void)132 static void wakelocks_gc(void)
133 {
134 	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
135 		return;
136 
137 	schedule_work(&wakelock_work);
138 }
139 #else /* !CONFIG_PM_WAKELOCKS_GC */
/* Garbage collection disabled: LRU bookkeeping and GC become no-ops. */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
143 #endif /* !CONFIG_PM_WAKELOCKS_GC */
144 
/*
 * wakelock_lookup_add - Look up a wakelock by name, optionally creating it.
 * @name: Wakelock name (only the first @len characters are significant).
 * @len: Length of the name to match.
 * @add_if_not_found: If true, allocate and register a new wakelock on a miss.
 *
 * Must be called with wakelocks_lock held.  Returns the wakelock on
 * success, or an ERR_PTR() value: -EINVAL on a miss when
 * @add_if_not_found is unset, -ENOSPC when the wakelock limit would be
 * exceeded, or -ENOMEM on allocation failure.
 */
static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
					    bool add_if_not_found)
{
	struct rb_node **node = &wakelocks_tree.rb_node;
	struct rb_node *parent = *node;
	struct wakelock *wl;

	while (*node) {
		int diff;

		parent = *node;
		wl = rb_entry(*node, struct wakelock, node);
		diff = strncmp(name, wl->name, len);
		if (diff == 0) {
			/*
			 * The first @len characters match.  If the stored name
			 * ends here too, this is the wakelock we want;
			 * otherwise @name is a proper prefix of wl->name and
			 * therefore sorts before it, so descend left.
			 */
			if (wl->name[len])
				diff = -1;
			else
				return wl;
		}
		if (diff < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}
	if (!add_if_not_found)
		return ERR_PTR(-EINVAL);

	if (wakelocks_limit_exceeded())
		return ERR_PTR(-ENOSPC);

	/* Not found, we have to add a new one. */
	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
	if (!wl)
		return ERR_PTR(-ENOMEM);

	wl->name = kstrndup(name, len, GFP_KERNEL);
	if (!wl->name) {
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	wl->ws.name = wl->name;
	wakeup_source_add(&wl->ws);
	rb_link_node(&wl->node, parent, node);
	rb_insert_color(&wl->node, &wakelocks_tree);
	wakelocks_lru_add(wl);
	increment_wakelocks_number();
	return wl;
}
193 
/*
 * pm_wake_lock - Activate (and possibly create) a user space wakelock.
 * @buf: Wakelock name, optionally followed by whitespace and a decimal
 *	 timeout in nanoseconds.
 *
 * Activates the wakeup source backing the named wakelock, creating the
 * wakelock first if it does not exist yet.  With a timeout, the wakeup
 * source is armed for that long only (rounded up to whole milliseconds);
 * without one, it stays active until pm_wake_unlock().
 *
 * Returns 0 on success, -EPERM if the caller lacks CAP_BLOCK_SUSPEND,
 * -EINVAL on a malformed string, or a negative error code from
 * wakelock_lookup_add().
 */
int pm_wake_lock(const char *buf)
{
	const char *str = buf;
	struct wakelock *wl;
	u64 timeout_ns = 0;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	/* The wakelock name extends up to the first whitespace character. */
	while (*str && !isspace(*str))
		str++;

	len = str - buf;
	if (!len)
		return -EINVAL;

	if (*str && *str != '\n') {
		/* Find out if there's a valid timeout string appended. */
		ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
		if (ret)
			return -EINVAL;
	}

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, true);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	if (timeout_ns) {
		/* Convert the timeout to milliseconds, rounding up. */
		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;

		do_div(timeout_ms, NSEC_PER_MSEC);
		__pm_wakeup_event(&wl->ws, timeout_ms);
	} else {
		__pm_stay_awake(&wl->ws);
	}

	wakelocks_lru_most_recent(wl);

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}
241 
pm_wake_unlock(const char * buf)242 int pm_wake_unlock(const char *buf)
243 {
244 	struct wakelock *wl;
245 	size_t len;
246 	int ret = 0;
247 
248 	if (!capable(CAP_BLOCK_SUSPEND))
249 		return -EPERM;
250 
251 	len = strlen(buf);
252 	if (!len)
253 		return -EINVAL;
254 
255 	if (buf[len-1] == '\n')
256 		len--;
257 
258 	if (!len)
259 		return -EINVAL;
260 
261 	mutex_lock(&wakelocks_lock);
262 
263 	wl = wakelock_lookup_add(buf, len, false);
264 	if (IS_ERR(wl)) {
265 		ret = PTR_ERR(wl);
266 		goto out;
267 	}
268 	__pm_relax(&wl->ws);
269 
270 	wakelocks_lru_most_recent(wl);
271 	wakelocks_gc();
272 
273  out:
274 	mutex_unlock(&wakelocks_lock);
275 	return ret;
276 }
277