/*
 * drivers/power/process.c - Functions for starting/stopping processes on
 * suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/wakeup_reason.h>

/*
 * Timeout for stopping processes
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

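/**
 * try_to_freeze_tasks - Send freeze requests to tasks and wait for them.
 * @user_only: If set, only user space tasks are considered; otherwise
 *             freezable kernel threads and workqueues are frozen as well.
 *
 * Walk the task list asking every freezable task to enter the refrigerator,
 * then retry with exponential backoff (1 ms up to 8 ms) until all tasks are
 * frozen, a wakeup event is pending, or freeze_timeout_msecs expires.
 *
 * Returns 0 on success, -EBUSY if some tasks could not be frozen in time.
 */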
static int try_to_freeze_tasks(bool user_only)
{
        struct task_struct *g, *p;
        unsigned long end_time;
        unsigned int todo;
        bool wq_busy = false;
        struct timeval start, end;
        u64 elapsed_msecs64;
        unsigned int elapsed_msecs;
        bool wakeup = false;
        int sleep_usecs = USEC_PER_MSEC;
#ifdef CONFIG_PM_SLEEP
        char suspend_abort[MAX_SUSPEND_ABORT_LEN];
#endif

        do_gettimeofday(&start);

        end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

        if (!user_only)
                freeze_workqueues_begin();

        while (true) {
                todo = 0;
                read_lock(&tasklist_lock);
                for_each_process_thread(g, p) {
                        if (p == current || !freeze_task(p))
                                continue;

                        if (!freezer_should_skip(p))
                                todo++;
                }
                read_unlock(&tasklist_lock);

                if (!user_only) {
                        wq_busy = freeze_workqueues_busy();
                        todo += wq_busy;
                }

                if (!todo || time_after(jiffies, end_time))
                        break;

                if (pm_wakeup_pending()) {
#ifdef CONFIG_PM_SLEEP
                        pm_get_active_wakeup_sources(suspend_abort,
                                MAX_SUSPEND_ABORT_LEN);
                        log_suspend_abort_reason(suspend_abort);
#endif
                        wakeup = true;
                        break;
                }

                /*
                 * We need to retry, but first give the freezing tasks some
                 * time to enter the refrigerator.  Start with an initial
                 * 1 ms sleep followed by exponential backoff until 8 ms.
                 */
                usleep_range(sleep_usecs / 2, sleep_usecs);
                if (sleep_usecs < 8 * USEC_PER_MSEC)
                        sleep_usecs *= 2;
        }

        do_gettimeofday(&end);
        elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
        do_div(elapsed_msecs64, NSEC_PER_MSEC);
        elapsed_msecs = elapsed_msecs64;

        if (wakeup) {
                printk("\n");
                printk(KERN_ERR "Freezing of tasks aborted after %d.%03d seconds",
                       elapsed_msecs / 1000, elapsed_msecs % 1000);
        } else if (todo) {
                printk("\n");
                printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
                       " (%d tasks refusing to freeze, wq_busy=%d):\n",
                       elapsed_msecs / 1000, elapsed_msecs % 1000,
                       todo - wq_busy, wq_busy);

                read_lock(&tasklist_lock);
                for_each_process_thread(g, p) {
                        if (p != current && !freezer_should_skip(p)
                            && freezing(p) && !frozen(p))
                                sched_show_task(p);
                }
                read_unlock(&tasklist_lock);
        } else {
                printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
                        elapsed_msecs % 1000);
        }

        return todo ? -EBUSY : 0;
}

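/*
 * Check that no freezable task other than the current one is still unfrozen.
 * Called with tasklist_lock held by check_frozen_processes() below.
 */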
static bool __check_frozen_processes(void)
{
        struct task_struct *g, *p;

        for_each_process_thread(g, p)
                if (p != current && !freezer_should_skip(p) && !frozen(p))
                        return false;

        return true;
}

/*
 * Returns true if all freezable tasks (except for current) are frozen already
 */
static bool check_frozen_processes(void)
{
        bool ret;

        read_lock(&tasklist_lock);
        ret = __check_frozen_processes();
        read_unlock(&tasklist_lock);
        return ret;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
int freeze_processes(void)
{
        int error;
        int oom_kills_saved;

        error = __usermodehelper_disable(UMH_FREEZING);
        if (error)
                return error;

        /* Make sure this task doesn't get frozen */
        current->flags |= PF_SUSPEND_TASK;

        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);

        pm_wakeup_clear();
        printk("Freezing user space processes ... ");
        pm_freezing = true;
        oom_kills_saved = oom_kills_count();
        error = try_to_freeze_tasks(true);
        if (!error) {
                __usermodehelper_set_disable_depth(UMH_DISABLED);
                oom_killer_disable();

                /*
                 * There might have been an OOM kill while we were
                 * freezing tasks and the killed task might be still
                 * on the way out so we have to double check for race.
                 */
                if (oom_kills_count() != oom_kills_saved &&
                    !check_frozen_processes()) {
                        __usermodehelper_set_disable_depth(UMH_ENABLED);
                        printk("OOM in progress.");
                        error = -EBUSY;
                } else {
                        printk("done.");
                }
        }
        printk("\n");
        BUG_ON(in_atomic());

        if (error)
                thaw_processes();
        return error;
}

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, -errno and only the kernel threads are
 * thawed, so as to give a chance to the caller to do additional cleanups
 * (if any) before thawing the userspace tasks.  So, it is the responsibility
 * of the caller to thaw the userspace tasks, when the time is right.
 */
int freeze_kernel_threads(void)
{
        int error;

        printk("Freezing remaining freezable tasks ... ");
        pm_nosig_freezing = true;
        error = try_to_freeze_tasks(false);
        if (!error)
                printk("done.");

        printk("\n");
        BUG_ON(in_atomic());

        if (error)
                thaw_kernel_threads();
        return error;
}

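/**
 * thaw_processes - Thaw all frozen tasks.
 *
 * Undo freeze_processes(): clear the global freezing state, re-enable the
 * OOM killer and usermode helpers, thaw workqueues and wake every frozen
 * task.  Must be called by the same task that called freeze_processes(),
 * which is the only one allowed to carry PF_SUSPEND_TASK here.
 */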
void thaw_processes(void)
{
        struct task_struct *g, *p;
        struct task_struct *curr = current;

        trace_suspend_resume(TPS("thaw_processes"), 0, true);
        if (pm_freezing)
                atomic_dec(&system_freezing_cnt);
        pm_freezing = false;
        pm_nosig_freezing = false;

        oom_killer_enable();

        printk("Restarting tasks ... ");

        __usermodehelper_set_disable_depth(UMH_FREEZING);
        thaw_workqueues();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                /* No other threads should have PF_SUSPEND_TASK set */
                WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
                __thaw_task(p);
        }
        read_unlock(&tasklist_lock);

        WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
        curr->flags &= ~PF_SUSPEND_TASK;

        usermodehelper_enable();

        schedule();
        printk("done.\n");
        trace_suspend_resume(TPS("thaw_processes"), 0, false);
}

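/**
 * thaw_kernel_threads - Thaw kernel threads and workqueue workers only.
 *
 * Counterpart of freeze_kernel_threads(): clear pm_nosig_freezing, thaw
 * workqueues and wake up tasks with PF_KTHREAD or PF_WQ_WORKER set, leaving
 * user space tasks frozen.
 */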
void thaw_kernel_threads(void)
{
        struct task_struct *g, *p;

        pm_nosig_freezing = false;
        printk("Restarting kernel threads ... ");

        thaw_workqueues();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
                        __thaw_task(p);
        }
        read_unlock(&tasklist_lock);

        schedule();
        printk("done.\n");
}