1 /* Freezer declarations */
2
3 #ifndef FREEZER_H_INCLUDED
4 #define FREEZER_H_INCLUDED
5
6 #include <linux/debug_locks.h>
7 #include <linux/sched.h>
8 #include <linux/wait.h>
9 #include <linux/atomic.h>
10
11 #ifdef CONFIG_FREEZER
12 extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
13 extern bool pm_freezing; /* PM freezing in effect */
14 extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
15
16 /*
17 * Check if a process has been frozen
18 */
frozen(struct task_struct * p)19 static inline bool frozen(struct task_struct *p)
20 {
21 return p->flags & PF_FROZEN;
22 }
23
24 extern bool freezing_slow_path(struct task_struct *p);
25
/*
 * Check if there is a request to freeze a process
 *
 * Fast path: while no freezing condition exists anywhere in the system
 * (system_freezing_cnt == 0), answer %false without touching any
 * per-task state.  Only when some freezing condition is in effect do we
 * fall back to freezing_slow_path() for the per-task checks (defined
 * out of line in kernel/freezer.c -- not visible in this header).
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}
35
36 /* Takes and releases task alloc lock using task_lock() */
37 extern void __thaw_task(struct task_struct *t);
38
39 extern bool __refrigerator(bool check_kthr_stop);
40 extern int freeze_processes(void);
41 extern int freeze_kernel_threads(void);
42 extern void thaw_processes(void);
43 extern void thaw_kernel_threads(void);
44
/*
 * HACK: prevent sleeping while atomic warnings due to ARM signal handling
 * disabling irqs
 *
 * Same as try_to_freeze() but without the debug_check_no_locks_held()
 * call (and without try_to_freeze_unsafe()'s might_sleep()), so it can
 * be used from the path described above without triggering warnings.
 */
static inline bool try_to_freeze_nowarn(void)
{
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}
55
/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 *
 * "Unsafe" because, unlike try_to_freeze(), it does not verify via
 * debug_check_no_locks_held() that the caller holds no locks before
 * potentially blocking in __refrigerator().
 */
static inline bool try_to_freeze_unsafe(void)
{
	might_sleep();	/* entering the refrigerator blocks */
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}
67
/*
 * try_to_freeze - freeze %current if a freezing condition is pending
 *
 * Returns %false when no freezing condition is in effect, otherwise the
 * return value of __refrigerator().  Freezable tasks (those without
 * %PF_NOFREEZE) must not hold any locks here; lockdep verifies that via
 * debug_check_no_locks_held().
 */
static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();
	return try_to_freeze_unsafe();
}
74
75 extern bool freeze_task(struct task_struct *p);
76 extern bool set_freezable(void);
77
#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
/* Without the cgroup freezer, no task can ever be cgroup-frozen. */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
86
87 /*
88 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
89 * calls wait_for_completion(&vfork) and reset right after it returns from this
90 * function. Next, the parent should call try_to_freeze() to freeze itself
91 * appropriately in case the child has exited before the freezing of tasks is
92 * complete. However, we don't want kernel threads to be frozen in unexpected
93 * places, so we allow them to block freeze_processes() instead or to set
94 * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
95 * parent won't really block freeze_processes(), since ____call_usermodehelper()
96 * (the child) does a little before exec/exit and it can't be frozen before
97 * waking up the parent.
98 */
99
100
/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, freezer[_do_not]_count() pair
 * wrap a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}
116
/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count(). It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	/* Freeze right away if a freezing condition took effect meanwhile. */
	try_to_freeze();
}
135
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/* Pairs with the smp_mb() in freezer_should_skip(); see freezer_count(). */
	smp_mb();
	/* "Unsafe": skips the lockdep held-locks check done by try_to_freeze(). */
	try_to_freeze_unsafe();
}
143
/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false. This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
166
/*
 * These functions are intended to be used whenever you want to allow a task
 * that's sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen.
 * Note that none of them gives any clear indication of whether a freeze
 * event happened while in this function.
 */
173
/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	/* While asleep, let the freezer treat this task as already frozen. */
	freezer_do_not_count();
	schedule();
	/* Back on a CPU: freeze now if a freezing condition arose meanwhile. */
	freezer_count();
}
181
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	/* "Unsafe": freezer_count_unsafe() skips the lockdep held-locks check. */
	freezer_count_unsafe();
}
189
/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout(timeout);
	freezer_count();
	return __retval;
}
202
/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.
 * Do not call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long remaining;

	/* Mark ourselves skippable for the duration of the sleep. */
	freezer_do_not_count();
	remaining = schedule_timeout_interruptible(timeout);
	freezer_count();

	return remaining;
}
215
/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long left;

	/* Mark ourselves skippable for the duration of the sleep. */
	freezer_do_not_count();
	left = schedule_timeout_killable(timeout);
	freezer_count();

	return left;
}
225
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	/* "Unsafe": freezer_count_unsafe() skips the lockdep held-locks check. */
	freezer_count_unsafe();
	return __retval;
}
235
/*
 * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
		unsigned long delta, const enum hrtimer_mode mode)
{
	int __retval;
	/* Mark ourselves skippable so the freezer need not wait for the sleep. */
	freezer_do_not_count();
	__retval = schedule_hrtimeout_range(expires, delta, mode);
	freezer_count();
	return __retval;
}
249
/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

/* Killable wait on @wq that does not block the freezer. */
#define wait_event_freezekillable(wq, condition)		\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_killable(wq, (condition));	\
	freezer_count();					\
	__retval;						\
})

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)		\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_killable(wq, (condition));	\
	freezer_count_unsafe();					\
	__retval;						\
})
274
/* Interruptible wait on @wq that does not block the freezer. */
#define wait_event_freezable(wq, condition)			\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_interruptible(wq, (condition));	\
	freezer_count();					\
	__retval;						\
})

/* As wait_event_interruptible_timeout(), without blocking the freezer. */
#define wait_event_freezable_timeout(wq, condition, timeout)	\
({								\
	long __retval = timeout;				\
	freezer_do_not_count();					\
	__retval = wait_event_interruptible_timeout(wq, (condition),	\
				__retval);			\
	freezer_count();					\
	__retval;						\
})

/* Exclusive-waiter variant of wait_event_freezable(). */
#define wait_event_freezable_exclusive(wq, condition)		\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_interruptible_exclusive(wq, condition);	\
	freezer_count();					\
	__retval;						\
})
302
303
304 #else /* !CONFIG_FREEZER */
/*
 * CONFIG_FREEZER=n: no freezing condition can ever exist, so the state
 * queries are constant-false and the freeze/thaw operations either fail
 * with -ENOSYS or do nothing.
 */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }
317
freezer_do_not_count(void)318 static inline void freezer_do_not_count(void) {}
freezer_count(void)319 static inline void freezer_count(void) {}
freezer_should_skip(struct task_struct * p)320 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
set_freezable(void)321 static inline void set_freezable(void) {}
322
/*
 * With the freezer compiled out, the freezable_* helpers degrade to their
 * plain scheduler counterparts and the wait_event_freez* wrappers to the
 * corresponding <linux/wait.h> primitives.
 */
#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)	\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)	\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezable(wq, condition)		\
		wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout)	\
		wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezable_exclusive(wq, condition)			\
		wait_event_interruptible_exclusive(wq, condition)

#define wait_event_freezekillable(wq, condition)		\
		wait_event_killable(wq, condition)

#define wait_event_freezekillable_unsafe(wq, condition)			\
		wait_event_killable(wq, condition)
355
356 #endif /* !CONFIG_FREEZER */
357
358 #endif /* FREEZER_H_INCLUDED */
359