1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Freezer declarations */
3
4 #ifndef FREEZER_H_INCLUDED
5 #define FREEZER_H_INCLUDED
6
7 #include <linux/debug_locks.h>
8 #include <linux/sched.h>
9 #include <linux/wait.h>
10 #include <linux/atomic.h>
11
12 #ifdef CONFIG_FREEZER
13 extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
14 extern bool pm_freezing; /* PM freezing in effect */
15 extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
16
17 /*
18 * Timeout for stopping processes
19 */
20 extern unsigned int freeze_timeout_msecs;
21
22 /*
23 * Check if a process has been frozen
24 */
frozen(struct task_struct * p)25 static inline bool frozen(struct task_struct *p)
26 {
27 return p->flags & PF_FROZEN;
28 }
29
frozen_or_skipped(struct task_struct * p)30 static inline bool frozen_or_skipped(struct task_struct *p)
31 {
32 return p->flags & (PF_FROZEN | PF_FREEZER_SKIP);
33 }
34
35 extern bool freezing_slow_path(struct task_struct *p);
36
37 /*
38 * Check if there is a request to freeze a process
39 */
freezing(struct task_struct * p)40 static inline bool freezing(struct task_struct *p)
41 {
42 if (likely(!atomic_read(&system_freezing_cnt))) {
43 return false;
44 }
45 return freezing_slow_path(p);
46 }
47
48 /* Takes and releases task alloc lock using task_lock() */
49 extern void __thaw_task(struct task_struct *t);
50
51 extern bool __refrigerator(bool check_kthr_stop);
52 extern int freeze_processes(void);
53 extern int freeze_kernel_threads(void);
54 extern void thaw_processes(void);
55 extern void thaw_kernel_threads(void);
56
57 /*
58 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
59 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
60 */
try_to_freeze_unsafe(void)61 static inline bool try_to_freeze_unsafe(void)
62 {
63 might_sleep();
64 if (likely(!freezing(current))) {
65 return false;
66 }
67 return __refrigerator(false);
68 }
69
try_to_freeze(void)70 static inline bool try_to_freeze(void)
71 {
72 if (!(current->flags & PF_NOFREEZE)) {
73 debug_check_no_locks_held();
74 }
75 return try_to_freeze_unsafe();
76 }
77
78 extern bool freeze_task(struct task_struct *p);
79 extern bool set_freezable(void);
80
81 #ifdef CONFIG_CGROUP_FREEZER
82 extern bool cgroup_freezing(struct task_struct *task);
83 #else /* !CONFIG_CGROUP_FREEZER */
/* !CONFIG_CGROUP_FREEZER: no cgroup can request freezing of @task. */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
88 #endif /* !CONFIG_CGROUP_FREEZER */
89
90 /*
91 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
92 * calls wait_for_completion(&vfork) and reset right after it returns from this
93 * function. Next, the parent should call try_to_freeze() to freeze itself
94 * appropriately in case the child has exited before the freezing of tasks is
95 * complete. However, we don't want kernel threads to be frozen in unexpected
96 * places, so we allow them to block freeze_processes() instead or to set
97 * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
98 * parent won't really block freeze_processes(), since ____call_usermodehelper()
99 * (the child) does a little before exec/exit and it can't be frozen before
100 * waking up the parent.
101 */
102
103 /**
104 * freezer_do_not_count - tell freezer to ignore %current
105 *
106 * Tell freezers to ignore the current task when determining whether the
107 * target frozen state is reached. IOW, the current task will be
108 * considered frozen enough by freezers.
109 *
110 * The caller shouldn't do anything which isn't allowed for a frozen task
111 * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
112 * wrap a scheduling operation and nothing much else.
113 */
static inline void freezer_do_not_count(void)
{
	/* Tell freezers to consider %current "frozen enough" while it sleeps. */
	current->flags |= PF_FREEZER_SKIP;
}
118
119 /**
120 * freezer_count - tell freezer to stop ignoring %current
121 *
122 * Undo freezer_do_not_count(). It tells freezers that %current should be
123 * considered again and tries to freeze if freezing condition is already in
124 * effect.
125 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	/* Freeze right away if a freezing condition is already in effect. */
	try_to_freeze();
}
137
138 /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
/* Like freezer_count(), but uses the lockdep-unchecked freeze path. */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/* Paired with smp_mb() in freezer_should_skip(); see freezer_count(). */
	smp_mb();
	try_to_freeze_unsafe();
}
145
146 /**
147 * freezer_should_skip - whether to skip a task when determining frozen
148 * state is reached
 * @p: task in question
150 *
151 * This function is used by freezers after establishing %true freezing() to
152 * test whether a task should be skipped when determining the target frozen
153 * state is reached. IOW, if this function returns %true, @p is considered
154 * frozen enough.
155 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
168
169 /*
 * These functions are intended to be used whenever you want to allow a sleeping
171 * task to be frozen. Note that neither return any clear indication of
172 * whether a freeze event happened while in this function.
173 */
174
175 /* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();	/* freezer treats us as already frozen */
	schedule();
	freezer_count();	/* may freeze here if freezing is in effect */
}
182
183 /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
/* Like freezable_schedule(), but skips the lockdep freeze checks. */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count_unsafe();
}
190
191 /*
192 * Like schedule_timeout(), but should not block the freezer. Do not
193 * call this with locks held.
194 */
static inline long freezable_schedule_timeout(long timeout)
{
	long ret;

	freezer_do_not_count();		/* freezer treats us as frozen */
	ret = schedule_timeout(timeout);
	freezer_count();		/* may freeze if freezing is in effect */
	return ret;
}
203
204 /*
205 * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
206 * call this with locks held.
207 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long ret;

	freezer_do_not_count();		/* freezer treats us as frozen */
	ret = schedule_timeout_interruptible(timeout);
	freezer_count();		/* may freeze if freezing is in effect */
	return ret;
}
216
217 /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
{
	long ret;

	freezer_do_not_count();
	ret = schedule_timeout_interruptible(timeout);
	freezer_count_unsafe();		/* skips lockdep freeze checks */
	return ret;
}
227
228 /* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long ret;

	freezer_do_not_count();		/* freezer treats us as frozen */
	ret = schedule_timeout_killable(timeout);
	freezer_count();		/* may freeze if freezing is in effect */
	return ret;
}
237
238 /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long ret;

	freezer_do_not_count();
	ret = schedule_timeout_killable(timeout);
	freezer_count_unsafe();		/* skips lockdep freeze checks */
	return ret;
}
247
248 /*
249 * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
250 * call this with locks held.
251 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, u64 delta, const enum hrtimer_mode mode)
{
	int ret;

	freezer_do_not_count();		/* freezer treats us as frozen */
	ret = schedule_hrtimeout_range(expires, delta, mode);
	freezer_count();		/* may freeze if freezing is in effect */
	return ret;
}
260
261 /*
262 * Freezer-friendly wrappers around wait_event_interruptible(),
263 * wait_event_killable() and wait_event_interruptible_timeout(), originally
264 * defined in <linux/wait.h>
265 */
266
267 /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
/* Freezer-friendly wait_event_killable(); skips lockdep freeze checks. */
#define wait_event_freezekillable_unsafe(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})
276
277 #else /* !CONFIG_FREEZER */
/* CONFIG_FREEZER disabled: no task can ever be frozen. */
static inline bool frozen(struct task_struct *p)
{
	return false;
}
/* CONFIG_FREEZER disabled: never frozen, never skipping. */
static inline bool frozen_or_skipped(struct task_struct *p)
{
	return false;
}
/* CONFIG_FREEZER disabled: no freezing condition can be in effect. */
static inline bool freezing(struct task_struct *p)
{
	return false;
}
/* CONFIG_FREEZER disabled: nothing is ever frozen, so thawing is a no-op. */
static inline void __thaw_task(struct task_struct *t)
{
}
293
/* CONFIG_FREEZER disabled: never enters the refrigerator. */
static inline bool __refrigerator(bool check_kthr_stop)
{
	return false;
}
/* CONFIG_FREEZER disabled: freezing userspace is not supported. */
static inline int freeze_processes(void)
{
	return -ENOSYS;
}
/* CONFIG_FREEZER disabled: freezing kernel threads is not supported. */
static inline int freeze_kernel_threads(void)
{
	return -ENOSYS;
}
/* CONFIG_FREEZER disabled: nothing to thaw. */
static inline void thaw_processes(void)
{
}
/* CONFIG_FREEZER disabled: nothing to thaw. */
static inline void thaw_kernel_threads(void)
{
}
312
/*
 * CONFIG_FREEZER disabled: never freezes.
 * NOTE(review): no CONFIG_FREEZER counterpart is visible in this file —
 * presumably a leftover stub kept for out-of-view callers; verify.
 */
static inline bool try_to_freeze_nowarn(void)
{
	return false;
}
/* CONFIG_FREEZER disabled: never freezes. */
static inline bool try_to_freeze(void)
{
	return false;
}
321
/* CONFIG_FREEZER disabled: no freezer to hide from; no-op. */
static inline void freezer_do_not_count(void)
{
}
/* CONFIG_FREEZER disabled: nothing to undo; no-op. */
static inline void freezer_count(void)
{
}
/*
 * CONFIG_FREEZER disabled: never skip a task.
 * Returns bool to match the signature of the CONFIG_FREEZER variant
 * of this helper (the stub previously returned int).
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	return false;
}
/*
 * CONFIG_FREEZER disabled: nothing to mark freezable and no freezing
 * condition can be pending, so report "no need to freeze now".
 * Returns bool to match the "extern bool set_freezable(void);"
 * declaration used under CONFIG_FREEZER — the previous void stub broke
 * compilation for any !CONFIG_FREEZER caller that used the result.
 */
static inline bool set_freezable(void)
{
	return false;
}
335
/*
 * CONFIG_FREEZER disabled: there is no freezer to cooperate with, so
 * the freezable_* wrappers collapse to the plain scheduler/wait calls
 * they wrap.
 */
#define freezable_schedule() schedule()

#define freezable_schedule_unsafe() schedule()

#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout) schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_interruptible_unsafe(timeout) schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout) schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout) schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode) schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezekillable_unsafe(wq, condition) wait_event_killable(wq, condition)
353
354 #endif /* !CONFIG_FREEZER */
355
356 #endif /* FREEZER_H_INCLUDED */
357