• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Freezer declarations */
3 
4 #ifndef FREEZER_H_INCLUDED
5 #define FREEZER_H_INCLUDED
6 
7 #include <linux/debug_locks.h>
8 #include <linux/sched.h>
9 #include <linux/wait.h>
10 #include <linux/atomic.h>
11 #if defined(CONFIG_ARM64) && !defined(__GENKSYMS__)
12 #include <linux/mmu_context.h>
13 #endif
14 
15 #ifdef CONFIG_FREEZER
16 extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
17 extern bool pm_freezing;		/* PM freezing in effect */
18 extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
19 
20 /*
21  * Timeout for stopping processes
22  */
23 extern unsigned int freeze_timeout_msecs;
24 
25 /*
26  * Check if a process has been frozen
27  */
frozen(struct task_struct * p)28 static inline bool frozen(struct task_struct *p)
29 {
30 	return p->flags & PF_FROZEN;
31 }
32 
frozen_or_skipped(struct task_struct * p)33 static inline bool frozen_or_skipped(struct task_struct *p)
34 {
35 	return p->flags & (PF_FROZEN | PF_FREEZER_SKIP);
36 }
37 
38 extern bool freezing_slow_path(struct task_struct *p);
39 
40 /*
41  * Check if there is a request to freeze a process
42  */
freezing(struct task_struct * p)43 static inline bool freezing(struct task_struct *p)
44 {
45 	if (likely(!atomic_read(&system_freezing_cnt)))
46 		return false;
47 	return freezing_slow_path(p);
48 }
49 
50 /* Takes and releases task alloc lock using task_lock() */
51 extern void __thaw_task(struct task_struct *t);
52 
53 extern bool __refrigerator(bool check_kthr_stop);
54 extern int freeze_processes(void);
55 extern int freeze_kernel_threads(void);
56 extern void thaw_processes(void);
57 extern void thaw_kernel_threads(void);
58 
59 /*
60  * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
61  * If try_to_freeze causes a lockdep warning it means the caller may deadlock
62  */
try_to_freeze_unsafe(void)63 static inline bool try_to_freeze_unsafe(void)
64 {
65 	might_sleep();
66 	if (likely(!freezing(current)))
67 		return false;
68 	return __refrigerator(false);
69 }
70 
try_to_freeze(void)71 static inline bool try_to_freeze(void)
72 {
73 	if (!(current->flags & PF_NOFREEZE))
74 		debug_check_no_locks_held();
75 	return try_to_freeze_unsafe();
76 }
77 
78 extern bool freeze_task(struct task_struct *p);
79 extern bool set_freezable(void);
80 
#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
/* Without the cgroup freezer, no task can be frozen by a cgroup. */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
89 
90 /*
91  * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
92  * calls wait_for_completion(&vfork) and reset right after it returns from this
93  * function.  Next, the parent should call try_to_freeze() to freeze itself
94  * appropriately in case the child has exited before the freezing of tasks is
95  * complete.  However, we don't want kernel threads to be frozen in unexpected
96  * places, so we allow them to block freeze_processes() instead or to set
97  * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
98  * parent won't really block freeze_processes(), since ____call_usermodehelper()
99  * (the child) does a little before exec/exit and it can't be frozen before
100  * waking up the parent.
101  */
102 
103 
/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_cont() is called.  Usually, freezer[_do_not]_count() pair
 * wrap a scheduling operation and nothing much else.
 *
 * The write to current->flags uses release semantics to prevent a concurrent
 * freezer_should_skip() from observing this write before a write to on_rq
 * during a prior call to activate_task(), which may cause it to return true
 * before deactivate_task() is called.
 */
static inline void freezer_do_not_count(void)
{
	/* Set PF_FREEZER_SKIP with release semantics; see comment above. */
	smp_store_release(&current->flags, current->flags | PF_FREEZER_SKIP);
}
124 
/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	/* The flag must be cleared before the barrier below. */
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	/* Freeze immediately if a freezing condition took effect meanwhile. */
	try_to_freeze();
}
143 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
/*
 * Same as freezer_count(), but calls try_to_freeze_unsafe(), i.e. without
 * the lockdep held-locks check performed by try_to_freeze().
 */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/* Pairs with smp_mb() in freezer_should_skip(); see freezer_count(). */
	smp_mb();
	try_to_freeze_unsafe();
}
151 
/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in quesion
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
#ifdef CONFIG_ARM64
	/*
	 * On arm64, additionally require the task to be off the runqueue or
	 * able to run on every possible CPU before skipping it.
	 * NOTE(review): presumably guards tasks with a restricted affinity
	 * mask (asymmetric 32-bit support) — confirm against the arm64
	 * task_cpu_possible_mask() definition.
	 */
	return (p->flags & PF_FREEZER_SKIP) &&
	       (!p->on_rq || task_cpu_possible_mask(p) == cpu_possible_mask);
#else
	/*
	 * On non-aarch64, avoid depending on task_cpu_possible_mask(), which is
	 * defined in <linux/mmu_context.h>, because including that header from
	 * here exposes a tricky bug in the tracepoint headers on x86, and that
	 * macro would end up being defined equal to cpu_possible_mask on other
	 * architectures anyway.
	 */
	return p->flags & PF_FREEZER_SKIP;
#endif
}
186 
187 /*
188  * These functions are intended to be used whenever you want allow a sleeping
189  * task to be frozen. Note that neither return any clear indication of
190  * whether a freeze event happened while in this function.
191  */
192 
/*
 * Like schedule(), but should not block the freezer: %current counts as
 * frozen enough (PF_FREEZER_SKIP) while it sleeps, and re-checks for a
 * pending freeze on wakeup via freezer_count().
 */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count();
}
200 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
/*
 * Like freezable_schedule(), but skips the lockdep held-locks check on the
 * wakeup path (freezer_count_unsafe()).
 */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count_unsafe();
}
208 
/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long ret;

	freezer_do_not_count();
	ret = schedule_timeout(timeout);
	freezer_count();

	return ret;
}
221 
/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.
 * Do not call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long ret;

	freezer_do_not_count();
	ret = schedule_timeout_interruptible(timeout);
	freezer_count();

	return ret;
}
234 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
/* Lockdep-unchecked variant of freezable_schedule_timeout_interruptible(). */
static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
{
	long ret;

	freezer_do_not_count();
	ret = schedule_timeout_interruptible(timeout);
	freezer_count_unsafe();

	return ret;
}
245 
/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long ret;

	freezer_do_not_count();
	ret = schedule_timeout_killable(timeout);
	freezer_count();

	return ret;
}
255 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
/* Lockdep-unchecked variant of freezable_schedule_timeout_killable(). */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long ret;

	freezer_do_not_count();
	ret = schedule_timeout_killable(timeout);
	freezer_count_unsafe();

	return ret;
}
265 
266 /*
267  * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
268  * call this with locks held.
269  */
freezable_schedule_hrtimeout_range(ktime_t * expires,u64 delta,const enum hrtimer_mode mode)270 static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
271 		u64 delta, const enum hrtimer_mode mode)
272 {
273 	int __retval;
274 	freezer_do_not_count();
275 	__retval = schedule_hrtimeout_range(expires, delta, mode);
276 	freezer_count();
277 	return __retval;
278 }
279 
280 /*
281  * Freezer-friendly wrappers around wait_event_interruptible(),
282  * wait_event_killable() and wait_event_interruptible_timeout(), originally
283  * defined in <linux/wait.h>
284  */
285 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
/*
 * Freezer-friendly wait_event_killable(): the waiter is counted as frozen
 * enough while it sleeps (freezer_do_not_count()), and the wakeup path uses
 * freezer_count_unsafe(), i.e. skips the lockdep held-locks check.
 */
#define wait_event_freezekillable_unsafe(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})
295 
296 #else /* !CONFIG_FREEZER */
/* Freezer compiled out: no task is ever frozen or asked to freeze. */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool frozen_or_skipped(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}
301 
/* System-wide freezing is unavailable without CONFIG_FREEZER. */
static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
307 
try_to_freeze(void)308 static inline bool try_to_freeze(void) { return false; }
309 
freezer_do_not_count(void)310 static inline void freezer_do_not_count(void) {}
freezer_count(void)311 static inline void freezer_count(void) {}
freezer_should_skip(struct task_struct * p)312 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
set_freezable(void)313 static inline void set_freezable(void) {}
314 
/*
 * With the freezer compiled out, the freezable_* wrappers reduce to the
 * scheduler primitives they wrap.
 */
#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_interruptible_unsafe(timeout)	\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezekillable_unsafe(wq, condition)			\
		wait_event_killable(wq, condition)
339 #endif /* !CONFIG_FREEZER */
340 
341 #endif	/* FREEZER_H_INCLUDED */
342