/* SPDX-License-Identifier: GPL-2.0 */
/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();
	return try_to_freeze_unsafe();
}
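
/*
 * Example (illustrative sketch, not part of the original header): a
 * freezable kernel thread typically marks itself freezable once and then
 * polls try_to_freeze() from its main loop, parking in the refrigerator
 * whenever a freeze is requested.  do_some_work() is a hypothetical
 * placeholder for the thread's real work:
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		do_some_work();
 *		try_to_freeze();
 *	}
 */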

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
 * parent won't really block freeze_processes(), since ____call_usermodehelper()
 * (the child) does a little before exec/exit and it can't be frozen before
 * waking up the parent.
 */


/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, freezer[_do_not]_count() pair
 * wrap a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	try_to_freeze();
}
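
/*
 * Example (illustrative sketch, not part of the original header): the
 * vfork parent case described above pairs the two helpers around the
 * completion wait, roughly:
 *
 *	freezer_do_not_count();
 *	wait_for_completion(&vfork);
 *	freezer_count();
 *
 * so the parent counts as "frozen enough" while it sleeps and calls
 * try_to_freeze() via freezer_count() as soon as it wakes up.
 */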

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	smp_mb();
	try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
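
/*
 * Example (illustrative sketch, not part of the original header): a
 * freezer loop such as try_to_freeze_tasks() only counts a task as still
 * pending when it is neither frozen nor marked to be skipped, roughly:
 *
 *	if (freeze_task(p) && !freezer_should_skip(p))
 *		todo++;
 */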

/*
 * These functions are intended to be used whenever you want to allow a
 * sleeping task to be frozen. Note that neither returns any clear
 * indication of whether a freeze event happened while in this function.
 */

/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count();
}
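
/*
 * Example (illustrative sketch, not part of the original header): a
 * freezer-friendly wait loop uses freezable_schedule() exactly where a
 * plain wait loop would call schedule(); "condition" stands for a
 * hypothetical wake-up condition:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		freezable_schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */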

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count_unsafe();
}

/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout(timeout);
	freezer_count();
	return __retval;
}

/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_interruptible(timeout);
	freezer_count();
	return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout_interruptible(timeout);
	freezer_count_unsafe();
	return __retval;
}

/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count();
	return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count_unsafe();
	return __retval;
}

/*
 * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
		u64 delta, const enum hrtimer_mode mode)
{
	int __retval;
	freezer_do_not_count();
	__retval = schedule_hrtimeout_range(expires, delta, mode);
	freezer_count();
	return __retval;
}
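
/*
 * Example (illustrative sketch, not part of the original header): the
 * timeout variants above are drop-in replacements for their scheduler
 * counterparts, so a bounded, freezer-friendly sleep of up to one second
 * might look like:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = freezable_schedule_timeout(HZ);
 */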

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})

#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_interruptible_unsafe(timeout)	\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezekillable_unsafe(wq, condition)			\
		wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif	/* FREEZER_H_INCLUDED */