// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/freezer.c - Function to freeze a process
 *
 * Originally from kernel/power/process.c
 */

#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/cgroup.h>

/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect, protected by
 * system_transition_mutex
 */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if system_freezing_cnt isn't zero
 * and tests whether @p needs to enter and stay in frozen state. Can be
 * called under any context. The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
bool freezing_slow_path(struct task_struct *p)
{
	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
		return false;

	if (test_tsk_thread_flag(p, TIF_MEMDIE))
		return false;

	if (pm_nosig_freezing || cgroup_freezing(p))
		return true;

	if (pm_freezing && !(p->flags & PF_KTHREAD))
		return true;

	return false;
}
EXPORT_SYMBOL(freezing_slow_path);
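
/*
 * For reference: the freezing() fast path that gates this slow path lives
 * in include/linux/freezer.h and looks roughly like the sketch below
 * (illustrative only, not part of this file):
 *
 *	static inline bool freezing(struct task_struct *p)
 *	{
 *		if (likely(!atomic_read(&system_freezing_cnt)))
 *			return false;
 *		return freezing_slow_path(p);
 *	}
 */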

/* Refrigerator is place where frozen processes are stored :-). */
bool __refrigerator(bool check_kthr_stop)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	bool was_frozen = false;
	unsigned int save = get_current_state();

	pr_debug("%s entered refrigerator\n", current->comm);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		trace_android_rvh_refrigerator(pm_nosig_freezing);
		spin_unlock_irq(&freezer_lock);

		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	pr_debug("%s left refrigerator\n", current->comm);

	/*
	 * Restore saved task state before returning. The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);

	return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);
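
/*
 * Tasks normally reach the refrigerator through try_to_freeze(), which
 * in include/linux/freezer.h boils down to roughly the following sketch
 * (illustrative only, not part of this file):
 *
 *	static inline bool try_to_freeze(void)
 *	{
 *		might_sleep();
 *		if (likely(!freezing(current)))
 *			return false;
 *		return __refrigerator(false);
 *	}
 */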

static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}

/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task. It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZER_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
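
/*
 * The freezer_do_not_count()/freezer_count() pair mentioned in the race
 * comment above is defined in include/linux/freezer.h; a rough sketch
 * (illustrative only, not part of this file):
 *
 *	static inline void freezer_do_not_count(void)
 *	{
 *		current->flags |= PF_FREEZER_SKIP;
 *	}
 *
 *	static inline void freezer_count(void)
 *	{
 *		current->flags &= ~PF_FREEZER_SKIP;
 *		smp_mb();	// pairs with freezer_should_skip()
 *		try_to_freeze();
 *	}
 */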

void __thaw_task(struct task_struct *p)
{
	unsigned long flags;
	const struct cpumask *mask = task_cpu_possible_mask(p);

	spin_lock_irqsave(&freezer_lock, flags);
	/*
	 * Wake up frozen tasks. On asymmetric systems where tasks cannot
	 * run on all CPUs, ttwu() may have deferred a wakeup generated
	 * before thaw_secondary_cpus() had completed so we generate
	 * additional wakeups here for tasks in the PF_FREEZER_SKIP state.
	 */
	if (frozen(p) || (frozen_or_skipped(p) && mask != cpu_possible_mask))
		wake_up_process(p);
	spin_unlock_irqrestore(&freezer_lock, flags);
}
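
/*
 * For reference, the frozen_or_skipped() helper used above comes from
 * include/linux/freezer.h and reads roughly as follows (an illustrative
 * sketch inferred from the flag usage here, not a definition from this
 * file):
 *
 *	static inline bool frozen_or_skipped(struct task_struct *p)
 *	{
 *		return p->flags & (PF_FROZEN | PF_FREEZER_SKIP);
 *	}
 */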

/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
	might_sleep();

	/*
	 * Modify flags while holding freezer_lock. This ensures the
	 * freezer notices that we aren't frozen yet or the freezing
	 * condition is visible to try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);

	return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
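
/*
 * Typical use from a freezable kernel thread; my_thread_fn and my_wq are
 * hypothetical names used only for illustration:
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			// sleeps interruptibly and enters the refrigerator
 *			// via try_to_freeze() when woken by the freezer
 *			wait_event_freezable(my_wq, kthread_should_stop());
 *			// ... do work ...
 *		}
 *		return 0;
 *	}
 */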