/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
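
/*
 * Usage sketch (illustrative only; the fields shown are just examples):
 * the schedstat_*() helpers test the static key themselves, while the
 * __schedstat_*() variants assume the caller has already checked it:
 *
 *	schedstat_inc(rq->yld_count);
 *
 *	if (schedstat_enabled())
 *		__schedstat_set(p->se.statistics.wait_start, rq_clock(rq));
 */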

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING, set = 0;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!sleep) {
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;
	} else {
		/*
		 * When a task sleeps, schedule() dequeues it before
		 * switching to the next one. Merge the clearing of
		 * TSK_RUNNING and TSK_ONCPU to save an unnecessary
		 * psi_task_change() call in psi_sched_switch().
		 */
		clear |= TSK_ONCPU;

		if (p->in_iowait)
			set |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}
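
/*
 * Illustrative sketch (not part of this header): the scheduler core is
 * expected to pass the enqueue/dequeue context roughly like this, so that
 * plain requeues keep their sleep-persistent state:
 *
 *	enqueue_task():	psi_enqueue(p, flags & ENQUEUE_WAKEUP);
 *	dequeue_task():	psi_dequeue(p, flags & DEQUEUE_SLEEP);
 */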

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
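
/*
 * Illustrative sketch, assuming the wakeup path in core.c: when
 * try_to_wake_up() picks a CPU other than the one the task last ran on,
 * it is expected to call this before migrating, roughly:
 *
 *	if (task_cpu(p) != cpu) {
 *		psi_ttwu_dequeue(p);
 *		set_task_cpu(p, cpu);
 *	}
 */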

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

static inline void psi_task_tick(struct rq *rq)
{
	if (static_branch_likely(&psi_disabled))
		return;

	if (unlikely(rq->curr->in_memstall))
		psi_memstall_tick(rq->curr, cpu_of(rq));
}
#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
static inline void psi_task_tick(struct rq *rq) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU; we call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs. The delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (sched_info_on()) {
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	}
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
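
/*
 * Worked example (illustrative numbers): a task stamped last_queued at
 * rq_clock == 100us that first gets the CPU at rq_clock == 250us adds a
 * delta of 150us to both t->sched_info.run_delay and the runqueue's
 * rq_sched_info.run_delay, and records last_arrival = 250us so that
 * sched_info_depart() can later compute how long it actually ran.
 */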

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (sched_info_on()) {
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
	}
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (sched_info_on())
		__sched_info_switch(rq, prev, next);
}
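
/*
 * Illustrative call-site sketch, assuming the context-switch path in
 * core.c: prepare_task_switch() invokes this once per switch with
 * prev != next, e.g.
 *
 *	sched_info_switch(rq, prev, next);
 *
 * which accounts prev's departure from the CPU and next's arrival on it.
 */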

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)	do { } while (0)
# define sched_info_reset_dequeued(t)	do { } while (0)
# define sched_info_dequeued(rq, t)	do { } while (0)
# define sched_info_depart(rq, t)	do { } while (0)
# define sched_info_arrive(rq, next)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */
