1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Pressure stall information for CPU, memory and IO
4 *
5 * Copyright (c) 2018 Facebook, Inc.
6 * Author: Johannes Weiner <hannes@cmpxchg.org>
7 *
8 * Polling support by Suren Baghdasaryan <surenb@google.com>
9 * Copyright (c) 2018 Google, Inc.
10 *
11 * When CPU, memory and IO are contended, tasks experience delays that
12 * reduce throughput and introduce latencies into the workload. Memory
13 * and IO contention, in addition, can cause a full loss of forward
14 * progress in which the CPU goes idle.
15 *
16 * This code aggregates individual task delays into resource pressure
17 * metrics that indicate problems with both workload health and
18 * resource utilization.
19 *
20 * Model
21 *
22 * The time in which a task can execute on a CPU is our baseline for
23 * productivity. Pressure expresses the amount of time in which this
24 * potential cannot be realized due to resource contention.
25 *
26 * This concept of productivity has two components: the workload and
27 * the CPU. To measure the impact of pressure on both, we define two
28 * contention states for a resource: SOME and FULL.
29 *
30 * In the SOME state of a given resource, one or more tasks are
31 * delayed on that resource. This affects the workload's ability to
32 * perform work, but the CPU may still be executing other tasks.
33 *
34 * In the FULL state of a given resource, all non-idle tasks are
35 * delayed on that resource such that nobody is advancing and the CPU
36 * goes idle. This leaves both workload and CPU unproductive.
37 *
38 * SOME = nr_delayed_tasks != 0
39 * FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
40 *
41 * What it means for a task to be productive is defined differently
42 * for each resource. For IO, productive means a running task. For
43 * memory, productive means a running task that isn't a reclaimer. For
44 * CPU, productive means an oncpu task.
45 *
46 * Naturally, the FULL state doesn't exist for the CPU resource at the
47 * system level, but exists at the cgroup level. At the cgroup level,
48 * FULL means all non-idle tasks in the cgroup are delayed on the CPU
49 * resource which is being used by others outside of the cgroup or
50 * throttled by the cgroup cpu.max configuration.
51 *
52 * The percentage of wallclock time spent in those compound stall
53 * states gives pressure numbers between 0 and 100 for each resource,
54 * where the SOME percentage indicates workload slowdowns and the FULL
55 * percentage indicates reduced CPU utilization:
56 *
57 * %SOME = time(SOME) / period
58 * %FULL = time(FULL) / period
59 *
60 * Multiple CPUs
61 *
62 * The more tasks and available CPUs there are, the more work can be
63 * performed concurrently. This means that the potential that can go
64 * unrealized due to resource contention *also* scales with non-idle
65 * tasks and CPUs.
66 *
67 * Consider a scenario where 257 number crunching tasks are trying to
68 * run concurrently on 256 CPUs. If we simply aggregated the task
69 * states, we would have to conclude a CPU SOME pressure number of
70 * 100%, since *somebody* is waiting on a runqueue at all
71 * times. However, that is clearly not the amount of contention the
72 * workload is experiencing: only one out of 256 possible execution
73 * threads will be contended at any given time, or about 0.4%.
74 *
75 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
76 * given time *one* of the tasks is delayed due to a lack of memory.
77 * Again, looking purely at the task state would yield a memory FULL
78 * pressure number of 0%, since *somebody* is always making forward
79 * progress. But again this wouldn't capture the amount of execution
80 * potential lost, which is 1 out of 4 CPUs, or 25%.
81 *
82 * To calculate wasted potential (pressure) with multiple processors,
83 * we have to base our calculation on the number of non-idle tasks in
84 * conjunction with the number of available CPUs, which is the number
85 * of potential execution threads. SOME then becomes the proportion of
86 * delayed tasks to possible threads, and FULL is the share of possible
87 * threads that are unproductive due to delays:
88 *
89 * threads = min(nr_nonidle_tasks, nr_cpus)
90 * SOME = min(nr_delayed_tasks / threads, 1)
91 * FULL = (threads - min(nr_productive_tasks, threads)) / threads
92 *
93 * For the 257 number crunchers on 256 CPUs, this yields:
94 *
95 * threads = min(257, 256)
96 * SOME = min(1 / 256, 1) = 0.4%
97 * FULL = (256 - min(256, 256)) / 256 = 0%
98 *
99 * For the 1 out of 4 memory-delayed tasks, this yields:
100 *
101 * threads = min(4, 4)
102 * SOME = min(1 / 4, 1) = 25%
103 * FULL = (4 - min(3, 4)) / 4 = 25%
104 *
105 * [ Substitute nr_cpus with 1, and you can see that it's a natural
106 * extension of the single-CPU model. ]
107 *
108 * Implementation
109 *
110 * To assess the precise time spent in each such state, we would have
111 * to freeze the system on task changes and start/stop the state
112 * clocks accordingly. Obviously that doesn't scale in practice.
113 *
114 * Because the scheduler aims to distribute the compute load evenly
115 * among the available CPUs, we can track task state locally to each
116 * CPU and, at much lower frequency, extrapolate the global state for
117 * the cumulative stall times and the running averages.
118 *
119 * For each runqueue, we track:
120 *
121 * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
122 * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
123 * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
124 *
125 * and then periodically aggregate:
126 *
127 * tNONIDLE = sum(tNONIDLE[i])
128 *
129 * tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
130 * tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
131 *
132 * %SOME = tSOME / period
133 * %FULL = tFULL / period
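 *
 * As an illustration with made-up numbers: if, within one period,
 * CPU 0 was non-idle for 1000ms of which 500ms were SOME, and CPU 1
 * was non-idle for 200ms of which 200ms were SOME, then
 *
 *	tSOME = (500 * 1000 + 200 * 200) / (1000 + 200) = 450ms
 *
 * i.e. the mostly-idle CPU contributes proportionally less weight.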
134 *
135 * This gives us an approximation of pressure that is practical
136 * cost-wise, yet way more sensitive and accurate than periodic
137 * sampling of the aggregate task states would be.
138 */
139 #include <trace/hooks/psi.h>
140
141 static int psi_bug __read_mostly;
142
143 DEFINE_STATIC_KEY_FALSE(psi_disabled);
144 DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
145
146 #ifdef CONFIG_PSI_DEFAULT_DISABLED
147 static bool psi_enable;
148 #else
149 static bool psi_enable = true;
150 #endif
151 static int __init setup_psi(char *str)
152 {
153 return kstrtobool(str, &psi_enable) == 0;
154 }
155 __setup("psi=", setup_psi);
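
/*
 * Boot command line example (illustrative): "psi=1" enables the
 * accounting on a kernel built with CONFIG_PSI_DEFAULT_DISABLED and
 * "psi=0" disables it otherwise; any value understood by kstrtobool()
 * is accepted.
 */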
156
157 /* Running averages - we need to be higher-res than loadavg */
158 #define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
159 #define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */
160 #define EXP_60s 1981 /* 1/exp(2s/60s) */
161 #define EXP_300s 2034 /* 1/exp(2s/300s) */
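
/*
 * For reference, these are approximately exp(-period/window) in the
 * FIXED_1 == 2048 fixed-point format consumed by calc_load():
 *
 *	EXP_10s  ~= 2048 * exp(-2/10)  ~= 1677
 *	EXP_60s  ~= 2048 * exp(-2/60)  ~= 1981
 *	EXP_300s ~= 2048 * exp(-2/300) ~= 2034
 */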
162
163 /* PSI trigger definitions */
164 #define WINDOW_MAX_US 10000000 /* Max window size is 10s */
165 #define UPDATES_PER_WINDOW 10 /* 10 updates per window */
166
167 /* Sampling frequency in nanoseconds */
168 static u64 psi_period __read_mostly;
169
170 /* System-level pressure and stall tracking */
171 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
172 struct psi_group psi_system = {
173 .pcpu = &system_group_pcpu,
174 };
175
176 static void psi_avgs_work(struct work_struct *work);
177
178 static void poll_timer_fn(struct timer_list *t);
179
180 static void group_init(struct psi_group *group)
181 {
182 int cpu;
183
184 group->enabled = true;
185 for_each_possible_cpu(cpu)
186 seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
187 group->avg_last_update = sched_clock();
188 group->avg_next_update = group->avg_last_update + psi_period;
189 INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
190 mutex_init(&group->avgs_lock);
191 /* Init trigger-related members */
192 atomic_set(&group->poll_scheduled, 0);
193 mutex_init(&group->trigger_lock);
194 INIT_LIST_HEAD(&group->triggers);
195 group->poll_min_period = U32_MAX;
196 group->polling_next_update = ULLONG_MAX;
197 init_waitqueue_head(&group->poll_wait);
198 timer_setup(&group->poll_timer, poll_timer_fn, 0);
199 rcu_assign_pointer(group->poll_task, NULL);
200 }
201
202 void __init psi_init(void)
203 {
204 if (!psi_enable) {
205 static_branch_enable(&psi_disabled);
206 static_branch_disable(&psi_cgroups_enabled);
207 return;
208 }
209
210 if (!cgroup_psi_enabled())
211 static_branch_disable(&psi_cgroups_enabled);
212
213 psi_period = jiffies_to_nsecs(PSI_FREQ);
214 group_init(&psi_system);
215 }
216
217 static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
218 {
219 switch (state) {
220 case PSI_IO_SOME:
221 return unlikely(tasks[NR_IOWAIT]);
222 case PSI_IO_FULL:
223 return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
224 case PSI_MEM_SOME:
225 return unlikely(tasks[NR_MEMSTALL]);
226 case PSI_MEM_FULL:
227 return unlikely(tasks[NR_MEMSTALL] &&
228 tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
229 case PSI_CPU_SOME:
230 return unlikely(tasks[NR_RUNNING] > oncpu);
231 case PSI_CPU_FULL:
232 return unlikely(tasks[NR_RUNNING] && !oncpu);
233 case PSI_NONIDLE:
234 return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
235 tasks[NR_RUNNING];
236 default:
237 return false;
238 }
239 }
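
/*
 * A hypothetical example of the above: with tasks[] counts of
 * NR_IOWAIT == 0, NR_MEMSTALL == 2, NR_RUNNING == 1 and
 * NR_MEMSTALL_RUNNING == 1, both PSI_MEM_SOME and PSI_MEM_FULL test
 * true: the only runnable task is itself a reclaimer, so nothing
 * productive can run even though the CPU isn't idle.
 */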
240
241 static void get_recent_times(struct psi_group *group, int cpu,
242 enum psi_aggregators aggregator, u32 *times,
243 u32 *pchanged_states)
244 {
245 struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
246 u64 now, state_start;
247 enum psi_states s;
248 unsigned int seq;
249 u32 state_mask;
250
251 *pchanged_states = 0;
252
253 /* Snapshot a coherent view of the CPU state */
254 do {
255 seq = read_seqcount_begin(&groupc->seq);
256 now = cpu_clock(cpu);
257 memcpy(times, groupc->times, sizeof(groupc->times));
258 state_mask = groupc->state_mask;
259 state_start = groupc->state_start;
260 } while (read_seqcount_retry(&groupc->seq, seq));
261
262 /* Calculate state time deltas against the previous snapshot */
263 for (s = 0; s < NR_PSI_STATES; s++) {
264 u32 delta;
265 /*
266 * In addition to already concluded states, we also
267 * incorporate currently active states on the CPU,
268 * since states may last for many sampling periods.
269 *
270 * This way we keep our delta sampling buckets small
271 * (u32) and our reported pressure close to what's
272 * actually happening.
273 */
274 if (state_mask & (1 << s))
275 times[s] += now - state_start;
276
277 delta = times[s] - groupc->times_prev[aggregator][s];
278 groupc->times_prev[aggregator][s] = times[s];
279
280 times[s] = delta;
281 if (delta)
282 *pchanged_states |= (1 << s);
283 }
284 }
285
286 static void calc_avgs(unsigned long avg[3], int missed_periods,
287 u64 time, u64 period)
288 {
289 unsigned long pct;
290
291 /* Fill in zeroes for periods of no activity */
292 if (missed_periods) {
293 avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
294 avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
295 avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
296 }
297
298 /* Sample the most recent active period */
299 pct = div_u64(time * 100, period);
300 pct *= FIXED_1;
301 avg[0] = calc_load(avg[0], EXP_10s, pct);
302 avg[1] = calc_load(avg[1], EXP_60s, pct);
303 avg[2] = calc_load(avg[2], EXP_300s, pct);
304 }
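
/*
 * Rough illustration of the decay: starting from avg10 == 0, a single
 * period at 100% pressure raises the 10s average to about
 *
 *	100% * (FIXED_1 - EXP_10s) / FIXED_1 ~= 18%
 *
 * and each subsequent pressure-free period multiplies it by roughly
 * EXP_10s / FIXED_1 ~= 0.82, which is what gives the three averages
 * their 10s/60s/300s time constants.
 */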
305
306 static void collect_percpu_times(struct psi_group *group,
307 enum psi_aggregators aggregator,
308 u32 *pchanged_states)
309 {
310 u64 deltas[NR_PSI_STATES - 1] = { 0, };
311 unsigned long nonidle_total = 0;
312 u32 changed_states = 0;
313 int cpu;
314 int s;
315
316 /*
317 * Collect the per-cpu time buckets and average them into a
318 * single time sample that is normalized to wallclock time.
319 *
320 * For averaging, each CPU is weighted by its non-idle time in
321 * the sampling period. This eliminates artifacts from uneven
322 * loading, or even entirely idle CPUs.
323 */
324 for_each_possible_cpu(cpu) {
325 u32 times[NR_PSI_STATES];
326 u32 nonidle;
327 u32 cpu_changed_states;
328
329 get_recent_times(group, cpu, aggregator, times,
330 &cpu_changed_states);
331 changed_states |= cpu_changed_states;
332
333 nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
334 nonidle_total += nonidle;
335
336 for (s = 0; s < PSI_NONIDLE; s++)
337 deltas[s] += (u64)times[s] * nonidle;
338 }
339
340 /*
341 * Integrate the sample into the running statistics that are
342 * reported to userspace: the cumulative stall times and the
343 * decaying averages.
344 *
345 * Pressure percentages are sampled at PSI_FREQ. We might be
346 * called more often when the user polls more frequently than
347 * that; we might be called less often when there is no task
348 * activity, thus no data, and clock ticks are sporadic. The
349 * below handles both.
350 */
351
352 /* total= */
353 for (s = 0; s < NR_PSI_STATES - 1; s++)
354 group->total[aggregator][s] +=
355 div_u64(deltas[s], max(nonidle_total, 1UL));
356
357 if (pchanged_states)
358 *pchanged_states = changed_states;
359 }
360
361 static u64 update_averages(struct psi_group *group, u64 now)
362 {
363 unsigned long missed_periods = 0;
364 u64 expires, period;
365 u64 avg_next_update;
366 int s;
367
368 /* avgX= */
369 expires = group->avg_next_update;
370 if (now - expires >= psi_period)
371 missed_periods = div_u64(now - expires, psi_period);
372
373 /*
374 * The periodic clock tick can get delayed for various
375 * reasons, especially on loaded systems. To avoid clock
376 * drift, we schedule the clock in fixed psi_period intervals.
377 * But the deltas we sample out of the per-cpu buckets above
378 * are based on the actual time elapsing between clock ticks.
379 */
380 avg_next_update = expires + ((1 + missed_periods) * psi_period);
381 period = now - (group->avg_last_update + (missed_periods * psi_period));
382 group->avg_last_update = now;
383
384 for (s = 0; s < NR_PSI_STATES - 1; s++) {
385 u32 sample;
386
387 sample = group->total[PSI_AVGS][s] - group->avg_total[s];
388 /*
389 * Due to the lockless sampling of the time buckets,
390 * recorded time deltas can slip into the next period,
391 * which under full pressure can result in samples in
392 * excess of the period length.
393 *
394 * We don't want to report non-sensical pressures in
395 * excess of 100%, nor do we want to drop such events
396 * on the floor. Instead we punt any overage into the
397 * future until pressure subsides. By doing this we
398 * don't underreport the occurring pressure curve, we
399 * just report it delayed by one period length.
400 *
401 * The error isn't cumulative. As soon as another
402 * delta slips from a period P to P+1, by definition
403 * it frees up its time T in P.
404 */
405 if (sample > period)
406 sample = period;
407 group->avg_total[s] += sample;
408 calc_avgs(group->avg[s], missed_periods, sample, period);
409 }
410
411 return avg_next_update;
412 }
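
/*
 * Illustrative numbers for the clamping above: with period = 2s and a
 * raw sample of 2.3s, 2s is accounted to this period and the remaining
 * 0.3s stays in the total/avg_total difference, surfacing in the next
 * period's sample.
 */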
413
414 static void psi_avgs_work(struct work_struct *work)
415 {
416 struct delayed_work *dwork;
417 struct psi_group *group;
418 u32 changed_states;
419 bool nonidle;
420 u64 now;
421
422 dwork = to_delayed_work(work);
423 group = container_of(dwork, struct psi_group, avgs_work);
424
425 mutex_lock(&group->avgs_lock);
426
427 now = sched_clock();
428
429 collect_percpu_times(group, PSI_AVGS, &changed_states);
430 nonidle = changed_states & (1 << PSI_NONIDLE);
431 /*
432 * If there is task activity, periodically fold the per-cpu
433 * times and feed samples into the running averages. If things
434 * are idle and there is no data to process, stop the clock.
435 * Once restarted, we'll catch up the running averages in one
436 * go - see calc_avgs() and missed_periods.
437 */
438 if (now >= group->avg_next_update)
439 group->avg_next_update = update_averages(group, now);
440
441 if (nonidle) {
442 schedule_delayed_work(dwork, nsecs_to_jiffies(
443 group->avg_next_update - now) + 1);
444 }
445
446 mutex_unlock(&group->avgs_lock);
447 }
448
449 /* Trigger tracking window manipulations */
450 static void window_reset(struct psi_window *win, u64 now, u64 value,
451 u64 prev_growth)
452 {
453 win->start_time = now;
454 win->start_value = value;
455 win->prev_growth = prev_growth;
456 }
457
458 /*
459 * PSI growth tracking window update and growth calculation routine.
460 *
461 * This approximates a sliding tracking window by interpolating
462 * partially elapsed windows using historical growth data from the
463 * previous intervals. This minimizes memory requirements (by not storing
464 * all the intermediate values in the previous window) and simplifies
465 * the calculations. It works well because the PSI signal changes only in
466 * the positive direction and, over relatively small window sizes, the
467 * growth is close to linear.
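 *
 * As a made-up example: with a 1s window that has elapsed 750ms, 30ms
 * of growth so far and 40ms of growth over the previous window, the
 * reported growth is 30ms + 40ms * 250/1000 = 40ms; the missing
 * quarter of the window is filled in at the previous window's rate.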
468 */
469 static u64 window_update(struct psi_window *win, u64 now, u64 value)
470 {
471 u64 elapsed;
472 u64 growth;
473
474 elapsed = now - win->start_time;
475 growth = value - win->start_value;
476 /*
477 * After each tracking window passes win->start_value and
478 * win->start_time get reset and win->prev_growth stores
479 * the average per-window growth of the previous window.
480 * win->prev_growth is then used to interpolate additional
481 * growth from the previous window assuming it was linear.
482 */
483 if (elapsed > win->size)
484 window_reset(win, now, value, growth);
485 else {
486 u32 remaining;
487
488 remaining = win->size - elapsed;
489 growth += div64_u64(win->prev_growth * remaining, win->size);
490 }
491
492 return growth;
493 }
494
495 static void init_triggers(struct psi_group *group, u64 now)
496 {
497 struct psi_trigger *t;
498
499 list_for_each_entry(t, &group->triggers, node)
500 window_reset(&t->win, now,
501 group->total[PSI_POLL][t->state], 0);
502 memcpy(group->polling_total, group->total[PSI_POLL],
503 sizeof(group->polling_total));
504 group->polling_next_update = now + group->poll_min_period;
505 }
506
507 static u64 update_triggers(struct psi_group *group, u64 now)
508 {
509 struct psi_trigger *t;
510 bool update_total = false;
511 u64 *total = group->total[PSI_POLL];
512
513 /*
514 * On subsequent updates, calculate growth deltas and let
515 * watchers know when their specified thresholds are exceeded.
516 */
517 list_for_each_entry(t, &group->triggers, node) {
518 u64 growth;
519 bool new_stall;
520
521 new_stall = group->polling_total[t->state] != total[t->state];
522
523 /* Check for stall activity or a previous threshold breach */
524 if (!new_stall && !t->pending_event)
525 continue;
526 /*
527 * Check for new stall activity, as well as deferred
528 * events that occurred in the last window after the
529 * trigger had already fired (we want to ratelimit
530 * events without dropping any).
531 */
532 if (new_stall) {
533 /*
534 * Multiple triggers might be looking at the same state,
535 * remember to update group->polling_total[] once we've
536 * been through all of them. Also remember to extend the
537 * polling time if we see new stall activity.
538 */
539 update_total = true;
540
541 /* Calculate growth since last update */
542 growth = window_update(&t->win, now, total[t->state]);
543 if (!t->pending_event) {
544 if (growth < t->threshold)
545 continue;
546
547 t->pending_event = true;
548 }
549 }
550 /* Limit event signaling to once per window */
551 if (now < t->last_event_time + t->win.size)
552 continue;
553
554 /* Generate an event */
555 if (cmpxchg(&t->event, 0, 1) == 0)
556 wake_up_interruptible(&t->event_wait);
557 t->last_event_time = now;
558 /* Reset threshold breach flag once event got generated */
559 t->pending_event = false;
560 }
561
562 if (update_total)
563 memcpy(group->polling_total, total,
564 sizeof(group->polling_total));
565
566 return now + group->poll_min_period;
567 }
568
569 /* Schedule polling if it's not already scheduled or forced. */
570 static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
571 bool force)
572 {
573 struct task_struct *task;
574
575 /*
576 * atomic_xchg should be called even when !force to provide a
577 * full memory barrier (see the comment inside psi_poll_work).
578 */
579 if (atomic_xchg(&group->poll_scheduled, 1) && !force)
580 return;
581
582 rcu_read_lock();
583
584 task = rcu_dereference(group->poll_task);
585 /*
586 * The kworker might be NULL if psi_trigger_destroy() races with
587 * psi_task_change() (hotpath), which can't use locks.
588 */
589 if (likely(task))
590 mod_timer(&group->poll_timer, jiffies + delay);
591 else
592 atomic_set(&group->poll_scheduled, 0);
593
594 rcu_read_unlock();
595 }
596
597 static void psi_poll_work(struct psi_group *group)
598 {
599 bool force_reschedule = false;
600 u32 changed_states;
601 u64 now;
602
603 mutex_lock(&group->trigger_lock);
604
605 now = sched_clock();
606
607 if (now > group->polling_until) {
608 /*
609 * We are either about to start or might stop polling if no
610 * state change was recorded. Resetting poll_scheduled leaves
611 * a small window for psi_group_change to sneak in and schedule
612 * an immediate poll_work before we get to rescheduling. One
613 * potential extra wakeup at the end of the polling window
614 * should be negligible and polling_next_update still keeps
615 * updates correctly on schedule.
616 */
617 atomic_set(&group->poll_scheduled, 0);
618 /*
619 * A task change can race with the poll worker that is supposed to
620 * report on it. To avoid missing events, ensure ordering between
621 * poll_scheduled and the task state accesses, such that if the poll
622 * worker misses the state update, the task change is guaranteed to
623 * reschedule the poll worker:
624 *
625 * poll worker:
626 * atomic_set(poll_scheduled, 0)
627 * smp_mb()
628 * LOAD states
629 *
630 * task change:
631 * STORE states
632 * if atomic_xchg(poll_scheduled, 1) == 0:
633 * schedule poll worker
634 *
635 * The atomic_xchg() implies a full barrier.
636 */
637 smp_mb();
638 } else {
639 /* Polling window is not over, keep rescheduling */
640 force_reschedule = true;
641 }
642
643
644 collect_percpu_times(group, PSI_POLL, &changed_states);
645
646 if (changed_states & group->poll_states) {
647 /* Initialize trigger windows when entering polling mode */
648 if (now > group->polling_until)
649 init_triggers(group, now);
650
651 /*
652 * Keep the monitor active for at least the duration of the
653 * minimum tracking window as long as monitor states are
654 * changing.
655 */
656 group->polling_until = now +
657 group->poll_min_period * UPDATES_PER_WINDOW;
658 }
659
660 if (now > group->polling_until) {
661 group->polling_next_update = ULLONG_MAX;
662 goto out;
663 }
664
665 if (now >= group->polling_next_update)
666 group->polling_next_update = update_triggers(group, now);
667
668 psi_schedule_poll_work(group,
669 nsecs_to_jiffies(group->polling_next_update - now) + 1,
670 force_reschedule);
671
672 out:
673 mutex_unlock(&group->trigger_lock);
674 }
675
676 static int psi_poll_worker(void *data)
677 {
678 struct psi_group *group = (struct psi_group *)data;
679
680 sched_set_fifo_low(current);
681
682 while (true) {
683 wait_event_interruptible(group->poll_wait,
684 atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
685 kthread_should_stop());
686 if (kthread_should_stop())
687 break;
688
689 psi_poll_work(group);
690 }
691 return 0;
692 }
693
694 static void poll_timer_fn(struct timer_list *t)
695 {
696 struct psi_group *group = from_timer(group, t, poll_timer);
697
698 atomic_set(&group->poll_wakeup, 1);
699 wake_up_interruptible(&group->poll_wait);
700 }
701
702 static void record_times(struct psi_group_cpu *groupc, u64 now)
703 {
704 u32 delta;
705
706 delta = now - groupc->state_start;
707 groupc->state_start = now;
708
709 if (groupc->state_mask & (1 << PSI_IO_SOME)) {
710 groupc->times[PSI_IO_SOME] += delta;
711 if (groupc->state_mask & (1 << PSI_IO_FULL))
712 groupc->times[PSI_IO_FULL] += delta;
713 }
714
715 if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
716 groupc->times[PSI_MEM_SOME] += delta;
717 if (groupc->state_mask & (1 << PSI_MEM_FULL))
718 groupc->times[PSI_MEM_FULL] += delta;
719 }
720
721 if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
722 groupc->times[PSI_CPU_SOME] += delta;
723 if (groupc->state_mask & (1 << PSI_CPU_FULL))
724 groupc->times[PSI_CPU_FULL] += delta;
725 }
726
727 if (groupc->state_mask & (1 << PSI_NONIDLE))
728 groupc->times[PSI_NONIDLE] += delta;
729 }
730
731 static void psi_group_change(struct psi_group *group, int cpu,
732 unsigned int clear, unsigned int set, u64 now,
733 bool wake_clock)
734 {
735 struct psi_group_cpu *groupc;
736 unsigned int t, m;
737 enum psi_states s;
738 u32 state_mask;
739
740 groupc = per_cpu_ptr(group->pcpu, cpu);
741
742 /*
743 * First we update the task counts according to the state
744 * change requested through the @clear and @set bits.
745 *
746 * Then, if cgroup PSI stats accounting is enabled, we
747 * assess the aggregate resource states this CPU's tasks
748 * have been in since the last change, and account any
749 * SOME and FULL time these may have resulted in.
750 */
751 write_seqcount_begin(&groupc->seq);
752
753 /*
754 * Start with TSK_ONCPU, which doesn't have a corresponding
755 * task count - it's just a boolean flag directly encoded in
756 * the state mask. Clear, set, or carry the current state if
757 * no changes are requested.
758 */
759 if (unlikely(clear & TSK_ONCPU)) {
760 state_mask = 0;
761 clear &= ~TSK_ONCPU;
762 } else if (unlikely(set & TSK_ONCPU)) {
763 state_mask = PSI_ONCPU;
764 set &= ~TSK_ONCPU;
765 } else {
766 state_mask = groupc->state_mask & PSI_ONCPU;
767 }
768
769 /*
770 * The rest of the state mask is calculated based on the task
771 * counts. Update those first, then construct the mask.
772 */
773 for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
774 if (!(m & (1 << t)))
775 continue;
776 if (groupc->tasks[t]) {
777 groupc->tasks[t]--;
778 } else if (!psi_bug) {
779 printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
780 cpu, t, groupc->tasks[0],
781 groupc->tasks[1], groupc->tasks[2],
782 groupc->tasks[3], clear, set);
783 psi_bug = 1;
784 }
785 }
786
787 for (t = 0; set; set &= ~(1 << t), t++)
788 if (set & (1 << t))
789 groupc->tasks[t]++;
790
791 if (!group->enabled) {
792 /*
793 * On the first group change after disabling PSI, conclude
794 * the current state and flush its time. This is unlikely
795 * to matter to the user, but aggregation (get_recent_times)
796 * may have already incorporated the live state into times_prev;
797 * avoid a delta sample underflow when PSI is later re-enabled.
798 */
799 if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
800 record_times(groupc, now);
801
802 groupc->state_mask = state_mask;
803
804 write_seqcount_end(&groupc->seq);
805 return;
806 }
807
808 for (s = 0; s < NR_PSI_STATES; s++) {
809 if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
810 state_mask |= (1 << s);
811 }
812
813 /*
814 * Since we care about lost potential, a memstall is FULL
815 * when there are no other working tasks, but also when
816 * the CPU is actively reclaiming and nothing productive
817 * could run even if it were runnable. So when the current
818 * task in a cgroup is in_memstall, the corresponding groupc
819 * on that cpu is in PSI_MEM_FULL state.
820 */
821 if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
822 state_mask |= (1 << PSI_MEM_FULL);
823
824 record_times(groupc, now);
825
826 groupc->state_mask = state_mask;
827
828 write_seqcount_end(&groupc->seq);
829
830 if (state_mask & group->poll_states)
831 psi_schedule_poll_work(group, 1, false);
832
833 if (wake_clock && !delayed_work_pending(&group->avgs_work))
834 schedule_delayed_work(&group->avgs_work, PSI_FREQ);
835 }
836
837 static inline struct psi_group *task_psi_group(struct task_struct *task)
838 {
839 #ifdef CONFIG_CGROUPS
840 if (static_branch_likely(&psi_cgroups_enabled))
841 return cgroup_psi(task_dfl_cgroup(task));
842 #endif
843 return &psi_system;
844 }
845
846 static void psi_flags_change(struct task_struct *task, int clear, int set)
847 {
848 if (((task->psi_flags & set) ||
849 (task->psi_flags & clear) != clear) &&
850 !psi_bug) {
851 printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
852 task->pid, task->comm, task_cpu(task),
853 task->psi_flags, clear, set);
854 psi_bug = 1;
855 }
856
857 task->psi_flags &= ~clear;
858 task->psi_flags |= set;
859 }
860
861 void psi_task_change(struct task_struct *task, int clear, int set)
862 {
863 int cpu = task_cpu(task);
864 struct psi_group *group;
865 u64 now;
866
867 if (!task->pid)
868 return;
869
870 psi_flags_change(task, clear, set);
871
872 now = cpu_clock(cpu);
873
874 group = task_psi_group(task);
875 do {
876 psi_group_change(group, cpu, clear, set, now, true);
877 } while ((group = group->parent));
878 }
879
880 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
881 bool sleep)
882 {
883 struct psi_group *group, *common = NULL;
884 int cpu = task_cpu(prev);
885 u64 now = cpu_clock(cpu);
886
887 if (next->pid) {
888 psi_flags_change(next, 0, TSK_ONCPU);
889 /*
890 * Set TSK_ONCPU on @next's cgroups. If @next shares any
891 * ancestors with @prev, those will already have @prev's
892 * TSK_ONCPU bit set, and we can stop the iteration there.
893 */
894 group = task_psi_group(next);
895 do {
896 if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
897 PSI_ONCPU) {
898 common = group;
899 break;
900 }
901
902 psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
903 } while ((group = group->parent));
904 }
905
906 if (prev->pid) {
907 int clear = TSK_ONCPU, set = 0;
908 bool wake_clock = true;
909
910 /*
911 * When we're going to sleep, psi_dequeue() lets us
912 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
913 * TSK_IOWAIT here, where we can combine it with
914 * TSK_ONCPU and save walking common ancestors twice.
915 */
916 if (sleep) {
917 clear |= TSK_RUNNING;
918 if (prev->in_memstall)
919 clear |= TSK_MEMSTALL_RUNNING;
920 if (prev->in_iowait)
921 set |= TSK_IOWAIT;
922
923 /*
924 * Periodic aggregation shuts off if there is a period of no
925 * task changes, so we wake it back up if necessary. However,
926 * don't do this if the task change is the aggregation worker
927 * itself going to sleep, or we'll ping-pong forever.
928 */
929 if (unlikely((prev->flags & PF_WQ_WORKER) &&
930 wq_worker_last_func(prev) == psi_avgs_work))
931 wake_clock = false;
932 }
933
934 psi_flags_change(prev, clear, set);
935
936 group = task_psi_group(prev);
937 do {
938 if (group == common)
939 break;
940 psi_group_change(group, cpu, clear, set, now, wake_clock);
941 } while ((group = group->parent));
942
943 /*
944 * TSK_ONCPU is handled up to the common ancestor. If there are
945 * any other differences between the two tasks (e.g. prev goes
946 * to sleep, or only one task is memstall), finish propagating
947 * those differences all the way up to the root.
948 */
949 if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
950 clear &= ~TSK_ONCPU;
951 for (; group; group = group->parent)
952 psi_group_change(group, cpu, clear, set, now, wake_clock);
953 }
954 }
955 }
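
/*
 * A hypothetical walk of the above: if prev runs in /A/B1 and next in
 * /A/B2, the next walk sets TSK_ONCPU on B2 and stops at A, which
 * already carries prev's PSI_ONCPU; the prev walk then clears TSK_ONCPU
 * on B1 only. A and the root keep PSI_ONCPU set throughout, since one
 * of their tasks is still on the CPU.
 */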
956
957 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
958 void psi_account_irqtime(struct task_struct *task, u32 delta)
959 {
960 int cpu = task_cpu(task);
961 struct psi_group *group;
962 struct psi_group_cpu *groupc;
963 u64 now;
964
965 if (!task->pid)
966 return;
967
968 now = cpu_clock(cpu);
969
970 group = task_psi_group(task);
971 do {
972 if (!group->enabled)
973 continue;
974
975 groupc = per_cpu_ptr(group->pcpu, cpu);
976
977 write_seqcount_begin(&groupc->seq);
978
979 record_times(groupc, now);
980 groupc->times[PSI_IRQ_FULL] += delta;
981
982 write_seqcount_end(&groupc->seq);
983
984 if (group->poll_states & (1 << PSI_IRQ_FULL))
985 psi_schedule_poll_work(group, 1, false);
986 } while ((group = group->parent));
987 }
988 #endif
989
990 /**
991 * psi_memstall_enter - mark the beginning of a memory stall section
992 * @flags: flags to handle nested sections
993 *
994 * Marks the calling task as being stalled due to a lack of memory,
995 * such as waiting for a refault or performing reclaim.
996 */
997 void psi_memstall_enter(unsigned long *flags)
998 {
999 struct rq_flags rf;
1000 struct rq *rq;
1001
1002 if (static_branch_likely(&psi_disabled))
1003 return;
1004
1005 *flags = current->in_memstall;
1006 if (*flags)
1007 return;
1008 /*
1009 * in_memstall setting & accounting needs to be atomic wrt
1010 * changes to the task's scheduling state, otherwise we can
1011 * race with CPU migration.
1012 */
1013 rq = this_rq_lock_irq(&rf);
1014
1015 current->in_memstall = 1;
1016 psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
1017
1018 rq_unlock_irq(rq, &rf);
1019 }
1020 EXPORT_SYMBOL_GPL(psi_memstall_enter);
1021
1022 /**
1023 * psi_memstall_leave - mark the end of a memory stall section
1024 * @flags: flags to handle nested memdelay sections
1025 *
1026 * Marks the calling task as no longer stalled due to lack of memory.
1027 */
1028 void psi_memstall_leave(unsigned long *flags)
1029 {
1030 struct rq_flags rf;
1031 struct rq *rq;
1032
1033 if (static_branch_likely(&psi_disabled))
1034 return;
1035
1036 if (*flags)
1037 return;
1038 /*
1039 * in_memstall clearing & accounting needs to be atomic wrt
1040 * changes to the task's scheduling state, otherwise we could
1041 * race with CPU migration.
1042 */
1043 rq = this_rq_lock_irq(&rf);
1044
1045 current->in_memstall = 0;
1046 psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
1047
1048 rq_unlock_irq(rq, &rf);
1049 }
1050 EXPORT_SYMBOL_GPL(psi_memstall_leave);
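
/*
 * Typical usage is a sketch like the following, mirroring how callers
 * elsewhere in the kernel annotate memory stalls (the surrounding
 * reclaim/refault logic is elided):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... block on reclaim, compaction or a refaulting page ...
 *	psi_memstall_leave(&pflags);
 *
 * The saved flags make nesting safe: an inner section that finds
 * current->in_memstall already set leaves the state untouched.
 */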
1051
1052 #ifdef CONFIG_CGROUPS
1053 int psi_cgroup_alloc(struct cgroup *cgroup)
1054 {
1055 if (!static_branch_likely(&psi_cgroups_enabled))
1056 return 0;
1057
1058 cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
1059 if (!cgroup->psi)
1060 return -ENOMEM;
1061
1062 cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
1063 if (!cgroup->psi->pcpu) {
1064 kfree(cgroup->psi);
1065 return -ENOMEM;
1066 }
1067 group_init(cgroup->psi);
1068 cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
1069 return 0;
1070 }
1071
1072 void psi_cgroup_free(struct cgroup *cgroup)
1073 {
1074 if (!static_branch_likely(&psi_cgroups_enabled))
1075 return;
1076
1077 cancel_delayed_work_sync(&cgroup->psi->avgs_work);
1078 free_percpu(cgroup->psi->pcpu);
1079 /* All triggers must be removed by now */
1080 WARN_ONCE(cgroup->psi->poll_states, "psi: trigger leak\n");
1081 kfree(cgroup->psi);
1082 }
1083
1084 /**
1085 * cgroup_move_task - move task to a different cgroup
1086 * @task: the task
1087 * @to: the target css_set
1088 *
1089 * Move task to a new cgroup and safely migrate its associated stall
1090 * state between the different groups.
1091 *
1092 * This function acquires the task's rq lock to lock out concurrent
1093 * changes to the task's scheduling state and - in case the task is
1094 * running - concurrent changes to its stall state.
1095 */
1096 void cgroup_move_task(struct task_struct *task, struct css_set *to)
1097 {
1098 unsigned int task_flags;
1099 struct rq_flags rf;
1100 struct rq *rq;
1101
1102 if (!static_branch_likely(&psi_cgroups_enabled)) {
1103 /*
1104 * Lame to do this here, but the scheduler cannot be locked
1105 * from the outside, so we move cgroups from inside sched/.
1106 */
1107 rcu_assign_pointer(task->cgroups, to);
1108 return;
1109 }
1110
1111 rq = task_rq_lock(task, &rf);
1112
1113 /*
1114 * We may race with schedule() dropping the rq lock between
1115 * deactivating prev and switching to next. Because the psi
1116 * updates from the deactivation are deferred to the switch
1117 * callback to save cgroup tree updates, the task's scheduling
1118 * state here is not coherent with its psi state:
1119 *
1120 * schedule() cgroup_move_task()
1121 * rq_lock()
1122 * deactivate_task()
1123 * p->on_rq = 0
1124 * psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
1125 * pick_next_task()
1126 * rq_unlock()
1127 * rq_lock()
1128 * psi_task_change() // old cgroup
1129 * task->cgroups = to
1130 * psi_task_change() // new cgroup
1131 * rq_unlock()
1132 * rq_lock()
1133 * psi_sched_switch() // does deferred updates in new cgroup
1134 *
1135 * Don't rely on the scheduling state. Use psi_flags instead.
1136 */
1137 task_flags = task->psi_flags;
1138
1139 if (task_flags)
1140 psi_task_change(task, task_flags, 0);
1141
1142 /* See comment above */
1143 rcu_assign_pointer(task->cgroups, to);
1144
1145 if (task_flags)
1146 psi_task_change(task, 0, task_flags);
1147
1148 task_rq_unlock(rq, task, &rf);
1149 }
1150
1151 void psi_cgroup_restart(struct psi_group *group)
1152 {
1153 int cpu;
1154
1155 /*
1156 * After psi_group->enabled is cleared, we don't actually
1157 * stop per-cpu task accounting in each psi_group_cpu;
1158 * we only stop the test_state() loop, record_times()
1159 * and the averaging worker - see psi_group_change() for details.
1160 *
1161 * When cgroup PSI is disabled, this function has nothing to sync
1162 * since the cgroup pressure files are hidden and the per-cpu
1163 * psi_group_cpu sees !psi_group->enabled and only does task accounting.
1164 *
1165 * When cgroup PSI is re-enabled, this function uses psi_group_change()
1166 * to get the correct state mask from the test_state() loop on tasks[]
1167 * and restarts groupc->state_start from now. Use .clear = .set = 0
1168 * here since no task state really changed.
1169 */
1170 if (!group->enabled)
1171 return;
1172
1173 for_each_possible_cpu(cpu) {
1174 struct rq *rq = cpu_rq(cpu);
1175 struct rq_flags rf;
1176 u64 now;
1177
1178 rq_lock_irq(rq, &rf);
1179 now = cpu_clock(cpu);
1180 psi_group_change(group, cpu, 0, 0, now, true);
1181 rq_unlock_irq(rq, &rf);
1182 }
1183 }
1184 #endif /* CONFIG_CGROUPS */
1185
1186 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1187 {
1188 bool only_full = false;
1189 int full;
1190 u64 now;
1191
1192 if (static_branch_likely(&psi_disabled))
1193 return -EOPNOTSUPP;
1194
1195 /* Update averages before reporting them */
1196 mutex_lock(&group->avgs_lock);
1197 now = sched_clock();
1198 collect_percpu_times(group, PSI_AVGS, NULL);
1199 if (now >= group->avg_next_update)
1200 group->avg_next_update = update_averages(group, now);
1201 mutex_unlock(&group->avgs_lock);
1202
1203 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1204 only_full = res == PSI_IRQ;
1205 #endif
1206
1207 for (full = 0; full < 2 - only_full; full++) {
1208 unsigned long avg[3] = { 0, };
1209 u64 total = 0;
1210 int w;
1211
1212 /* CPU FULL is undefined at the system level */
1213 if (!(group == &psi_system && res == PSI_CPU && full)) {
1214 for (w = 0; w < 3; w++)
1215 avg[w] = group->avg[res * 2 + full][w];
1216 total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1217 NSEC_PER_USEC);
1218 }
1219
1220 seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1221 full || only_full ? "full" : "some",
1222 LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1223 LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1224 LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1225 total);
1226 }
1227
1228 return 0;
1229 }
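
/*
 * The resulting output, e.g. for /proc/pressure/memory, looks like this
 * (values are illustrative):
 *
 *	some avg10=2.04 avg60=0.75 avg300=0.40 total=157656722
 *	full avg10=1.02 avg60=0.31 avg300=0.12 total=85154757
 *
 * The irq files carry only the "full" line, and the system-level CPU
 * file reports an all-zero "full" line since CPU FULL is undefined
 * there.
 */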
1230
1231 struct psi_trigger *psi_trigger_create(struct psi_group *group,
1232 char *buf, enum psi_res res)
1233 {
1234 struct psi_trigger *t;
1235 enum psi_states state;
1236 u32 threshold_us;
1237 u32 window_us;
1238
1239 if (static_branch_likely(&psi_disabled))
1240 return ERR_PTR(-EOPNOTSUPP);
1241
1242 if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1243 state = PSI_IO_SOME + res * 2;
1244 else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1245 state = PSI_IO_FULL + res * 2;
1246 else
1247 return ERR_PTR(-EINVAL);
1248
1249 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1250 if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
1251 return ERR_PTR(-EINVAL);
1252 #endif
1253
1254 if (state >= PSI_NONIDLE)
1255 return ERR_PTR(-EINVAL);
1256
1257 if (window_us == 0 || window_us > WINDOW_MAX_US)
1258 return ERR_PTR(-EINVAL);
1259
1260 /* Check threshold */
1261 if (threshold_us == 0 || threshold_us > window_us)
1262 return ERR_PTR(-EINVAL);
1263
1264 t = kmalloc(sizeof(*t), GFP_KERNEL);
1265 if (!t)
1266 return ERR_PTR(-ENOMEM);
1267
1268 t->group = group;
1269 t->state = state;
1270 t->threshold = threshold_us * NSEC_PER_USEC;
1271 t->win.size = window_us * NSEC_PER_USEC;
1272 window_reset(&t->win, sched_clock(),
1273 group->total[PSI_POLL][t->state], 0);
1274
1275 t->event = 0;
1276 t->last_event_time = 0;
1277 init_waitqueue_head(&t->event_wait);
1278 t->pending_event = false;
1279
1280 mutex_lock(&group->trigger_lock);
1281
1282 if (!rcu_access_pointer(group->poll_task)) {
1283 struct task_struct *task;
1284
1285 task = kthread_create(psi_poll_worker, group, "psimon");
1286 if (IS_ERR(task)) {
1287 kfree(t);
1288 mutex_unlock(&group->trigger_lock);
1289 return ERR_CAST(task);
1290 }
1291 atomic_set(&group->poll_wakeup, 0);
1292 wake_up_process(task);
1293 rcu_assign_pointer(group->poll_task, task);
1294 }
1295
1296 list_add(&t->node, &group->triggers);
1297 group->poll_min_period = min(group->poll_min_period,
1298 div_u64(t->win.size, UPDATES_PER_WINDOW));
1299 group->nr_triggers[t->state]++;
1300 group->poll_states |= (1 << t->state);
1301
1302 mutex_unlock(&group->trigger_lock);
1303
1304 return t;
1305 }
1306
1307 void psi_trigger_destroy(struct psi_trigger *t)
1308 {
1309 struct psi_group *group;
1310 struct task_struct *task_to_destroy = NULL;
1311
1312 /*
1313 * We do not check psi_disabled since it might have been disabled after
1314 * the trigger got created.
1315 */
1316 if (!t)
1317 return;
1318
1319 group = t->group;
1320 /*
1321 * Wakeup waiters to stop polling and clear the queue to prevent it from
1322 * being accessed later. This can happen if the cgroup is deleted from
1323 * under a polling process.
1324 */
1325 wake_up_pollfree(&t->event_wait);
1326
1327 mutex_lock(&group->trigger_lock);
1328
1329 if (!list_empty(&t->node)) {
1330 struct psi_trigger *tmp;
1331 u64 period = ULLONG_MAX;
1332
1333 list_del(&t->node);
1334 group->nr_triggers[t->state]--;
1335 if (!group->nr_triggers[t->state])
1336 group->poll_states &= ~(1 << t->state);
1337 /* reset min update period for the remaining triggers */
1338 list_for_each_entry(tmp, &group->triggers, node)
1339 period = min(period, div_u64(tmp->win.size,
1340 UPDATES_PER_WINDOW));
1341 group->poll_min_period = period;
1342 /* Destroy poll_task when the last trigger is destroyed */
1343 if (group->poll_states == 0) {
1344 group->polling_until = 0;
1345 task_to_destroy = rcu_dereference_protected(
1346 group->poll_task,
1347 lockdep_is_held(&group->trigger_lock));
1348 rcu_assign_pointer(group->poll_task, NULL);
1349 del_timer(&group->poll_timer);
1350 }
1351 }
1352
1353 mutex_unlock(&group->trigger_lock);
1354
1355 /*
1356 * Wait for psi_schedule_poll_work RCU to complete its read-side
1357 * critical section before destroying the trigger and optionally the
1358 * poll_task.
1359 */
1360 synchronize_rcu();
1361 /*
1362 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
1363 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1364 */
1365 if (task_to_destroy) {
1366 /*
1367 * After the RCU grace period has expired, the worker
1368 * can no longer be found through group->poll_task.
1369 */
1370 kthread_stop(task_to_destroy);
1371 atomic_set(&group->poll_scheduled, 0);
1372 }
1373 kfree(t);
1374 }
1375
1376 __poll_t psi_trigger_poll(void **trigger_ptr,
1377 struct file *file, poll_table *wait)
1378 {
1379 __poll_t ret = DEFAULT_POLLMASK;
1380 struct psi_trigger *t;
1381
1382 if (static_branch_likely(&psi_disabled))
1383 return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1384
1385 t = smp_load_acquire(trigger_ptr);
1386 if (!t)
1387 return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1388
1389 poll_wait(file, &t->event_wait, wait);
1390
1391 if (cmpxchg(&t->event, 1, 0) == 1)
1392 ret |= EPOLLPRI;
1393
1394 return ret;
1395 }
1396
1397 #ifdef CONFIG_PROC_FS
1398 static int psi_io_show(struct seq_file *m, void *v)
1399 {
1400 return psi_show(m, &psi_system, PSI_IO);
1401 }
1402
1403 static int psi_memory_show(struct seq_file *m, void *v)
1404 {
1405 return psi_show(m, &psi_system, PSI_MEM);
1406 }
1407
1408 static int psi_cpu_show(struct seq_file *m, void *v)
1409 {
1410 return psi_show(m, &psi_system, PSI_CPU);
1411 }
1412
1413 static int psi_io_open(struct inode *inode, struct file *file)
1414 {
1415 return single_open(file, psi_io_show, NULL);
1416 }
1417
1418 static int psi_memory_open(struct inode *inode, struct file *file)
1419 {
1420 return single_open(file, psi_memory_show, NULL);
1421 }
1422
1423 static int psi_cpu_open(struct inode *inode, struct file *file)
1424 {
1425 return single_open(file, psi_cpu_show, NULL);
1426 }
1427
1428 static ssize_t psi_write(struct file *file, const char __user *user_buf,
1429 size_t nbytes, enum psi_res res)
1430 {
1431 char buf[32];
1432 size_t buf_size;
1433 struct seq_file *seq;
1434 struct psi_trigger *new;
1435
1436 if (static_branch_likely(&psi_disabled))
1437 return -EOPNOTSUPP;
1438
1439 if (!nbytes)
1440 return -EINVAL;
1441
1442 buf_size = min(nbytes, sizeof(buf));
1443 if (copy_from_user(buf, user_buf, buf_size))
1444 return -EFAULT;
1445
1446 buf[buf_size - 1] = '\0';
1447
1448 seq = file->private_data;
1449
1450 /* Take seq->lock to protect seq->private from concurrent writes */
1451 mutex_lock(&seq->lock);
1452
1453 /* Allow only one trigger per file descriptor */
1454 if (seq->private) {
1455 mutex_unlock(&seq->lock);
1456 return -EBUSY;
1457 }
1458
1459 new = psi_trigger_create(&psi_system, buf, res);
1460 if (IS_ERR(new)) {
1461 mutex_unlock(&seq->lock);
1462 return PTR_ERR(new);
1463 }
1464
1465 smp_store_release(&seq->private, new);
1466 mutex_unlock(&seq->lock);
1467
1468 return nbytes;
1469 }
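
/*
 * A minimal userspace sketch (illustrative; includes and error handling
 * omitted, see Documentation/accounting/psi.rst): arm a trigger that
 * fires when memory SOME stall time exceeds 150ms within a 1s window,
 * then wait for events:
 *
 *	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	write(fd, "some 150000 1000000", 19);
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLERR)
 *			break;			// event source is gone
 *		if (pfd.revents & POLLPRI)
 *			handle_pressure_event();	// hypothetical handler
 *	}
 *	close(fd);
 */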
1470
1471 static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1472 size_t nbytes, loff_t *ppos)
1473 {
1474 return psi_write(file, user_buf, nbytes, PSI_IO);
1475 }
1476
1477 static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1478 size_t nbytes, loff_t *ppos)
1479 {
1480 return psi_write(file, user_buf, nbytes, PSI_MEM);
1481 }
1482
1483 static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1484 size_t nbytes, loff_t *ppos)
1485 {
1486 return psi_write(file, user_buf, nbytes, PSI_CPU);
1487 }
1488
1489 static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1490 {
1491 struct seq_file *seq = file->private_data;
1492
1493 return psi_trigger_poll(&seq->private, file, wait);
1494 }
1495
1496 static int psi_fop_release(struct inode *inode, struct file *file)
1497 {
1498 struct seq_file *seq = file->private_data;
1499
1500 psi_trigger_destroy(seq->private);
1501 return single_release(inode, file);
1502 }
1503
1504 static const struct proc_ops psi_io_proc_ops = {
1505 .proc_open = psi_io_open,
1506 .proc_read = seq_read,
1507 .proc_lseek = seq_lseek,
1508 .proc_write = psi_io_write,
1509 .proc_poll = psi_fop_poll,
1510 .proc_release = psi_fop_release,
1511 };
1512
1513 static const struct proc_ops psi_memory_proc_ops = {
1514 .proc_open = psi_memory_open,
1515 .proc_read = seq_read,
1516 .proc_lseek = seq_lseek,
1517 .proc_write = psi_memory_write,
1518 .proc_poll = psi_fop_poll,
1519 .proc_release = psi_fop_release,
1520 };
1521
1522 static const struct proc_ops psi_cpu_proc_ops = {
1523 .proc_open = psi_cpu_open,
1524 .proc_read = seq_read,
1525 .proc_lseek = seq_lseek,
1526 .proc_write = psi_cpu_write,
1527 .proc_poll = psi_fop_poll,
1528 .proc_release = psi_fop_release,
1529 };
1530
1531 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1532 static int psi_irq_show(struct seq_file *m, void *v)
1533 {
1534 return psi_show(m, &psi_system, PSI_IRQ);
1535 }
1536
1537 static int psi_irq_open(struct inode *inode, struct file *file)
1538 {
1539 return single_open(file, psi_irq_show, NULL);
1540 }
1541
1542 static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
1543 size_t nbytes, loff_t *ppos)
1544 {
1545 return psi_write(file, user_buf, nbytes, PSI_IRQ);
1546 }
1547
1548 static const struct proc_ops psi_irq_proc_ops = {
1549 .proc_open = psi_irq_open,
1550 .proc_read = seq_read,
1551 .proc_lseek = seq_lseek,
1552 .proc_write = psi_irq_write,
1553 .proc_poll = psi_fop_poll,
1554 .proc_release = psi_fop_release,
1555 };
1556 #endif
1557
1558 static int __init psi_proc_init(void)
1559 {
1560 if (psi_enable) {
1561 proc_mkdir("pressure", NULL);
1562 proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
1563 proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
1564 proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
1565 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1566 proc_create("pressure/irq", 0, NULL, &psi_irq_proc_ops);
1567 #endif
1568 }
1569 return 0;
1570 }
1571 module_init(psi_proc_init);
1572
1573 #endif /* CONFIG_PROC_FS */
1574