1 /*
2  * Pressure stall information for CPU, memory and IO
3  *
4  * Copyright (c) 2018 Facebook, Inc.
5  * Author: Johannes Weiner <hannes@cmpxchg.org>
6  *
7  * Polling support by Suren Baghdasaryan <surenb@google.com>
8  * Copyright (c) 2018 Google, Inc.
9  *
10  * When CPU, memory and IO are contended, tasks experience delays that
11  * reduce throughput and introduce latencies into the workload. Memory
12  * and IO contention, in addition, can cause a full loss of forward
13  * progress in which the CPU goes idle.
14  *
15  * This code aggregates individual task delays into resource pressure
16  * metrics that indicate problems with both workload health and
17  * resource utilization.
18  *
19  *			Model
20  *
21  * The time in which a task can execute on a CPU is our baseline for
22  * productivity. Pressure expresses the amount of time in which this
23  * potential cannot be realized due to resource contention.
24  *
25  * This concept of productivity has two components: the workload and
26  * the CPU. To measure the impact of pressure on both, we define two
27  * contention states for a resource: SOME and FULL.
28  *
29  * In the SOME state of a given resource, one or more tasks are
30  * delayed on that resource. This affects the workload's ability to
31  * perform work, but the CPU may still be executing other tasks.
32  *
33  * In the FULL state of a given resource, all non-idle tasks are
34  * delayed on that resource such that nobody is advancing and the CPU
35  * goes idle. This leaves both workload and CPU unproductive.
36  *
37  *	SOME = nr_delayed_tasks != 0
38  *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
39  *
40  * What it means for a task to be productive is defined differently
41  * for each resource. For IO, productive means a running task. For
42  * memory, productive means a running task that isn't a reclaimer. For
43  * CPU, productive means an oncpu task.
44  *
45  * Naturally, the FULL state doesn't exist for the CPU resource at the
46  * system level, but exists at the cgroup level. At the cgroup level,
47  * FULL means all non-idle tasks in the cgroup are delayed on the CPU
48  * resource which is being used by others outside of the cgroup or
49  * throttled by the cgroup cpu.max configuration.
50  *
51  * The percentage of wallclock time spent in those compound stall
52  * states gives pressure numbers between 0 and 100 for each resource,
53  * where the SOME percentage indicates workload slowdowns and the FULL
54  * percentage indicates reduced CPU utilization:
55  *
56  *	%SOME = time(SOME) / period
57  *	%FULL = time(FULL) / period
58  *
59  *			Multiple CPUs
60  *
61  * The more tasks and available CPUs there are, the more work can be
62  * performed concurrently. This means that the potential that can go
63  * unrealized due to resource contention *also* scales with non-idle
64  * tasks and CPUs.
65  *
66  * Consider a scenario where 257 number crunching tasks are trying to
67  * run concurrently on 256 CPUs. If we simply aggregated the task
68  * states, we would have to conclude a CPU SOME pressure number of
69  * 100%, since *somebody* is waiting on a runqueue at all
70  * times. However, that is clearly not the amount of contention the
71  * workload is experiencing: only one out of 256 possible execution
72  * threads will be contended at any given time, or about 0.4%.
73  *
74  * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
75  * given time *one* of the tasks is delayed due to a lack of memory.
76  * Again, looking purely at the task state would yield a memory FULL
77  * pressure number of 0%, since *somebody* is always making forward
78  * progress. But again this wouldn't capture the amount of execution
79  * potential lost, which is 1 out of 4 CPUs, or 25%.
80  *
81  * To calculate wasted potential (pressure) with multiple processors,
82  * we have to base our calculation on the number of non-idle tasks in
83  * conjunction with the number of available CPUs, which is the number
84  * of potential execution threads. SOME then becomes the proportion of
85  * delayed tasks to possible threads, and FULL is the share of possible
86  * threads that are unproductive due to delays:
87  *
88  *	threads = min(nr_nonidle_tasks, nr_cpus)
89  *	   SOME = min(nr_delayed_tasks / threads, 1)
90  *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
91  *
92  * For the 257 number crunchers on 256 CPUs, this yields:
93  *
94  *	threads = min(257, 256)
95  *	   SOME = min(1 / 256, 1)             = 0.4%
96  *	   FULL = (256 - min(256, 256)) / 256 = 0%
97  *
98  * For the 1 out of 4 memory-delayed tasks, this yields:
99  *
100  *	threads = min(4, 4)
101  *	   SOME = min(1 / 4, 1)               = 25%
102  *	   FULL = (4 - min(3, 4)) / 4         = 25%
103  *
104  * [ Substitute nr_cpus with 1, and you can see that it's a natural
105  *   extension of the single-CPU model. ]
106  *
107  *			Implementation
108  *
109  * To assess the precise time spent in each such state, we would have
110  * to freeze the system on task changes and start/stop the state
111  * clocks accordingly. Obviously that doesn't scale in practice.
112  *
113  * Because the scheduler aims to distribute the compute load evenly
114  * among the available CPUs, we can track task state locally to each
115  * CPU and, at much lower frequency, extrapolate the global state for
116  * the cumulative stall times and the running averages.
117  *
118  * For each runqueue, we track:
119  *
120  *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
121  *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
122  *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
123  *
124  * and then periodically aggregate:
125  *
126  *	tNONIDLE = sum(tNONIDLE[i])
127  *
128  *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
129  *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
130  *
131  *	   %SOME = tSOME / period
132  *	   %FULL = tFULL / period
133  *
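 *
 * [ Illustrative example with made-up numbers: if CPU0 was non-idle
 *   for 1000us of a period and in SOME stall for 500us, while CPU1
 *   was non-idle for only 100us and stalled for all of them, then
 *   tSOME = (500 * 1000 + 100 * 100) / (1000 + 100) ~= 464us -- the
 *   mostly idle CPU1 barely influences the aggregate, as intended. ]
 *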
134  * This gives us an approximation of pressure that is practical
135  * cost-wise, yet way more sensitive and accurate than periodic
136  * sampling of the aggregate task states would be.
137  */
138 
139 #include "../workqueue_internal.h"
140 #include <linux/sched/loadavg.h>
141 #include <linux/seq_file.h>
142 #include <linux/proc_fs.h>
143 #include <linux/seqlock.h>
144 #include <linux/uaccess.h>
145 #include <linux/cgroup.h>
146 #include <linux/module.h>
147 #include <linux/sched.h>
148 #include <linux/ctype.h>
149 #include <linux/file.h>
150 #include <linux/poll.h>
151 #include <linux/psi.h>
152 #include "sched.h"
153 
154 #include <trace/hooks/psi.h>
155 
156 static int psi_bug __read_mostly;
157 
158 DEFINE_STATIC_KEY_FALSE(psi_disabled);
159 DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
160 
161 #ifdef CONFIG_PSI_DEFAULT_DISABLED
162 static bool psi_enable;
163 #else
164 static bool psi_enable = true;
165 #endif
166 static int __init setup_psi(char *str)
167 {
168 	return kstrtobool(str, &psi_enable) == 0;
169 }
170 __setup("psi=", setup_psi);
171 
172 /* Running averages - we need to be higher-res than loadavg */
173 #define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
174 #define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
175 #define EXP_60s		1981		/* 1/exp(2s/60s) */
176 #define EXP_300s	2034		/* 1/exp(2s/300s) */
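/*
 * For reference: the EXP_* values above are exp(-2s/T) in the same
 * FIXED_1 (== 2048) fixed-point representation used by the load
 * average code, e.g. exp(-2/10) * 2048 ~= 1677.
 */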
177 
178 /* PSI trigger definitions */
179 #define WINDOW_MIN_US 500000	/* Min window size is 500ms */
180 #define WINDOW_MAX_US 10000000	/* Max window size is 10s */
181 #define UPDATES_PER_WINDOW 10	/* 10 updates per window */
182 
183 /* Sampling frequency in nanoseconds */
184 static u64 psi_period __read_mostly;
185 
186 /* System-level pressure and stall tracking */
187 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
188 struct psi_group psi_system = {
189 	.pcpu = &system_group_pcpu,
190 };
191 EXPORT_SYMBOL_GPL(psi_system);
192 static void psi_avgs_work(struct work_struct *work);
193 
194 static void poll_timer_fn(struct timer_list *t);
195 
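/*
 * Minimal atomic bit helpers, used below for the POLL_SCHEDULED and
 * POLL_WAKEUP bits in group->poll_wakeup; the fetch-and-modify
 * variants also report the bit's previous value.
 */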
196 static inline void atomic_set_bit(int i, atomic_t *v)
197 {
198 	atomic_or(1 << i, v);
199 }
200 
201 static inline void atomic_clear_bit(int i, atomic_t *v)
202 {
203 	atomic_and(~(1 << i), v);
204 }
205 
206 static inline int atomic_fetch_and_set_bit(int i, atomic_t *v)
207 {
208 	int mask = 1 << i;
209 	return atomic_fetch_or(mask, v) & mask;
210 }
211 
212 static inline int atomic_fetch_and_clear_bit(int i, atomic_t *v)
213 {
214 	int mask = 1 << i;
215 	return atomic_fetch_and(~mask, v) & mask;
216 }
217 
218 static void group_init(struct psi_group *group)
219 {
220 	int cpu;
221 
222 	for_each_possible_cpu(cpu)
223 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
224 	group->avg_last_update = sched_clock();
225 	group->avg_next_update = group->avg_last_update + psi_period;
226 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
227 	mutex_init(&group->avgs_lock);
228 	/* Init trigger-related members */
229 	atomic_set(&group->poll_wakeup, 0);
230 	mutex_init(&group->trigger_lock);
231 	INIT_LIST_HEAD(&group->triggers);
232 	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
233 	group->poll_states = 0;
234 	group->poll_min_period = U32_MAX;
235 	memset(group->polling_total, 0, sizeof(group->polling_total));
236 	group->polling_next_update = ULLONG_MAX;
237 	group->polling_until = 0;
238 	init_waitqueue_head(&group->poll_wait);
239 	timer_setup(&group->poll_timer, poll_timer_fn, 0);
240 	rcu_assign_pointer(group->poll_task, NULL);
241 }
242 
243 void __init psi_init(void)
244 {
245 	if (!psi_enable) {
246 		static_branch_enable(&psi_disabled);
247 		return;
248 	}
249 
250 	if (!cgroup_psi_enabled())
251 		static_branch_disable(&psi_cgroups_enabled);
252 
253 	psi_period = jiffies_to_nsecs(PSI_FREQ);
254 	group_init(&psi_system);
255 }
256 
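/*
 * Translate the per-cpu task counts into PSI state bits. Note that
 * MEM_FULL also covers the case where the only runnable tasks are
 * reclaimers (NR_RUNNING == NR_MEMSTALL_RUNNING): the CPU isn't idle,
 * but nothing productive is making progress.
 */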
257 static bool test_state(unsigned int *tasks, enum psi_states state)
258 {
259 	switch (state) {
260 	case PSI_IO_SOME:
261 		return unlikely(tasks[NR_IOWAIT]);
262 	case PSI_IO_FULL:
263 		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
264 	case PSI_MEM_SOME:
265 		return unlikely(tasks[NR_MEMSTALL]);
266 	case PSI_MEM_FULL:
267 		return unlikely(tasks[NR_MEMSTALL] &&
268 			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
269 	case PSI_CPU_SOME:
270 		return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]);
271 	case PSI_CPU_FULL:
272 		return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]);
273 	case PSI_NONIDLE:
274 		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
275 			tasks[NR_RUNNING];
276 	default:
277 		return false;
278 	}
279 }
280 
281 static void get_recent_times(struct psi_group *group, int cpu,
282 			     enum psi_aggregators aggregator, u32 *times,
283 			     u32 *pchanged_states)
284 {
285 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
286 	u64 now, state_start;
287 	enum psi_states s;
288 	unsigned int seq;
289 	u32 state_mask;
290 
291 	*pchanged_states = 0;
292 
293 	/* Snapshot a coherent view of the CPU state */
294 	do {
295 		seq = read_seqcount_begin(&groupc->seq);
296 		now = cpu_clock(cpu);
297 		memcpy(times, groupc->times, sizeof(groupc->times));
298 		state_mask = groupc->state_mask;
299 		state_start = groupc->state_start;
300 	} while (read_seqcount_retry(&groupc->seq, seq));
301 
302 	/* Calculate state time deltas against the previous snapshot */
303 	for (s = 0; s < NR_PSI_STATES; s++) {
304 		u32 delta;
305 		/*
306 		 * In addition to already concluded states, we also
307 		 * incorporate currently active states on the CPU,
308 		 * since states may last for many sampling periods.
309 		 *
310 		 * This way we keep our delta sampling buckets small
311 		 * (u32) and our reported pressure close to what's
312 		 * actually happening.
313 		 */
314 		if (state_mask & (1 << s))
315 			times[s] += now - state_start;
316 
317 		delta = times[s] - groupc->times_prev[aggregator][s];
318 		groupc->times_prev[aggregator][s] = times[s];
319 
320 		times[s] = delta;
321 		if (delta)
322 			*pchanged_states |= (1 << s);
323 	}
324 }
325 
326 static void calc_avgs(unsigned long avg[3], int missed_periods,
327 		      u64 time, u64 period)
328 {
329 	unsigned long pct;
330 
331 	/* Fill in zeroes for periods of no activity */
332 	if (missed_periods) {
333 		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
334 		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
335 		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
336 	}
337 
338 	/* Sample the most recent active period */
339 	pct = div_u64(time * 100, period);
340 	pct *= FIXED_1;
341 	avg[0] = calc_load(avg[0], EXP_10s, pct);
342 	avg[1] = calc_load(avg[1], EXP_60s, pct);
343 	avg[2] = calc_load(avg[2], EXP_300s, pct);
344 }
345 
346 static void collect_percpu_times(struct psi_group *group,
347 				 enum psi_aggregators aggregator,
348 				 u32 *pchanged_states)
349 {
350 	u64 deltas[NR_PSI_STATES - 1] = { 0, };
351 	unsigned long nonidle_total = 0;
352 	u32 changed_states = 0;
353 	int cpu;
354 	int s;
355 
356 	/*
357 	 * Collect the per-cpu time buckets and average them into a
358 	 * single time sample that is normalized to wallclock time.
359 	 *
360 	 * For averaging, each CPU is weighted by its non-idle time in
361 	 * the sampling period. This eliminates artifacts from uneven
362 	 * loading, or even entirely idle CPUs.
363 	 */
364 	for_each_possible_cpu(cpu) {
365 		u32 times[NR_PSI_STATES];
366 		u32 nonidle;
367 		u32 cpu_changed_states;
368 
369 		get_recent_times(group, cpu, aggregator, times,
370 				&cpu_changed_states);
371 		changed_states |= cpu_changed_states;
372 
373 		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
374 		nonidle_total += nonidle;
375 
376 		for (s = 0; s < PSI_NONIDLE; s++)
377 			deltas[s] += (u64)times[s] * nonidle;
378 	}
379 
380 	/*
381 	 * Integrate the sample into the running statistics that are
382 	 * reported to userspace: the cumulative stall times and the
383 	 * decaying averages.
384 	 *
385 	 * Pressure percentages are sampled at PSI_FREQ. We might be
386 	 * called more often when the user polls more frequently than
387 	 * that; we might be called less often when there is no task
388 	 * activity, thus no data, and clock ticks are sporadic. The
389 	 * below handles both.
390 	 */
391 
392 	/* total= */
393 	for (s = 0; s < NR_PSI_STATES - 1; s++)
394 		group->total[aggregator][s] +=
395 				div_u64(deltas[s], max(nonidle_total, 1UL));
396 
397 	if (pchanged_states)
398 		*pchanged_states = changed_states;
399 }
400 
401 static u64 update_averages(struct psi_group *group, u64 now)
402 {
403 	unsigned long missed_periods = 0;
404 	u64 expires, period;
405 	u64 avg_next_update;
406 	int s;
407 
408 	/* avgX= */
409 	expires = group->avg_next_update;
410 	if (now - expires >= psi_period)
411 		missed_periods = div_u64(now - expires, psi_period);
412 
413 	/*
414 	 * The periodic clock tick can get delayed for various
415 	 * reasons, especially on loaded systems. To avoid clock
416 	 * drift, we schedule the clock in fixed psi_period intervals.
417 	 * But the deltas we sample out of the per-cpu buckets above
418 	 * are based on the actual time elapsing between clock ticks.
419 	 */
420 	avg_next_update = expires + ((1 + missed_periods) * psi_period);
421 	period = now - (group->avg_last_update + (missed_periods * psi_period));
422 	group->avg_last_update = now;
423 
424 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
425 		u32 sample;
426 
427 		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
428 		/*
429 		 * Due to the lockless sampling of the time buckets,
430 		 * recorded time deltas can slip into the next period,
431 		 * which under full pressure can result in samples in
432 		 * excess of the period length.
433 		 *
434 		 * We don't want to report non-sensical pressures in
435 		 * excess of 100%, nor do we want to drop such events
436 		 * on the floor. Instead we punt any overage into the
437 		 * future until pressure subsides. By doing this we
438 		 * don't underreport the occurring pressure curve, we
439 		 * just report it delayed by one period length.
440 		 *
441 		 * The error isn't cumulative. As soon as another
442 		 * delta slips from a period P to P+1, by definition
443 		 * it frees up its time T in P.
444 		 */
445 		if (sample > period)
446 			sample = period;
447 		group->avg_total[s] += sample;
448 		calc_avgs(group->avg[s], missed_periods, sample, period);
449 	}
450 
451 	return avg_next_update;
452 }
453 
454 static void psi_avgs_work(struct work_struct *work)
455 {
456 	struct delayed_work *dwork;
457 	struct psi_group *group;
458 	u32 changed_states;
459 	bool nonidle;
460 	u64 now;
461 
462 	dwork = to_delayed_work(work);
463 	group = container_of(dwork, struct psi_group, avgs_work);
464 
465 	mutex_lock(&group->avgs_lock);
466 
467 	now = sched_clock();
468 
469 	collect_percpu_times(group, PSI_AVGS, &changed_states);
470 	nonidle = changed_states & (1 << PSI_NONIDLE);
471 	/*
472 	 * If there is task activity, periodically fold the per-cpu
473 	 * times and feed samples into the running averages. If things
474 	 * are idle and there is no data to process, stop the clock.
475 	 * Once restarted, we'll catch up the running averages in one
476 	 * go - see calc_avgs() and missed_periods.
477 	 */
478 	if (now >= group->avg_next_update)
479 		group->avg_next_update = update_averages(group, now);
480 
481 	if (nonidle) {
482 		schedule_delayed_work(dwork, nsecs_to_jiffies(
483 				group->avg_next_update - now) + 1);
484 	}
485 
486 	mutex_unlock(&group->avgs_lock);
487 }
488 
489 /* Trigger tracking window manipulations */
490 static void window_reset(struct psi_window *win, u64 now, u64 value,
491 			 u64 prev_growth)
492 {
493 	win->start_time = now;
494 	win->start_value = value;
495 	win->prev_growth = prev_growth;
496 }
497 
498 /*
499  * PSI growth tracking window update and growth calculation routine.
500  *
501  * This approximates a sliding tracking window by interpolating
502  * partially elapsed windows using historical growth data from the
503  * previous intervals. This minimizes memory requirements (by not storing
504  * all the intermediate values in the previous window) and simplifies
505  * the calculations. It works well because PSI signal changes only in
506  * positive direction and over relatively small window sizes the growth
507  * is close to linear.
508  */
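/*
 * For example (hypothetical numbers): with a 1s window that is 75%
 * elapsed, 30ms of growth observed so far and 40ms of growth during
 * the previous window, the interpolated result is
 * 30ms + 40ms * 0.25 = 40ms.
 */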
509 static u64 window_update(struct psi_window *win, u64 now, u64 value)
510 {
511 	u64 elapsed;
512 	u64 growth;
513 
514 	elapsed = now - win->start_time;
515 	growth = value - win->start_value;
516 	/*
517 	 * After each tracking window passes win->start_value and
518 	 * win->start_time get reset and win->prev_growth stores
519 	 * the average per-window growth of the previous window.
520 	 * win->prev_growth is then used to interpolate additional
521 	 * growth from the previous window assuming it was linear.
522 	 */
523 	if (elapsed > win->size)
524 		window_reset(win, now, value, growth);
525 	else {
526 		u32 remaining;
527 
528 		remaining = win->size - elapsed;
529 		growth += div64_u64(win->prev_growth * remaining, win->size);
530 	}
531 
532 	return growth;
533 }
534 
535 static void init_triggers(struct psi_group *group, u64 now)
536 {
537 	struct psi_trigger *t;
538 
539 	list_for_each_entry(t, &group->triggers, node)
540 		window_reset(&t->win, now,
541 				group->total[PSI_POLL][t->state], 0);
542 	memcpy(group->polling_total, group->total[PSI_POLL],
543 		   sizeof(group->polling_total));
544 	group->polling_next_update = now + group->poll_min_period;
545 }
546 
547 static u64 update_triggers(struct psi_group *group, u64 now)
548 {
549 	struct psi_trigger *t;
550 	bool new_stall = false;
551 	u64 *total = group->total[PSI_POLL];
552 
553 	/*
554 	 * On subsequent updates, calculate growth deltas and let
555 	 * watchers know when their specified thresholds are exceeded.
556 	 */
557 	list_for_each_entry(t, &group->triggers, node) {
558 		u64 growth;
559 
560 		/* Check for stall activity */
561 		if (group->polling_total[t->state] == total[t->state])
562 			continue;
563 
564 		/*
565 		 * Multiple triggers might be looking at the same state,
566 		 * remember to update group->polling_total[] once we've
567 		 * been through all of them. Also remember to extend the
568 		 * polling time if we see new stall activity.
569 		 */
570 		new_stall = true;
571 
572 		/* Calculate growth since last update */
573 		growth = window_update(&t->win, now, total[t->state]);
574 		if (growth < t->threshold)
575 			continue;
576 
577 		/* Limit event signaling to once per window */
578 		if (now < t->last_event_time + t->win.size)
579 			continue;
580 
581 		trace_android_vh_psi_event(t);
582 
583 		/* Generate an event */
584 		if (cmpxchg(&t->event, 0, 1) == 0)
585 			wake_up_interruptible(&t->event_wait);
586 		t->last_event_time = now;
587 	}
588 
589 	trace_android_vh_psi_group(group);
590 
591 	if (new_stall)
592 		memcpy(group->polling_total, total,
593 				sizeof(group->polling_total));
594 
595 	return now + group->poll_min_period;
596 }
597 
598 /* Schedule polling if it's not already scheduled or forced. */
599 static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
600 				   bool force)
601 {
602 	struct task_struct *task;
603 
604 	/*
605 	 * The atomic fetch-or below must be done even when !force, to
606 	 * provide a full memory barrier (see the comment inside psi_poll_work).
607 	 */
608 	if (atomic_fetch_and_set_bit(POLL_SCHEDULED, &group->poll_wakeup) &&
609 				     !force)
610 		return;
611 
612 	rcu_read_lock();
613 
614 	task = rcu_dereference(group->poll_task);
615 	/*
616 	 * kworker might be NULL in case psi_trigger_destroy races with
617 	 * psi_task_change (hotpath) which can't use locks
618 	 */
619 	if (likely(task))
620 		mod_timer(&group->poll_timer, jiffies + delay);
621 	else
622 		atomic_clear_bit(POLL_SCHEDULED, &group->poll_wakeup);
623 
624 	rcu_read_unlock();
625 }
626 
627 static void psi_poll_work(struct psi_group *group)
628 {
629 	bool force_reschedule = false;
630 	u32 changed_states;
631 	u64 now;
632 
633 	mutex_lock(&group->trigger_lock);
634 
635 	now = sched_clock();
636 
637 	if (now > group->polling_until) {
638 		/*
639 		 * We are either about to start or might stop polling if no
640 		 * state change was recorded. Resetting poll_scheduled leaves
641 		 * a small window for psi_group_change to sneak in and schedule
642 		 * an immediate poll_work before we get to rescheduling. One
643 		 * potential extra wakeup at the end of the polling window
644 		 * should be negligible and polling_next_update still keeps
645 		 * updates correctly on schedule.
646 		 */
647 		atomic_clear_bit(POLL_SCHEDULED, &group->poll_wakeup);
648 		/*
649 		 * A task change can race with the poll worker that is supposed to
650 		 * report on it. To avoid missing events, ensure ordering between
651 		 * poll_scheduled and the task state accesses, such that if the poll
652 		 * worker misses the state update, the task change is guaranteed to
653 		 * reschedule the poll worker:
654 		 *
655 		 * poll worker:
656 		 *   atomic_set(poll_scheduled, 0)
657 		 *   smp_mb()
658 		 *   LOAD states
659 		 *
660 		 * task change:
661 		 *   STORE states
662 		 *   if atomic_xchg(poll_scheduled, 1) == 0:
663 		 *     schedule poll worker
664 		 *
665 		 * The atomic_xchg() implies a full barrier.
666 		 */
667 		smp_mb();
668 	} else {
669 		/* Polling window is not over, keep rescheduling */
670 		force_reschedule = true;
671 	}
672 
673 
674 	collect_percpu_times(group, PSI_POLL, &changed_states);
675 
676 	if (changed_states & group->poll_states) {
677 		/* Initialize trigger windows when entering polling mode */
678 		if (now > group->polling_until)
679 			init_triggers(group, now);
680 
681 		/*
682 		 * Keep the monitor active for at least the duration of the
683 		 * minimum tracking window as long as monitor states are
684 		 * changing.
685 		 */
686 		group->polling_until = now +
687 			group->poll_min_period * UPDATES_PER_WINDOW;
688 	}
689 
690 	if (now > group->polling_until) {
691 		group->polling_next_update = ULLONG_MAX;
692 		goto out;
693 	}
694 
695 	if (now >= group->polling_next_update)
696 		group->polling_next_update = update_triggers(group, now);
697 
698 	psi_schedule_poll_work(group,
699 		nsecs_to_jiffies(group->polling_next_update - now) + 1,
700 		force_reschedule);
701 
702 out:
703 	mutex_unlock(&group->trigger_lock);
704 }
705 
706 static int psi_poll_worker(void *data)
707 {
708 	struct psi_group *group = (struct psi_group *)data;
709 
710 	sched_set_fifo_low(current);
711 
712 	while (true) {
713 		wait_event_interruptible(group->poll_wait,
714 				atomic_fetch_and_clear_bit(POLL_WAKEUP, &group->poll_wakeup) ||
715 				kthread_should_stop());
716 		if (kthread_should_stop())
717 			break;
718 
719 		psi_poll_work(group);
720 	}
721 	return 0;
722 }
723 
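/* Timer callback: wake up the psimon kthread to run psi_poll_work(). */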
724 static void poll_timer_fn(struct timer_list *t)
725 {
726 	struct psi_group *group = from_timer(group, t, poll_timer);
727 
728 	atomic_set_bit(POLL_WAKEUP, &group->poll_wakeup);
729 	wake_up_interruptible(&group->poll_wait);
730 }
731 
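/*
 * Fold the time since groupc->state_start into the time buckets of all
 * currently active states, then restart the state clock.
 */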
732 static void record_times(struct psi_group_cpu *groupc, u64 now)
733 {
734 	u32 delta;
735 
736 	delta = now - groupc->state_start;
737 	groupc->state_start = now;
738 
739 	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
740 		groupc->times[PSI_IO_SOME] += delta;
741 		if (groupc->state_mask & (1 << PSI_IO_FULL))
742 			groupc->times[PSI_IO_FULL] += delta;
743 	}
744 
745 	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
746 		groupc->times[PSI_MEM_SOME] += delta;
747 		if (groupc->state_mask & (1 << PSI_MEM_FULL))
748 			groupc->times[PSI_MEM_FULL] += delta;
749 	}
750 
751 	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
752 		groupc->times[PSI_CPU_SOME] += delta;
753 		if (groupc->state_mask & (1 << PSI_CPU_FULL))
754 			groupc->times[PSI_CPU_FULL] += delta;
755 	}
756 
757 	if (groupc->state_mask & (1 << PSI_NONIDLE))
758 		groupc->times[PSI_NONIDLE] += delta;
759 }
760 
761 static void psi_group_change(struct psi_group *group, int cpu,
762 			     unsigned int clear, unsigned int set, u64 now,
763 			     bool wake_clock)
764 {
765 	struct psi_group_cpu *groupc;
766 	u32 state_mask = 0;
767 	unsigned int t, m;
768 	enum psi_states s;
769 
770 	groupc = per_cpu_ptr(group->pcpu, cpu);
771 
772 	/*
773 	 * First we assess the aggregate resource states this CPU's
774 	 * tasks have been in since the last change, and account any
775 	 * SOME and FULL time these may have resulted in.
776 	 *
777 	 * Then we update the task counts according to the state
778 	 * change requested through the @clear and @set bits.
779 	 */
780 	write_seqcount_begin(&groupc->seq);
781 
782 	record_times(groupc, now);
783 
784 	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
785 		if (!(m & (1 << t)))
786 			continue;
787 		if (groupc->tasks[t]) {
788 			groupc->tasks[t]--;
789 		} else if (!psi_bug) {
790 			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u %u] clear=%x set=%x\n",
791 					cpu, t, groupc->tasks[0],
792 					groupc->tasks[1], groupc->tasks[2],
793 					groupc->tasks[3], groupc->tasks[4],
794 					clear, set);
795 			psi_bug = 1;
796 		}
797 	}
798 
799 	for (t = 0; set; set &= ~(1 << t), t++)
800 		if (set & (1 << t))
801 			groupc->tasks[t]++;
802 
803 	/* Calculate state mask representing active states */
804 	for (s = 0; s < NR_PSI_STATES; s++) {
805 		if (test_state(groupc->tasks, s))
806 			state_mask |= (1 << s);
807 	}
808 
809 	/*
810 	 * Since we care about lost potential, a memstall is FULL
811 	 * when there are no other working tasks, but also when
812 	 * the CPU is actively reclaiming and nothing productive
813 	 * could run even if it were runnable. So when the current
814 	 * task in a cgroup is in_memstall, the corresponding groupc
815 	 * on that cpu is in PSI_MEM_FULL state.
816 	 */
817 	if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall))
818 		state_mask |= (1 << PSI_MEM_FULL);
819 
820 	groupc->state_mask = state_mask;
821 
822 	write_seqcount_end(&groupc->seq);
823 
824 	if (state_mask & group->poll_states)
825 		psi_schedule_poll_work(group, 1, false);
826 
827 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
828 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
829 }
830 
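/*
 * Walk the psi groups a task belongs to: the task's own cgroup first,
 * then each ancestor (the root cgroup is skipped, it is covered by the
 * system-wide group), and finally psi_system itself. Returns NULL once
 * the walk is complete.
 */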
831 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
832 {
833 	if (*iter == &psi_system)
834 		return NULL;
835 
836 #ifdef CONFIG_CGROUPS
837 	if (static_branch_likely(&psi_cgroups_enabled)) {
838 		struct cgroup *cgroup = NULL;
839 
840 		if (!*iter)
841 			cgroup = task->cgroups->dfl_cgrp;
842 		else
843 			cgroup = cgroup_parent(*iter);
844 
845 		if (cgroup && cgroup_parent(cgroup)) {
846 			*iter = cgroup;
847 			return cgroup_psi(cgroup);
848 		}
849 	}
850 #endif
851 	*iter = &psi_system;
852 	return &psi_system;
853 }
854 
855 static void psi_flags_change(struct task_struct *task, int clear, int set)
856 {
857 	if (((task->psi_flags & set) ||
858 	     (task->psi_flags & clear) != clear) &&
859 	    !psi_bug) {
860 		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
861 				task->pid, task->comm, task_cpu(task),
862 				task->psi_flags, clear, set);
863 		psi_bug = 1;
864 	}
865 
866 	task->psi_flags &= ~clear;
867 	task->psi_flags |= set;
868 }
869 
870 void psi_task_change(struct task_struct *task, int clear, int set)
871 {
872 	int cpu = task_cpu(task);
873 	struct psi_group *group;
874 	bool wake_clock = true;
875 	void *iter = NULL;
876 	u64 now;
877 
878 	if (!task->pid)
879 		return;
880 
881 	psi_flags_change(task, clear, set);
882 
883 	now = cpu_clock(cpu);
884 	/*
885 	 * Periodic aggregation shuts off if there is a period of no
886 	 * task changes, so we wake it back up if necessary. However,
887 	 * don't do this if the task change is the aggregation worker
888 	 * itself going to sleep, or we'll ping-pong forever.
889 	 */
890 	if (unlikely((clear & TSK_RUNNING) &&
891 		     (task->flags & PF_WQ_WORKER) &&
892 		     wq_worker_last_func(task) == psi_avgs_work))
893 		wake_clock = false;
894 
895 	while ((group = iterate_groups(task, &iter)))
896 		psi_group_change(group, cpu, clear, set, now, wake_clock);
897 }
898 
899 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
900 		     bool sleep)
901 {
902 	struct psi_group *group, *common = NULL;
903 	int cpu = task_cpu(prev);
904 	void *iter;
905 	u64 now = cpu_clock(cpu);
906 
907 	if (next->pid) {
908 		bool identical_state;
909 
910 		psi_flags_change(next, 0, TSK_ONCPU);
911 		/*
912 		 * When switching between tasks that have an identical
913 		 * runtime state, the cgroup that contains both tasks
914 		 * sees no state change, and neither does any ancestor
915 		 * above it. Iterate @next's ancestors only until we
916 		 * encounter @prev's ONCPU, i.e. the first common ancestor.
917 		 */
918 		identical_state = prev->psi_flags == next->psi_flags;
919 		iter = NULL;
920 		while ((group = iterate_groups(next, &iter))) {
921 			if (identical_state &&
922 			    per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
923 				common = group;
924 				break;
925 			}
926 
927 			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
928 		}
929 	}
930 
931 	if (prev->pid) {
932 		int clear = TSK_ONCPU, set = 0;
933 
934 		/*
935 		 * When we're going to sleep, psi_dequeue() lets us
936 		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
937 		 * TSK_IOWAIT here, where we can combine it with
938 		 * TSK_ONCPU and save walking common ancestors twice.
939 		 */
940 		if (sleep) {
941 			clear |= TSK_RUNNING;
942 			if (prev->in_memstall)
943 				clear |= TSK_MEMSTALL_RUNNING;
944 			if (prev->in_iowait)
945 				set |= TSK_IOWAIT;
946 		}
947 
948 		psi_flags_change(prev, clear, set);
949 
950 		iter = NULL;
951 		while ((group = iterate_groups(prev, &iter)) && group != common)
952 			psi_group_change(group, cpu, clear, set, now, true);
953 
954 		/*
955 		 * TSK_ONCPU is handled up to the common ancestor. If we're tasked
956 		 * with dequeuing too, finish that for the rest of the hierarchy.
957 		 */
958 		if (sleep) {
959 			clear &= ~TSK_ONCPU;
960 			for (; group; group = iterate_groups(prev, &iter))
961 				psi_group_change(group, cpu, clear, set, now, true);
962 		}
963 	}
964 }
965 
966 /**
967  * psi_memstall_enter - mark the beginning of a memory stall section
968  * @flags: flags to handle nested sections
969  *
970  * Marks the calling task as being stalled due to a lack of memory,
971  * such as waiting for a refault or performing reclaim.
972  */
973 void psi_memstall_enter(unsigned long *flags)
974 {
975 	struct rq_flags rf;
976 	struct rq *rq;
977 
978 	if (static_branch_likely(&psi_disabled))
979 		return;
980 
981 	*flags = current->in_memstall;
982 	if (*flags)
983 		return;
984 	/*
985 	 * in_memstall setting & accounting needs to be atomic wrt
986 	 * changes to the task's scheduling state, otherwise we can
987 	 * race with CPU migration.
988 	 */
989 	rq = this_rq_lock_irq(&rf);
990 
991 	current->in_memstall = 1;
992 	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
993 
994 	rq_unlock_irq(rq, &rf);
995 }
996 
997 /**
998  * psi_memstall_leave - mark the end of a memory stall section
999  * @flags: flags to handle nested memdelay sections
1000  *
1001  * Marks the calling task as no longer stalled due to lack of memory.
1002  */
1003 void psi_memstall_leave(unsigned long *flags)
1004 {
1005 	struct rq_flags rf;
1006 	struct rq *rq;
1007 
1008 	if (static_branch_likely(&psi_disabled))
1009 		return;
1010 
1011 	if (*flags)
1012 		return;
1013 	/*
1014 	 * in_memstall clearing & accounting needs to be atomic wrt
1015 	 * changes to the task's scheduling state, otherwise we could
1016 	 * race with CPU migration.
1017 	 */
1018 	rq = this_rq_lock_irq(&rf);
1019 
1020 	current->in_memstall = 0;
1021 	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
1022 
1023 	rq_unlock_irq(rq, &rf);
1024 }
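
/*
 * Illustrative usage of the pair above, as seen in reclaim and refault
 * paths (the variable name is just an example):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... reclaim pages or wait for a refaulting page ...
 *	psi_memstall_leave(&pflags);
 *
 * The saved flags make nested sections no-ops at the inner levels.
 */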
1025 
1026 #ifdef CONFIG_CGROUPS
1027 int psi_cgroup_alloc(struct cgroup *cgroup)
1028 {
1029 	if (static_branch_likely(&psi_disabled))
1030 		return 0;
1031 
1032 	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
1033 	if (!cgroup->psi.pcpu)
1034 		return -ENOMEM;
1035 	group_init(&cgroup->psi);
1036 	return 0;
1037 }
1038 
1039 void psi_cgroup_free(struct cgroup *cgroup)
1040 {
1041 	if (static_branch_likely(&psi_disabled))
1042 		return;
1043 
1044 	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
1045 	free_percpu(cgroup->psi.pcpu);
1046 	/* All triggers must be removed by now */
1047 	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
1048 }
1049 
1050 /**
1051  * cgroup_move_task - move task to a different cgroup
1052  * @task: the task
1053  * @to: the target css_set
1054  *
1055  * Move task to a new cgroup and safely migrate its associated stall
1056  * state between the different groups.
1057  *
1058  * This function acquires the task's rq lock to lock out concurrent
1059  * changes to the task's scheduling state and - in case the task is
1060  * running - concurrent changes to its stall state.
1061  */
1062 void cgroup_move_task(struct task_struct *task, struct css_set *to)
1063 {
1064 	unsigned int task_flags;
1065 	struct rq_flags rf;
1066 	struct rq *rq;
1067 
1068 	if (static_branch_likely(&psi_disabled)) {
1069 		/*
1070 		 * Lame to do this here, but the scheduler cannot be locked
1071 		 * from the outside, so we move cgroups from inside sched/.
1072 		 */
1073 		rcu_assign_pointer(task->cgroups, to);
1074 		return;
1075 	}
1076 
1077 	rq = task_rq_lock(task, &rf);
1078 
1079 	/*
1080 	 * We may race with schedule() dropping the rq lock between
1081 	 * deactivating prev and switching to next. Because the psi
1082 	 * updates from the deactivation are deferred to the switch
1083 	 * callback to save cgroup tree updates, the task's scheduling
1084 	 * state here is not coherent with its psi state:
1085 	 *
1086 	 * schedule()                   cgroup_move_task()
1087 	 *   rq_lock()
1088 	 *   deactivate_task()
1089 	 *     p->on_rq = 0
1090 	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
1091 	 *   pick_next_task()
1092 	 *     rq_unlock()
1093 	 *                                rq_lock()
1094 	 *                                psi_task_change() // old cgroup
1095 	 *                                task->cgroups = to
1096 	 *                                psi_task_change() // new cgroup
1097 	 *                                rq_unlock()
1098 	 *     rq_lock()
1099 	 *   psi_sched_switch() // does deferred updates in new cgroup
1100 	 *
1101 	 * Don't rely on the scheduling state. Use psi_flags instead.
1102 	 */
1103 	task_flags = task->psi_flags;
1104 
1105 	if (task_flags)
1106 		psi_task_change(task, task_flags, 0);
1107 
1108 	/* See comment above */
1109 	rcu_assign_pointer(task->cgroups, to);
1110 
1111 	if (task_flags)
1112 		psi_task_change(task, 0, task_flags);
1113 
1114 	task_rq_unlock(rq, task, &rf);
1115 }
1116 #endif /* CONFIG_CGROUPS */
1117 
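/*
 * Emit the pressure state of one resource in the /proc/pressure (and
 * cgroup pressure file) format, e.g. with illustrative numbers:
 *
 *	some avg10=0.12 avg60=0.08 avg300=0.02 total=123456
 *	full avg10=0.00 avg60=0.00 avg300=0.00 total=6789
 *
 * where total is the cumulative stall time in microseconds.
 */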
1118 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1119 {
1120 	int full;
1121 	u64 now;
1122 
1123 	if (static_branch_likely(&psi_disabled))
1124 		return -EOPNOTSUPP;
1125 
1126 	/* Update averages before reporting them */
1127 	mutex_lock(&group->avgs_lock);
1128 	now = sched_clock();
1129 	collect_percpu_times(group, PSI_AVGS, NULL);
1130 	if (now >= group->avg_next_update)
1131 		group->avg_next_update = update_averages(group, now);
1132 	mutex_unlock(&group->avgs_lock);
1133 
1134 	for (full = 0; full < 2; full++) {
1135 		unsigned long avg[3] = { 0, };
1136 		u64 total = 0;
1137 		int w;
1138 
1139 		/* CPU FULL is undefined at the system level */
1140 		if (!(group == &psi_system && res == PSI_CPU && full)) {
1141 			for (w = 0; w < 3; w++)
1142 				avg[w] = group->avg[res * 2 + full][w];
1143 			total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1144 					NSEC_PER_USEC);
1145 		}
1146 
1147 		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1148 			   full ? "full" : "some",
1149 			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1150 			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1151 			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1152 			   total);
1153 	}
1154 
1155 	return 0;
1156 }
1157 
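/*
 * Create a trigger from a "<some|full> <threshold_us> <window_us>"
 * specification. For example (illustrative numbers), writing
 * "some 150000 1000000" to /proc/pressure/memory requests an event
 * whenever memory SOME stall time exceeds 150ms within any 1s window;
 * userspace then waits for POLLPRI on that file descriptor.
 */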
1158 struct psi_trigger *psi_trigger_create(struct psi_group *group,
1159 			char *buf, size_t nbytes, enum psi_res res)
1160 {
1161 	struct psi_trigger *t;
1162 	enum psi_states state;
1163 	u32 threshold_us;
1164 	u32 window_us;
1165 
1166 	if (static_branch_likely(&psi_disabled))
1167 		return ERR_PTR(-EOPNOTSUPP);
1168 
1169 	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1170 		state = PSI_IO_SOME + res * 2;
1171 	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1172 		state = PSI_IO_FULL + res * 2;
1173 	else
1174 		return ERR_PTR(-EINVAL);
1175 
1176 	if (state >= PSI_NONIDLE)
1177 		return ERR_PTR(-EINVAL);
1178 
1179 	if (window_us < WINDOW_MIN_US ||
1180 		window_us > WINDOW_MAX_US)
1181 		return ERR_PTR(-EINVAL);
1182 
1183 	/* Check threshold */
1184 	if (threshold_us == 0 || threshold_us > window_us)
1185 		return ERR_PTR(-EINVAL);
1186 
1187 	t = kmalloc(sizeof(*t), GFP_KERNEL);
1188 	if (!t)
1189 		return ERR_PTR(-ENOMEM);
1190 
1191 	t->group = group;
1192 	t->state = state;
1193 	t->threshold = threshold_us * NSEC_PER_USEC;
1194 	t->win.size = window_us * NSEC_PER_USEC;
1195 	window_reset(&t->win, 0, 0, 0);
1196 
1197 	t->event = 0;
1198 	t->last_event_time = 0;
1199 	init_waitqueue_head(&t->event_wait);
1200 
1201 	mutex_lock(&group->trigger_lock);
1202 
1203 	if (!rcu_access_pointer(group->poll_task)) {
1204 		struct task_struct *task;
1205 
1206 		task = kthread_create(psi_poll_worker, group, "psimon");
1207 		if (IS_ERR(task)) {
1208 			kfree(t);
1209 			mutex_unlock(&group->trigger_lock);
1210 			return ERR_CAST(task);
1211 		}
1212 		atomic_clear_bit(POLL_WAKEUP, &group->poll_wakeup);
1213 		wake_up_process(task);
1214 		rcu_assign_pointer(group->poll_task, task);
1215 	}
1216 
1217 	list_add(&t->node, &group->triggers);
1218 	group->poll_min_period = min(group->poll_min_period,
1219 		div_u64(t->win.size, UPDATES_PER_WINDOW));
1220 	group->nr_triggers[t->state]++;
1221 	group->poll_states |= (1 << t->state);
1222 
1223 	mutex_unlock(&group->trigger_lock);
1224 
1225 	return t;
1226 }
1227 
1228 void psi_trigger_destroy(struct psi_trigger *t)
1229 {
1230 	struct psi_group *group;
1231 	struct task_struct *task_to_destroy = NULL;
1232 
1233 	/*
1234 	 * We do not check psi_disabled since it might have been disabled after
1235 	 * the trigger got created.
1236 	 */
1237 	if (!t)
1238 		return;
1239 
1240 	group = t->group;
1241 	/*
1242 	 * Wakeup waiters to stop polling and clear the queue to prevent it from
1243 	 * being accessed later. Can happen if cgroup is deleted from under a
1244 	 * polling process.
1245 	 */
1246 	wake_up_pollfree(&t->event_wait);
1247 
1248 	mutex_lock(&group->trigger_lock);
1249 
1250 	if (!list_empty(&t->node)) {
1251 		struct psi_trigger *tmp;
1252 		u64 period = ULLONG_MAX;
1253 
1254 		list_del(&t->node);
1255 		group->nr_triggers[t->state]--;
1256 		if (!group->nr_triggers[t->state])
1257 			group->poll_states &= ~(1 << t->state);
1258 		/* reset min update period for the remaining triggers */
1259 		list_for_each_entry(tmp, &group->triggers, node)
1260 			period = min(period, div_u64(tmp->win.size,
1261 					UPDATES_PER_WINDOW));
1262 		group->poll_min_period = period;
1263 		/* Destroy poll_task when the last trigger is destroyed */
1264 		if (group->poll_states == 0) {
1265 			group->polling_until = 0;
1266 			task_to_destroy = rcu_dereference_protected(
1267 					group->poll_task,
1268 					lockdep_is_held(&group->trigger_lock));
1269 			rcu_assign_pointer(group->poll_task, NULL);
1270 			del_timer(&group->poll_timer);
1271 		}
1272 	}
1273 
1274 	mutex_unlock(&group->trigger_lock);
1275 
1276 	/*
1277 	 * Wait for psi_schedule_poll_work RCU to complete its read-side
1278 	 * critical section before destroying the trigger and optionally the
1279 	 * poll_task.
1280 	 */
1281 	synchronize_rcu();
1282 	/*
1283 	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
1284 	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1285 	 */
1286 	if (task_to_destroy) {
1287 		/*
1288 		 * After the RCU grace period has expired, the worker
1289 		 * can no longer be found through group->poll_task.
1290 		 */
1291 		kthread_stop(task_to_destroy);
1292 		atomic_clear_bit(POLL_SCHEDULED, &group->poll_wakeup);
1293 	}
1294 	kfree(t);
1295 }
1296 
1297 __poll_t psi_trigger_poll(void **trigger_ptr,
1298 				struct file *file, poll_table *wait)
1299 {
1300 	__poll_t ret = DEFAULT_POLLMASK;
1301 	struct psi_trigger *t;
1302 
1303 	if (static_branch_likely(&psi_disabled))
1304 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1305 
1306 	t = smp_load_acquire(trigger_ptr);
1307 	if (!t)
1308 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1309 
1310 	poll_wait(file, &t->event_wait, wait);
1311 
1312 	if (cmpxchg(&t->event, 1, 0) == 1)
1313 		ret |= EPOLLPRI;
1314 
1315 	return ret;
1316 }
1317 
1318 #ifdef CONFIG_PROC_FS
1319 static int psi_io_show(struct seq_file *m, void *v)
1320 {
1321 	return psi_show(m, &psi_system, PSI_IO);
1322 }
1323 
1324 static int psi_memory_show(struct seq_file *m, void *v)
1325 {
1326 	return psi_show(m, &psi_system, PSI_MEM);
1327 }
1328 
1329 static int psi_cpu_show(struct seq_file *m, void *v)
1330 {
1331 	return psi_show(m, &psi_system, PSI_CPU);
1332 }
1333 
1334 static int psi_io_open(struct inode *inode, struct file *file)
1335 {
1336 	return single_open(file, psi_io_show, NULL);
1337 }
1338 
1339 static int psi_memory_open(struct inode *inode, struct file *file)
1340 {
1341 	return single_open(file, psi_memory_show, NULL);
1342 }
1343 
1344 static int psi_cpu_open(struct inode *inode, struct file *file)
1345 {
1346 	return single_open(file, psi_cpu_show, NULL);
1347 }
1348 
1349 static ssize_t psi_write(struct file *file, const char __user *user_buf,
1350 			 size_t nbytes, enum psi_res res)
1351 {
1352 	char buf[32];
1353 	size_t buf_size;
1354 	struct seq_file *seq;
1355 	struct psi_trigger *new;
1356 
1357 	if (static_branch_likely(&psi_disabled))
1358 		return -EOPNOTSUPP;
1359 
1360 	if (!nbytes)
1361 		return -EINVAL;
1362 
1363 	buf_size = min(nbytes, sizeof(buf));
1364 	if (copy_from_user(buf, user_buf, buf_size))
1365 		return -EFAULT;
1366 
1367 	buf[buf_size - 1] = '\0';
1368 
1369 	seq = file->private_data;
1370 
1371 	/* Take seq->lock to protect seq->private from concurrent writes */
1372 	mutex_lock(&seq->lock);
1373 
1374 	/* Allow only one trigger per file descriptor */
1375 	if (seq->private) {
1376 		mutex_unlock(&seq->lock);
1377 		return -EBUSY;
1378 	}
1379 
1380 	new = psi_trigger_create(&psi_system, buf, nbytes, res);
1381 	if (IS_ERR(new)) {
1382 		mutex_unlock(&seq->lock);
1383 		return PTR_ERR(new);
1384 	}
1385 
1386 	smp_store_release(&seq->private, new);
1387 	mutex_unlock(&seq->lock);
1388 
1389 	return nbytes;
1390 }
1391 
1392 static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1393 			    size_t nbytes, loff_t *ppos)
1394 {
1395 	return psi_write(file, user_buf, nbytes, PSI_IO);
1396 }
1397 
1398 static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1399 				size_t nbytes, loff_t *ppos)
1400 {
1401 	return psi_write(file, user_buf, nbytes, PSI_MEM);
1402 }
1403 
1404 static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1405 			     size_t nbytes, loff_t *ppos)
1406 {
1407 	return psi_write(file, user_buf, nbytes, PSI_CPU);
1408 }
1409 
1410 static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1411 {
1412 	struct seq_file *seq = file->private_data;
1413 
1414 	return psi_trigger_poll(&seq->private, file, wait);
1415 }
1416 
1417 static int psi_fop_release(struct inode *inode, struct file *file)
1418 {
1419 	struct seq_file *seq = file->private_data;
1420 
1421 	psi_trigger_destroy(seq->private);
1422 	return single_release(inode, file);
1423 }
1424 
1425 static const struct proc_ops psi_io_proc_ops = {
1426 	.proc_open	= psi_io_open,
1427 	.proc_read	= seq_read,
1428 	.proc_lseek	= seq_lseek,
1429 	.proc_write	= psi_io_write,
1430 	.proc_poll	= psi_fop_poll,
1431 	.proc_release	= psi_fop_release,
1432 };
1433 
1434 static const struct proc_ops psi_memory_proc_ops = {
1435 	.proc_open	= psi_memory_open,
1436 	.proc_read	= seq_read,
1437 	.proc_lseek	= seq_lseek,
1438 	.proc_write	= psi_memory_write,
1439 	.proc_poll	= psi_fop_poll,
1440 	.proc_release	= psi_fop_release,
1441 };
1442 
1443 static const struct proc_ops psi_cpu_proc_ops = {
1444 	.proc_open	= psi_cpu_open,
1445 	.proc_read	= seq_read,
1446 	.proc_lseek	= seq_lseek,
1447 	.proc_write	= psi_cpu_write,
1448 	.proc_poll	= psi_fop_poll,
1449 	.proc_release	= psi_fop_release,
1450 };
1451 
1452 static int __init psi_proc_init(void)
1453 {
1454 	if (psi_enable) {
1455 		proc_mkdir("pressure", NULL);
1456 		proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
1457 		proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
1458 		proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
1459 	}
1460 	return 0;
1461 }
1462 module_init(psi_proc_init);
1463 
1464 #endif /* CONFIG_PROC_FS */
1465