1 /*
2  * Pressure stall information for CPU, memory and IO
3  *
4  * Copyright (c) 2018 Facebook, Inc.
5  * Author: Johannes Weiner <hannes@cmpxchg.org>
6  *
7  * Polling support by Suren Baghdasaryan <surenb@google.com>
8  * Copyright (c) 2018 Google, Inc.
9  *
10  * When CPU, memory and IO are contended, tasks experience delays that
11  * reduce throughput and introduce latencies into the workload. Memory
12  * and IO contention, in addition, can cause a full loss of forward
13  * progress in which the CPU goes idle.
14  *
15  * This code aggregates individual task delays into resource pressure
16  * metrics that indicate problems with both workload health and
17  * resource utilization.
18  *
19  *			Model
20  *
21  * The time in which a task can execute on a CPU is our baseline for
22  * productivity. Pressure expresses the amount of time in which this
23  * potential cannot be realized due to resource contention.
24  *
25  * This concept of productivity has two components: the workload and
26  * the CPU. To measure the impact of pressure on both, we define two
27  * contention states for a resource: SOME and FULL.
28  *
29  * In the SOME state of a given resource, one or more tasks are
30  * delayed on that resource. This affects the workload's ability to
31  * perform work, but the CPU may still be executing other tasks.
32  *
33  * In the FULL state of a given resource, all non-idle tasks are
34  * delayed on that resource such that nobody is advancing and the CPU
35  * goes idle. This leaves both workload and CPU unproductive.
36  *
37  * (Naturally, the FULL state doesn't exist for the CPU resource.)
38  *
39  *	SOME = nr_delayed_tasks != 0
40  *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
41  *
42  * The percentage of wallclock time spent in those compound stall
43  * states gives pressure numbers between 0 and 100 for each resource,
44  * where the SOME percentage indicates workload slowdowns and the FULL
45  * percentage indicates reduced CPU utilization:
46  *
47  *	%SOME = time(SOME) / period
48  *	%FULL = time(FULL) / period
49  *
50  *			Multiple CPUs
51  *
52  * The more tasks and available CPUs there are, the more work can be
53  * performed concurrently. This means that the potential that can go
54  * unrealized due to resource contention *also* scales with non-idle
55  * tasks and CPUs.
56  *
57  * Consider a scenario where 257 number crunching tasks are trying to
58  * run concurrently on 256 CPUs. If we simply aggregated the task
59  * states, we would have to conclude a CPU SOME pressure number of
60  * 100%, since *somebody* is waiting on a runqueue at all
61  * times. However, that is clearly not the amount of contention the
62  * workload is experiencing: only one out of 256 possible execution
63  * threads will be contended at any given time, or about 0.4%.
64  *
65  * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
66  * given time *one* of the tasks is delayed due to a lack of memory.
67  * Again, looking purely at the task state would yield a memory FULL
68  * pressure number of 0%, since *somebody* is always making forward
69  * progress. But again this wouldn't capture the amount of execution
70  * potential lost, which is 1 out of 4 CPUs, or 25%.
71  *
72  * To calculate wasted potential (pressure) with multiple processors,
73  * we have to base our calculation on the number of non-idle tasks in
74  * conjunction with the number of available CPUs, which is the number
75  * of potential execution threads. SOME then becomes the proportion of
76  * delayed tasks to possible threads, and FULL is the share of possible
77  * threads that are unproductive due to delays:
78  *
79  *	threads = min(nr_nonidle_tasks, nr_cpus)
80  *	   SOME = min(nr_delayed_tasks / threads, 1)
81  *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
82  *
83  * For the 257 number crunchers on 256 CPUs, this yields:
84  *
85  *	threads = min(257, 256)
86  *	   SOME = min(1 / 256, 1)             = 0.4%
87  *	   FULL = (256 - min(257, 256)) / 256 = 0%
88  *
89  * For the 1 out of 4 memory-delayed tasks, this yields:
90  *
91  *	threads = min(4, 4)
92  *	   SOME = min(1 / 4, 1)               = 25%
93  *	   FULL = (4 - min(3, 4)) / 4         = 25%
94  *
95  * [ Substitute nr_cpus with 1, and you can see that it's a natural
96  *   extension of the single-CPU model. ]
97  *
98  *			Implementation
99  *
100  * To assess the precise time spent in each such state, we would have
101  * to freeze the system on task changes and start/stop the state
102  * clocks accordingly. Obviously that doesn't scale in practice.
103  *
104  * Because the scheduler aims to distribute the compute load evenly
105  * among the available CPUs, we can track task state locally to each
106  * CPU and, at much lower frequency, extrapolate the global state for
107  * the cumulative stall times and the running averages.
108  *
109  * For each runqueue, we track:
110  *
111  *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
112  *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
113  *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
114  *
115  * and then periodically aggregate:
116  *
117  *	tNONIDLE = sum(tNONIDLE[i])
118  *
119  *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
120  *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
121  *
122  *	   %SOME = tSOME / period
123  *	   %FULL = tFULL / period
124  *
125  * This gives us an approximation of pressure that is practical
126  * cost-wise, yet way more sensitive and accurate than periodic
127  * sampling of the aggregate task states would be.
128  */
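
As a quick sanity check of the multi-CPU model described in the comment above, the following standalone userspace sketch (an editorial illustration, not part of this file; the task counts are the hypothetical ones from the comment) plugs the two example scenarios into the SOME/FULL formulas:

#include <stdio.h>

/* Evaluate one snapshot of the SOME/FULL model described above. */
static void psi_model(unsigned int nr_cpus, unsigned int nr_nonidle,
		      unsigned int nr_running, unsigned int nr_delayed)
{
	double threads = nr_nonidle < nr_cpus ? nr_nonidle : nr_cpus;
	double some = (double)nr_delayed / threads;
	double full = (threads - (nr_running < threads ? nr_running : threads)) / threads;

	if (some > 1.0)
		some = 1.0;
	printf("SOME=%.1f%% FULL=%.1f%%\n", some * 100, full * 100);
}

int main(void)
{
	psi_model(256, 257, 256, 1);	/* 257 crunchers on 256 CPUs: 0.4%, 0% */
	psi_model(4, 4, 3, 1);		/* 1 of 4 tasks memory-delayed: 25%, 25% */
	return 0;
}
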
129 
130 #include "../workqueue_internal.h"
131 #include <linux/sched/loadavg.h>
132 #include <linux/seq_file.h>
133 #include <linux/proc_fs.h>
134 #include <linux/seqlock.h>
135 #include <linux/uaccess.h>
136 #include <linux/cgroup.h>
137 #include <linux/module.h>
138 #include <linux/sched.h>
139 #include <linux/ctype.h>
140 #include <linux/file.h>
141 #include <linux/poll.h>
142 #include <linux/psi.h>
143 #include "sched.h"
144 
145 #include <trace/hooks/psi.h>
146 
147 static int psi_bug __read_mostly;
148 
149 DEFINE_STATIC_KEY_FALSE(psi_disabled);
150 DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
151 
152 #ifdef CONFIG_PSI_DEFAULT_DISABLED
153 static bool psi_enable;
154 #else
155 static bool psi_enable = true;
156 #endif
157 static int __init setup_psi(char *str)
158 {
159 	return kstrtobool(str, &psi_enable) == 0;
160 }
161 __setup("psi=", setup_psi);
162 
163 /* Running averages - we need to be higher-res than loadavg */
164 #define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
165 #define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
166 #define EXP_60s		1981		/* 1/exp(2s/60s) */
167 #define EXP_300s	2034		/* 1/exp(2s/300s) */
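
For reference, each EXP_* constant is the per-period decay factor e^(-2s/T) expressed in the loadavg fixed-point base FIXED_1 (1 << FSHIFT = 2048, from <linux/sched/loadavg.h>). A small userspace sketch (editorial illustration, build with -lm) reproduces the three values:

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double fixed_1 = 2048.0;		/* FIXED_1 = 1 << FSHIFT */
	const int periods[] = { 10, 60, 300 };

	for (int i = 0; i < 3; i++)		/* prints 1677, 1981, 2034 */
		printf("EXP_%ds = %.0f\n", periods[i],
		       fixed_1 / exp(2.0 / periods[i]));
	return 0;
}
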
168 
169 /* PSI trigger definitions */
170 #define WINDOW_MIN_US 500000	/* Min window size is 500ms */
171 #define WINDOW_MAX_US 10000000	/* Max window size is 10s */
172 #define UPDATES_PER_WINDOW 10	/* 10 updates per window */
173 
174 /* Sampling frequency in nanoseconds */
175 static u64 psi_period __read_mostly;
176 
177 /* System-level pressure and stall tracking */
178 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
179 struct psi_group psi_system = {
180 	.pcpu = &system_group_pcpu,
181 };
182 
183 static void psi_avgs_work(struct work_struct *work);
184 
185 static void poll_timer_fn(struct timer_list *t);
186 
187 static void group_init(struct psi_group *group)
188 {
189 	int cpu;
190 
191 	for_each_possible_cpu(cpu)
192 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
193 	group->avg_last_update = sched_clock();
194 	group->avg_next_update = group->avg_last_update + psi_period;
195 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
196 	mutex_init(&group->avgs_lock);
197 	/* Init trigger-related members */
198 	atomic_set(&group->poll_scheduled, 0);
199 	mutex_init(&group->trigger_lock);
200 	INIT_LIST_HEAD(&group->triggers);
201 	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
202 	group->poll_states = 0;
203 	group->poll_min_period = U32_MAX;
204 	memset(group->polling_total, 0, sizeof(group->polling_total));
205 	group->polling_next_update = ULLONG_MAX;
206 	group->polling_until = 0;
207 	init_waitqueue_head(&group->poll_wait);
208 	timer_setup(&group->poll_timer, poll_timer_fn, 0);
209 	rcu_assign_pointer(group->poll_task, NULL);
210 }
211 
212 void __init psi_init(void)
213 {
214 	if (!psi_enable) {
215 		static_branch_enable(&psi_disabled);
216 		return;
217 	}
218 
219 	if (!cgroup_psi_enabled())
220 		static_branch_disable(&psi_cgroups_enabled);
221 
222 	psi_period = jiffies_to_nsecs(PSI_FREQ);
223 	group_init(&psi_system);
224 }
225 
226 static bool test_state(unsigned int *tasks, enum psi_states state)
227 {
228 	switch (state) {
229 	case PSI_IO_SOME:
230 		return tasks[NR_IOWAIT];
231 	case PSI_IO_FULL:
232 		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
233 	case PSI_MEM_SOME:
234 		return tasks[NR_MEMSTALL];
235 	case PSI_MEM_FULL:
236 		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
237 	case PSI_CPU_SOME:
238 		return tasks[NR_RUNNING] > tasks[NR_ONCPU];
239 	case PSI_NONIDLE:
240 		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
241 			tasks[NR_RUNNING];
242 	default:
243 		return false;
244 	}
245 }
246 
247 static void get_recent_times(struct psi_group *group, int cpu,
248 			     enum psi_aggregators aggregator, u32 *times,
249 			     u32 *pchanged_states)
250 {
251 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
252 	u64 now, state_start;
253 	enum psi_states s;
254 	unsigned int seq;
255 	u32 state_mask;
256 
257 	*pchanged_states = 0;
258 
259 	/* Snapshot a coherent view of the CPU state */
260 	do {
261 		seq = read_seqcount_begin(&groupc->seq);
262 		now = cpu_clock(cpu);
263 		memcpy(times, groupc->times, sizeof(groupc->times));
264 		state_mask = groupc->state_mask;
265 		state_start = groupc->state_start;
266 	} while (read_seqcount_retry(&groupc->seq, seq));
267 
268 	/* Calculate state time deltas against the previous snapshot */
269 	for (s = 0; s < NR_PSI_STATES; s++) {
270 		u32 delta;
271 		/*
272 		 * In addition to already concluded states, we also
273 		 * incorporate currently active states on the CPU,
274 		 * since states may last for many sampling periods.
275 		 *
276 		 * This way we keep our delta sampling buckets small
277 		 * (u32) and our reported pressure close to what's
278 		 * actually happening.
279 		 */
280 		if (state_mask & (1 << s))
281 			times[s] += now - state_start;
282 
283 		delta = times[s] - groupc->times_prev[aggregator][s];
284 		groupc->times_prev[aggregator][s] = times[s];
285 
286 		times[s] = delta;
287 		if (delta)
288 			*pchanged_states |= (1 << s);
289 	}
290 }
291 
292 static void calc_avgs(unsigned long avg[3], int missed_periods,
293 		      u64 time, u64 period)
294 {
295 	unsigned long pct;
296 
297 	/* Fill in zeroes for periods of no activity */
298 	if (missed_periods) {
299 		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
300 		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
301 		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
302 	}
303 
304 	/* Sample the most recent active period */
305 	pct = div_u64(time * 100, period);
306 	pct *= FIXED_1;
307 	avg[0] = calc_load(avg[0], EXP_10s, pct);
308 	avg[1] = calc_load(avg[1], EXP_60s, pct);
309 	avg[2] = calc_load(avg[2], EXP_300s, pct);
310 }
311 
312 static void collect_percpu_times(struct psi_group *group,
313 				 enum psi_aggregators aggregator,
314 				 u32 *pchanged_states)
315 {
316 	u64 deltas[NR_PSI_STATES - 1] = { 0, };
317 	unsigned long nonidle_total = 0;
318 	u32 changed_states = 0;
319 	int cpu;
320 	int s;
321 
322 	/*
323 	 * Collect the per-cpu time buckets and average them into a
324 	 * single time sample that is normalized to wallclock time.
325 	 *
326 	 * For averaging, each CPU is weighted by its non-idle time in
327 	 * the sampling period. This eliminates artifacts from uneven
328 	 * loading, or even entirely idle CPUs.
329 	 */
330 	for_each_possible_cpu(cpu) {
331 		u32 times[NR_PSI_STATES];
332 		u32 nonidle;
333 		u32 cpu_changed_states;
334 
335 		get_recent_times(group, cpu, aggregator, times,
336 				&cpu_changed_states);
337 		changed_states |= cpu_changed_states;
338 
339 		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
340 		nonidle_total += nonidle;
341 
342 		for (s = 0; s < PSI_NONIDLE; s++)
343 			deltas[s] += (u64)times[s] * nonidle;
344 	}
345 
346 	/*
347 	 * Integrate the sample into the running statistics that are
348 	 * reported to userspace: the cumulative stall times and the
349 	 * decaying averages.
350 	 *
351 	 * Pressure percentages are sampled at PSI_FREQ. We might be
352 	 * called more often when the user polls more frequently than
353 	 * that; we might be called less often when there is no task
354 	 * activity, thus no data, and clock ticks are sporadic. The
355 	 * below handles both.
356 	 */
357 
358 	/* total= */
359 	for (s = 0; s < NR_PSI_STATES - 1; s++)
360 		group->total[aggregator][s] +=
361 				div_u64(deltas[s], max(nonidle_total, 1UL));
362 
363 	if (pchanged_states)
364 		*pchanged_states = changed_states;
365 }
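
As a hedged, hypothetical example of this non-idle weighting: if CPU0 was non-idle for 2 s of which 1 s was SOME, and CPU1 was non-idle for only 0.5 s of which 0.5 s was SOME, the aggregated SOME delta is (1 s * 2 + 0.5 s * 0.5) / (2 + 0.5) = 0.9 s, so a mostly idle CPU barely dilutes the pressure observed on the busy one.
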
366 
367 static u64 update_averages(struct psi_group *group, u64 now)
368 {
369 	unsigned long missed_periods = 0;
370 	u64 expires, period;
371 	u64 avg_next_update;
372 	int s;
373 
374 	/* avgX= */
375 	expires = group->avg_next_update;
376 	if (now - expires >= psi_period)
377 		missed_periods = div_u64(now - expires, psi_period);
378 
379 	/*
380 	 * The periodic clock tick can get delayed for various
381 	 * reasons, especially on loaded systems. To avoid clock
382 	 * drift, we schedule the clock in fixed psi_period intervals.
383 	 * But the deltas we sample out of the per-cpu buckets above
384 	 * are based on the actual time elapsing between clock ticks.
385 	 */
386 	avg_next_update = expires + ((1 + missed_periods) * psi_period);
387 	period = now - (group->avg_last_update + (missed_periods * psi_period));
388 	group->avg_last_update = now;
389 
390 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
391 		u32 sample;
392 
393 		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
394 		/*
395 		 * Due to the lockless sampling of the time buckets,
396 		 * recorded time deltas can slip into the next period,
397 		 * which under full pressure can result in samples in
398 		 * excess of the period length.
399 		 *
400 		 * We don't want to report non-sensical pressures in
401 		 * excess of 100%, nor do we want to drop such events
402 		 * on the floor. Instead we punt any overage into the
403 		 * future until pressure subsides. By doing this we
404 		 * don't underreport the occurring pressure curve, we
405 		 * just report it delayed by one period length.
406 		 *
407 		 * The error isn't cumulative. As soon as another
408 		 * delta slips from a period P to P+1, by definition
409 		 * it frees up its time T in P.
410 		 */
411 		if (sample > period)
412 			sample = period;
413 		group->avg_total[s] += sample;
414 		calc_avgs(group->avg[s], missed_periods, sample, period);
415 	}
416 
417 	return avg_next_update;
418 }
419 
420 static void psi_avgs_work(struct work_struct *work)
421 {
422 	struct delayed_work *dwork;
423 	struct psi_group *group;
424 	u32 changed_states;
425 	bool nonidle;
426 	u64 now;
427 
428 	dwork = to_delayed_work(work);
429 	group = container_of(dwork, struct psi_group, avgs_work);
430 
431 	mutex_lock(&group->avgs_lock);
432 
433 	now = sched_clock();
434 
435 	collect_percpu_times(group, PSI_AVGS, &changed_states);
436 	nonidle = changed_states & (1 << PSI_NONIDLE);
437 	/*
438 	 * If there is task activity, periodically fold the per-cpu
439 	 * times and feed samples into the running averages. If things
440 	 * are idle and there is no data to process, stop the clock.
441 	 * Once restarted, we'll catch up the running averages in one
442 	 * go - see calc_avgs() and missed_periods.
443 	 */
444 	if (now >= group->avg_next_update)
445 		group->avg_next_update = update_averages(group, now);
446 
447 	if (nonidle) {
448 		schedule_delayed_work(dwork, nsecs_to_jiffies(
449 				group->avg_next_update - now) + 1);
450 	}
451 
452 	mutex_unlock(&group->avgs_lock);
453 }
454 
455 /* Trigger tracking window manipulations */
456 static void window_reset(struct psi_window *win, u64 now, u64 value,
457 			 u64 prev_growth)
458 {
459 	win->start_time = now;
460 	win->start_value = value;
461 	win->prev_growth = prev_growth;
462 }
463 
464 /*
465  * PSI growth tracking window update and growth calculation routine.
466  *
467  * This approximates a sliding tracking window by interpolating
468  * partially elapsed windows using historical growth data from the
469  * previous intervals. This minimizes memory requirements (by not storing
470  * all the intermediate values in the previous window) and simplifies
471  * the calculations. It works well because PSI signal changes only in
472  * positive direction and over relatively small window sizes the growth
473  * is close to linear.
474  */
475 static u64 window_update(struct psi_window *win, u64 now, u64 value)
476 {
477 	u64 elapsed;
478 	u64 growth;
479 
480 	elapsed = now - win->start_time;
481 	growth = value - win->start_value;
482 	/*
483 	 * After each tracking window passes win->start_value and
484 	 * win->start_time get reset and win->prev_growth stores
485 	 * the average per-window growth of the previous window.
486 	 * win->prev_growth is then used to interpolate additional
487 	 * growth from the previous window assuming it was linear.
488 	 */
489 	if (elapsed > win->size)
490 		window_reset(win, now, value, growth);
491 	else {
492 		u32 remaining;
493 
494 		remaining = win->size - elapsed;
495 		growth += div64_u64(win->prev_growth * remaining, win->size);
496 	}
497 
498 	return growth;
499 }
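
As a hedged, hypothetical example of the interpolation above: with a 1 s window where win->prev_growth is 100 ms, if 400 ms of the current window have elapsed and 50 ms of new stall have accumulated, window_update() reports 50 ms + 100 ms * (600 / 1000) = 110 ms of growth over the approximated sliding window.
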
500 
501 static void init_triggers(struct psi_group *group, u64 now)
502 {
503 	struct psi_trigger *t;
504 
505 	list_for_each_entry(t, &group->triggers, node)
506 		window_reset(&t->win, now,
507 				group->total[PSI_POLL][t->state], 0);
508 	memcpy(group->polling_total, group->total[PSI_POLL],
509 		   sizeof(group->polling_total));
510 	group->polling_next_update = now + group->poll_min_period;
511 }
512 
513 static u64 update_triggers(struct psi_group *group, u64 now)
514 {
515 	struct psi_trigger *t;
516 	bool new_stall = false;
517 	u64 *total = group->total[PSI_POLL];
518 
519 	/*
520 	 * On subsequent updates, calculate growth deltas and let
521 	 * watchers know when their specified thresholds are exceeded.
522 	 */
523 	list_for_each_entry(t, &group->triggers, node) {
524 		u64 growth;
525 
526 		/* Check for stall activity */
527 		if (group->polling_total[t->state] == total[t->state])
528 			continue;
529 
530 		/*
531 		 * Multiple triggers might be looking at the same state,
532 		 * remember to update group->polling_total[] once we've
533 		 * been through all of them. Also remember to extend the
534 		 * polling time if we see new stall activity.
535 		 */
536 		new_stall = true;
537 
538 		/* Calculate growth since last update */
539 		growth = window_update(&t->win, now, total[t->state]);
540 		if (growth < t->threshold)
541 			continue;
542 
543 		/* Limit event signaling to once per window */
544 		if (now < t->last_event_time + t->win.size)
545 			continue;
546 
547 		trace_android_vh_psi_event(t);
548 
549 		/* Generate an event */
550 		if (cmpxchg(&t->event, 0, 1) == 0)
551 			wake_up_interruptible(&t->event_wait);
552 		t->last_event_time = now;
553 	}
554 
555 	trace_android_vh_psi_group(group);
556 
557 	if (new_stall)
558 		memcpy(group->polling_total, total,
559 				sizeof(group->polling_total));
560 
561 	return now + group->poll_min_period;
562 }
563 
564 /* Schedule polling if it's not already scheduled or forced. */
565 static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
566 				   bool force)
567 {
568 	struct task_struct *task;
569 
570 	/*
571 	 * atomic_xchg should be called even when !force to provide a
572 	 * full memory barrier (see the comment inside psi_poll_work).
573 	 */
574 	if (atomic_xchg(&group->poll_scheduled, 1) && !force)
575 		return;
576 
577 	rcu_read_lock();
578 
579 	task = rcu_dereference(group->poll_task);
580 	/*
581 	 * kworker might be NULL in case psi_trigger_destroy races with
582 	 * psi_task_change (hotpath) which can't use locks
583 	 */
584 	if (likely(task))
585 		mod_timer(&group->poll_timer, jiffies + delay);
586 	else
587 		atomic_set(&group->poll_scheduled, 0);
588 
589 	rcu_read_unlock();
590 }
591 
592 static void psi_poll_work(struct psi_group *group)
593 {
594 	bool force_reschedule = false;
595 	u32 changed_states;
596 	u64 now;
597 
598 	mutex_lock(&group->trigger_lock);
599 
600 	now = sched_clock();
601 
602 	if (now > group->polling_until) {
603 		/*
604 		 * We are either about to start or might stop polling if no
605 		 * state change was recorded. Resetting poll_scheduled leaves
606 		 * a small window for psi_group_change to sneak in and schedule
607 		 * an immediate poll_work before we get to rescheduling. One
608 		 * potential extra wakeup at the end of the polling window
609 		 * should be negligible and polling_next_update still keeps
610 		 * updates correctly on schedule.
611 		 */
612 		atomic_set(&group->poll_scheduled, 0);
613 		/*
614 		 * A task change can race with the poll worker that is supposed to
615 		 * report on it. To avoid missing events, ensure ordering between
616 		 * poll_scheduled and the task state accesses, such that if the poll
617 		 * worker misses the state update, the task change is guaranteed to
618 		 * reschedule the poll worker:
619 		 *
620 		 * poll worker:
621 		 *   atomic_set(poll_scheduled, 0)
622 		 *   smp_mb()
623 		 *   LOAD states
624 		 *
625 		 * task change:
626 		 *   STORE states
627 		 *   if atomic_xchg(poll_scheduled, 1) == 0:
628 		 *     schedule poll worker
629 		 *
630 		 * The atomic_xchg() implies a full barrier.
631 		 */
632 		smp_mb();
633 	} else {
634 		/* Polling window is not over, keep rescheduling */
635 		force_reschedule = true;
636 	}
637 
638 
639 	collect_percpu_times(group, PSI_POLL, &changed_states);
640 
641 	if (changed_states & group->poll_states) {
642 		/* Initialize trigger windows when entering polling mode */
643 		if (now > group->polling_until)
644 			init_triggers(group, now);
645 
646 		/*
647 		 * Keep the monitor active for at least the duration of the
648 		 * minimum tracking window as long as monitor states are
649 		 * changing.
650 		 */
651 		group->polling_until = now +
652 			group->poll_min_period * UPDATES_PER_WINDOW;
653 	}
654 
655 	if (now > group->polling_until) {
656 		group->polling_next_update = ULLONG_MAX;
657 		goto out;
658 	}
659 
660 	if (now >= group->polling_next_update)
661 		group->polling_next_update = update_triggers(group, now);
662 
663 	psi_schedule_poll_work(group,
664 		nsecs_to_jiffies(group->polling_next_update - now) + 1,
665 		force_reschedule);
666 
667 out:
668 	mutex_unlock(&group->trigger_lock);
669 }
670 
671 static int psi_poll_worker(void *data)
672 {
673 	struct psi_group *group = (struct psi_group *)data;
674 
675 	sched_set_fifo_low(current);
676 
677 	while (true) {
678 		wait_event_interruptible(group->poll_wait,
679 				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
680 				kthread_should_stop());
681 		if (kthread_should_stop())
682 			break;
683 
684 		psi_poll_work(group);
685 	}
686 	return 0;
687 }
688 
689 static void poll_timer_fn(struct timer_list *t)
690 {
691 	struct psi_group *group = from_timer(group, t, poll_timer);
692 
693 	atomic_set(&group->poll_wakeup, 1);
694 	wake_up_interruptible(&group->poll_wait);
695 }
696 
697 static void record_times(struct psi_group_cpu *groupc, int cpu,
698 			 bool memstall_tick)
699 {
700 	u32 delta;
701 	u64 now;
702 
703 	now = cpu_clock(cpu);
704 	delta = now - groupc->state_start;
705 	groupc->state_start = now;
706 
707 	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
708 		groupc->times[PSI_IO_SOME] += delta;
709 		if (groupc->state_mask & (1 << PSI_IO_FULL))
710 			groupc->times[PSI_IO_FULL] += delta;
711 	}
712 
713 	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
714 		groupc->times[PSI_MEM_SOME] += delta;
715 		if (groupc->state_mask & (1 << PSI_MEM_FULL))
716 			groupc->times[PSI_MEM_FULL] += delta;
717 		else if (memstall_tick) {
718 			u32 sample;
719 			/*
720 			 * Since we care about lost potential, a
721 			 * memstall is FULL when there are no other
722 			 * working tasks, but also when the CPU is
723 			 * actively reclaiming and nothing productive
724 			 * could run even if it were runnable.
725 			 *
726 			 * When the timer tick sees a reclaiming CPU,
727 			 * regardless of runnable tasks, sample a FULL
728 			 * tick (or less if it hasn't been a full tick
729 			 * since the last state change).
730 			 */
731 			sample = min(delta, (u32)jiffies_to_nsecs(1));
732 			groupc->times[PSI_MEM_FULL] += sample;
733 		}
734 	}
735 
736 	if (groupc->state_mask & (1 << PSI_CPU_SOME))
737 		groupc->times[PSI_CPU_SOME] += delta;
738 
739 	if (groupc->state_mask & (1 << PSI_NONIDLE))
740 		groupc->times[PSI_NONIDLE] += delta;
741 }
742 
743 static void psi_group_change(struct psi_group *group, int cpu,
744 			     unsigned int clear, unsigned int set,
745 			     bool wake_clock)
746 {
747 	struct psi_group_cpu *groupc;
748 	u32 state_mask = 0;
749 	unsigned int t, m;
750 	enum psi_states s;
751 
752 	groupc = per_cpu_ptr(group->pcpu, cpu);
753 
754 	/*
755 	 * First we assess the aggregate resource states this CPU's
756 	 * tasks have been in since the last change, and account any
757 	 * SOME and FULL time these may have resulted in.
758 	 *
759 	 * Then we update the task counts according to the state
760 	 * change requested through the @clear and @set bits.
761 	 */
762 	write_seqcount_begin(&groupc->seq);
763 
764 	record_times(groupc, cpu, false);
765 
766 	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
767 		if (!(m & (1 << t)))
768 			continue;
769 		if (groupc->tasks[t]) {
770 			groupc->tasks[t]--;
771 		} else if (!psi_bug) {
772 			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
773 					cpu, t, groupc->tasks[0],
774 					groupc->tasks[1], groupc->tasks[2],
775 					groupc->tasks[3], clear, set);
776 			psi_bug = 1;
777 		}
778 	}
779 
780 	for (t = 0; set; set &= ~(1 << t), t++)
781 		if (set & (1 << t))
782 			groupc->tasks[t]++;
783 
784 	/* Calculate state mask representing active states */
785 	for (s = 0; s < NR_PSI_STATES; s++) {
786 		if (test_state(groupc->tasks, s))
787 			state_mask |= (1 << s);
788 	}
789 	groupc->state_mask = state_mask;
790 
791 	write_seqcount_end(&groupc->seq);
792 
793 	if (state_mask & group->poll_states)
794 		psi_schedule_poll_work(group, 1, false);
795 
796 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
797 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
798 }
799 
800 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
801 {
802 	if (*iter == &psi_system)
803 		return NULL;
804 
805 #ifdef CONFIG_CGROUPS
806 	if (static_branch_likely(&psi_cgroups_enabled)) {
807 		struct cgroup *cgroup = NULL;
808 
809 		if (!*iter)
810 			cgroup = task->cgroups->dfl_cgrp;
811 		else
812 			cgroup = cgroup_parent(*iter);
813 
814 		if (cgroup && cgroup_parent(cgroup)) {
815 			*iter = cgroup;
816 			return cgroup_psi(cgroup);
817 		}
818 	}
819 #endif
820 	*iter = &psi_system;
821 	return &psi_system;
822 }
823 
824 static void psi_flags_change(struct task_struct *task, int clear, int set)
825 {
826 	if (((task->psi_flags & set) ||
827 	     (task->psi_flags & clear) != clear) &&
828 	    !psi_bug) {
829 		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
830 				task->pid, task->comm, task_cpu(task),
831 				task->psi_flags, clear, set);
832 		psi_bug = 1;
833 	}
834 
835 	task->psi_flags &= ~clear;
836 	task->psi_flags |= set;
837 }
838 
839 void psi_task_change(struct task_struct *task, int clear, int set)
840 {
841 	int cpu = task_cpu(task);
842 	struct psi_group *group;
843 	bool wake_clock = true;
844 	void *iter = NULL;
845 
846 	if (!task->pid)
847 		return;
848 
849 	psi_flags_change(task, clear, set);
850 
851 	/*
852 	 * Periodic aggregation shuts off if there is a period of no
853 	 * task changes, so we wake it back up if necessary. However,
854 	 * don't do this if the task change is the aggregation worker
855 	 * itself going to sleep, or we'll ping-pong forever.
856 	 */
857 	if (unlikely((clear & TSK_RUNNING) &&
858 		     (task->flags & PF_WQ_WORKER) &&
859 		     wq_worker_last_func(task) == psi_avgs_work))
860 		wake_clock = false;
861 
862 	while ((group = iterate_groups(task, &iter)))
863 		psi_group_change(group, cpu, clear, set, wake_clock);
864 }
865 
866 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
867 		     bool sleep)
868 {
869 	struct psi_group *group, *common = NULL;
870 	int cpu = task_cpu(prev);
871 	void *iter;
872 
873 	if (next->pid) {
874 		psi_flags_change(next, 0, TSK_ONCPU);
875 		/*
876 		 * When moving state between tasks, the group that
877 		 * contains them both does not change: we can stop
878 		 * updating the tree once we reach the first common
879 		 * ancestor. Iterate @next's ancestors until we
880 		 * encounter @prev's state.
881 		 */
882 		iter = NULL;
883 		while ((group = iterate_groups(next, &iter))) {
884 			if (per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
885 				common = group;
886 				break;
887 			}
888 
889 			psi_group_change(group, cpu, 0, TSK_ONCPU, true);
890 		}
891 	}
892 
893 	/*
894 	 * If this is a voluntary sleep, dequeue will have taken care
895 	 * of the outgoing TSK_ONCPU alongside TSK_RUNNING already. We
896 	 * only need to deal with it during preemption.
897 	 */
898 	if (sleep)
899 		return;
900 
901 	if (prev->pid) {
902 		psi_flags_change(prev, TSK_ONCPU, 0);
903 
904 		iter = NULL;
905 		while ((group = iterate_groups(prev, &iter)) && group != common)
906 			psi_group_change(group, cpu, TSK_ONCPU, 0, true);
907 	}
908 }
909 
910 void psi_memstall_tick(struct task_struct *task, int cpu)
911 {
912 	struct psi_group *group;
913 	void *iter = NULL;
914 
915 	while ((group = iterate_groups(task, &iter))) {
916 		struct psi_group_cpu *groupc;
917 
918 		groupc = per_cpu_ptr(group->pcpu, cpu);
919 		write_seqcount_begin(&groupc->seq);
920 		record_times(groupc, cpu, true);
921 		write_seqcount_end(&groupc->seq);
922 	}
923 }
924 
925 /**
926  * psi_memstall_enter - mark the beginning of a memory stall section
927  * @flags: flags to handle nested sections
928  *
929  * Marks the calling task as being stalled due to a lack of memory,
930  * such as waiting for a refault or performing reclaim.
931  */
932 void psi_memstall_enter(unsigned long *flags)
933 {
934 	struct rq_flags rf;
935 	struct rq *rq;
936 
937 	if (static_branch_likely(&psi_disabled))
938 		return;
939 
940 	*flags = current->in_memstall;
941 	if (*flags)
942 		return;
943 	/*
944 	 * in_memstall setting & accounting needs to be atomic wrt
945 	 * changes to the task's scheduling state, otherwise we can
946 	 * race with CPU migration.
947 	 */
948 	rq = this_rq_lock_irq(&rf);
949 
950 	current->in_memstall = 1;
951 	psi_task_change(current, 0, TSK_MEMSTALL);
952 
953 	rq_unlock_irq(rq, &rf);
954 }
955 
956 /**
957  * psi_memstall_leave - mark the end of a memory stall section
958  * @flags: flags to handle nested memdelay sections
959  *
960  * Marks the calling task as no longer stalled due to lack of memory.
961  */
962 void psi_memstall_leave(unsigned long *flags)
963 {
964 	struct rq_flags rf;
965 	struct rq *rq;
966 
967 	if (static_branch_likely(&psi_disabled))
968 		return;
969 
970 	if (*flags)
971 		return;
972 	/*
973 	 * in_memstall clearing & accounting needs to be atomic wrt
974 	 * changes to the task's scheduling state, otherwise we could
975 	 * race with CPU migration.
976 	 */
977 	rq = this_rq_lock_irq(&rf);
978 
979 	current->in_memstall = 0;
980 	psi_task_change(current, TSK_MEMSTALL, 0);
981 
982 	rq_unlock_irq(rq, &rf);
983 }
984 
985 #ifdef CONFIG_CGROUPS
986 int psi_cgroup_alloc(struct cgroup *cgroup)
987 {
988 	if (static_branch_likely(&psi_disabled))
989 		return 0;
990 
991 	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
992 	if (!cgroup->psi.pcpu)
993 		return -ENOMEM;
994 	group_init(&cgroup->psi);
995 	return 0;
996 }
997 
998 void psi_cgroup_free(struct cgroup *cgroup)
999 {
1000 	if (static_branch_likely(&psi_disabled))
1001 		return;
1002 
1003 	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
1004 	free_percpu(cgroup->psi.pcpu);
1005 	/* All triggers must be removed by now */
1006 	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
1007 }
1008 
1009 /**
1010  * cgroup_move_task - move task to a different cgroup
1011  * @task: the task
1012  * @to: the target css_set
1013  *
1014  * Move task to a new cgroup and safely migrate its associated stall
1015  * state between the different groups.
1016  *
1017  * This function acquires the task's rq lock to lock out concurrent
1018  * changes to the task's scheduling state and - in case the task is
1019  * running - concurrent changes to its stall state.
1020  */
1021 void cgroup_move_task(struct task_struct *task, struct css_set *to)
1022 {
1023 	unsigned int task_flags;
1024 	struct rq_flags rf;
1025 	struct rq *rq;
1026 
1027 	if (static_branch_likely(&psi_disabled)) {
1028 		/*
1029 		 * Lame to do this here, but the scheduler cannot be locked
1030 		 * from the outside, so we move cgroups from inside sched/.
1031 		 */
1032 		rcu_assign_pointer(task->cgroups, to);
1033 		return;
1034 	}
1035 
1036 	rq = task_rq_lock(task, &rf);
1037 
1038 	/*
1039 	 * We may race with schedule() dropping the rq lock between
1040 	 * deactivating prev and switching to next. Because the psi
1041 	 * updates from the deactivation are deferred to the switch
1042 	 * callback to save cgroup tree updates, the task's scheduling
1043 	 * state here is not coherent with its psi state:
1044 	 *
1045 	 * schedule()                   cgroup_move_task()
1046 	 *   rq_lock()
1047 	 *   deactivate_task()
1048 	 *     p->on_rq = 0
1049 	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
1050 	 *   pick_next_task()
1051 	 *     rq_unlock()
1052 	 *                                rq_lock()
1053 	 *                                psi_task_change() // old cgroup
1054 	 *                                task->cgroups = to
1055 	 *                                psi_task_change() // new cgroup
1056 	 *                                rq_unlock()
1057 	 *     rq_lock()
1058 	 *   psi_sched_switch() // does deferred updates in new cgroup
1059 	 *
1060 	 * Don't rely on the scheduling state. Use psi_flags instead.
1061 	 */
1062 	task_flags = task->psi_flags;
1063 
1064 	if (task_flags)
1065 		psi_task_change(task, task_flags, 0);
1066 
1067 	/* See comment above */
1068 	rcu_assign_pointer(task->cgroups, to);
1069 
1070 	if (task_flags)
1071 		psi_task_change(task, 0, task_flags);
1072 
1073 	task_rq_unlock(rq, task, &rf);
1074 }
1075 #endif /* CONFIG_CGROUPS */
1076 
1077 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1078 {
1079 	int full;
1080 	u64 now;
1081 
1082 	if (static_branch_likely(&psi_disabled))
1083 		return -EOPNOTSUPP;
1084 
1085 	/* Update averages before reporting them */
1086 	mutex_lock(&group->avgs_lock);
1087 	now = sched_clock();
1088 	collect_percpu_times(group, PSI_AVGS, NULL);
1089 	if (now >= group->avg_next_update)
1090 		group->avg_next_update = update_averages(group, now);
1091 	mutex_unlock(&group->avgs_lock);
1092 
1093 	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
1094 		unsigned long avg[3];
1095 		u64 total;
1096 		int w;
1097 
1098 		for (w = 0; w < 3; w++)
1099 			avg[w] = group->avg[res * 2 + full][w];
1100 		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1101 				NSEC_PER_USEC);
1102 
1103 		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1104 			   full ? "full" : "some",
1105 			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1106 			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1107 			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1108 			   total);
1109 	}
1110 
1111 	return 0;
1112 }
1113 
1114 static int psi_io_show(struct seq_file *m, void *v)
1115 {
1116 	return psi_show(m, &psi_system, PSI_IO);
1117 }
1118 
1119 static int psi_memory_show(struct seq_file *m, void *v)
1120 {
1121 	return psi_show(m, &psi_system, PSI_MEM);
1122 }
1123 
1124 static int psi_cpu_show(struct seq_file *m, void *v)
1125 {
1126 	return psi_show(m, &psi_system, PSI_CPU);
1127 }
1128 
1129 static int psi_io_open(struct inode *inode, struct file *file)
1130 {
1131 	return single_open(file, psi_io_show, NULL);
1132 }
1133 
1134 static int psi_memory_open(struct inode *inode, struct file *file)
1135 {
1136 	return single_open(file, psi_memory_show, NULL);
1137 }
1138 
1139 static int psi_cpu_open(struct inode *inode, struct file *file)
1140 {
1141 	return single_open(file, psi_cpu_show, NULL);
1142 }
1143 
1144 struct psi_trigger *psi_trigger_create(struct psi_group *group,
1145 			char *buf, size_t nbytes, enum psi_res res)
1146 {
1147 	struct psi_trigger *t;
1148 	enum psi_states state;
1149 	u32 threshold_us;
1150 	u32 window_us;
1151 
1152 	if (static_branch_likely(&psi_disabled))
1153 		return ERR_PTR(-EOPNOTSUPP);
1154 
1155 	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1156 		state = PSI_IO_SOME + res * 2;
1157 	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1158 		state = PSI_IO_FULL + res * 2;
1159 	else
1160 		return ERR_PTR(-EINVAL);
1161 
1162 	if (state >= PSI_NONIDLE)
1163 		return ERR_PTR(-EINVAL);
1164 
1165 	if (window_us < WINDOW_MIN_US ||
1166 		window_us > WINDOW_MAX_US)
1167 		return ERR_PTR(-EINVAL);
1168 
1169 	/* Check threshold */
1170 	if (threshold_us == 0 || threshold_us > window_us)
1171 		return ERR_PTR(-EINVAL);
1172 
1173 	t = kmalloc(sizeof(*t), GFP_KERNEL);
1174 	if (!t)
1175 		return ERR_PTR(-ENOMEM);
1176 
1177 	t->group = group;
1178 	t->state = state;
1179 	t->threshold = threshold_us * NSEC_PER_USEC;
1180 	t->win.size = window_us * NSEC_PER_USEC;
1181 	window_reset(&t->win, 0, 0, 0);
1182 
1183 	t->event = 0;
1184 	t->last_event_time = 0;
1185 	init_waitqueue_head(&t->event_wait);
1186 
1187 	mutex_lock(&group->trigger_lock);
1188 
1189 	if (!rcu_access_pointer(group->poll_task)) {
1190 		struct task_struct *task;
1191 
1192 		task = kthread_create(psi_poll_worker, group, "psimon");
1193 		if (IS_ERR(task)) {
1194 			kfree(t);
1195 			mutex_unlock(&group->trigger_lock);
1196 			return ERR_CAST(task);
1197 		}
1198 		atomic_set(&group->poll_wakeup, 0);
1199 		wake_up_process(task);
1200 		rcu_assign_pointer(group->poll_task, task);
1201 	}
1202 
1203 	list_add(&t->node, &group->triggers);
1204 	group->poll_min_period = min(group->poll_min_period,
1205 		div_u64(t->win.size, UPDATES_PER_WINDOW));
1206 	group->nr_triggers[t->state]++;
1207 	group->poll_states |= (1 << t->state);
1208 
1209 	mutex_unlock(&group->trigger_lock);
1210 
1211 	return t;
1212 }
1213 
1214 void psi_trigger_destroy(struct psi_trigger *t)
1215 {
1216 	struct psi_group *group;
1217 	struct task_struct *task_to_destroy = NULL;
1218 
1219 	/*
1220 	 * We do not check psi_disabled since it might have been disabled after
1221 	 * the trigger got created.
1222 	 */
1223 	if (!t)
1224 		return;
1225 
1226 	group = t->group;
1227 	/*
1228 	 * Wakeup waiters to stop polling and clear the queue to prevent it from
1229 	 * being accessed later. Can happen if cgroup is deleted from under a
1230 	 * polling process.
1231 	 */
1232 	wake_up_pollfree(&t->event_wait);
1233 
1234 	mutex_lock(&group->trigger_lock);
1235 
1236 	if (!list_empty(&t->node)) {
1237 		struct psi_trigger *tmp;
1238 		u64 period = ULLONG_MAX;
1239 
1240 		list_del(&t->node);
1241 		group->nr_triggers[t->state]--;
1242 		if (!group->nr_triggers[t->state])
1243 			group->poll_states &= ~(1 << t->state);
1244 		/* reset min update period for the remaining triggers */
1245 		list_for_each_entry(tmp, &group->triggers, node)
1246 			period = min(period, div_u64(tmp->win.size,
1247 					UPDATES_PER_WINDOW));
1248 		group->poll_min_period = period;
1249 		/* Destroy poll_task when the last trigger is destroyed */
1250 		if (group->poll_states == 0) {
1251 			group->polling_until = 0;
1252 			task_to_destroy = rcu_dereference_protected(
1253 					group->poll_task,
1254 					lockdep_is_held(&group->trigger_lock));
1255 			rcu_assign_pointer(group->poll_task, NULL);
1256 			del_timer(&group->poll_timer);
1257 		}
1258 	}
1259 
1260 	mutex_unlock(&group->trigger_lock);
1261 
1262 	/*
1263 	 * Wait for psi_schedule_poll_work RCU to complete its read-side
1264 	 * critical section before destroying the trigger and optionally the
1265 	 * poll_task.
1266 	 */
1267 	synchronize_rcu();
1268 	/*
1269 	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
1270 	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1271 	 */
1272 	if (task_to_destroy) {
1273 		/*
1274 		 * After the RCU grace period has expired, the worker
1275 		 * can no longer be found through group->poll_task.
1276 		 */
1277 		kthread_stop(task_to_destroy);
1278 		atomic_set(&group->poll_scheduled, 0);
1279 	}
1280 	kfree(t);
1281 }
1282 
1283 __poll_t psi_trigger_poll(void **trigger_ptr,
1284 				struct file *file, poll_table *wait)
1285 {
1286 	__poll_t ret = DEFAULT_POLLMASK;
1287 	struct psi_trigger *t;
1288 
1289 	if (static_branch_likely(&psi_disabled))
1290 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1291 
1292 	t = smp_load_acquire(trigger_ptr);
1293 	if (!t)
1294 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1295 
1296 	poll_wait(file, &t->event_wait, wait);
1297 
1298 	if (cmpxchg(&t->event, 1, 0) == 1)
1299 		ret |= EPOLLPRI;
1300 
1301 	return ret;
1302 }
1303 
1304 static ssize_t psi_write(struct file *file, const char __user *user_buf,
1305 			 size_t nbytes, enum psi_res res)
1306 {
1307 	char buf[32];
1308 	size_t buf_size;
1309 	struct seq_file *seq;
1310 	struct psi_trigger *new;
1311 
1312 	if (static_branch_likely(&psi_disabled))
1313 		return -EOPNOTSUPP;
1314 
1315 	if (!nbytes)
1316 		return -EINVAL;
1317 
1318 	buf_size = min(nbytes, sizeof(buf));
1319 	if (copy_from_user(buf, user_buf, buf_size))
1320 		return -EFAULT;
1321 
1322 	buf[buf_size - 1] = '\0';
1323 
1324 	seq = file->private_data;
1325 
1326 	/* Take seq->lock to protect seq->private from concurrent writes */
1327 	mutex_lock(&seq->lock);
1328 
1329 	/* Allow only one trigger per file descriptor */
1330 	if (seq->private) {
1331 		mutex_unlock(&seq->lock);
1332 		return -EBUSY;
1333 	}
1334 
1335 	new = psi_trigger_create(&psi_system, buf, nbytes, res);
1336 	if (IS_ERR(new)) {
1337 		mutex_unlock(&seq->lock);
1338 		return PTR_ERR(new);
1339 	}
1340 
1341 	smp_store_release(&seq->private, new);
1342 	mutex_unlock(&seq->lock);
1343 
1344 	return nbytes;
1345 }
1346 
1347 static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1348 			    size_t nbytes, loff_t *ppos)
1349 {
1350 	return psi_write(file, user_buf, nbytes, PSI_IO);
1351 }
1352 
1353 static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1354 				size_t nbytes, loff_t *ppos)
1355 {
1356 	return psi_write(file, user_buf, nbytes, PSI_MEM);
1357 }
1358 
1359 static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1360 			     size_t nbytes, loff_t *ppos)
1361 {
1362 	return psi_write(file, user_buf, nbytes, PSI_CPU);
1363 }
1364 
1365 static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1366 {
1367 	struct seq_file *seq = file->private_data;
1368 
1369 	return psi_trigger_poll(&seq->private, file, wait);
1370 }
1371 
1372 static int psi_fop_release(struct inode *inode, struct file *file)
1373 {
1374 	struct seq_file *seq = file->private_data;
1375 
1376 	psi_trigger_destroy(seq->private);
1377 	return single_release(inode, file);
1378 }
1379 
1380 static const struct proc_ops psi_io_proc_ops = {
1381 	.proc_open	= psi_io_open,
1382 	.proc_read	= seq_read,
1383 	.proc_lseek	= seq_lseek,
1384 	.proc_write	= psi_io_write,
1385 	.proc_poll	= psi_fop_poll,
1386 	.proc_release	= psi_fop_release,
1387 };
1388 
1389 static const struct proc_ops psi_memory_proc_ops = {
1390 	.proc_open	= psi_memory_open,
1391 	.proc_read	= seq_read,
1392 	.proc_lseek	= seq_lseek,
1393 	.proc_write	= psi_memory_write,
1394 	.proc_poll	= psi_fop_poll,
1395 	.proc_release	= psi_fop_release,
1396 };
1397 
1398 static const struct proc_ops psi_cpu_proc_ops = {
1399 	.proc_open	= psi_cpu_open,
1400 	.proc_read	= seq_read,
1401 	.proc_lseek	= seq_lseek,
1402 	.proc_write	= psi_cpu_write,
1403 	.proc_poll	= psi_fop_poll,
1404 	.proc_release	= psi_fop_release,
1405 };
1406 
1407 static int __init psi_proc_init(void)
1408 {
1409 	if (psi_enable) {
1410 		proc_mkdir("pressure", NULL);
1411 		proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
1412 		proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
1413 		proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
1414 	}
1415 	return 0;
1416 }
1417 module_init(psi_proc_init);
1418
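
Putting the /proc interface above together, here is a minimal userspace sketch (an editorial illustration following the trigger format accepted by psi_trigger_create() and the example in Documentation/accounting/psi.rst) that registers a memory trigger of 150 ms of SOME stall per 1 s window and then waits for events with poll():

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000";	/* threshold and window in us */
	struct pollfd fds;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0)
		return 1;
	if (write(fds.fd, trig, strlen(trig) + 1) < 0)
		return 1;
	fds.events = POLLPRI;

	while (1) {
		if (poll(&fds, 1, -1) < 0 && errno != EINTR)
			break;
		if (fds.revents & POLLERR)
			break;				/* event source went away */
		if (fds.revents & POLLPRI)
			printf("memory pressure event\n");
	}
	close(fds.fd);
	return 0;
}
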