1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * IO cost model based controller.
4  *
5  * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6  * Copyright (C) 2019 Andy Newell <newella@fb.com>
7  * Copyright (C) 2019 Facebook
8  *
9  * One challenge of controlling IO resources is the lack of a trivially
10  * observable cost metric.  This is distinguished from CPU and memory where
11  * wallclock time and the number of bytes can serve as accurate enough
12  * approximations.
13  *
14  * Bandwidth and iops are the most commonly used metrics for IO devices but
15  * depending on the type and specifics of the device, different IO patterns
16  * easily lead to multiple orders of magnitude variations rendering them
17  * useless for the purpose of IO capacity distribution.  While on-device
18  * time, with a lot of crutches, could serve as a useful approximation for
19  * non-queued rotational devices, this is no longer viable with modern
20  * devices, even the rotational ones.
21  *
22  * While there is no cost metric we can trivially observe, it isn't a
23  * complete mystery.  For example, on a rotational device, seek cost
24  * dominates while a contiguous transfer contributes a smaller amount
25  * proportional to the size.  If we can characterize at least the relative
26  * costs of these different types of IOs, it should be possible to
27  * implement a reasonable work-conserving proportional IO resource
28  * distribution.
29  *
30  * 1. IO Cost Model
31  *
32  * IO cost model estimates the cost of an IO given its basic parameters and
33  * history (e.g. the end sector of the last IO).  The cost is measured in
34  * device time.  If a given IO is estimated to cost 10ms, the device should
35  * be able to process ~100 of those IOs in a second.
36  *
37  * Currently, there's only one builtin cost model - linear.  Each IO is
38  * classified as sequential or random and given a base cost accordingly.
39  * On top of that, a size cost proportional to the length of the IO is
40  * added.  While simple, this model captures the operational
41  * characteristics of a wide variety of devices well enough.  Default
42  * parameters for several different classes of devices are provided and the
43  * parameters can be configured from userspace via
44  * /sys/fs/cgroup/io.cost.model.
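 *
 * Roughly, the linear model prices an IO in a given direction as
 *
 *   cost = base_cost(seq or rand) + nr_4k_pages * page_cost
 *
 * where the base and per-page coefficients are derived from the configured
 * bps and iops parameters (see calc_lcoefs() below).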
45  *
46  * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47  * device-specific coefficients.
48  *
49  * 2. Control Strategy
50  *
51  * The device virtual time (vtime) is used as the primary control metric.
52  * The control strategy is composed of the following three parts.
53  *
54  * 2-1. Vtime Distribution
55  *
56  * When a cgroup becomes active in terms of IOs, its hierarchical share is
57  * calculated.  Please consider the following hierarchy where the numbers
58  * inside parentheses denote the configured weights.
59  *
60  *           root
61  *         /       \
62  *      A (w:100)  B (w:300)
63  *      /       \
64  *  A0 (w:100)  A1 (w:100)
65  *
66  * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67  * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
68  * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
69  * 12.5% each.  The distribution mechanism only cares about these flattened
70  * shares.  They're called hweights (hierarchical weights) and always add
71  * up to 1 (WEIGHT_ONE).
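 *
 * Concretely, A0's 12.5% above is simply the product of its share at each
 * level of the hierarchy:
 *
 *   hweight(A0) = w(A) / (w(A) + w(B)) * w(A0) / (w(A0) + w(A1))
 *               = 100 / 400           * 100 / 200             = 12.5%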
72  *
73  * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74  * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75  * against the device vtime - an IO which takes 10ms on the underlying
76  * device is considered to take 80ms on A0.
77  *
78  * This constitutes the basis of IO capacity distribution.  Each cgroup's
79  * vtime is running at a rate determined by its hweight.  A cgroup tracks
80  * the vtime consumed by past IOs and can issue a new IO if doing so
81  * wouldn't outrun the current device vtime.  Otherwise, the IO is
82  * suspended until the vtime has progressed enough to cover it.
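 *
 * In rough pseudo-form, an IO of cost `cost` may be issued while
 *
 *   vtime(cgroup) + cost <= vnow(device)
 *
 * and is otherwise held back until the device vtime catches up.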
83  *
84  * 2-2. Vrate Adjustment
85  *
86  * It's unrealistic to expect the cost model to be perfect.  There are too
87  * many devices and even on the same device the overall performance
88  * fluctuates depending on numerous factors such as IO mixture and device
89  * internal garbage collection.  The controller needs to adapt dynamically.
90  *
91  * This is achieved by adjusting the overall IO rate according to how busy
92  * the device is.  If the device becomes overloaded, we're sending down too
93  * many IOs and should generally slow down.  If there are waiting issuers
94  * but the device isn't saturated, we're issuing too few and should
95  * generally speed up.
96  *
97  * To slow down, we lower the vrate - the rate at which the device vtime
98  * passes compared to the wall clock.  For example, if the vtime is running
99  * at the vrate of 75%, all cgroups added up would only be able to issue
100  * 750ms worth of IOs per second, and vice-versa for speeding up.
101  *
102  * Device busyness is determined using two criteria - rq wait and
103  * completion latencies.
104  *
105  * When a device gets saturated, the on-device and then the request queues
106  * fill up and a bio which is ready to be issued has to wait for a request
107  * to become available.  When this delay becomes noticeable, it's a clear
108  * indication that the device is saturated and we lower the vrate.  This
109  * saturation signal is fairly conservative as it only triggers when both
110  * hardware and software queues are filled up, and is used as the default
111  * busy signal.
112  *
113  * As devices can have deep queues and be unfair in how the queued commands
114  * are executed, solely depending on rq wait may not result in satisfactory
115  * control quality.  For a better control quality, completion latency QoS
116  * parameters can be configured so that the device is considered saturated
117  * if N'th percentile completion latency rises above the set point.
118  *
119  * The completion latency requirements are a function of both the
120  * underlying device characteristics and the desired IO latency quality of
121  * service.  There is an inherent trade-off - the tighter the latency QoS,
122  * the higher the bandwidth lossage.  Latency QoS is disabled by default
123  * and can be set through /sys/fs/cgroup/io.cost.qos.
124  *
125  * 2-3. Work Conservation
126  *
127  * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
128  * periodically while B is sending out enough parallel IOs to saturate the
129  * device on its own.  Let's say A's usage amounts to 100ms worth of IO
130  * cost per second, i.e., 10% of the device capacity.  The naive
131  * distribution of half and half would lead to 60% utilization of the
132  * device, a significant reduction in the total amount of work done
133  * compared to free-for-all competition.  This is too high a cost to pay
134  * for IO control.
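 *
 * (With a strict 50/50 split, A uses only 10% of the device out of its
 * half while B is capped at its 50%, so the device does 10% + 50% = 60%
 * of the work it otherwise could.)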
135  *
136  * To conserve the total amount of work done, we keep track of how much
137  * each active cgroup is actually using and yield part of its weight if
138  * there are other cgroups which can make use of it.  In the above case,
139  * A's weight will be lowered so that it hovers above the actual usage and
140  * B would be able to use the rest.
141  *
142  * As we don't want to penalize a cgroup for donating its weight, the
143  * surplus weight adjustment factors in a margin and has an immediate
144  * snapback mechanism in case the cgroup needs more IO vtime for itself.
145  *
146  * Note that adjusting down surplus weights has the same effects as
147  * accelerating vtime for other cgroups and work conservation can also be
148  * implemented by adjusting vrate dynamically.  However, working out who
149  * can donate and who should take back how much requires hweight
150  * propagation anyway, making it easier to implement and understand as a
151  * separate mechanism.
152  *
153  * 3. Monitoring
154  *
155  * Instead of debugfs or other clumsy monitoring mechanisms, this
156  * controller uses a drgn based monitoring script -
157  * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
158  * https://github.com/osandov/drgn.  The output looks like the following.
159  *
160  *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161  *                 active      weight      hweight% inflt% dbt  delay usages%
162  *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
163  *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
164  *
165  * - per	: Timer period
166  * - cur_per	: Internal wall and device vtime clock
167  * - vrate	: Device virtual time rate against wall clock
168  * - weight	: Surplus-adjusted and configured weights
169  * - hweight	: Surplus-adjusted and configured hierarchical weights
170  * - inflt	: The percentage of in-flight IO cost at the end of last period
171  * - delay	: Deferred issuer delay induction level and duration
172  * - usages	: Usage history
173  */
174 
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <linux/blk-cgroup.h>
182 #include <asm/local.h>
183 #include <asm/local64.h>
184 #include "blk-rq-qos.h"
185 #include "blk-stat.h"
186 #include "blk-wbt.h"
187 
188 #ifdef CONFIG_TRACEPOINTS
189 
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194 
195 #define TRACE_IOCG_PATH(type, iocg, ...)					\
196 	do {									\
197 		unsigned long flags;						\
198 		if (trace_iocost_##type##_enabled()) {				\
199 			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
200 			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
201 				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
202 			trace_iocost_##type(iocg, trace_iocg_path,		\
203 					      ##__VA_ARGS__);			\
204 			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
205 		}								\
206 	} while (0)
207 
208 #else	/* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
210 #endif	/* CONFIG_TRACEPOINTS */
211 
212 enum {
213 	MILLION			= 1000000,
214 
215 	/* timer period is calculated from latency requirements, bound it */
216 	MIN_PERIOD		= USEC_PER_MSEC,
217 	MAX_PERIOD		= USEC_PER_SEC,
218 
219 	/*
220 	 * iocg->vtime is targeted at 50% behind the device vtime, which
221 	 * serves as its IO credit buffer.  Surplus weight adjustment is
222 	 * immediately canceled if the vtime margin runs below 10%.
223 	 */
224 	MARGIN_MIN_PCT		= 10,
225 	MARGIN_LOW_PCT		= 20,
226 	MARGIN_TARGET_PCT	= 50,
227 
228 	INUSE_ADJ_STEP_PCT	= 25,
229 
230 	/* Have some play in timer operations */
231 	TIMER_SLACK_PCT		= 1,
232 
233 	/* 1/64k is granular enough and can easily be handled w/ u32 */
234 	WEIGHT_ONE		= 1 << 16,
235 };
236 
237 enum {
238 	/*
239 	 * As vtime is used to calculate the cost of each IO, it needs to
240 	 * be fairly high precision.  For example, it should be able to
241 	 * represent the cost of a single page worth of discard with
242 	 * sufficient accuracy.  At the same time, it should be able to
243 	 * represent reasonably long enough durations to be useful and
244 	 * convenient during operation.
245 	 *
246 	 * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
247 	 * granularity and days of wrap-around time even at extreme vrates.
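	 *
	 * (2^37 / 10^9 ~= 137 vtime ticks per nanosecond, while a signed
	 * 64bit vtime wraps only after 2^63 / 2^37 = 2^26 seconds, i.e.
	 * roughly 776 days at a vrate of 100%.)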
248 	 */
249 	VTIME_PER_SEC_SHIFT	= 37,
250 	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
251 	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
252 	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,
253 
254 	/* bound vrate adjustments within two orders of magnitude */
255 	VRATE_MIN_PPM		= 10000,	/* 1% */
256 	VRATE_MAX_PPM		= 100000000,	/* 10000% */
257 
258 	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259 	VRATE_CLAMP_ADJ_PCT	= 4,
260 
261 	/* switch iff the conditions are met for longer than this */
262 	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
263 };
264 
265 enum {
266 	/* if IOs end up waiting for requests, issue less */
267 	RQ_WAIT_BUSY_PCT	= 5,
268 
269 	/* unbusy hysteresis */
270 	UNBUSY_THR_PCT		= 75,
271 
272 	/*
273 	 * The effect of delay is indirect and non-linear and a huge amount of
274 	 * future debt can accumulate abruptly while unthrottled. Linearly scale
275 	 * up delay as debt is going up and then let it decay exponentially.
276 	 * This gives us quick ramp ups while delay is accumulating and long
277 	 * tails which can help reducing the frequency of debt explosions on
278 	 * unthrottle. The parameters are experimentally determined.
279 	 *
280 	 * The delay mechanism provides adequate protection and behavior in many
281 	 * cases. However, this is far from ideal and falls short on both
282 	 * fronts. The debtors are often throttled too harshly costing a
283 	 * significant level of fairness and possibly total work while the
284 	 * protection against their impacts on the system can be choppy and
285 	 * unreliable.
286 	 *
287 	 * The shortcoming primarily stems from the fact that, unlike for page
288 	 * cache, the kernel doesn't have well-defined back-pressure propagation
289 	 * mechanisms and policies for anonymous memory. Fully addressing this
290 	 * issue will likely require substantial improvements in the area.
291 	 */
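	/*
	 * For example, with the constants below, a vtime overage just above
	 * 500% of a period induces roughly MIN_DELAY, scaling linearly up to
	 * MAX_DELAY at 25000%, and the delay in effect then halves for every
	 * second it isn't pushed up again (see iocg_kick_delay()).
	 */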
292 	MIN_DELAY_THR_PCT	= 500,
293 	MAX_DELAY_THR_PCT	= 25000,
294 	MIN_DELAY		= 250,
295 	MAX_DELAY		= 250 * USEC_PER_MSEC,
296 
297 	/* halve debts if avg usage over 100ms is under 50% */
298 	DFGV_USAGE_PCT		= 50,
299 	DFGV_PERIOD		= 100 * USEC_PER_MSEC,
300 
301 	/* don't let cmds which take a very long time pin lagging for too long */
302 	MAX_LAGGING_PERIODS	= 10,
303 
304 	/*
305 	 * Count IO size in 4k pages.  The 12bit shift helps keep the
306 	 * size-proportional components of the cost calculation within a
307 	 * similar number of digits to the per-IO cost components.
308 	 */
309 	IOC_PAGE_SHIFT		= 12,
310 	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
311 	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,
312 
313 	/* IOs further apart than 16M are considered random for the linear model */
314 	LCOEF_RANDIO_PAGES	= 4096,
315 };
316 
317 enum ioc_running {
318 	IOC_IDLE,
319 	IOC_RUNNING,
320 	IOC_STOP,
321 };
322 
323 /* io.cost.qos controls including per-dev enable of the whole controller */
324 enum {
325 	QOS_ENABLE,
326 	QOS_CTRL,
327 	NR_QOS_CTRL_PARAMS,
328 };
329 
330 /* io.cost.qos params */
331 enum {
332 	QOS_RPPM,
333 	QOS_RLAT,
334 	QOS_WPPM,
335 	QOS_WLAT,
336 	QOS_MIN,
337 	QOS_MAX,
338 	NR_QOS_PARAMS,
339 };
340 
341 /* io.cost.model controls */
342 enum {
343 	COST_CTRL,
344 	COST_MODEL,
345 	NR_COST_CTRL_PARAMS,
346 };
347 
348 /* builtin linear cost model coefficients */
349 enum {
350 	I_LCOEF_RBPS,
351 	I_LCOEF_RSEQIOPS,
352 	I_LCOEF_RRANDIOPS,
353 	I_LCOEF_WBPS,
354 	I_LCOEF_WSEQIOPS,
355 	I_LCOEF_WRANDIOPS,
356 	NR_I_LCOEFS,
357 };
358 
359 enum {
360 	LCOEF_RPAGE,
361 	LCOEF_RSEQIO,
362 	LCOEF_RRANDIO,
363 	LCOEF_WPAGE,
364 	LCOEF_WSEQIO,
365 	LCOEF_WRANDIO,
366 	NR_LCOEFS,
367 };
368 
369 enum {
370 	AUTOP_INVALID,
371 	AUTOP_HDD,
372 	AUTOP_SSD_QD1,
373 	AUTOP_SSD_DFL,
374 	AUTOP_SSD_FAST,
375 };
376 
377 struct ioc_params {
378 	u32				qos[NR_QOS_PARAMS];
379 	u64				i_lcoefs[NR_I_LCOEFS];
380 	u64				lcoefs[NR_LCOEFS];
381 	u32				too_fast_vrate_pct;
382 	u32				too_slow_vrate_pct;
383 };
384 
385 struct ioc_margins {
386 	s64				min;
387 	s64				low;
388 	s64				target;
389 };
390 
391 struct ioc_missed {
392 	local_t				nr_met;
393 	local_t				nr_missed;
394 	u32				last_met;
395 	u32				last_missed;
396 };
397 
398 struct ioc_pcpu_stat {
399 	struct ioc_missed		missed[2];
400 
401 	local64_t			rq_wait_ns;
402 	u64				last_rq_wait_ns;
403 };
404 
405 /* per device */
406 struct ioc {
407 	struct rq_qos			rqos;
408 
409 	bool				enabled;
410 
411 	struct ioc_params		params;
412 	struct ioc_margins		margins;
413 	u32				period_us;
414 	u32				timer_slack_ns;
415 	u64				vrate_min;
416 	u64				vrate_max;
417 
418 	spinlock_t			lock;
419 	struct timer_list		timer;
420 	struct list_head		active_iocgs;	/* active cgroups */
421 	struct ioc_pcpu_stat __percpu	*pcpu_stat;
422 
423 	enum ioc_running		running;
424 	atomic64_t			vtime_rate;
425 	u64				vtime_base_rate;
426 	s64				vtime_err;
427 
428 	seqcount_spinlock_t		period_seqcount;
429 	u64				period_at;	/* wallclock starttime */
430 	u64				period_at_vtime; /* vtime starttime */
431 
432 	atomic64_t			cur_period;	/* inc'd each period */
433 	int				busy_level;	/* saturation history */
434 
435 	bool				weights_updated;
436 	atomic_t			hweight_gen;	/* for lazy hweights */
437 
438 	/* debt forgiveness */
439 	u64				dfgv_period_at;
440 	u64				dfgv_period_rem;
441 	u64				dfgv_usage_us_sum;
442 
443 	u64				autop_too_fast_at;
444 	u64				autop_too_slow_at;
445 	int				autop_idx;
446 	bool				user_qos_params:1;
447 	bool				user_cost_model:1;
448 };
449 
450 struct iocg_pcpu_stat {
451 	local64_t			abs_vusage;
452 };
453 
454 struct iocg_stat {
455 	u64				usage_us;
456 	u64				wait_us;
457 	u64				indebt_us;
458 	u64				indelay_us;
459 };
460 
461 /* per device-cgroup pair */
462 struct ioc_gq {
463 	struct blkg_policy_data		pd;
464 	struct ioc			*ioc;
465 
466 	/*
467 	 * An iocg can get its weight from two sources - an explicit
468 	 * per-device-cgroup configuration or the default weight of the
469 	 * cgroup.  `cfg_weight` is the explicit per-device-cgroup
470 	 * configuration.  `weight` is the effective weight considering both
471 	 * sources.
472 	 *
473 	 * When an idle cgroup becomes active its `active` goes from 0 to
474 	 * `weight`.  `inuse` is the surplus adjusted active weight.
475 	 * `active` and `inuse` are used to calculate `hweight_active` and
476 	 * `hweight_inuse`.
477 	 *
478 	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
479 	 * surplus adjustments.
480 	 *
481 	 * `inuse` may be adjusted dynamically during the period. `saved_*` are used
482 	 * to determine and track adjustments.
483 	 */
484 	u32				cfg_weight;
485 	u32				weight;
486 	u32				active;
487 	u32				inuse;
488 
489 	u32				last_inuse;
490 	s64				saved_margin;
491 
492 	sector_t			cursor;		/* to detect randio */
493 
494 	/*
495 	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
496 	 * issued.  If lagging behind device vtime, the delta represents
497 	 * the currently available IO budget.  If running ahead, the
498 	 * overage.
499 	 *
500 	 * `done_vtime` is the same but progressed on completion rather
501 	 * than issue.  The delta behind `vtime` represents the cost of
502 	 * currently in-flight IOs.
503 	 */
504 	atomic64_t			vtime;
505 	atomic64_t			done_vtime;
506 	u64				abs_vdebt;
507 
508 	/* current delay in effect and when it started */
509 	u64				delay;
510 	u64				delay_at;
511 
512 	/*
513 	 * The period this iocg was last active in.  Used for deactivation
514 	 * and invalidating `vtime`.
515 	 */
516 	atomic64_t			active_period;
517 	struct list_head		active_list;
518 
519 	/* see __propagate_weights() and current_hweight() for details */
520 	u64				child_active_sum;
521 	u64				child_inuse_sum;
522 	u64				child_adjusted_sum;
523 	int				hweight_gen;
524 	u32				hweight_active;
525 	u32				hweight_inuse;
526 	u32				hweight_donating;
527 	u32				hweight_after_donation;
528 
529 	struct list_head		walk_list;
530 	struct list_head		surplus_list;
531 
532 	struct wait_queue_head		waitq;
533 	struct hrtimer			waitq_timer;
534 
535 	/* timestamp at the latest activation */
536 	u64				activated_at;
537 
538 	/* statistics */
539 	struct iocg_pcpu_stat __percpu	*pcpu_stat;
540 	struct iocg_stat		local_stat;
541 	struct iocg_stat		desc_stat;
542 	struct iocg_stat		last_stat;
543 	u64				last_stat_abs_vusage;
544 	u64				usage_delta_us;
545 	u64				wait_since;
546 	u64				indebt_since;
547 	u64				indelay_since;
548 
549 	/* this iocg's depth in the hierarchy and ancestors including self */
550 	int				level;
551 	struct ioc_gq			*ancestors[];
552 };
553 
554 /* per cgroup */
555 struct ioc_cgrp {
556 	struct blkcg_policy_data	cpd;
557 	unsigned int			dfl_weight;
558 };
559 
560 struct ioc_now {
561 	u64				now_ns;
562 	u64				now;
563 	u64				vnow;
564 	u64				vrate;
565 };
566 
567 struct iocg_wait {
568 	struct wait_queue_entry		wait;
569 	struct bio			*bio;
570 	u64				abs_cost;
571 	bool				committed;
572 };
573 
574 struct iocg_wake_ctx {
575 	struct ioc_gq			*iocg;
576 	u32				hw_inuse;
577 	s64				vbudget;
578 };
579 
580 static const struct ioc_params autop[] = {
581 	[AUTOP_HDD] = {
582 		.qos				= {
583 			[QOS_RLAT]		=        250000, /* 250ms */
584 			[QOS_WLAT]		=        250000,
585 			[QOS_MIN]		= VRATE_MIN_PPM,
586 			[QOS_MAX]		= VRATE_MAX_PPM,
587 		},
588 		.i_lcoefs			= {
589 			[I_LCOEF_RBPS]		=     174019176,
590 			[I_LCOEF_RSEQIOPS]	=         41708,
591 			[I_LCOEF_RRANDIOPS]	=           370,
592 			[I_LCOEF_WBPS]		=     178075866,
593 			[I_LCOEF_WSEQIOPS]	=         42705,
594 			[I_LCOEF_WRANDIOPS]	=           378,
595 		},
596 	},
597 	[AUTOP_SSD_QD1] = {
598 		.qos				= {
599 			[QOS_RLAT]		=         25000, /* 25ms */
600 			[QOS_WLAT]		=         25000,
601 			[QOS_MIN]		= VRATE_MIN_PPM,
602 			[QOS_MAX]		= VRATE_MAX_PPM,
603 		},
604 		.i_lcoefs			= {
605 			[I_LCOEF_RBPS]		=     245855193,
606 			[I_LCOEF_RSEQIOPS]	=         61575,
607 			[I_LCOEF_RRANDIOPS]	=          6946,
608 			[I_LCOEF_WBPS]		=     141365009,
609 			[I_LCOEF_WSEQIOPS]	=         33716,
610 			[I_LCOEF_WRANDIOPS]	=         26796,
611 		},
612 	},
613 	[AUTOP_SSD_DFL] = {
614 		.qos				= {
615 			[QOS_RLAT]		=         25000, /* 25ms */
616 			[QOS_WLAT]		=         25000,
617 			[QOS_MIN]		= VRATE_MIN_PPM,
618 			[QOS_MAX]		= VRATE_MAX_PPM,
619 		},
620 		.i_lcoefs			= {
621 			[I_LCOEF_RBPS]		=     488636629,
622 			[I_LCOEF_RSEQIOPS]	=          8932,
623 			[I_LCOEF_RRANDIOPS]	=          8518,
624 			[I_LCOEF_WBPS]		=     427891549,
625 			[I_LCOEF_WSEQIOPS]	=         28755,
626 			[I_LCOEF_WRANDIOPS]	=         21940,
627 		},
628 		.too_fast_vrate_pct		=           500,
629 	},
630 	[AUTOP_SSD_FAST] = {
631 		.qos				= {
632 			[QOS_RLAT]		=          5000, /* 5ms */
633 			[QOS_WLAT]		=          5000,
634 			[QOS_MIN]		= VRATE_MIN_PPM,
635 			[QOS_MAX]		= VRATE_MAX_PPM,
636 		},
637 		.i_lcoefs			= {
638 			[I_LCOEF_RBPS]		=    3102524156LLU,
639 			[I_LCOEF_RSEQIOPS]	=        724816,
640 			[I_LCOEF_RRANDIOPS]	=        778122,
641 			[I_LCOEF_WBPS]		=    1742780862LLU,
642 			[I_LCOEF_WSEQIOPS]	=        425702,
643 			[I_LCOEF_WRANDIOPS]	=	 443193,
644 		},
645 		.too_slow_vrate_pct		=            10,
646 	},
647 };
648 
649 /*
650  * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
651  * vtime credit shortage and down on device saturation.
652  */
653 static u32 vrate_adj_pct[] =
654 	{ 0, 0, 0, 0,
655 	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
656 	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
657 	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
658 
659 static struct blkcg_policy blkcg_policy_iocost;
660 
661 /* accessors and helpers */
662 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
663 {
664 	return container_of(rqos, struct ioc, rqos);
665 }
666 
667 static struct ioc *q_to_ioc(struct request_queue *q)
668 {
669 	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
670 }
671 
672 static const char *q_name(struct request_queue *q)
673 {
674 	if (blk_queue_registered(q))
675 		return kobject_name(q->kobj.parent);
676 	else
677 		return "<unknown>";
678 }
679 
680 static const char __maybe_unused *ioc_name(struct ioc *ioc)
681 {
682 	return q_name(ioc->rqos.q);
683 }
684 
685 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
686 {
687 	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
688 }
689 
690 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
691 {
692 	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
693 }
694 
695 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
696 {
697 	return pd_to_blkg(&iocg->pd);
698 }
699 
700 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
701 {
702 	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
703 			    struct ioc_cgrp, cpd);
704 }
705 
706 /*
707  * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
708  * weight, the more expensive each IO.  Must round up.
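 *
 * e.g. at a hw_inuse of WEIGHT_ONE / 2 (50%), an abs_cost worth 10ms of
 * device time is charged as 20ms of vtime against the cgroup.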
709  */
710 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
711 {
712 	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
713 }
714 
715 /*
716  * The inverse of abs_cost_to_cost().  Must round up.
717  */
718 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
719 {
720 	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
721 }
722 
723 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
724 			    u64 abs_cost, u64 cost)
725 {
726 	struct iocg_pcpu_stat *gcs;
727 
728 	bio->bi_iocost_cost = cost;
729 	atomic64_add(cost, &iocg->vtime);
730 
731 	gcs = get_cpu_ptr(iocg->pcpu_stat);
732 	local64_add(abs_cost, &gcs->abs_vusage);
733 	put_cpu_ptr(gcs);
734 }
735 
736 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
737 {
738 	if (lock_ioc) {
739 		spin_lock_irqsave(&iocg->ioc->lock, *flags);
740 		spin_lock(&iocg->waitq.lock);
741 	} else {
742 		spin_lock_irqsave(&iocg->waitq.lock, *flags);
743 	}
744 }
745 
746 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
747 {
748 	if (unlock_ioc) {
749 		spin_unlock(&iocg->waitq.lock);
750 		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
751 	} else {
752 		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
753 	}
754 }
755 
756 #define CREATE_TRACE_POINTS
757 #include <trace/events/iocost.h>
758 
759 static void ioc_refresh_margins(struct ioc *ioc)
760 {
761 	struct ioc_margins *margins = &ioc->margins;
762 	u32 period_us = ioc->period_us;
763 	u64 vrate = ioc->vtime_base_rate;
764 
765 	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
766 	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
767 	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
768 }
769 
770 /* latency QoS params changed, update period_us and all the dependent params */
771 static void ioc_refresh_period_us(struct ioc *ioc)
772 {
773 	u32 ppm, lat, multi, period_us;
774 
775 	lockdep_assert_held(&ioc->lock);
776 
777 	/* pick the higher latency target */
778 	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
779 		ppm = ioc->params.qos[QOS_RPPM];
780 		lat = ioc->params.qos[QOS_RLAT];
781 	} else {
782 		ppm = ioc->params.qos[QOS_WPPM];
783 		lat = ioc->params.qos[QOS_WLAT];
784 	}
785 
786 	/*
787 	 * We want the period to be long enough to contain a healthy number
788 	 * of IOs while short enough for granular control.  Define it as a
789 	 * multiple of the latency target.  Ideally, the multiplier should
790 	 * be scaled according to the percentile so that it would nominally
791 	 * contain a certain number of requests.  Let's be simpler and
792 	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
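	 *
	 * e.g. a pct(50) target yields multi = (1000000 - 500000) / 50000 = 10,
	 * while pct(90) yields the minimum of 2, which tighter targets also
	 * clamp to.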
793 	 */
794 	if (ppm)
795 		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
796 	else
797 		multi = 2;
798 	period_us = multi * lat;
799 	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
800 
801 	/* calculate dependent params */
802 	ioc->period_us = period_us;
803 	ioc->timer_slack_ns = div64_u64(
804 		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
805 		100);
806 	ioc_refresh_margins(ioc);
807 }
808 
809 static int ioc_autop_idx(struct ioc *ioc)
810 {
811 	int idx = ioc->autop_idx;
812 	const struct ioc_params *p = &autop[idx];
813 	u32 vrate_pct;
814 	u64 now_ns;
815 
816 	/* rotational? */
817 	if (!blk_queue_nonrot(ioc->rqos.q))
818 		return AUTOP_HDD;
819 
820 	/* handle SATA SSDs w/ broken NCQ */
821 	if (blk_queue_depth(ioc->rqos.q) == 1)
822 		return AUTOP_SSD_QD1;
823 
824 	/* use one of the normal ssd sets */
825 	if (idx < AUTOP_SSD_DFL)
826 		return AUTOP_SSD_DFL;
827 
828 	/* if user is overriding anything, maintain what was there */
829 	if (ioc->user_qos_params || ioc->user_cost_model)
830 		return idx;
831 
832 	/* step up/down based on the vrate */
833 	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
834 	now_ns = ktime_get_ns();
835 
836 	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
837 		if (!ioc->autop_too_fast_at)
838 			ioc->autop_too_fast_at = now_ns;
839 		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
840 			return idx + 1;
841 	} else {
842 		ioc->autop_too_fast_at = 0;
843 	}
844 
845 	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
846 		if (!ioc->autop_too_slow_at)
847 			ioc->autop_too_slow_at = now_ns;
848 		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
849 			return idx - 1;
850 	} else {
851 		ioc->autop_too_slow_at = 0;
852 	}
853 
854 	return idx;
855 }
856 
857 /*
858  * Take the following as input
859  *
860  *  @bps	maximum sequential throughput
861  *  @seqiops	maximum sequential 4k iops
862  *  @randiops	maximum random 4k iops
863  *
864  * and calculate the linear model cost coefficients.
865  *
866  *  *@page	per-page cost		1s / (@bps / 4096)
867  *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
868  *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
869  */
870 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
871 			u64 *page, u64 *seqio, u64 *randio)
872 {
873 	u64 v;
874 
875 	*page = *seqio = *randio = 0;
876 
877 	if (bps) {
878 		u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
879 
880 		if (bps_pages)
881 			*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
882 		else
883 			*page = 1;
884 	}
885 
886 	if (seqiops) {
887 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
888 		if (v > *page)
889 			*seqio = v - *page;
890 	}
891 
892 	if (randiops) {
893 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
894 		if (v > *page)
895 			*randio = v - *page;
896 	}
897 }
898 
899 static void ioc_refresh_lcoefs(struct ioc *ioc)
900 {
901 	u64 *u = ioc->params.i_lcoefs;
902 	u64 *c = ioc->params.lcoefs;
903 
904 	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
905 		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
906 	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
907 		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
908 }
909 
910 static bool ioc_refresh_params(struct ioc *ioc, bool force)
911 {
912 	const struct ioc_params *p;
913 	int idx;
914 
915 	lockdep_assert_held(&ioc->lock);
916 
917 	idx = ioc_autop_idx(ioc);
918 	p = &autop[idx];
919 
920 	if (idx == ioc->autop_idx && !force)
921 		return false;
922 
923 	if (idx != ioc->autop_idx)
924 		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
925 
926 	ioc->autop_idx = idx;
927 	ioc->autop_too_fast_at = 0;
928 	ioc->autop_too_slow_at = 0;
929 
930 	if (!ioc->user_qos_params)
931 		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
932 	if (!ioc->user_cost_model)
933 		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
934 
935 	ioc_refresh_period_us(ioc);
936 	ioc_refresh_lcoefs(ioc);
937 
938 	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
939 					    VTIME_PER_USEC, MILLION);
940 	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
941 				   VTIME_PER_USEC, MILLION);
942 
943 	return true;
944 }
945 
946 /*
947  * When an iocg accumulates too much vtime or gets deactivated, we throw away
948  * some vtime, which lowers the overall device utilization. As the exact amount
949  * which is being thrown away is known, we can compensate by accelerating the
950  * vrate accordingly so that the extra vtime generated in the current period
951  * matches what got lost.
952  */
953 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
954 {
955 	s64 pleft = ioc->period_at + ioc->period_us - now->now;
956 	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
957 	s64 vcomp, vcomp_min, vcomp_max;
958 
959 	lockdep_assert_held(&ioc->lock);
960 
961 	/* we need some time left in this period */
962 	if (pleft <= 0)
963 		goto done;
964 
965 	/*
966 	 * Calculate how much vrate should be adjusted to offset the error.
967 	 * Limit the amount of adjustment and deduct the adjusted amount from
968 	 * the error.
969 	 */
970 	vcomp = -div64_s64(ioc->vtime_err, pleft);
971 	vcomp_min = -(ioc->vtime_base_rate >> 1);
972 	vcomp_max = ioc->vtime_base_rate;
973 	vcomp = clamp(vcomp, vcomp_min, vcomp_max);
974 
975 	ioc->vtime_err += vcomp * pleft;
976 
977 	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
978 done:
979 	/* bound how much error can accumulate */
980 	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
981 }
982 
983 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
984 				  int nr_lagging, int nr_shortages,
985 				  int prev_busy_level, u32 *missed_ppm)
986 {
987 	u64 vrate = ioc->vtime_base_rate;
988 	u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
989 
990 	if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
991 		if (ioc->busy_level != prev_busy_level || nr_lagging)
992 			trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
993 						   missed_ppm, rq_wait_pct,
994 						   nr_lagging, nr_shortages);
995 
996 		return;
997 	}
998 
999 	/*
1000 	 * If vrate is out of bounds, apply clamp gradually as the
1001 	 * bounds can change abruptly.  Otherwise, apply busy_level
1002 	 * based adjustment.
1003 	 */
1004 	if (vrate < vrate_min) {
1005 		vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
1006 		vrate = min(vrate, vrate_min);
1007 	} else if (vrate > vrate_max) {
1008 		vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1009 		vrate = max(vrate, vrate_max);
1010 	} else {
1011 		int idx = min_t(int, abs(ioc->busy_level),
1012 				ARRAY_SIZE(vrate_adj_pct) - 1);
1013 		u32 adj_pct = vrate_adj_pct[idx];
1014 
1015 		if (ioc->busy_level > 0)
1016 			adj_pct = 100 - adj_pct;
1017 		else
1018 			adj_pct = 100 + adj_pct;
1019 
1020 		vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1021 			      vrate_min, vrate_max);
1022 	}
1023 
1024 	trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1025 				   nr_lagging, nr_shortages);
1026 
1027 	ioc->vtime_base_rate = vrate;
1028 	ioc_refresh_margins(ioc);
1029 }
1030 
1031 /* take a snapshot of the current [v]time and vrate */
1032 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1033 {
1034 	unsigned seq;
1035 
1036 	now->now_ns = ktime_get_ns();
1037 	now->now = ktime_to_us(now->now_ns);
1038 	now->vrate = atomic64_read(&ioc->vtime_rate);
1039 
1040 	/*
1041 	 * The current vtime is
1042 	 *
1043 	 *   vtime at period start + (wallclock time since the start) * vrate
1044 	 *
1045 	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1046 	 * needed, they're seqcount protected.
1047 	 */
1048 	do {
1049 		seq = read_seqcount_begin(&ioc->period_seqcount);
1050 		now->vnow = ioc->period_at_vtime +
1051 			(now->now - ioc->period_at) * now->vrate;
1052 	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
1053 }
1054 
1055 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1056 {
1057 	WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1058 
1059 	write_seqcount_begin(&ioc->period_seqcount);
1060 	ioc->period_at = now->now;
1061 	ioc->period_at_vtime = now->vnow;
1062 	write_seqcount_end(&ioc->period_seqcount);
1063 
1064 	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1065 	add_timer(&ioc->timer);
1066 }
1067 
1068 /*
1069  * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1070  * weight sums and propagate upwards accordingly. If @save, the current margin
1071  * is saved to be used as reference for later inuse in-period adjustments.
1072  */
1073 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1074 				bool save, struct ioc_now *now)
1075 {
1076 	struct ioc *ioc = iocg->ioc;
1077 	int lvl;
1078 
1079 	lockdep_assert_held(&ioc->lock);
1080 
1081 	/*
1082 	 * For an active leaf node, its inuse shouldn't be zero or exceed
1083 	 * @active. An active internal node's inuse is solely determined by the
1084 	 * inuse to active ratio of its children regardless of @inuse.
1085 	 */
1086 	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1087 		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1088 					   iocg->child_active_sum);
1089 	} else {
1090 		inuse = clamp_t(u32, inuse, 1, active);
1091 	}
1092 
1093 	iocg->last_inuse = iocg->inuse;
1094 	if (save)
1095 		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1096 
1097 	if (active == iocg->active && inuse == iocg->inuse)
1098 		return;
1099 
1100 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1101 		struct ioc_gq *parent = iocg->ancestors[lvl];
1102 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1103 		u32 parent_active = 0, parent_inuse = 0;
1104 
1105 		/* update the level sums */
1106 		parent->child_active_sum += (s32)(active - child->active);
1107 		parent->child_inuse_sum += (s32)(inuse - child->inuse);
1108 		/* apply the updates */
1109 		child->active = active;
1110 		child->inuse = inuse;
1111 
1112 		/*
1113 		 * The delta between inuse and active sums indicates that
1114 		 * much of weight is being given away.  Parent's inuse
1115 		 * and active should reflect the ratio.
1116 		 */
1117 		if (parent->child_active_sum) {
1118 			parent_active = parent->weight;
1119 			parent_inuse = DIV64_U64_ROUND_UP(
1120 				parent_active * parent->child_inuse_sum,
1121 				parent->child_active_sum);
1122 		}
1123 
1124 		/* do we need to keep walking up? */
1125 		if (parent_active == parent->active &&
1126 		    parent_inuse == parent->inuse)
1127 			break;
1128 
1129 		active = parent_active;
1130 		inuse = parent_inuse;
1131 	}
1132 
1133 	ioc->weights_updated = true;
1134 }
1135 
1136 static void commit_weights(struct ioc *ioc)
1137 {
1138 	lockdep_assert_held(&ioc->lock);
1139 
1140 	if (ioc->weights_updated) {
1141 		/* paired with rmb in current_hweight(), see there */
1142 		smp_wmb();
1143 		atomic_inc(&ioc->hweight_gen);
1144 		ioc->weights_updated = false;
1145 	}
1146 }
1147 
1148 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1149 			      bool save, struct ioc_now *now)
1150 {
1151 	__propagate_weights(iocg, active, inuse, save, now);
1152 	commit_weights(iocg->ioc);
1153 }
1154 
1155 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1156 {
1157 	struct ioc *ioc = iocg->ioc;
1158 	int lvl;
1159 	u32 hwa, hwi;
1160 	int ioc_gen;
1161 
1162 	/* hot path - if uptodate, use cached */
1163 	ioc_gen = atomic_read(&ioc->hweight_gen);
1164 	if (ioc_gen == iocg->hweight_gen)
1165 		goto out;
1166 
1167 	/*
1168 	 * Paired with wmb in commit_weights(). If we saw the updated
1169 	 * hweight_gen, all the weight updates from __propagate_weights() are
1170 	 * visible too.
1171 	 *
1172 	 * We can race with weight updates during calculation and get it
1173 	 * wrong.  However, hweight_gen would have changed and a future
1174 	 * reader will recalculate and we're guaranteed to discard the
1175 	 * wrong result soon.
1176 	 */
1177 	smp_rmb();
1178 
1179 	hwa = hwi = WEIGHT_ONE;
1180 	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1181 		struct ioc_gq *parent = iocg->ancestors[lvl];
1182 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1183 		u64 active_sum = READ_ONCE(parent->child_active_sum);
1184 		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1185 		u32 active = READ_ONCE(child->active);
1186 		u32 inuse = READ_ONCE(child->inuse);
1187 
1188 		/* we can race with deactivations and either may read as zero */
1189 		if (!active_sum || !inuse_sum)
1190 			continue;
1191 
1192 		active_sum = max_t(u64, active, active_sum);
1193 		hwa = div64_u64((u64)hwa * active, active_sum);
1194 
1195 		inuse_sum = max_t(u64, inuse, inuse_sum);
1196 		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1197 	}
1198 
1199 	iocg->hweight_active = max_t(u32, hwa, 1);
1200 	iocg->hweight_inuse = max_t(u32, hwi, 1);
1201 	iocg->hweight_gen = ioc_gen;
1202 out:
1203 	if (hw_activep)
1204 		*hw_activep = iocg->hweight_active;
1205 	if (hw_inusep)
1206 		*hw_inusep = iocg->hweight_inuse;
1207 }
1208 
1209 /*
1210  * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1211  * other weights stay unchanged.
1212  */
1213 static u32 current_hweight_max(struct ioc_gq *iocg)
1214 {
1215 	u32 hwm = WEIGHT_ONE;
1216 	u32 inuse = iocg->active;
1217 	u64 child_inuse_sum;
1218 	int lvl;
1219 
1220 	lockdep_assert_held(&iocg->ioc->lock);
1221 
1222 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1223 		struct ioc_gq *parent = iocg->ancestors[lvl];
1224 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1225 
1226 		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1227 		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1228 		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1229 					   parent->child_active_sum);
1230 	}
1231 
1232 	return max_t(u32, hwm, 1);
1233 }
1234 
1235 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1236 {
1237 	struct ioc *ioc = iocg->ioc;
1238 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1239 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1240 	u32 weight;
1241 
1242 	lockdep_assert_held(&ioc->lock);
1243 
1244 	weight = iocg->cfg_weight ?: iocc->dfl_weight;
1245 	if (weight != iocg->weight && iocg->active)
1246 		propagate_weights(iocg, weight, iocg->inuse, true, now);
1247 	iocg->weight = weight;
1248 }
1249 
1250 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1251 {
1252 	struct ioc *ioc = iocg->ioc;
1253 	u64 last_period, cur_period;
1254 	u64 vtime, vtarget;
1255 	int i;
1256 
1257 	/*
1258 	 * If we seem to be already active, just update the stamp to tell the
1259 	 * timer that we're still active.  We don't mind occasional races.
1260 	 */
1261 	if (!list_empty(&iocg->active_list)) {
1262 		ioc_now(ioc, now);
1263 		cur_period = atomic64_read(&ioc->cur_period);
1264 		if (atomic64_read(&iocg->active_period) != cur_period)
1265 			atomic64_set(&iocg->active_period, cur_period);
1266 		return true;
1267 	}
1268 
1269 	/* racy check on internal node IOs, treat as root level IOs */
1270 	if (iocg->child_active_sum)
1271 		return false;
1272 
1273 	spin_lock_irq(&ioc->lock);
1274 
1275 	ioc_now(ioc, now);
1276 
1277 	/* update period */
1278 	cur_period = atomic64_read(&ioc->cur_period);
1279 	last_period = atomic64_read(&iocg->active_period);
1280 	atomic64_set(&iocg->active_period, cur_period);
1281 
1282 	/* already activated or breaking leaf-only constraint? */
1283 	if (!list_empty(&iocg->active_list))
1284 		goto succeed_unlock;
1285 	for (i = iocg->level - 1; i > 0; i--)
1286 		if (!list_empty(&iocg->ancestors[i]->active_list))
1287 			goto fail_unlock;
1288 
1289 	if (iocg->child_active_sum)
1290 		goto fail_unlock;
1291 
1292 	/*
1293 	 * Always start with the target budget. On deactivation, we throw away
1294 	 * anything above it.
1295 	 */
1296 	vtarget = now->vnow - ioc->margins.target;
1297 	vtime = atomic64_read(&iocg->vtime);
1298 
1299 	atomic64_add(vtarget - vtime, &iocg->vtime);
1300 	atomic64_add(vtarget - vtime, &iocg->done_vtime);
1301 	vtime = vtarget;
1302 
1303 	/*
1304 	 * Activate, propagate weight and start period timer if not
1305 	 * running.  Reset hweight_gen to avoid accidental match from
1306 	 * wrapping.
1307 	 */
1308 	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1309 	list_add(&iocg->active_list, &ioc->active_iocgs);
1310 
1311 	propagate_weights(iocg, iocg->weight,
1312 			  iocg->last_inuse ?: iocg->weight, true, now);
1313 
1314 	TRACE_IOCG_PATH(iocg_activate, iocg, now,
1315 			last_period, cur_period, vtime);
1316 
1317 	iocg->activated_at = now->now;
1318 
1319 	if (ioc->running == IOC_IDLE) {
1320 		ioc->running = IOC_RUNNING;
1321 		ioc->dfgv_period_at = now->now;
1322 		ioc->dfgv_period_rem = 0;
1323 		ioc_start_period(ioc, now);
1324 	}
1325 
1326 succeed_unlock:
1327 	spin_unlock_irq(&ioc->lock);
1328 	return true;
1329 
1330 fail_unlock:
1331 	spin_unlock_irq(&ioc->lock);
1332 	return false;
1333 }
1334 
1335 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1336 {
1337 	struct ioc *ioc = iocg->ioc;
1338 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1339 	u64 tdelta, delay, new_delay;
1340 	s64 vover, vover_pct;
1341 	u32 hwa;
1342 
1343 	lockdep_assert_held(&iocg->waitq.lock);
1344 
1345 	/*
1346 	 * If the delay is set by another CPU, we may be in the past. No need to
1347 	 * change anything if so. This avoids decay calculation underflow.
1348 	 */
1349 	if (time_before64(now->now, iocg->delay_at))
1350 		return false;
1351 
1352 	/* calculate the current delay in effect - 1/2 every second */
1353 	tdelta = now->now - iocg->delay_at;
1354 	if (iocg->delay)
1355 		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1356 	else
1357 		delay = 0;
1358 
1359 	/* calculate the new delay from the debt amount */
1360 	current_hweight(iocg, &hwa, NULL);
1361 	vover = atomic64_read(&iocg->vtime) +
1362 		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1363 	vover_pct = div64_s64(100 * vover,
1364 			      ioc->period_us * ioc->vtime_base_rate);
1365 
1366 	if (vover_pct <= MIN_DELAY_THR_PCT)
1367 		new_delay = 0;
1368 	else if (vover_pct >= MAX_DELAY_THR_PCT)
1369 		new_delay = MAX_DELAY;
1370 	else
1371 		new_delay = MIN_DELAY +
1372 			div_u64((MAX_DELAY - MIN_DELAY) *
1373 				(vover_pct - MIN_DELAY_THR_PCT),
1374 				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1375 
1376 	/* pick the higher one and apply */
1377 	if (new_delay > delay) {
1378 		iocg->delay = new_delay;
1379 		iocg->delay_at = now->now;
1380 		delay = new_delay;
1381 	}
1382 
1383 	if (delay >= MIN_DELAY) {
1384 		if (!iocg->indelay_since)
1385 			iocg->indelay_since = now->now;
1386 		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1387 		return true;
1388 	} else {
1389 		if (iocg->indelay_since) {
1390 			iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
1391 			iocg->indelay_since = 0;
1392 		}
1393 		iocg->delay = 0;
1394 		blkcg_clear_delay(blkg);
1395 		return false;
1396 	}
1397 }
1398 
1399 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1400 			    struct ioc_now *now)
1401 {
1402 	struct iocg_pcpu_stat *gcs;
1403 
1404 	lockdep_assert_held(&iocg->ioc->lock);
1405 	lockdep_assert_held(&iocg->waitq.lock);
1406 	WARN_ON_ONCE(list_empty(&iocg->active_list));
1407 
1408 	/*
1409 	 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1410 	 * inuse, donating all of its share to others until its debt is paid off.
1411 	 */
1412 	if (!iocg->abs_vdebt && abs_cost) {
1413 		iocg->indebt_since = now->now;
1414 		propagate_weights(iocg, iocg->active, 0, false, now);
1415 	}
1416 
1417 	iocg->abs_vdebt += abs_cost;
1418 
1419 	gcs = get_cpu_ptr(iocg->pcpu_stat);
1420 	local64_add(abs_cost, &gcs->abs_vusage);
1421 	put_cpu_ptr(gcs);
1422 }
1423 
1424 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1425 			  struct ioc_now *now)
1426 {
1427 	lockdep_assert_held(&iocg->ioc->lock);
1428 	lockdep_assert_held(&iocg->waitq.lock);
1429 
1430 	/* make sure that nobody messed with @iocg */
1431 	WARN_ON_ONCE(list_empty(&iocg->active_list));
1432 	WARN_ON_ONCE(iocg->inuse > 1);
1433 
1434 	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1435 
1436 	/* if debt is paid in full, restore inuse */
1437 	if (!iocg->abs_vdebt) {
1438 		iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
1439 		iocg->indebt_since = 0;
1440 
1441 		propagate_weights(iocg, iocg->active, iocg->last_inuse,
1442 				  false, now);
1443 	}
1444 }
1445 
1446 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1447 			int flags, void *key)
1448 {
1449 	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1450 	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1451 	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1452 
1453 	ctx->vbudget -= cost;
1454 
1455 	if (ctx->vbudget < 0)
1456 		return -1;
1457 
1458 	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1459 	wait->committed = true;
1460 
1461 	/*
1462 	 * autoremove_wake_function() removes the wait entry only when it
1463 	 * actually changed the task state. We want the wait always removed.
1464 	 * Remove explicitly and use default_wake_function(). Note that the
1465 	 * order of operations is important as finish_wait() tests whether
1466 	 * @wq_entry is removed without grabbing the lock.
1467 	 */
1468 	default_wake_function(wq_entry, mode, flags, key);
1469 	list_del_init_careful(&wq_entry->entry);
1470 	return 0;
1471 }
1472 
1473 /*
1474  * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1475  * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1476  * addition to iocg->waitq.lock.
1477  */
1478 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1479 			    struct ioc_now *now)
1480 {
1481 	struct ioc *ioc = iocg->ioc;
1482 	struct iocg_wake_ctx ctx = { .iocg = iocg };
1483 	u64 vshortage, expires, oexpires;
1484 	s64 vbudget;
1485 	u32 hwa;
1486 
1487 	lockdep_assert_held(&iocg->waitq.lock);
1488 
1489 	current_hweight(iocg, &hwa, NULL);
1490 	vbudget = now->vnow - atomic64_read(&iocg->vtime);
1491 
1492 	/* pay off debt */
1493 	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1494 		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1495 		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1496 		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1497 
1498 		lockdep_assert_held(&ioc->lock);
1499 
1500 		atomic64_add(vpay, &iocg->vtime);
1501 		atomic64_add(vpay, &iocg->done_vtime);
1502 		iocg_pay_debt(iocg, abs_vpay, now);
1503 		vbudget -= vpay;
1504 	}
1505 
1506 	if (iocg->abs_vdebt || iocg->delay)
1507 		iocg_kick_delay(iocg, now);
1508 
1509 	/*
1510 	 * Debt can still be outstanding if we haven't paid all yet or the
1511 	 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1512 	 * under debt. Make sure @vbudget reflects the outstanding amount and is
1513 	 * not positive.
1514 	 */
1515 	if (iocg->abs_vdebt) {
1516 		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1517 		vbudget = min_t(s64, 0, vbudget - vdebt);
1518 	}
1519 
1520 	/*
1521 	 * Wake up the ones which are due and see how much vtime we'll need for
1522 	 * the next one. As paying off debt restores hw_inuse, it must be read
1523 	 * after the above debt payment.
1524 	 */
1525 	ctx.vbudget = vbudget;
1526 	current_hweight(iocg, NULL, &ctx.hw_inuse);
1527 
1528 	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1529 
1530 	if (!waitqueue_active(&iocg->waitq)) {
1531 		if (iocg->wait_since) {
1532 			iocg->local_stat.wait_us += now->now - iocg->wait_since;
1533 			iocg->wait_since = 0;
1534 		}
1535 		return;
1536 	}
1537 
1538 	if (!iocg->wait_since)
1539 		iocg->wait_since = now->now;
1540 
1541 	if (WARN_ON_ONCE(ctx.vbudget >= 0))
1542 		return;
1543 
1544 	/* determine next wakeup, add a timer margin to guarantee chunking */
1545 	vshortage = -ctx.vbudget;
1546 	expires = now->now_ns +
1547 		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1548 		NSEC_PER_USEC;
1549 	expires += ioc->timer_slack_ns;
1550 
1551 	/* if already active and close enough, don't bother */
1552 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1553 	if (hrtimer_is_queued(&iocg->waitq_timer) &&
1554 	    abs(oexpires - expires) <= ioc->timer_slack_ns)
1555 		return;
1556 
1557 	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1558 			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1559 }
1560 
1561 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1562 {
1563 	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1564 	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1565 	struct ioc_now now;
1566 	unsigned long flags;
1567 
1568 	ioc_now(iocg->ioc, &now);
1569 
1570 	iocg_lock(iocg, pay_debt, &flags);
1571 	iocg_kick_waitq(iocg, pay_debt, &now);
1572 	iocg_unlock(iocg, pay_debt, &flags);
1573 
1574 	return HRTIMER_NORESTART;
1575 }
1576 
1577 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1578 {
1579 	u32 nr_met[2] = { };
1580 	u32 nr_missed[2] = { };
1581 	u64 rq_wait_ns = 0;
1582 	int cpu, rw;
1583 
1584 	for_each_online_cpu(cpu) {
1585 		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1586 		u64 this_rq_wait_ns;
1587 
1588 		for (rw = READ; rw <= WRITE; rw++) {
1589 			u32 this_met = local_read(&stat->missed[rw].nr_met);
1590 			u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1591 
1592 			nr_met[rw] += this_met - stat->missed[rw].last_met;
1593 			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1594 			stat->missed[rw].last_met = this_met;
1595 			stat->missed[rw].last_missed = this_missed;
1596 		}
1597 
1598 		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1599 		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1600 		stat->last_rq_wait_ns = this_rq_wait_ns;
1601 	}
1602 
1603 	for (rw = READ; rw <= WRITE; rw++) {
1604 		if (nr_met[rw] + nr_missed[rw])
1605 			missed_ppm_ar[rw] =
1606 				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1607 						   nr_met[rw] + nr_missed[rw]);
1608 		else
1609 			missed_ppm_ar[rw] = 0;
1610 	}
1611 
1612 	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1613 				   ioc->period_us * NSEC_PER_USEC);
1614 }
1615 
1616 /* was iocg idle this period? */
1617 static bool iocg_is_idle(struct ioc_gq *iocg)
1618 {
1619 	struct ioc *ioc = iocg->ioc;
1620 
1621 	/* did something get issued this period? */
1622 	if (atomic64_read(&iocg->active_period) ==
1623 	    atomic64_read(&ioc->cur_period))
1624 		return false;
1625 
1626 	/* is something in flight? */
1627 	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1628 		return false;
1629 
1630 	return true;
1631 }
1632 
1633 /*
1634  * Call this function on the target leaf @iocg's to build pre-order traversal
1635  * list of all the ancestors in @inner_walk. The inner nodes are linked through
1636  * ->walk_list and the caller is responsible for dissolving the list after use.
1637  */
1638 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1639 				  struct list_head *inner_walk)
1640 {
1641 	int lvl;
1642 
1643 	WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1644 
1645 	/* find the first ancestor which hasn't been visited yet */
1646 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1647 		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1648 			break;
1649 	}
1650 
1651 	/* walk down and visit the inner nodes to get pre-order traversal */
1652 	while (++lvl <= iocg->level - 1) {
1653 		struct ioc_gq *inner = iocg->ancestors[lvl];
1654 
1655 		/* record traversal order */
1656 		list_add_tail(&inner->walk_list, inner_walk);
1657 	}
1658 }
1659 
1660 /* collect per-cpu counters and propagate the deltas to the parent */
1661 static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
1662 {
1663 	struct ioc *ioc = iocg->ioc;
1664 	struct iocg_stat new_stat;
1665 	u64 abs_vusage = 0;
1666 	u64 vusage_delta;
1667 	int cpu;
1668 
1669 	lockdep_assert_held(&iocg->ioc->lock);
1670 
1671 	/* collect per-cpu counters */
1672 	for_each_possible_cpu(cpu) {
1673 		abs_vusage += local64_read(
1674 				per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1675 	}
1676 	vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1677 	iocg->last_stat_abs_vusage = abs_vusage;
1678 
1679 	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1680 	iocg->local_stat.usage_us += iocg->usage_delta_us;
1681 
1682 	/* propagate upwards */
1683 	new_stat.usage_us =
1684 		iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
1685 	new_stat.wait_us =
1686 		iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
1687 	new_stat.indebt_us =
1688 		iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
1689 	new_stat.indelay_us =
1690 		iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;
1691 
1692 	/* propagate the deltas to the parent */
1693 	if (iocg->level > 0) {
1694 		struct iocg_stat *parent_stat =
1695 			&iocg->ancestors[iocg->level - 1]->desc_stat;
1696 
1697 		parent_stat->usage_us +=
1698 			new_stat.usage_us - iocg->last_stat.usage_us;
1699 		parent_stat->wait_us +=
1700 			new_stat.wait_us - iocg->last_stat.wait_us;
1701 		parent_stat->indebt_us +=
1702 			new_stat.indebt_us - iocg->last_stat.indebt_us;
1703 		parent_stat->indelay_us +=
1704 			new_stat.indelay_us - iocg->last_stat.indelay_us;
1705 	}
1706 
1707 	iocg->last_stat = new_stat;
1708 }
1709 
1710 /* get stat counters ready for reading on all active iocgs */
1711 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1712 {
1713 	LIST_HEAD(inner_walk);
1714 	struct ioc_gq *iocg, *tiocg;
1715 
1716 	/* flush leaves and build inner node walk list */
1717 	list_for_each_entry(iocg, target_iocgs, active_list) {
1718 		iocg_flush_stat_one(iocg, now);
1719 		iocg_build_inner_walk(iocg, &inner_walk);
1720 	}
1721 
1722 	/* keep flushing upwards by walking the inner list backwards */
1723 	list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1724 		iocg_flush_stat_one(iocg, now);
1725 		list_del_init(&iocg->walk_list);
1726 	}
1727 }
1728 
1729 /*
1730  * Determine what @iocg's hweight_inuse should be after donating unused
1731  * capacity. @hwm is the upper bound and used to signal no donation. This
1732  * function also throws away @iocg's excess budget.
1733  */
1734 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1735 				  u32 usage, struct ioc_now *now)
1736 {
1737 	struct ioc *ioc = iocg->ioc;
1738 	u64 vtime = atomic64_read(&iocg->vtime);
1739 	s64 excess, delta, target, new_hwi;
1740 
1741 	/* debt handling owns inuse for debtors */
1742 	if (iocg->abs_vdebt)
1743 		return 1;
1744 
1745 	/* see whether minimum margin requirement is met */
1746 	if (waitqueue_active(&iocg->waitq) ||
1747 	    time_after64(vtime, now->vnow - ioc->margins.min))
1748 		return hwm;
1749 
1750 	/* throw away excess above target */
1751 	excess = now->vnow - vtime - ioc->margins.target;
1752 	if (excess > 0) {
1753 		atomic64_add(excess, &iocg->vtime);
1754 		atomic64_add(excess, &iocg->done_vtime);
1755 		vtime += excess;
1756 		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1757 	}
1758 
1759 	/*
1760 	 * Let's say the distance between iocg's and device's vtimes as a
1761 	 * fraction of period duration is delta. Assuming that the iocg will
1762 	 * consume the usage determined above, we want to determine new_hwi so
1763 	 * that delta equals MARGIN_TARGET at the end of the next period.
1764 	 *
1765 	 * We need to execute usage worth of IOs while spending the sum of the
1766 	 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1767 	 * (delta):
1768 	 *
1769 	 *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
1770 	 *
1771 	 * Therefore, the new_hwi is:
1772 	 *
1773 	 *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
1774 	 */
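	/*
	 * A rough worked example (numbers purely illustrative): with
	 * MARGIN_TARGET at 50% of the period, usage = 0.25 and delta = 0.75,
	 * new_hwi = 0.25 / (1 - 0.5 + 0.75) = 0.2 - the iocg's share shrinks
	 * so that spending the leftover plus the new budget lands it right at
	 * the target margin by the end of the next period.
	 */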
1775 	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1776 			  now->vnow - ioc->period_at_vtime);
1777 	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1778 	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1779 
1780 	return clamp_t(s64, new_hwi, 1, hwm);
1781 }
1782 
1783 /*
1784  * For work-conservation, an iocg which isn't using all of its share should
1785  * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1786  * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1787  *
1788  * #1 is mathematically simpler but has the drawback of requiring synchronous
1789  * global hweight_inuse updates when idle iocg's get activated or inuse weights
1790  * change due to donation snapbacks as it has the possibility of grossly
1791  * overshooting what's allowed by the model and vrate.
1792  *
1793  * #2 is inherently safe with local operations. The donating iocg can easily
1794  * snap back to higher weights when needed without worrying about impacts on
1795  * other nodes as the impacts will be inherently correct. This also makes idle
1796  * iocg activations safe. The only effect activations have is decreasing
1797  * hweight_inuse of others, the right solution to which is for those iocgs to
1798  * snap back to higher weights.
1799  *
1800  * So, we go with #2. The challenge is calculating how each donating iocg's
1801  * inuse should be adjusted to achieve the target donation amounts. This is done
1802  * using Andy's method described in the following pdf.
1803  *
1804  *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1805  *
1806  * Given the weights and target after-donation hweight_inuse values, Andy's
1807  * method determines how the proportional distribution should look like at each
1808  * sibling level to maintain the relative relationship between all non-donating
1809  * pairs. To roughly summarize, it divides the tree into donating and
1810  * non-donating parts, calculates global donation rate which is used to
1811  * determine the target hweight_inuse for each node, and then derives per-level
1812  * proportions.
1813  *
1814  * The following pdf shows that global distribution calculated this way can be
1815  * achieved by scaling inuse weights of donating leaves and propagating the
1816  * adjustments upwards proportionally.
1817  *
1818  *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1819  *
1820  * Combining the above two, we can determine how each leaf iocg's inuse should
1821  * be adjusted to achieve the target donation.
1822  *
1823  *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1824  *
1825  * The inline comments use symbols from the last pdf.
1826  *
1827  *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
1828  *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
1829  *   t is the sum of the absolute budgets of donating nodes in the subtree.
1830  *   w is the weight of the node. w = w_f + w_t
1831  *   w_f is the non-donating portion of w. w_f = w * f / b
1832  *   w_t is the donating portion of w. w_t = w * t / b
1833  *   s is the sum of all sibling weights. s = Sum(w) for siblings
1834  *   s_f and s_t are the non-donating and donating portions of s.
1835  *
1836  * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1837  * w_pt is the donating portion of the parent's weight and w'_pt the same value
1838  * after adjustments. Subscript r denotes the root node's values.
1839  */
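/*
 * To make the symbols concrete (illustrative numbers only): a node with
 * w = 100 whose subtree carries b = 1000 worth of budget, f = 600 of it under
 * non-donating nodes and t = 400 under donating ones, splits into
 * w_f = 100 * 600 / 1000 = 60 and w_t = 100 * 400 / 1000 = 40.
 */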
1840 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1841 {
1842 	LIST_HEAD(over_hwa);
1843 	LIST_HEAD(inner_walk);
1844 	struct ioc_gq *iocg, *tiocg, *root_iocg;
1845 	u32 after_sum, over_sum, over_target, gamma;
1846 
1847 	/*
1848 	 * It's pretty unlikely but possible for the total sum of
1849 	 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1850 	 * confuse the following calculations. If such condition is detected,
1851 	 * scale down everyone over its full share equally to keep the sum below
1852 	 * WEIGHT_ONE.
1853 	 */
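	/*
	 * For instance (illustrative): if after_sum exceeds WEIGHT_ONE by 100
	 * and the nodes over their full share sum to over_sum = 300, then
	 * over_delta = 101, over_target = 199 and each such node is scaled to
	 * hweight_after_donation * 199 / 300 below.
	 */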
1854 	after_sum = 0;
1855 	over_sum = 0;
1856 	list_for_each_entry(iocg, surpluses, surplus_list) {
1857 		u32 hwa;
1858 
1859 		current_hweight(iocg, &hwa, NULL);
1860 		after_sum += iocg->hweight_after_donation;
1861 
1862 		if (iocg->hweight_after_donation > hwa) {
1863 			over_sum += iocg->hweight_after_donation;
1864 			list_add(&iocg->walk_list, &over_hwa);
1865 		}
1866 	}
1867 
1868 	if (after_sum >= WEIGHT_ONE) {
1869 		/*
1870 		 * The delta should be deducted from the over_sum, calculate
1871 		 * target over_sum value.
1872 		 */
1873 		u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1874 		WARN_ON_ONCE(over_sum <= over_delta);
1875 		over_target = over_sum - over_delta;
1876 	} else {
1877 		over_target = 0;
1878 	}
1879 
1880 	list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1881 		if (over_target)
1882 			iocg->hweight_after_donation =
1883 				div_u64((u64)iocg->hweight_after_donation *
1884 					over_target, over_sum);
1885 		list_del_init(&iocg->walk_list);
1886 	}
1887 
1888 	/*
1889 	 * Build pre-order inner node walk list and prepare for donation
1890 	 * adjustment calculations.
1891 	 */
1892 	list_for_each_entry(iocg, surpluses, surplus_list) {
1893 		iocg_build_inner_walk(iocg, &inner_walk);
1894 	}
1895 
1896 	root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1897 	WARN_ON_ONCE(root_iocg->level > 0);
1898 
1899 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1900 		iocg->child_adjusted_sum = 0;
1901 		iocg->hweight_donating = 0;
1902 		iocg->hweight_after_donation = 0;
1903 	}
1904 
1905 	/*
1906 	 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1907 	 * up the hierarchy.
1908 	 */
1909 	list_for_each_entry(iocg, surpluses, surplus_list) {
1910 		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1911 
1912 		parent->hweight_donating += iocg->hweight_donating;
1913 		parent->hweight_after_donation += iocg->hweight_after_donation;
1914 	}
1915 
1916 	list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1917 		if (iocg->level > 0) {
1918 			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1919 
1920 			parent->hweight_donating += iocg->hweight_donating;
1921 			parent->hweight_after_donation += iocg->hweight_after_donation;
1922 		}
1923 	}
1924 
1925 	/*
1926 	 * Calculate inner hwa's (b) and make sure the donation values are
1927 	 * within the accepted ranges as we're doing low res calculations with
1928 	 * roundups.
1929 	 */
1930 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1931 		if (iocg->level) {
1932 			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1933 
1934 			iocg->hweight_active = DIV64_U64_ROUND_UP(
1935 				(u64)parent->hweight_active * iocg->active,
1936 				parent->child_active_sum);
1937 
1938 		}
1939 
1940 		iocg->hweight_donating = min(iocg->hweight_donating,
1941 					     iocg->hweight_active);
1942 		iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1943 						   iocg->hweight_donating - 1);
1944 		if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1945 				 iocg->hweight_donating <= 1 ||
1946 				 iocg->hweight_after_donation == 0)) {
1947 			pr_warn("iocg: invalid donation weights in ");
1948 			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1949 			pr_cont(": active=%u donating=%u after=%u\n",
1950 				iocg->hweight_active, iocg->hweight_donating,
1951 				iocg->hweight_after_donation);
1952 		}
1953 	}
1954 
1955 	/*
1956 	 * Calculate the global donation rate (gamma) - the rate to adjust
1957 	 * non-donating budgets by.
1958 	 *
1959 	 * No need to use 64bit multiplication here as the first operand is
1960 	 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1961 	 *
1962 	 * We know that there are beneficiary nodes and the sum of the donating
1963 	 * hweights can't be whole; however, due to the round-ups during hweight
1964 	 * calculations, root_iocg->hweight_donating might still end up equal to
1965 	 * or greater than whole. Limit the range when calculating the divider.
1966 	 *
1967 	 * gamma = (1 - t_r') / (1 - t_r)
1968 	 */
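	/*
	 * e.g. (illustrative) if donors held t_r = 40% of the device before
	 * donating and keep only t_r' = 20% afterwards, gamma =
	 * (1 - 0.2) / (1 - 0.4) ~= 1.33 and every non-donating budget is
	 * scaled up by about a third.
	 */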
1969 	gamma = DIV_ROUND_UP(
1970 		(WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1971 		WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1972 
1973 	/*
1974 	 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1975 	 * nodes.
1976 	 */
1977 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1978 		struct ioc_gq *parent;
1979 		u32 inuse, wpt, wptp;
1980 		u64 st, sf;
1981 
1982 		if (iocg->level == 0) {
1983 			/* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1984 			iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1985 				iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1986 				WEIGHT_ONE - iocg->hweight_after_donation);
1987 			continue;
1988 		}
1989 
1990 		parent = iocg->ancestors[iocg->level - 1];
1991 
1992 		/* b' = gamma * b_f + b_t' */
1993 		iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1994 			(u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1995 			WEIGHT_ONE) + iocg->hweight_after_donation;
1996 
1997 		/* w' = s' * b' / b'_p */
1998 		inuse = DIV64_U64_ROUND_UP(
1999 			(u64)parent->child_adjusted_sum * iocg->hweight_inuse,
2000 			parent->hweight_inuse);
2001 
2002 		/* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
2003 		st = DIV64_U64_ROUND_UP(
2004 			iocg->child_active_sum * iocg->hweight_donating,
2005 			iocg->hweight_active);
2006 		sf = iocg->child_active_sum - st;
2007 		wpt = DIV64_U64_ROUND_UP(
2008 			(u64)iocg->active * iocg->hweight_donating,
2009 			iocg->hweight_active);
2010 		wptp = DIV64_U64_ROUND_UP(
2011 			(u64)inuse * iocg->hweight_after_donation,
2012 			iocg->hweight_inuse);
2013 
2014 		iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
2015 	}
2016 
2017 	/*
2018 	 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
2019 	 * we can finally determine leaf adjustments.
2020 	 */
2021 	list_for_each_entry(iocg, surpluses, surplus_list) {
2022 		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2023 		u32 inuse;
2024 
2025 		/*
2026 		 * In-debt iocgs participated in the donation calculation with
2027 		 * the minimum target hweight_inuse. Configuring inuse
2028 		 * accordingly would work fine but debt handling expects
2029 		 * @iocg->inuse to stay at the minimum and we don't wanna
2030 		 * interfere.
2031 		 */
2032 		if (iocg->abs_vdebt) {
2033 			WARN_ON_ONCE(iocg->inuse > 1);
2034 			continue;
2035 		}
2036 
2037 		/* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2038 		inuse = DIV64_U64_ROUND_UP(
2039 			parent->child_adjusted_sum * iocg->hweight_after_donation,
2040 			parent->hweight_inuse);
2041 
2042 		TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2043 				iocg->inuse, inuse,
2044 				iocg->hweight_inuse,
2045 				iocg->hweight_after_donation);
2046 
2047 		__propagate_weights(iocg, iocg->active, inuse, true, now);
2048 	}
2049 
2050 	/* walk list should be dissolved after use */
2051 	list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2052 		list_del_init(&iocg->walk_list);
2053 }
2054 
2055 /*
2056  * A low weight iocg can amass a large amount of debt, for example, when
2057  * anonymous memory gets reclaimed aggressively. If the system has a lot of
2058  * memory paired with a slow IO device, the debt can span multiple seconds or
2059  * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2060  * up blocked paying its debt while the IO device is idle.
2061  *
2062  * The following protects against such cases. If the device has been
2063  * sufficiently idle for a while, the debts are halved and delays are
2064  * recalculated.
2065  */
2066 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2067 			      struct ioc_now *now)
2068 {
2069 	struct ioc_gq *iocg;
2070 	u64 dur, usage_pct, nr_cycles;
2071 
2072 	/* if no debtor, reset the cycle */
2073 	if (!nr_debtors) {
2074 		ioc->dfgv_period_at = now->now;
2075 		ioc->dfgv_period_rem = 0;
2076 		ioc->dfgv_usage_us_sum = 0;
2077 		return;
2078 	}
2079 
2080 	/*
2081 	 * Debtors can pass through a lot of writes choking the device and we
2082 	 * don't want to be forgiving debts while the device is struggling from
2083 	 * write bursts. If we're missing latency targets, consider the device
2084 	 * fully utilized.
2085 	 */
2086 	if (ioc->busy_level > 0)
2087 		usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2088 
2089 	ioc->dfgv_usage_us_sum += usage_us_sum;
2090 	if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2091 		return;
2092 
2093 	/*
2094 	 * At least DFGV_PERIOD has passed since the last period. Calculate the
2095 	 * average usage and reset the period counters.
2096 	 */
2097 	dur = now->now - ioc->dfgv_period_at;
2098 	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2099 
2100 	ioc->dfgv_period_at = now->now;
2101 	ioc->dfgv_usage_us_sum = 0;
2102 
2103 	/* if was too busy, reset everything */
2104 	if (usage_pct > DFGV_USAGE_PCT) {
2105 		ioc->dfgv_period_rem = 0;
2106 		return;
2107 	}
2108 
2109 	/*
2110 	 * Usage is lower than threshold. Let's forgive some debts. Debt
2111 	 * forgiveness runs off of the usual ioc timer but its period usually
2112 	 * doesn't match ioc's. Compensate the difference by performing the
2113 	 * reduction as many times as would fit in the duration since the last
2114 	 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2115 	 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2116 	 * reductions is doubled.
2117 	 */
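	/*
	 * e.g. (illustrative) if dur + dfgv_period_rem amounts to 2.3
	 * DFGV_PERIODs, the do_div() below yields nr_cycles = 2 - debts are
	 * halved twice (right shifted by 2) - and the remaining 0.3 period is
	 * carried over in dfgv_period_rem for the next run.
	 */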
2118 	nr_cycles = dur + ioc->dfgv_period_rem;
2119 	ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2120 
2121 	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2122 		u64 __maybe_unused old_debt, __maybe_unused old_delay;
2123 
2124 		if (!iocg->abs_vdebt && !iocg->delay)
2125 			continue;
2126 
2127 		spin_lock(&iocg->waitq.lock);
2128 
2129 		old_debt = iocg->abs_vdebt;
2130 		old_delay = iocg->delay;
2131 
2132 		if (iocg->abs_vdebt)
2133 			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2134 		if (iocg->delay)
2135 			iocg->delay = iocg->delay >> nr_cycles ?: 1;
2136 
2137 		iocg_kick_waitq(iocg, true, now);
2138 
2139 		TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2140 				old_debt, iocg->abs_vdebt,
2141 				old_delay, iocg->delay);
2142 
2143 		spin_unlock(&iocg->waitq.lock);
2144 	}
2145 }
2146 
2147 /*
2148  * Check the active iocgs' state to avoid oversleeping and deactivate
2149  * idle iocgs.
2150  *
2151  * Since waiters determine the sleep durations based on the vrate
2152  * they saw at the time of sleep, if vrate has increased, some
2153  * waiters could be sleeping for too long. Wake up tardy waiters
2154  * which should have woken up in the last period and expire idle
2155  * iocgs.
2156  */
2157 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2158 {
2159 	int nr_debtors = 0;
2160 	struct ioc_gq *iocg, *tiocg;
2161 
2162 	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2163 		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2164 		    !iocg->delay && !iocg_is_idle(iocg))
2165 			continue;
2166 
2167 		spin_lock(&iocg->waitq.lock);
2168 
2169 		/* flush wait and indebt stat deltas */
2170 		if (iocg->wait_since) {
2171 			iocg->local_stat.wait_us += now->now - iocg->wait_since;
2172 			iocg->wait_since = now->now;
2173 		}
2174 		if (iocg->indebt_since) {
2175 			iocg->local_stat.indebt_us +=
2176 				now->now - iocg->indebt_since;
2177 			iocg->indebt_since = now->now;
2178 		}
2179 		if (iocg->indelay_since) {
2180 			iocg->local_stat.indelay_us +=
2181 				now->now - iocg->indelay_since;
2182 			iocg->indelay_since = now->now;
2183 		}
2184 
2185 		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2186 		    iocg->delay) {
2187 			/* might be oversleeping vtime / hweight changes, kick */
2188 			iocg_kick_waitq(iocg, true, now);
2189 			if (iocg->abs_vdebt || iocg->delay)
2190 				nr_debtors++;
2191 		} else if (iocg_is_idle(iocg)) {
2192 			/* no waiter and idle, deactivate */
2193 			u64 vtime = atomic64_read(&iocg->vtime);
2194 			s64 excess;
2195 
2196 			/*
2197 			 * @iocg has been inactive for a full duration and will
2198 			 * have a high budget. Account anything above target as
2199 			 * error and throw away. On reactivation, it'll start
2200 			 * with the target budget.
2201 			 */
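			/*
			 * Scaling the excess by old_hwi (as in
			 * cost_to_abs_cost()) expresses the iocg-local excess
			 * in device vtime before charging it to vtime_err.
			 */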
2202 			excess = now->vnow - vtime - ioc->margins.target;
2203 			if (excess > 0) {
2204 				u32 old_hwi;
2205 
2206 				current_hweight(iocg, NULL, &old_hwi);
2207 				ioc->vtime_err -= div64_u64(excess * old_hwi,
2208 							    WEIGHT_ONE);
2209 			}
2210 
2211 			TRACE_IOCG_PATH(iocg_idle, iocg, now,
2212 					atomic64_read(&iocg->active_period),
2213 					atomic64_read(&ioc->cur_period), vtime);
2214 			__propagate_weights(iocg, 0, 0, false, now);
2215 			list_del_init(&iocg->active_list);
2216 		}
2217 
2218 		spin_unlock(&iocg->waitq.lock);
2219 	}
2220 
2221 	commit_weights(ioc);
2222 	return nr_debtors;
2223 }
2224 
2225 static void ioc_timer_fn(struct timer_list *timer)
2226 {
2227 	struct ioc *ioc = container_of(timer, struct ioc, timer);
2228 	struct ioc_gq *iocg, *tiocg;
2229 	struct ioc_now now;
2230 	LIST_HEAD(surpluses);
2231 	int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2232 	u64 usage_us_sum = 0;
2233 	u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2234 	u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2235 	u32 missed_ppm[2], rq_wait_pct;
2236 	u64 period_vtime;
2237 	int prev_busy_level;
2238 
2239 	/* how were the latencies during the period? */
2240 	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2241 
2242 	/* take care of active iocgs */
2243 	spin_lock_irq(&ioc->lock);
2244 
2245 	ioc_now(ioc, &now);
2246 
2247 	period_vtime = now.vnow - ioc->period_at_vtime;
2248 	if (WARN_ON_ONCE(!period_vtime)) {
2249 		spin_unlock_irq(&ioc->lock);
2250 		return;
2251 	}
2252 
2253 	nr_debtors = ioc_check_iocgs(ioc, &now);
2254 
2255 	/*
2256 	 * Wait and indebt stat are flushed above and the donation calculation
2257 	 * below needs updated usage stat. Let's bring stat up-to-date.
2258 	 */
2259 	iocg_flush_stat(&ioc->active_iocgs, &now);
2260 
2261 	/* calc usage and see whether some weights need to be moved around */
2262 	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2263 		u64 vdone, vtime, usage_us;
2264 		u32 hw_active, hw_inuse;
2265 
2266 		/*
2267 		 * Collect unused and wind vtime closer to vnow to prevent
2268 		 * iocgs from accumulating a large amount of budget.
2269 		 */
2270 		vdone = atomic64_read(&iocg->done_vtime);
2271 		vtime = atomic64_read(&iocg->vtime);
2272 		current_hweight(iocg, &hw_active, &hw_inuse);
2273 
2274 		/*
2275 		 * Latency QoS detection doesn't account for IOs which are
2276 		 * in-flight for longer than a period.  Detect them by
2277 		 * comparing vdone against period start.  If lagging behind
2278 		 * IOs from past periods, don't increase vrate.
2279 		 */
2280 		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2281 		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2282 		    time_after64(vtime, vdone) &&
2283 		    time_after64(vtime, now.vnow -
2284 				 MAX_LAGGING_PERIODS * period_vtime) &&
2285 		    time_before64(vdone, now.vnow - period_vtime))
2286 			nr_lagging++;
2287 
2288 		/*
2289 		 * Determine absolute usage factoring in in-flight IOs to avoid
2290 		 * high-latency completions appearing as idle.
2291 		 */
2292 		usage_us = iocg->usage_delta_us;
2293 		usage_us_sum += usage_us;
2294 
2295 		/* see whether there's surplus vtime */
2296 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2297 		if (hw_inuse < hw_active ||
2298 		    (!waitqueue_active(&iocg->waitq) &&
2299 		     time_before64(vtime, now.vnow - ioc->margins.low))) {
2300 			u32 hwa, old_hwi, hwm, new_hwi, usage;
2301 			u64 usage_dur;
2302 
2303 			if (vdone != vtime) {
2304 				u64 inflight_us = DIV64_U64_ROUND_UP(
2305 					cost_to_abs_cost(vtime - vdone, hw_inuse),
2306 					ioc->vtime_base_rate);
2307 
2308 				usage_us = max(usage_us, inflight_us);
2309 			}
2310 
2311 			/* convert to hweight based usage ratio */
2312 			if (time_after64(iocg->activated_at, ioc->period_at))
2313 				usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2314 			else
2315 				usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2316 
2317 			usage = clamp_t(u32,
2318 				DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2319 						   usage_dur),
2320 				1, WEIGHT_ONE);
2321 
2322 			/*
2323 			 * Already donating or accumulated enough to start.
2324 			 * Determine the donation amount.
2325 			 */
2326 			current_hweight(iocg, &hwa, &old_hwi);
2327 			hwm = current_hweight_max(iocg);
2328 			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2329 							 usage, &now);
2330 			/*
2331 			 * Donation calculation assumes hweight_after_donation
2332 			 * to be positive, a condition that a donor w/ hwa < 2
2333 			 * can't meet. Don't bother with donation if hwa is
2334 			 * below 2. It's not gonna make a meaningful difference
2335 			 * anyway.
2336 			 */
2337 			if (new_hwi < hwm && hwa >= 2) {
2338 				iocg->hweight_donating = hwa;
2339 				iocg->hweight_after_donation = new_hwi;
2340 				list_add(&iocg->surplus_list, &surpluses);
2341 			} else if (!iocg->abs_vdebt) {
2342 				/*
2343 				 * @iocg doesn't have enough to donate. Reset
2344 				 * its inuse to active.
2345 				 *
2346 				 * Don't reset debtors as their inuse's are
2347 				 * owned by debt handling. This shouldn't affect
2348 				 * donation calculation in any meaningful way
2349 				 * as @iocg doesn't have a meaningful amount of
2350 				 * share anyway.
2351 				 */
2352 				TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2353 						iocg->inuse, iocg->active,
2354 						iocg->hweight_inuse, new_hwi);
2355 
2356 				__propagate_weights(iocg, iocg->active,
2357 						    iocg->active, true, &now);
2358 				nr_shortages++;
2359 			}
2360 		} else {
2361 			/* genuinely short on vtime */
2362 			nr_shortages++;
2363 		}
2364 	}
2365 
2366 	if (!list_empty(&surpluses) && nr_shortages)
2367 		transfer_surpluses(&surpluses, &now);
2368 
2369 	commit_weights(ioc);
2370 
2371 	/* surplus list should be dissolved after use */
2372 	list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2373 		list_del_init(&iocg->surplus_list);
2374 
2375 	/*
2376 	 * If q is getting clogged or we're missing too much, we're issuing
2377 	 * too much IO and should lower vtime rate.  If we're not missing
2378 	 * and experiencing shortages but not surpluses, we're too stingy
2379 	 * and should increase vtime rate.
2380 	 */
2381 	prev_busy_level = ioc->busy_level;
2382 	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2383 	    missed_ppm[READ] > ppm_rthr ||
2384 	    missed_ppm[WRITE] > ppm_wthr) {
2385 		/* clearly missing QoS targets, slow down vrate */
2386 		ioc->busy_level = max(ioc->busy_level, 0);
2387 		ioc->busy_level++;
2388 	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2389 		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2390 		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2391 		/* QoS targets are being met with >25% margin */
2392 		if (nr_shortages) {
2393 			/*
2394 			 * We're throttling while the device has spare
2395 			 * capacity.  If vrate was being slowed down, stop.
2396 			 */
2397 			ioc->busy_level = min(ioc->busy_level, 0);
2398 
2399 			/*
2400 			 * If there are IOs spanning multiple periods, wait
2401 			 * them out before pushing the device harder.
2402 			 */
2403 			if (!nr_lagging)
2404 				ioc->busy_level--;
2405 		} else {
2406 			/*
2407 			 * Nobody is being throttled and the users aren't
2408 			 * issuing enough IOs to saturate the device.  We
2409 			 * simply don't know how close the device is to
2410 			 * saturation.  Coast.
2411 			 */
2412 			ioc->busy_level = 0;
2413 		}
2414 	} else {
2415 		/* inside the hysteresis margin, we're good */
2416 		ioc->busy_level = 0;
2417 	}
2418 
2419 	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2420 
2421 	ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2422 			      prev_busy_level, missed_ppm);
2423 
2424 	ioc_refresh_params(ioc, false);
2425 
2426 	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2427 
2428 	/*
2429 	 * This period is done.  Move onto the next one.  If nothing's
2430 	 * going on with the device, stop the timer.
2431 	 */
2432 	atomic64_inc(&ioc->cur_period);
2433 
2434 	if (ioc->running != IOC_STOP) {
2435 		if (!list_empty(&ioc->active_iocgs)) {
2436 			ioc_start_period(ioc, &now);
2437 		} else {
2438 			ioc->busy_level = 0;
2439 			ioc->vtime_err = 0;
2440 			ioc->running = IOC_IDLE;
2441 		}
2442 
2443 		ioc_refresh_vrate(ioc, &now);
2444 	}
2445 
2446 	spin_unlock_irq(&ioc->lock);
2447 }
2448 
2449 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2450 				      u64 abs_cost, struct ioc_now *now)
2451 {
2452 	struct ioc *ioc = iocg->ioc;
2453 	struct ioc_margins *margins = &ioc->margins;
2454 	u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2455 	u32 hwi, adj_step;
2456 	s64 margin;
2457 	u64 cost, new_inuse;
2458 	unsigned long flags;
2459 
2460 	current_hweight(iocg, NULL, &hwi);
2461 	old_hwi = hwi;
2462 	cost = abs_cost_to_cost(abs_cost, hwi);
2463 	margin = now->vnow - vtime - cost;
2464 
2465 	/* debt handling owns inuse for debtors */
2466 	if (iocg->abs_vdebt)
2467 		return cost;
2468 
2469 	/*
2470 	 * We only increase inuse during period and do so if the margin has
2471 	 * deteriorated since the previous adjustment.
2472 	 */
2473 	if (margin >= iocg->saved_margin || margin >= margins->low ||
2474 	    iocg->inuse == iocg->active)
2475 		return cost;
2476 
2477 	spin_lock_irqsave(&ioc->lock, flags);
2478 
2479 	/* we own inuse only when @iocg is in the normal active state */
2480 	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2481 		spin_unlock_irqrestore(&ioc->lock, flags);
2482 		return cost;
2483 	}
2484 
2485 	/*
2486 	 * Bump up inuse till @abs_cost fits in the existing budget.
2487 	 * adj_step must be determined after acquiring ioc->lock - we might
2488 	 * have raced and lost to another thread for activation and could
2489 	 * be reading 0 iocg->active before ioc->lock which will lead to
2490 	 * infinite loop.
2491 	 */
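	/*
	 * Roughly: each pass of the loop below raises inuse by
	 * INUSE_ADJ_STEP_PCT of ->active, recomputes hweight_inuse and the
	 * resulting cost, and stops once the cost fits within the budget or
	 * inuse has reached ->active.
	 */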
2492 	new_inuse = iocg->inuse;
2493 	adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2494 	do {
2495 		new_inuse = new_inuse + adj_step;
2496 		propagate_weights(iocg, iocg->active, new_inuse, true, now);
2497 		current_hweight(iocg, NULL, &hwi);
2498 		cost = abs_cost_to_cost(abs_cost, hwi);
2499 	} while (time_after64(vtime + cost, now->vnow) &&
2500 		 iocg->inuse != iocg->active);
2501 
2502 	spin_unlock_irqrestore(&ioc->lock, flags);
2503 
2504 	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2505 			old_inuse, iocg->inuse, old_hwi, hwi);
2506 
2507 	return cost;
2508 }
2509 
2510 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2511 				    bool is_merge, u64 *costp)
2512 {
2513 	struct ioc *ioc = iocg->ioc;
2514 	u64 coef_seqio, coef_randio, coef_page;
2515 	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2516 	u64 seek_pages = 0;
2517 	u64 cost = 0;
2518 
2519 	switch (bio_op(bio)) {
2520 	case REQ_OP_READ:
2521 		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
2522 		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
2523 		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
2524 		break;
2525 	case REQ_OP_WRITE:
2526 		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
2527 		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
2528 		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
2529 		break;
2530 	default:
2531 		goto out;
2532 	}
2533 
2534 	if (iocg->cursor) {
2535 		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2536 		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2537 	}
2538 
2539 	if (!is_merge) {
2540 		if (seek_pages > LCOEF_RANDIO_PAGES) {
2541 			cost += coef_randio;
2542 		} else {
2543 			cost += coef_seqio;
2544 		}
2545 	}
2546 	cost += pages * coef_page;
2547 out:
2548 	*costp = cost;
2549 }
2550 
2551 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2552 {
2553 	u64 cost;
2554 
2555 	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2556 	return cost;
2557 }
2558 
2559 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2560 					 u64 *costp)
2561 {
2562 	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2563 
2564 	switch (req_op(rq)) {
2565 	case REQ_OP_READ:
2566 		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2567 		break;
2568 	case REQ_OP_WRITE:
2569 		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2570 		break;
2571 	default:
2572 		*costp = 0;
2573 	}
2574 }
2575 
2576 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2577 {
2578 	u64 cost;
2579 
2580 	calc_size_vtime_cost_builtin(rq, ioc, &cost);
2581 	return cost;
2582 }
2583 
2584 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2585 {
2586 	struct blkcg_gq *blkg = bio->bi_blkg;
2587 	struct ioc *ioc = rqos_to_ioc(rqos);
2588 	struct ioc_gq *iocg = blkg_to_iocg(blkg);
2589 	struct ioc_now now;
2590 	struct iocg_wait wait;
2591 	u64 abs_cost, cost, vtime;
2592 	bool use_debt, ioc_locked;
2593 	unsigned long flags;
2594 
2595 	/* bypass IOs if disabled, still initializing, or for root cgroup */
2596 	if (!ioc->enabled || !iocg || !iocg->level)
2597 		return;
2598 
2599 	/* calculate the absolute vtime cost */
2600 	abs_cost = calc_vtime_cost(bio, iocg, false);
2601 	if (!abs_cost)
2602 		return;
2603 
2604 	if (!iocg_activate(iocg, &now))
2605 		return;
2606 
2607 	iocg->cursor = bio_end_sector(bio);
2608 	vtime = atomic64_read(&iocg->vtime);
2609 	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2610 
2611 	/*
2612 	 * If no one's waiting and within budget, issue right away.  The
2613 	 * tests are racy but the races aren't systemic - we only miss once
2614 	 * in a while which is fine.
2615 	 */
2616 	if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2617 	    time_before_eq64(vtime + cost, now.vnow)) {
2618 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2619 		return;
2620 	}
2621 
2622 	/*
2623 	 * We're over budget. This can be handled in two ways. IOs which may
2624 	 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2625 	 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2626 	 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2627 	 * whether debt handling is needed and acquire locks accordingly.
2628 	 */
2629 	use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2630 	ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2631 retry_lock:
2632 	iocg_lock(iocg, ioc_locked, &flags);
2633 
2634 	/*
2635 	 * @iocg must stay activated for debt and waitq handling. Deactivation
2636 	 * is synchronized against both ioc->lock and waitq.lock and we won't
2637 	 * get deactivated as long as we're waiting or have debt, so we're good
2638 	 * if we're activated here. In the unlikely cases that we aren't, just
2639 	 * issue the IO.
2640 	 */
2641 	if (unlikely(list_empty(&iocg->active_list))) {
2642 		iocg_unlock(iocg, ioc_locked, &flags);
2643 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2644 		return;
2645 	}
2646 
2647 	/*
2648 	 * We're over budget. If @bio has to be issued regardless, remember
2649 	 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2650 	 * off the debt before waking more IOs.
2651 	 *
2652 	 * This way, the debt is continuously paid off each period with the
2653 	 * actual budget available to the cgroup. If we just wound vtime, we
2654 	 * would incorrectly use the current hw_inuse for the entire amount
2655 	 * which, for example, can lead to the cgroup staying blocked for a
2656 	 * long time even with substantially raised hw_inuse.
2657 	 *
2658 	 * An iocg with vdebt should stay online so that the timer can keep
2659 	 * deducting its vdebt and [de]activate use_delay mechanism
2660 	 * accordingly. We don't want to race against the timer trying to
2661 	 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2662 	 * penalizing the cgroup and its descendants.
2663 	 */
2664 	if (use_debt) {
2665 		iocg_incur_debt(iocg, abs_cost, &now);
2666 		if (iocg_kick_delay(iocg, &now))
2667 			blkcg_schedule_throttle(rqos->q,
2668 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2669 		iocg_unlock(iocg, ioc_locked, &flags);
2670 		return;
2671 	}
2672 
2673 	/* guarantee that iocgs w/ waiters have maximum inuse */
2674 	if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2675 		if (!ioc_locked) {
2676 			iocg_unlock(iocg, false, &flags);
2677 			ioc_locked = true;
2678 			goto retry_lock;
2679 		}
2680 		propagate_weights(iocg, iocg->active, iocg->active, true,
2681 				  &now);
2682 	}
2683 
2684 	/*
2685 	 * Append self to the waitq and schedule the wakeup timer if we're
2686 	 * the first waiter.  The timer duration is calculated based on the
2687 	 * current vrate.  vtime and hweight changes can make it too short
2688 	 * or too long.  Each wait entry records the absolute cost it's
2689 	 * waiting for to allow re-evaluation using a custom wait entry.
2690 	 *
2691 	 * If too short, the timer simply reschedules itself.  If too long,
2692 	 * the period timer will notice and trigger wakeups.
2693 	 *
2694 	 * All waiters are on iocg->waitq and the wait states are
2695 	 * synchronized using waitq.lock.
2696 	 */
2697 	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2698 	wait.wait.private = current;
2699 	wait.bio = bio;
2700 	wait.abs_cost = abs_cost;
2701 	wait.committed = false;	/* will be set true by waker */
2702 
2703 	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2704 	iocg_kick_waitq(iocg, ioc_locked, &now);
2705 
2706 	iocg_unlock(iocg, ioc_locked, &flags);
2707 
2708 	while (true) {
2709 		set_current_state(TASK_UNINTERRUPTIBLE);
2710 		if (wait.committed)
2711 			break;
2712 		io_schedule();
2713 	}
2714 
2715 	/* waker already committed us, proceed */
2716 	finish_wait(&iocg->waitq, &wait.wait);
2717 }
2718 
2719 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2720 			   struct bio *bio)
2721 {
2722 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2723 	struct ioc *ioc = rqos_to_ioc(rqos);
2724 	sector_t bio_end = bio_end_sector(bio);
2725 	struct ioc_now now;
2726 	u64 vtime, abs_cost, cost;
2727 	unsigned long flags;
2728 
2729 	/* bypass if disabled, still initializing, or for root cgroup */
2730 	if (!ioc->enabled || !iocg || !iocg->level)
2731 		return;
2732 
2733 	abs_cost = calc_vtime_cost(bio, iocg, true);
2734 	if (!abs_cost)
2735 		return;
2736 
2737 	ioc_now(ioc, &now);
2738 
2739 	vtime = atomic64_read(&iocg->vtime);
2740 	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2741 
2742 	/* update cursor if backmerging into the request at the cursor */
2743 	if (blk_rq_pos(rq) < bio_end &&
2744 	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2745 		iocg->cursor = bio_end;
2746 
2747 	/*
2748 	 * Charge if there's enough vtime budget and the existing request has
2749 	 * cost assigned.
2750 	 */
2751 	if (rq->bio && rq->bio->bi_iocost_cost &&
2752 	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2753 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2754 		return;
2755 	}
2756 
2757 	/*
2758 	 * Otherwise, account it as debt if @iocg is online, which it should
2759 	 * be for the vast majority of cases. See debt handling in
2760 	 * ioc_rqos_throttle() for details.
2761 	 */
2762 	spin_lock_irqsave(&ioc->lock, flags);
2763 	spin_lock(&iocg->waitq.lock);
2764 
2765 	if (likely(!list_empty(&iocg->active_list))) {
2766 		iocg_incur_debt(iocg, abs_cost, &now);
2767 		if (iocg_kick_delay(iocg, &now))
2768 			blkcg_schedule_throttle(rqos->q,
2769 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2770 	} else {
2771 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2772 	}
2773 
2774 	spin_unlock(&iocg->waitq.lock);
2775 	spin_unlock_irqrestore(&ioc->lock, flags);
2776 }
2777 
2778 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2779 {
2780 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2781 
2782 	if (iocg && bio->bi_iocost_cost)
2783 		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2784 }
2785 
2786 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2787 {
2788 	struct ioc *ioc = rqos_to_ioc(rqos);
2789 	struct ioc_pcpu_stat *ccs;
2790 	u64 on_q_ns, rq_wait_ns, size_nsec;
2791 	int pidx, rw;
2792 
2793 	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2794 		return;
2795 
2796 	switch (req_op(rq) & REQ_OP_MASK) {
2797 	case REQ_OP_READ:
2798 		pidx = QOS_RLAT;
2799 		rw = READ;
2800 		break;
2801 	case REQ_OP_WRITE:
2802 		pidx = QOS_WLAT;
2803 		rw = WRITE;
2804 		break;
2805 	default:
2806 		return;
2807 	}
2808 
2809 	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2810 	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2811 	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2812 
2813 	ccs = get_cpu_ptr(ioc->pcpu_stat);
2814 
2815 	if (on_q_ns <= size_nsec ||
2816 	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2817 		local_inc(&ccs->missed[rw].nr_met);
2818 	else
2819 		local_inc(&ccs->missed[rw].nr_missed);
2820 
2821 	local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2822 
2823 	put_cpu_ptr(ccs);
2824 }
2825 
2826 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2827 {
2828 	struct ioc *ioc = rqos_to_ioc(rqos);
2829 
2830 	spin_lock_irq(&ioc->lock);
2831 	ioc_refresh_params(ioc, false);
2832 	spin_unlock_irq(&ioc->lock);
2833 }
2834 
2835 static void ioc_rqos_exit(struct rq_qos *rqos)
2836 {
2837 	struct ioc *ioc = rqos_to_ioc(rqos);
2838 
2839 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2840 
2841 	spin_lock_irq(&ioc->lock);
2842 	ioc->running = IOC_STOP;
2843 	spin_unlock_irq(&ioc->lock);
2844 
2845 	del_timer_sync(&ioc->timer);
2846 	free_percpu(ioc->pcpu_stat);
2847 	kfree(ioc);
2848 }
2849 
2850 static struct rq_qos_ops ioc_rqos_ops = {
2851 	.throttle = ioc_rqos_throttle,
2852 	.merge = ioc_rqos_merge,
2853 	.done_bio = ioc_rqos_done_bio,
2854 	.done = ioc_rqos_done,
2855 	.queue_depth_changed = ioc_rqos_queue_depth_changed,
2856 	.exit = ioc_rqos_exit,
2857 };
2858 
2859 static int blk_iocost_init(struct request_queue *q)
2860 {
2861 	struct ioc *ioc;
2862 	struct rq_qos *rqos;
2863 	int i, cpu, ret;
2864 
2865 	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2866 	if (!ioc)
2867 		return -ENOMEM;
2868 
2869 	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2870 	if (!ioc->pcpu_stat) {
2871 		kfree(ioc);
2872 		return -ENOMEM;
2873 	}
2874 
2875 	for_each_possible_cpu(cpu) {
2876 		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2877 
2878 		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2879 			local_set(&ccs->missed[i].nr_met, 0);
2880 			local_set(&ccs->missed[i].nr_missed, 0);
2881 		}
2882 		local64_set(&ccs->rq_wait_ns, 0);
2883 	}
2884 
2885 	rqos = &ioc->rqos;
2886 	rqos->id = RQ_QOS_COST;
2887 	rqos->ops = &ioc_rqos_ops;
2888 	rqos->q = q;
2889 
2890 	spin_lock_init(&ioc->lock);
2891 	timer_setup(&ioc->timer, ioc_timer_fn, 0);
2892 	INIT_LIST_HEAD(&ioc->active_iocgs);
2893 
2894 	ioc->running = IOC_IDLE;
2895 	ioc->vtime_base_rate = VTIME_PER_USEC;
2896 	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2897 	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2898 	ioc->period_at = ktime_to_us(ktime_get());
2899 	atomic64_set(&ioc->cur_period, 0);
2900 	atomic_set(&ioc->hweight_gen, 0);
2901 
2902 	spin_lock_irq(&ioc->lock);
2903 	ioc->autop_idx = AUTOP_INVALID;
2904 	ioc_refresh_params(ioc, true);
2905 	spin_unlock_irq(&ioc->lock);
2906 
2907 	/*
2908 	 * rqos must be added before activation to allow iocg_pd_init() to
2909 	 * lookup the ioc from q. This means that the rqos methods may get
2910 	 * called before policy activation completion, so they can't assume that
2911 	 * the target bio has an iocg associated and must test for a NULL iocg.
2912 	 */
2913 	ret = rq_qos_add(q, rqos);
2914 	if (ret)
2915 		goto err_free_ioc;
2916 
2917 	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2918 	if (ret)
2919 		goto err_del_qos;
2920 	return 0;
2921 
2922 err_del_qos:
2923 	rq_qos_del(q, rqos);
2924 err_free_ioc:
2925 	free_percpu(ioc->pcpu_stat);
2926 	kfree(ioc);
2927 	return ret;
2928 }
2929 
2930 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2931 {
2932 	struct ioc_cgrp *iocc;
2933 
2934 	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2935 	if (!iocc)
2936 		return NULL;
2937 
2938 	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2939 	return &iocc->cpd;
2940 }
2941 
2942 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2943 {
2944 	kfree(container_of(cpd, struct ioc_cgrp, cpd));
2945 }
2946 
2947 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2948 					     struct blkcg *blkcg)
2949 {
2950 	int levels = blkcg->css.cgroup->level + 1;
2951 	struct ioc_gq *iocg;
2952 
2953 	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2954 	if (!iocg)
2955 		return NULL;
2956 
2957 	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2958 	if (!iocg->pcpu_stat) {
2959 		kfree(iocg);
2960 		return NULL;
2961 	}
2962 
2963 	return &iocg->pd;
2964 }
2965 
2966 static void ioc_pd_init(struct blkg_policy_data *pd)
2967 {
2968 	struct ioc_gq *iocg = pd_to_iocg(pd);
2969 	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2970 	struct ioc *ioc = q_to_ioc(blkg->q);
2971 	struct ioc_now now;
2972 	struct blkcg_gq *tblkg;
2973 	unsigned long flags;
2974 
2975 	ioc_now(ioc, &now);
2976 
2977 	iocg->ioc = ioc;
2978 	atomic64_set(&iocg->vtime, now.vnow);
2979 	atomic64_set(&iocg->done_vtime, now.vnow);
2980 	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2981 	INIT_LIST_HEAD(&iocg->active_list);
2982 	INIT_LIST_HEAD(&iocg->walk_list);
2983 	INIT_LIST_HEAD(&iocg->surplus_list);
2984 	iocg->hweight_active = WEIGHT_ONE;
2985 	iocg->hweight_inuse = WEIGHT_ONE;
2986 
2987 	init_waitqueue_head(&iocg->waitq);
2988 	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2989 	iocg->waitq_timer.function = iocg_waitq_timer_fn;
2990 
2991 	iocg->level = blkg->blkcg->css.cgroup->level;
2992 
2993 	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2994 		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2995 		iocg->ancestors[tiocg->level] = tiocg;
2996 	}
2997 
2998 	spin_lock_irqsave(&ioc->lock, flags);
2999 	weight_updated(iocg, &now);
3000 	spin_unlock_irqrestore(&ioc->lock, flags);
3001 }
3002 
3003 static void ioc_pd_free(struct blkg_policy_data *pd)
3004 {
3005 	struct ioc_gq *iocg = pd_to_iocg(pd);
3006 	struct ioc *ioc = iocg->ioc;
3007 	unsigned long flags;
3008 
3009 	if (ioc) {
3010 		spin_lock_irqsave(&ioc->lock, flags);
3011 
3012 		if (!list_empty(&iocg->active_list)) {
3013 			struct ioc_now now;
3014 
3015 			ioc_now(ioc, &now);
3016 			propagate_weights(iocg, 0, 0, false, &now);
3017 			list_del_init(&iocg->active_list);
3018 		}
3019 
3020 		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
3021 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3022 
3023 		spin_unlock_irqrestore(&ioc->lock, flags);
3024 
3025 		hrtimer_cancel(&iocg->waitq_timer);
3026 	}
3027 	free_percpu(iocg->pcpu_stat);
3028 	kfree(iocg);
3029 }
3030 
3031 static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3032 {
3033 	struct ioc_gq *iocg = pd_to_iocg(pd);
3034 	struct ioc *ioc = iocg->ioc;
3035 
3036 	if (!ioc->enabled)
3037 		return false;
3038 
3039 	if (iocg->level == 0) {
3040 		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3041 			ioc->vtime_base_rate * 10000,
3042 			VTIME_PER_USEC);
3043 		seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3044 	}
3045 
3046 	seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3047 
3048 	if (blkcg_debug_stats)
3049 		seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3050 			iocg->last_stat.wait_us,
3051 			iocg->last_stat.indebt_us,
3052 			iocg->last_stat.indelay_us);
3053 	return true;
3054 }
3055 
3056 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3057 			     int off)
3058 {
3059 	const char *dname = blkg_dev_name(pd->blkg);
3060 	struct ioc_gq *iocg = pd_to_iocg(pd);
3061 
3062 	if (dname && iocg->cfg_weight)
3063 		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3064 	return 0;
3065 }
3066 
3067 
3068 static int ioc_weight_show(struct seq_file *sf, void *v)
3069 {
3070 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3071 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3072 
3073 	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3074 	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3075 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3076 	return 0;
3077 }
3078 
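/*
 * Accepted io.cost.weight syntax (a rough summary of the parsing below, not
 * an authoritative spec): either "default $WEIGHT" / "$WEIGHT" to set the
 * cgroup-wide default, or "$MAJ:$MIN $WEIGHT" to set a per-device weight,
 * with a value of "default" clearing the per-device setting so the cgroup
 * default applies again.
 */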
3079 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3080 				size_t nbytes, loff_t off)
3081 {
3082 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
3083 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3084 	struct blkg_conf_ctx ctx;
3085 	struct ioc_now now;
3086 	struct ioc_gq *iocg;
3087 	u32 v;
3088 	int ret;
3089 
3090 	if (!strchr(buf, ':')) {
3091 		struct blkcg_gq *blkg;
3092 
3093 		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3094 			return -EINVAL;
3095 
3096 		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3097 			return -EINVAL;
3098 
3099 		spin_lock_irq(&blkcg->lock);
3100 		iocc->dfl_weight = v * WEIGHT_ONE;
3101 		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3102 			struct ioc_gq *iocg = blkg_to_iocg(blkg);
3103 
3104 			if (iocg) {
3105 				spin_lock(&iocg->ioc->lock);
3106 				ioc_now(iocg->ioc, &now);
3107 				weight_updated(iocg, &now);
3108 				spin_unlock(&iocg->ioc->lock);
3109 			}
3110 		}
3111 		spin_unlock_irq(&blkcg->lock);
3112 
3113 		return nbytes;
3114 	}
3115 
3116 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3117 	if (ret)
3118 		return ret;
3119 
3120 	iocg = blkg_to_iocg(ctx.blkg);
3121 
3122 	if (!strncmp(ctx.body, "default", 7)) {
3123 		v = 0;
3124 	} else {
3125 		if (!sscanf(ctx.body, "%u", &v))
3126 			goto einval;
3127 		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3128 			goto einval;
3129 	}
3130 
3131 	spin_lock(&iocg->ioc->lock);
3132 	iocg->cfg_weight = v * WEIGHT_ONE;
3133 	ioc_now(iocg->ioc, &now);
3134 	weight_updated(iocg, &now);
3135 	spin_unlock(&iocg->ioc->lock);
3136 
3137 	blkg_conf_finish(&ctx);
3138 	return nbytes;
3139 
3140 einval:
3141 	blkg_conf_finish(&ctx);
3142 	return -EINVAL;
3143 }
3144 
3145 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3146 			  int off)
3147 {
3148 	const char *dname = blkg_dev_name(pd->blkg);
3149 	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3150 
3151 	if (!dname)
3152 		return 0;
3153 
3154 	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3155 		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3156 		   ioc->params.qos[QOS_RPPM] / 10000,
3157 		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
3158 		   ioc->params.qos[QOS_RLAT],
3159 		   ioc->params.qos[QOS_WPPM] / 10000,
3160 		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
3161 		   ioc->params.qos[QOS_WLAT],
3162 		   ioc->params.qos[QOS_MIN] / 10000,
3163 		   ioc->params.qos[QOS_MIN] % 10000 / 100,
3164 		   ioc->params.qos[QOS_MAX] / 10000,
3165 		   ioc->params.qos[QOS_MAX] % 10000 / 100);
3166 	return 0;
3167 }
3168 
3169 static int ioc_qos_show(struct seq_file *sf, void *v)
3170 {
3171 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3172 
3173 	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3174 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3175 	return 0;
3176 }
3177 
3178 static const match_table_t qos_ctrl_tokens = {
3179 	{ QOS_ENABLE,		"enable=%u"	},
3180 	{ QOS_CTRL,		"ctrl=%s"	},
3181 	{ NR_QOS_CTRL_PARAMS,	NULL		},
3182 };
3183 
3184 static const match_table_t qos_tokens = {
3185 	{ QOS_RPPM,		"rpct=%s"	},
3186 	{ QOS_RLAT,		"rlat=%u"	},
3187 	{ QOS_WPPM,		"wpct=%s"	},
3188 	{ QOS_WLAT,		"wlat=%u"	},
3189 	{ QOS_MIN,		"min=%s"	},
3190 	{ QOS_MAX,		"max=%s"	},
3191 	{ NR_QOS_PARAMS,	NULL		},
3192 };
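/*
 * Example io.cost.qos line (illustrative values only), built from the tokens
 * above: "8:16 enable=1 ctrl=user rpct=95.00 rlat=75000 wpct=95.00
 * wlat=150000 min=50.00 max=150.00" - the percentages take two decimal
 * places, the latency targets are in usecs and min/max bound vrate in
 * percent, as enforced by the parsing below.
 */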
3193 
3194 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3195 			     size_t nbytes, loff_t off)
3196 {
3197 	struct block_device *bdev;
3198 	struct ioc *ioc;
3199 	u32 qos[NR_QOS_PARAMS];
3200 	bool enable, user;
3201 	char *p;
3202 	int ret;
3203 
3204 	bdev = blkcg_conf_open_bdev(&input);
3205 	if (IS_ERR(bdev))
3206 		return PTR_ERR(bdev);
3207 
3208 	ioc = q_to_ioc(bdev->bd_disk->queue);
3209 	if (!ioc) {
3210 		ret = blk_iocost_init(bdev->bd_disk->queue);
3211 		if (ret)
3212 			goto err;
3213 		ioc = q_to_ioc(bdev->bd_disk->queue);
3214 	}
3215 
3216 	spin_lock_irq(&ioc->lock);
3217 	memcpy(qos, ioc->params.qos, sizeof(qos));
3218 	enable = ioc->enabled;
3219 	user = ioc->user_qos_params;
3220 	spin_unlock_irq(&ioc->lock);
3221 
3222 	while ((p = strsep(&input, " \t\n"))) {
3223 		substring_t args[MAX_OPT_ARGS];
3224 		char buf[32];
3225 		int tok;
3226 		s64 v;
3227 
3228 		if (!*p)
3229 			continue;
3230 
3231 		switch (match_token(p, qos_ctrl_tokens, args)) {
3232 		case QOS_ENABLE:
3233 			match_u64(&args[0], &v);
3234 			enable = v;
3235 			continue;
3236 		case QOS_CTRL:
3237 			match_strlcpy(buf, &args[0], sizeof(buf));
3238 			if (!strcmp(buf, "auto"))
3239 				user = false;
3240 			else if (!strcmp(buf, "user"))
3241 				user = true;
3242 			else
3243 				goto einval;
3244 			continue;
3245 		}
3246 
3247 		tok = match_token(p, qos_tokens, args);
3248 		switch (tok) {
3249 		case QOS_RPPM:
3250 		case QOS_WPPM:
3251 			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3252 			    sizeof(buf))
3253 				goto einval;
3254 			if (cgroup_parse_float(buf, 2, &v))
3255 				goto einval;
3256 			if (v < 0 || v > 10000)
3257 				goto einval;
3258 			qos[tok] = v * 100;
3259 			break;
3260 		case QOS_RLAT:
3261 		case QOS_WLAT:
3262 			if (match_u64(&args[0], &v))
3263 				goto einval;
3264 			qos[tok] = v;
3265 			break;
3266 		case QOS_MIN:
3267 		case QOS_MAX:
3268 			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3269 			    sizeof(buf))
3270 				goto einval;
3271 			if (cgroup_parse_float(buf, 2, &v))
3272 				goto einval;
3273 			if (v < 0)
3274 				goto einval;
3275 			qos[tok] = clamp_t(s64, v * 100,
3276 					   VRATE_MIN_PPM, VRATE_MAX_PPM);
3277 			break;
3278 		default:
3279 			goto einval;
3280 		}
3281 		user = true;
3282 	}
3283 
3284 	if (qos[QOS_MIN] > qos[QOS_MAX])
3285 		goto einval;
3286 
3287 	spin_lock_irq(&ioc->lock);
3288 
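	/*
	 * Enabling the controller also turns on request alloc-time stamping
	 * and blk-stat accounting, which the latency tracking relies on;
	 * disabling only clears the alloc-time flag again.
	 */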
3289 	if (enable) {
3290 		blk_stat_enable_accounting(ioc->rqos.q);
3291 		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3292 		ioc->enabled = true;
3293 	} else {
3294 		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3295 		ioc->enabled = false;
3296 	}
3297 
3298 	if (user) {
3299 		memcpy(ioc->params.qos, qos, sizeof(qos));
3300 		ioc->user_qos_params = true;
3301 	} else {
3302 		ioc->user_qos_params = false;
3303 	}
3304 
3305 	ioc_refresh_params(ioc, true);
3306 	spin_unlock_irq(&ioc->lock);
3307 
3308 	blkdev_put_no_open(bdev);
3309 	return nbytes;
3310 einval:
3311 	ret = -EINVAL;
3312 err:
3313 	blkdev_put_no_open(bdev);
3314 	return ret;
3315 }
3316 
3317 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3318 				 struct blkg_policy_data *pd, int off)
3319 {
3320 	const char *dname = blkg_dev_name(pd->blkg);
3321 	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3322 	u64 *u = ioc->params.i_lcoefs;
3323 
3324 	if (!dname)
3325 		return 0;
3326 
3327 	seq_printf(sf, "%s ctrl=%s model=linear "
3328 		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
3329 		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3330 		   dname, ioc->user_cost_model ? "user" : "auto",
3331 		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3332 		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3333 	return 0;
3334 }
3335 
3336 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3337 {
3338 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3339 
3340 	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3341 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3342 	return 0;
3343 }
3344 
3345 static const match_table_t cost_ctrl_tokens = {
3346 	{ COST_CTRL,		"ctrl=%s"	},
3347 	{ COST_MODEL,		"model=%s"	},
3348 	{ NR_COST_CTRL_PARAMS,	NULL		},
3349 };
3350 
3351 static const match_table_t i_lcoef_tokens = {
3352 	{ I_LCOEF_RBPS,		"rbps=%u"	},
3353 	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
3354 	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
3355 	{ I_LCOEF_WBPS,		"wbps=%u"	},
3356 	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
3357 	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
3358 	{ NR_I_LCOEFS,		NULL		},
3359 };
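
/*
 * ioc_cost_model_write() below takes the same "MAJ:MIN token=value ..." form
 * as io.cost.qos.  A sketch with made-up linear model coefficients:
 *
 *   # echo "8:16 ctrl=user model=linear rbps=400000000 rseqiops=100000 rrandiops=20000 wbps=300000000 wseqiops=80000 wrandiops=15000" > /sys/fs/cgroup/io.cost.model
 */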
3360 
3361 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3362 				    size_t nbytes, loff_t off)
3363 {
3364 	struct block_device *bdev;
3365 	struct ioc *ioc;
3366 	u64 u[NR_I_LCOEFS];
3367 	bool user;
3368 	char *p;
3369 	int ret;
3370 
3371 	bdev = blkcg_conf_open_bdev(&input);
3372 	if (IS_ERR(bdev))
3373 		return PTR_ERR(bdev);
3374 
3375 	ioc = q_to_ioc(bdev->bd_disk->queue);
3376 	if (!ioc) {
3377 		ret = blk_iocost_init(bdev->bd_disk->queue);
3378 		if (ret)
3379 			goto err;
3380 		ioc = q_to_ioc(bdev->bd_disk->queue);
3381 	}
3382 
3383 	spin_lock_irq(&ioc->lock);
3384 	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3385 	user = ioc->user_cost_model;
3386 	spin_unlock_irq(&ioc->lock);
3387 
3388 	while ((p = strsep(&input, " \t\n"))) {
3389 		substring_t args[MAX_OPT_ARGS];
3390 		char buf[32];
3391 		int tok;
3392 		u64 v;
3393 
3394 		if (!*p)
3395 			continue;
3396 
3397 		switch (match_token(p, cost_ctrl_tokens, args)) {
3398 		case COST_CTRL:
3399 			match_strlcpy(buf, &args[0], sizeof(buf));
3400 			if (!strcmp(buf, "auto"))
3401 				user = false;
3402 			else if (!strcmp(buf, "user"))
3403 				user = true;
3404 			else
3405 				goto einval;
3406 			continue;
3407 		case COST_MODEL:
3408 			match_strlcpy(buf, &args[0], sizeof(buf));
3409 			if (strcmp(buf, "linear"))
3410 				goto einval;
3411 			continue;
3412 		}
3413 
3414 		tok = match_token(p, i_lcoef_tokens, args);
3415 		if (tok == NR_I_LCOEFS)
3416 			goto einval;
3417 		if (match_u64(&args[0], &v))
3418 			goto einval;
3419 		u[tok] = v;
3420 		user = true;
3421 	}
3422 
3423 	spin_lock_irq(&ioc->lock);
3424 	if (user) {
3425 		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3426 		ioc->user_cost_model = true;
3427 	} else {
3428 		ioc->user_cost_model = false;
3429 	}
3430 	ioc_refresh_params(ioc, true);
3431 	spin_unlock_irq(&ioc->lock);
3432 
3433 	blkdev_put_no_open(bdev);
3434 	return nbytes;
3435 
3436 einval:
3437 	ret = -EINVAL;
3438 err:
3439 	blkdev_put_no_open(bdev);
3440 	return ret;
3441 }
3442 
3443 static struct cftype ioc_files[] = {
3444 	{
3445 		.name = "weight",
3446 		.flags = CFTYPE_NOT_ON_ROOT,
3447 		.seq_show = ioc_weight_show,
3448 		.write = ioc_weight_write,
3449 	},
3450 	{
3451 		.name = "cost.qos",
3452 		.flags = CFTYPE_ONLY_ON_ROOT,
3453 		.seq_show = ioc_qos_show,
3454 		.write = ioc_qos_write,
3455 	},
3456 	{
3457 		.name = "cost.model",
3458 		.flags = CFTYPE_ONLY_ON_ROOT,
3459 		.seq_show = ioc_cost_model_show,
3460 		.write = ioc_cost_model_write,
3461 	},
3462 	{}
3463 };
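
/*
 * With the "io." prefix added by the cgroup core for the io controller,
 * the entries above surface as io.weight (non-root cgroups) and
 * io.cost.qos / io.cost.model (root cgroup only).
 */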
3464 
3465 static struct blkcg_policy blkcg_policy_iocost = {
3466 	.dfl_cftypes	= ioc_files,
3467 	.cpd_alloc_fn	= ioc_cpd_alloc,
3468 	.cpd_free_fn	= ioc_cpd_free,
3469 	.pd_alloc_fn	= ioc_pd_alloc,
3470 	.pd_init_fn	= ioc_pd_init,
3471 	.pd_free_fn	= ioc_pd_free,
3472 	.pd_stat_fn	= ioc_pd_stat,
3473 };
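
/*
 * The cpd callbacks manage per-blkcg state such as the default weight,
 * while the pd callbacks manage the per-cgroup, per-device ioc_gq instances
 * the controller operates on; pd_stat_fn appends iocost details to io.stat.
 */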
3474 
3475 static int __init ioc_init(void)
3476 {
3477 	return blkcg_policy_register(&blkcg_policy_iocost);
3478 }
3479 
3480 static void __exit ioc_exit(void)
3481 {
3482 	blkcg_policy_unregister(&blkcg_policy_iocost);
3483 }
3484 
3485 module_init(ioc_init);
3486 module_exit(ioc_exit);
3487