• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * IO cost model based controller.
4  *
5  * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6  * Copyright (C) 2019 Andy Newell <newella@fb.com>
7  * Copyright (C) 2019 Facebook
8  *
9  * One challenge of controlling IO resources is the lack of trivially
10  * observable cost metric.  This is distinguished from CPU and memory where
11  * wallclock time and the number of bytes can serve as accurate enough
12  * approximations.
13  *
14  * Bandwidth and iops are the most commonly used metrics for IO devices but
15  * depending on the type and specifics of the device, different IO patterns
16  * easily lead to multiple orders of magnitude variations rendering them
17  * useless for the purpose of IO capacity distribution.  While on-device
18  * time, with a lot of clutches, could serve as a useful approximation for
19  * non-queued rotational devices, this is no longer viable with modern
20  * devices, even the rotational ones.
21  *
22  * While there is no cost metric we can trivially observe, it isn't a
23  * complete mystery.  For example, on a rotational device, seek cost
24  * dominates while a contiguous transfer contributes a smaller amount
25  * proportional to the size.  If we can characterize at least the relative
26  * costs of these different types of IOs, it should be possible to
27  * implement a reasonable work-conserving proportional IO resource
28  * distribution.
29  *
30  * 1. IO Cost Model
31  *
32  * IO cost model estimates the cost of an IO given its basic parameters and
33  * history (e.g. the end sector of the last IO).  The cost is measured in
34  * device time.  If a given IO is estimated to cost 10ms, the device should
35  * be able to process ~100 of those IOs in a second.
36  *
37  * Currently, there's only one builtin cost model - linear.  Each IO is
38  * classified as sequential or random and given a base cost accordingly.
39  * On top of that, a size cost proportional to the length of the IO is
40  * added.  While simple, this model captures the operational
41  * characteristics of a wide varienty of devices well enough.  Default
42  * paramters for several different classes of devices are provided and the
43  * parameters can be configured from userspace via
44  * /sys/fs/cgroup/io.cost.model.
45  *
46  * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47  * device-specific coefficients.
48  *
49  * 2. Control Strategy
50  *
51  * The device virtual time (vtime) is used as the primary control metric.
52  * The control strategy is composed of the following three parts.
53  *
54  * 2-1. Vtime Distribution
55  *
56  * When a cgroup becomes active in terms of IOs, its hierarchical share is
57  * calculated.  Please consider the following hierarchy where the numbers
58  * inside parentheses denote the configured weights.
59  *
60  *           root
61  *         /       \
62  *      A (w:100)  B (w:300)
63  *      /       \
64  *  A0 (w:100)  A1 (w:100)
65  *
66  * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67  * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
68  * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
69  * 12.5% each.  The distribution mechanism only cares about these flattened
70  * shares.  They're called hweights (hierarchical weights) and always add
71  * upto 1 (WEIGHT_ONE).
72  *
73  * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74  * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75  * against the device vtime - an IO which takes 10ms on the underlying
76  * device is considered to take 80ms on A0.
77  *
78  * This constitutes the basis of IO capacity distribution.  Each cgroup's
79  * vtime is running at a rate determined by its hweight.  A cgroup tracks
80  * the vtime consumed by past IOs and can issue a new IO iff doing so
81  * wouldn't outrun the current device vtime.  Otherwise, the IO is
82  * suspended until the vtime has progressed enough to cover it.
83  *
84  * 2-2. Vrate Adjustment
85  *
86  * It's unrealistic to expect the cost model to be perfect.  There are too
87  * many devices and even on the same device the overall performance
88  * fluctuates depending on numerous factors such as IO mixture and device
89  * internal garbage collection.  The controller needs to adapt dynamically.
90  *
91  * This is achieved by adjusting the overall IO rate according to how busy
92  * the device is.  If the device becomes overloaded, we're sending down too
93  * many IOs and should generally slow down.  If there are waiting issuers
94  * but the device isn't saturated, we're issuing too few and should
95  * generally speed up.
96  *
97  * To slow down, we lower the vrate - the rate at which the device vtime
98  * passes compared to the wall clock.  For example, if the vtime is running
99  * at the vrate of 75%, all cgroups added up would only be able to issue
100  * 750ms worth of IOs per second, and vice-versa for speeding up.
101  *
102  * Device business is determined using two criteria - rq wait and
103  * completion latencies.
104  *
105  * When a device gets saturated, the on-device and then the request queues
106  * fill up and a bio which is ready to be issued has to wait for a request
107  * to become available.  When this delay becomes noticeable, it's a clear
108  * indication that the device is saturated and we lower the vrate.  This
109  * saturation signal is fairly conservative as it only triggers when both
110  * hardware and software queues are filled up, and is used as the default
111  * busy signal.
112  *
113  * As devices can have deep queues and be unfair in how the queued commands
114  * are executed, soley depending on rq wait may not result in satisfactory
115  * control quality.  For a better control quality, completion latency QoS
116  * parameters can be configured so that the device is considered saturated
117  * if N'th percentile completion latency rises above the set point.
118  *
119  * The completion latency requirements are a function of both the
120  * underlying device characteristics and the desired IO latency quality of
121  * service.  There is an inherent trade-off - the tighter the latency QoS,
122  * the higher the bandwidth lossage.  Latency QoS is disabled by default
123  * and can be set through /sys/fs/cgroup/io.cost.qos.
124  *
125  * 2-3. Work Conservation
126  *
127  * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
128  * periodically while B is sending out enough parallel IOs to saturate the
129  * device on its own.  Let's say A's usage amounts to 100ms worth of IO
130  * cost per second, i.e., 10% of the device capacity.  The naive
131  * distribution of half and half would lead to 60% utilization of the
132  * device, a significant reduction in the total amount of work done
133  * compared to free-for-all competition.  This is too high a cost to pay
134  * for IO control.
135  *
136  * To conserve the total amount of work done, we keep track of how much
137  * each active cgroup is actually using and yield part of its weight if
138  * there are other cgroups which can make use of it.  In the above case,
139  * A's weight will be lowered so that it hovers above the actual usage and
140  * B would be able to use the rest.
141  *
142  * As we don't want to penalize a cgroup for donating its weight, the
143  * surplus weight adjustment factors in a margin and has an immediate
144  * snapback mechanism in case the cgroup needs more IO vtime for itself.
145  *
146  * Note that adjusting down surplus weights has the same effects as
147  * accelerating vtime for other cgroups and work conservation can also be
148  * implemented by adjusting vrate dynamically.  However, squaring who can
149  * donate and should take back how much requires hweight propagations
150  * anyway making it easier to implement and understand as a separate
151  * mechanism.
152  *
153  * 3. Monitoring
154  *
155  * Instead of debugfs or other clumsy monitoring mechanisms, this
156  * controller uses a drgn based monitoring script -
157  * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
158  * https://github.com/osandov/drgn.  The ouput looks like the following.
159  *
160  *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161  *                 active      weight      hweight% inflt% dbt  delay usages%
162  *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
163  *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
164  *
165  * - per	: Timer period
166  * - cur_per	: Internal wall and device vtime clock
167  * - vrate	: Device virtual time rate against wall clock
168  * - weight	: Surplus-adjusted and configured weights
169  * - hweight	: Surplus-adjusted and configured hierarchical weights
170  * - inflt	: The percentage of in-flight IO cost at the end of last period
171  * - del_ms	: Deferred issuer delay induction level and duration
172  * - usages	: Usage history
173  */
174 
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <linux/blk-cgroup.h>
182 #include <asm/local.h>
183 #include <asm/local64.h>
184 #include "blk-rq-qos.h"
185 #include "blk-stat.h"
186 #include "blk-wbt.h"
187 
188 #ifdef CONFIG_TRACEPOINTS
189 
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194 
195 #define TRACE_IOCG_PATH(type, iocg, ...)					\
196 	do {									\
197 		unsigned long flags;						\
198 		if (trace_iocost_##type##_enabled()) {				\
199 			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
200 			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
201 				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
202 			trace_iocost_##type(iocg, trace_iocg_path,		\
203 					      ##__VA_ARGS__);			\
204 			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
205 		}								\
206 	} while (0)
207 
208 #else	/* CONFIG_TRACE_POINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
210 #endif	/* CONFIG_TRACE_POINTS */
211 
212 enum {
213 	MILLION			= 1000000,
214 
215 	/* timer period is calculated from latency requirements, bound it */
216 	MIN_PERIOD		= USEC_PER_MSEC,
217 	MAX_PERIOD		= USEC_PER_SEC,
218 
219 	/*
220 	 * iocg->vtime is targeted at 50% behind the device vtime, which
221 	 * serves as its IO credit buffer.  Surplus weight adjustment is
222 	 * immediately canceled if the vtime margin runs below 10%.
223 	 */
224 	MARGIN_MIN_PCT		= 10,
225 	MARGIN_LOW_PCT		= 20,
226 	MARGIN_TARGET_PCT	= 50,
227 
228 	INUSE_ADJ_STEP_PCT	= 25,
229 
230 	/* Have some play in timer operations */
231 	TIMER_SLACK_PCT		= 1,
232 
233 	/* 1/64k is granular enough and can easily be handled w/ u32 */
234 	WEIGHT_ONE		= 1 << 16,
235 };
236 
237 enum {
238 	/*
239 	 * As vtime is used to calculate the cost of each IO, it needs to
240 	 * be fairly high precision.  For example, it should be able to
241 	 * represent the cost of a single page worth of discard with
242 	 * suffificient accuracy.  At the same time, it should be able to
243 	 * represent reasonably long enough durations to be useful and
244 	 * convenient during operation.
245 	 *
246 	 * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
247 	 * granularity and days of wrap-around time even at extreme vrates.
248 	 */
249 	VTIME_PER_SEC_SHIFT	= 37,
250 	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
251 	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
252 	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,
253 
254 	/* bound vrate adjustments within two orders of magnitude */
255 	VRATE_MIN_PPM		= 10000,	/* 1% */
256 	VRATE_MAX_PPM		= 100000000,	/* 10000% */
257 
258 	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259 	VRATE_CLAMP_ADJ_PCT	= 4,
260 
261 	/* switch iff the conditions are met for longer than this */
262 	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
263 };
264 
265 enum {
266 	/* if IOs end up waiting for requests, issue less */
267 	RQ_WAIT_BUSY_PCT	= 5,
268 
269 	/* unbusy hysterisis */
270 	UNBUSY_THR_PCT		= 75,
271 
272 	/*
273 	 * The effect of delay is indirect and non-linear and a huge amount of
274 	 * future debt can accumulate abruptly while unthrottled. Linearly scale
275 	 * up delay as debt is going up and then let it decay exponentially.
276 	 * This gives us quick ramp ups while delay is accumulating and long
277 	 * tails which can help reducing the frequency of debt explosions on
278 	 * unthrottle. The parameters are experimentally determined.
279 	 *
280 	 * The delay mechanism provides adequate protection and behavior in many
281 	 * cases. However, this is far from ideal and falls shorts on both
282 	 * fronts. The debtors are often throttled too harshly costing a
283 	 * significant level of fairness and possibly total work while the
284 	 * protection against their impacts on the system can be choppy and
285 	 * unreliable.
286 	 *
287 	 * The shortcoming primarily stems from the fact that, unlike for page
288 	 * cache, the kernel doesn't have well-defined back-pressure propagation
289 	 * mechanism and policies for anonymous memory. Fully addressing this
290 	 * issue will likely require substantial improvements in the area.
291 	 */
292 	MIN_DELAY_THR_PCT	= 500,
293 	MAX_DELAY_THR_PCT	= 25000,
294 	MIN_DELAY		= 250,
295 	MAX_DELAY		= 250 * USEC_PER_MSEC,
296 
297 	/* halve debts if avg usage over 100ms is under 50% */
298 	DFGV_USAGE_PCT		= 50,
299 	DFGV_PERIOD		= 100 * USEC_PER_MSEC,
300 
301 	/* don't let cmds which take a very long time pin lagging for too long */
302 	MAX_LAGGING_PERIODS	= 10,
303 
304 	/*
305 	 * Count IO size in 4k pages.  The 12bit shift helps keeping
306 	 * size-proportional components of cost calculation in closer
307 	 * numbers of digits to per-IO cost components.
308 	 */
309 	IOC_PAGE_SHIFT		= 12,
310 	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
311 	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,
312 
313 	/* if apart further than 16M, consider randio for linear model */
314 	LCOEF_RANDIO_PAGES	= 4096,
315 };
316 
317 enum ioc_running {
318 	IOC_IDLE,
319 	IOC_RUNNING,
320 	IOC_STOP,
321 };
322 
323 /* io.cost.qos controls including per-dev enable of the whole controller */
324 enum {
325 	QOS_ENABLE,
326 	QOS_CTRL,
327 	NR_QOS_CTRL_PARAMS,
328 };
329 
330 /* io.cost.qos params */
331 enum {
332 	QOS_RPPM,
333 	QOS_RLAT,
334 	QOS_WPPM,
335 	QOS_WLAT,
336 	QOS_MIN,
337 	QOS_MAX,
338 	NR_QOS_PARAMS,
339 };
340 
341 /* io.cost.model controls */
342 enum {
343 	COST_CTRL,
344 	COST_MODEL,
345 	NR_COST_CTRL_PARAMS,
346 };
347 
348 /* builtin linear cost model coefficients */
349 enum {
350 	I_LCOEF_RBPS,
351 	I_LCOEF_RSEQIOPS,
352 	I_LCOEF_RRANDIOPS,
353 	I_LCOEF_WBPS,
354 	I_LCOEF_WSEQIOPS,
355 	I_LCOEF_WRANDIOPS,
356 	NR_I_LCOEFS,
357 };
358 
359 enum {
360 	LCOEF_RPAGE,
361 	LCOEF_RSEQIO,
362 	LCOEF_RRANDIO,
363 	LCOEF_WPAGE,
364 	LCOEF_WSEQIO,
365 	LCOEF_WRANDIO,
366 	NR_LCOEFS,
367 };
368 
369 enum {
370 	AUTOP_INVALID,
371 	AUTOP_HDD,
372 	AUTOP_SSD_QD1,
373 	AUTOP_SSD_DFL,
374 	AUTOP_SSD_FAST,
375 };
376 
377 struct ioc_gq;
378 
379 struct ioc_params {
380 	u32				qos[NR_QOS_PARAMS];
381 	u64				i_lcoefs[NR_I_LCOEFS];
382 	u64				lcoefs[NR_LCOEFS];
383 	u32				too_fast_vrate_pct;
384 	u32				too_slow_vrate_pct;
385 };
386 
387 struct ioc_margins {
388 	s64				min;
389 	s64				low;
390 	s64				target;
391 };
392 
393 struct ioc_missed {
394 	local_t				nr_met;
395 	local_t				nr_missed;
396 	u32				last_met;
397 	u32				last_missed;
398 };
399 
400 struct ioc_pcpu_stat {
401 	struct ioc_missed		missed[2];
402 
403 	local64_t			rq_wait_ns;
404 	u64				last_rq_wait_ns;
405 };
406 
407 /* per device */
408 struct ioc {
409 	struct rq_qos			rqos;
410 
411 	bool				enabled;
412 
413 	struct ioc_params		params;
414 	struct ioc_margins		margins;
415 	u32				period_us;
416 	u32				timer_slack_ns;
417 	u64				vrate_min;
418 	u64				vrate_max;
419 
420 	spinlock_t			lock;
421 	struct timer_list		timer;
422 	struct list_head		active_iocgs;	/* active cgroups */
423 	struct ioc_pcpu_stat __percpu	*pcpu_stat;
424 
425 	enum ioc_running		running;
426 	atomic64_t			vtime_rate;
427 	u64				vtime_base_rate;
428 	s64				vtime_err;
429 
430 	seqcount_spinlock_t		period_seqcount;
431 	u64				period_at;	/* wallclock starttime */
432 	u64				period_at_vtime; /* vtime starttime */
433 
434 	atomic64_t			cur_period;	/* inc'd each period */
435 	int				busy_level;	/* saturation history */
436 
437 	bool				weights_updated;
438 	atomic_t			hweight_gen;	/* for lazy hweights */
439 
440 	/* debt forgivness */
441 	u64				dfgv_period_at;
442 	u64				dfgv_period_rem;
443 	u64				dfgv_usage_us_sum;
444 
445 	u64				autop_too_fast_at;
446 	u64				autop_too_slow_at;
447 	int				autop_idx;
448 	bool				user_qos_params:1;
449 	bool				user_cost_model:1;
450 };
451 
452 struct iocg_pcpu_stat {
453 	local64_t			abs_vusage;
454 };
455 
456 struct iocg_stat {
457 	u64				usage_us;
458 	u64				wait_us;
459 	u64				indebt_us;
460 	u64				indelay_us;
461 };
462 
463 /* per device-cgroup pair */
464 struct ioc_gq {
465 	struct blkg_policy_data		pd;
466 	struct ioc			*ioc;
467 
468 	/*
469 	 * A iocg can get its weight from two sources - an explicit
470 	 * per-device-cgroup configuration or the default weight of the
471 	 * cgroup.  `cfg_weight` is the explicit per-device-cgroup
472 	 * configuration.  `weight` is the effective considering both
473 	 * sources.
474 	 *
475 	 * When an idle cgroup becomes active its `active` goes from 0 to
476 	 * `weight`.  `inuse` is the surplus adjusted active weight.
477 	 * `active` and `inuse` are used to calculate `hweight_active` and
478 	 * `hweight_inuse`.
479 	 *
480 	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
481 	 * surplus adjustments.
482 	 *
483 	 * `inuse` may be adjusted dynamically during period. `saved_*` are used
484 	 * to determine and track adjustments.
485 	 */
486 	u32				cfg_weight;
487 	u32				weight;
488 	u32				active;
489 	u32				inuse;
490 
491 	u32				last_inuse;
492 	s64				saved_margin;
493 
494 	sector_t			cursor;		/* to detect randio */
495 
496 	/*
497 	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
498 	 * issued.  If lagging behind device vtime, the delta represents
499 	 * the currently available IO budget.  If runnning ahead, the
500 	 * overage.
501 	 *
502 	 * `vtime_done` is the same but progressed on completion rather
503 	 * than issue.  The delta behind `vtime` represents the cost of
504 	 * currently in-flight IOs.
505 	 */
506 	atomic64_t			vtime;
507 	atomic64_t			done_vtime;
508 	u64				abs_vdebt;
509 
510 	/* current delay in effect and when it started */
511 	u64				delay;
512 	u64				delay_at;
513 
514 	/*
515 	 * The period this iocg was last active in.  Used for deactivation
516 	 * and invalidating `vtime`.
517 	 */
518 	atomic64_t			active_period;
519 	struct list_head		active_list;
520 
521 	/* see __propagate_weights() and current_hweight() for details */
522 	u64				child_active_sum;
523 	u64				child_inuse_sum;
524 	u64				child_adjusted_sum;
525 	int				hweight_gen;
526 	u32				hweight_active;
527 	u32				hweight_inuse;
528 	u32				hweight_donating;
529 	u32				hweight_after_donation;
530 
531 	struct list_head		walk_list;
532 	struct list_head		surplus_list;
533 
534 	struct wait_queue_head		waitq;
535 	struct hrtimer			waitq_timer;
536 
537 	/* timestamp at the latest activation */
538 	u64				activated_at;
539 
540 	/* statistics */
541 	struct iocg_pcpu_stat __percpu	*pcpu_stat;
542 	struct iocg_stat		local_stat;
543 	struct iocg_stat		desc_stat;
544 	struct iocg_stat		last_stat;
545 	u64				last_stat_abs_vusage;
546 	u64				usage_delta_us;
547 	u64				wait_since;
548 	u64				indebt_since;
549 	u64				indelay_since;
550 
551 	/* this iocg's depth in the hierarchy and ancestors including self */
552 	int				level;
553 	struct ioc_gq			*ancestors[];
554 };
555 
556 /* per cgroup */
557 struct ioc_cgrp {
558 	struct blkcg_policy_data	cpd;
559 	unsigned int			dfl_weight;
560 };
561 
562 struct ioc_now {
563 	u64				now_ns;
564 	u64				now;
565 	u64				vnow;
566 	u64				vrate;
567 };
568 
569 struct iocg_wait {
570 	struct wait_queue_entry		wait;
571 	struct bio			*bio;
572 	u64				abs_cost;
573 	bool				committed;
574 };
575 
576 struct iocg_wake_ctx {
577 	struct ioc_gq			*iocg;
578 	u32				hw_inuse;
579 	s64				vbudget;
580 };
581 
582 static const struct ioc_params autop[] = {
583 	[AUTOP_HDD] = {
584 		.qos				= {
585 			[QOS_RLAT]		=        250000, /* 250ms */
586 			[QOS_WLAT]		=        250000,
587 			[QOS_MIN]		= VRATE_MIN_PPM,
588 			[QOS_MAX]		= VRATE_MAX_PPM,
589 		},
590 		.i_lcoefs			= {
591 			[I_LCOEF_RBPS]		=     174019176,
592 			[I_LCOEF_RSEQIOPS]	=         41708,
593 			[I_LCOEF_RRANDIOPS]	=           370,
594 			[I_LCOEF_WBPS]		=     178075866,
595 			[I_LCOEF_WSEQIOPS]	=         42705,
596 			[I_LCOEF_WRANDIOPS]	=           378,
597 		},
598 	},
599 	[AUTOP_SSD_QD1] = {
600 		.qos				= {
601 			[QOS_RLAT]		=         25000, /* 25ms */
602 			[QOS_WLAT]		=         25000,
603 			[QOS_MIN]		= VRATE_MIN_PPM,
604 			[QOS_MAX]		= VRATE_MAX_PPM,
605 		},
606 		.i_lcoefs			= {
607 			[I_LCOEF_RBPS]		=     245855193,
608 			[I_LCOEF_RSEQIOPS]	=         61575,
609 			[I_LCOEF_RRANDIOPS]	=          6946,
610 			[I_LCOEF_WBPS]		=     141365009,
611 			[I_LCOEF_WSEQIOPS]	=         33716,
612 			[I_LCOEF_WRANDIOPS]	=         26796,
613 		},
614 	},
615 	[AUTOP_SSD_DFL] = {
616 		.qos				= {
617 			[QOS_RLAT]		=         25000, /* 25ms */
618 			[QOS_WLAT]		=         25000,
619 			[QOS_MIN]		= VRATE_MIN_PPM,
620 			[QOS_MAX]		= VRATE_MAX_PPM,
621 		},
622 		.i_lcoefs			= {
623 			[I_LCOEF_RBPS]		=     488636629,
624 			[I_LCOEF_RSEQIOPS]	=          8932,
625 			[I_LCOEF_RRANDIOPS]	=          8518,
626 			[I_LCOEF_WBPS]		=     427891549,
627 			[I_LCOEF_WSEQIOPS]	=         28755,
628 			[I_LCOEF_WRANDIOPS]	=         21940,
629 		},
630 		.too_fast_vrate_pct		=           500,
631 	},
632 	[AUTOP_SSD_FAST] = {
633 		.qos				= {
634 			[QOS_RLAT]		=          5000, /* 5ms */
635 			[QOS_WLAT]		=          5000,
636 			[QOS_MIN]		= VRATE_MIN_PPM,
637 			[QOS_MAX]		= VRATE_MAX_PPM,
638 		},
639 		.i_lcoefs			= {
640 			[I_LCOEF_RBPS]		=    3102524156LLU,
641 			[I_LCOEF_RSEQIOPS]	=        724816,
642 			[I_LCOEF_RRANDIOPS]	=        778122,
643 			[I_LCOEF_WBPS]		=    1742780862LLU,
644 			[I_LCOEF_WSEQIOPS]	=        425702,
645 			[I_LCOEF_WRANDIOPS]	=	 443193,
646 		},
647 		.too_slow_vrate_pct		=            10,
648 	},
649 };
650 
651 /*
652  * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
653  * vtime credit shortage and down on device saturation.
654  */
655 static u32 vrate_adj_pct[] =
656 	{ 0, 0, 0, 0,
657 	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
658 	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
659 	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
660 
661 static struct blkcg_policy blkcg_policy_iocost;
662 
663 /* accessors and helpers */
rqos_to_ioc(struct rq_qos * rqos)664 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
665 {
666 	return container_of(rqos, struct ioc, rqos);
667 }
668 
q_to_ioc(struct request_queue * q)669 static struct ioc *q_to_ioc(struct request_queue *q)
670 {
671 	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
672 }
673 
q_name(struct request_queue * q)674 static const char *q_name(struct request_queue *q)
675 {
676 	if (blk_queue_registered(q))
677 		return kobject_name(q->kobj.parent);
678 	else
679 		return "<unknown>";
680 }
681 
ioc_name(struct ioc * ioc)682 static const char __maybe_unused *ioc_name(struct ioc *ioc)
683 {
684 	return q_name(ioc->rqos.q);
685 }
686 
pd_to_iocg(struct blkg_policy_data * pd)687 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
688 {
689 	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
690 }
691 
blkg_to_iocg(struct blkcg_gq * blkg)692 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
693 {
694 	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
695 }
696 
iocg_to_blkg(struct ioc_gq * iocg)697 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
698 {
699 	return pd_to_blkg(&iocg->pd);
700 }
701 
blkcg_to_iocc(struct blkcg * blkcg)702 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
703 {
704 	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
705 			    struct ioc_cgrp, cpd);
706 }
707 
708 /*
709  * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
710  * weight, the more expensive each IO.  Must round up.
711  */
abs_cost_to_cost(u64 abs_cost,u32 hw_inuse)712 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
713 {
714 	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
715 }
716 
717 /*
718  * The inverse of abs_cost_to_cost().  Must round up.
719  */
cost_to_abs_cost(u64 cost,u32 hw_inuse)720 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
721 {
722 	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
723 }
724 
iocg_commit_bio(struct ioc_gq * iocg,struct bio * bio,u64 abs_cost,u64 cost)725 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
726 			    u64 abs_cost, u64 cost)
727 {
728 	struct iocg_pcpu_stat *gcs;
729 
730 	bio->bi_iocost_cost = cost;
731 	atomic64_add(cost, &iocg->vtime);
732 
733 	gcs = get_cpu_ptr(iocg->pcpu_stat);
734 	local64_add(abs_cost, &gcs->abs_vusage);
735 	put_cpu_ptr(gcs);
736 }
737 
iocg_lock(struct ioc_gq * iocg,bool lock_ioc,unsigned long * flags)738 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
739 {
740 	if (lock_ioc) {
741 		spin_lock_irqsave(&iocg->ioc->lock, *flags);
742 		spin_lock(&iocg->waitq.lock);
743 	} else {
744 		spin_lock_irqsave(&iocg->waitq.lock, *flags);
745 	}
746 }
747 
iocg_unlock(struct ioc_gq * iocg,bool unlock_ioc,unsigned long * flags)748 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
749 {
750 	if (unlock_ioc) {
751 		spin_unlock(&iocg->waitq.lock);
752 		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
753 	} else {
754 		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
755 	}
756 }
757 
758 #define CREATE_TRACE_POINTS
759 #include <trace/events/iocost.h>
760 
ioc_refresh_margins(struct ioc * ioc)761 static void ioc_refresh_margins(struct ioc *ioc)
762 {
763 	struct ioc_margins *margins = &ioc->margins;
764 	u32 period_us = ioc->period_us;
765 	u64 vrate = ioc->vtime_base_rate;
766 
767 	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
768 	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
769 	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
770 }
771 
772 /* latency Qos params changed, update period_us and all the dependent params */
ioc_refresh_period_us(struct ioc * ioc)773 static void ioc_refresh_period_us(struct ioc *ioc)
774 {
775 	u32 ppm, lat, multi, period_us;
776 
777 	lockdep_assert_held(&ioc->lock);
778 
779 	/* pick the higher latency target */
780 	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
781 		ppm = ioc->params.qos[QOS_RPPM];
782 		lat = ioc->params.qos[QOS_RLAT];
783 	} else {
784 		ppm = ioc->params.qos[QOS_WPPM];
785 		lat = ioc->params.qos[QOS_WLAT];
786 	}
787 
788 	/*
789 	 * We want the period to be long enough to contain a healthy number
790 	 * of IOs while short enough for granular control.  Define it as a
791 	 * multiple of the latency target.  Ideally, the multiplier should
792 	 * be scaled according to the percentile so that it would nominally
793 	 * contain a certain number of requests.  Let's be simpler and
794 	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
795 	 */
796 	if (ppm)
797 		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
798 	else
799 		multi = 2;
800 	period_us = multi * lat;
801 	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
802 
803 	/* calculate dependent params */
804 	ioc->period_us = period_us;
805 	ioc->timer_slack_ns = div64_u64(
806 		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
807 		100);
808 	ioc_refresh_margins(ioc);
809 }
810 
ioc_autop_idx(struct ioc * ioc)811 static int ioc_autop_idx(struct ioc *ioc)
812 {
813 	int idx = ioc->autop_idx;
814 	const struct ioc_params *p = &autop[idx];
815 	u32 vrate_pct;
816 	u64 now_ns;
817 
818 	/* rotational? */
819 	if (!blk_queue_nonrot(ioc->rqos.q))
820 		return AUTOP_HDD;
821 
822 	/* handle SATA SSDs w/ broken NCQ */
823 	if (blk_queue_depth(ioc->rqos.q) == 1)
824 		return AUTOP_SSD_QD1;
825 
826 	/* use one of the normal ssd sets */
827 	if (idx < AUTOP_SSD_DFL)
828 		return AUTOP_SSD_DFL;
829 
830 	/* if user is overriding anything, maintain what was there */
831 	if (ioc->user_qos_params || ioc->user_cost_model)
832 		return idx;
833 
834 	/* step up/down based on the vrate */
835 	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
836 	now_ns = ktime_get_ns();
837 
838 	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
839 		if (!ioc->autop_too_fast_at)
840 			ioc->autop_too_fast_at = now_ns;
841 		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
842 			return idx + 1;
843 	} else {
844 		ioc->autop_too_fast_at = 0;
845 	}
846 
847 	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
848 		if (!ioc->autop_too_slow_at)
849 			ioc->autop_too_slow_at = now_ns;
850 		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
851 			return idx - 1;
852 	} else {
853 		ioc->autop_too_slow_at = 0;
854 	}
855 
856 	return idx;
857 }
858 
859 /*
860  * Take the followings as input
861  *
862  *  @bps	maximum sequential throughput
863  *  @seqiops	maximum sequential 4k iops
864  *  @randiops	maximum random 4k iops
865  *
866  * and calculate the linear model cost coefficients.
867  *
868  *  *@page	per-page cost		1s / (@bps / 4096)
869  *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
870  *  @randiops	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
871  */
calc_lcoefs(u64 bps,u64 seqiops,u64 randiops,u64 * page,u64 * seqio,u64 * randio)872 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
873 			u64 *page, u64 *seqio, u64 *randio)
874 {
875 	u64 v;
876 
877 	*page = *seqio = *randio = 0;
878 
879 	if (bps) {
880 		u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
881 
882 		if (bps_pages)
883 			*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
884 		else
885 			*page = 1;
886 	}
887 
888 	if (seqiops) {
889 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
890 		if (v > *page)
891 			*seqio = v - *page;
892 	}
893 
894 	if (randiops) {
895 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
896 		if (v > *page)
897 			*randio = v - *page;
898 	}
899 }
900 
ioc_refresh_lcoefs(struct ioc * ioc)901 static void ioc_refresh_lcoefs(struct ioc *ioc)
902 {
903 	u64 *u = ioc->params.i_lcoefs;
904 	u64 *c = ioc->params.lcoefs;
905 
906 	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
907 		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
908 	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
909 		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
910 }
911 
ioc_refresh_params(struct ioc * ioc,bool force)912 static bool ioc_refresh_params(struct ioc *ioc, bool force)
913 {
914 	const struct ioc_params *p;
915 	int idx;
916 
917 	lockdep_assert_held(&ioc->lock);
918 
919 	idx = ioc_autop_idx(ioc);
920 	p = &autop[idx];
921 
922 	if (idx == ioc->autop_idx && !force)
923 		return false;
924 
925 	if (idx != ioc->autop_idx)
926 		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
927 
928 	ioc->autop_idx = idx;
929 	ioc->autop_too_fast_at = 0;
930 	ioc->autop_too_slow_at = 0;
931 
932 	if (!ioc->user_qos_params)
933 		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
934 	if (!ioc->user_cost_model)
935 		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
936 
937 	ioc_refresh_period_us(ioc);
938 	ioc_refresh_lcoefs(ioc);
939 
940 	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
941 					    VTIME_PER_USEC, MILLION);
942 	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
943 				   VTIME_PER_USEC, MILLION);
944 
945 	return true;
946 }
947 
948 /*
949  * When an iocg accumulates too much vtime or gets deactivated, we throw away
950  * some vtime, which lowers the overall device utilization. As the exact amount
951  * which is being thrown away is known, we can compensate by accelerating the
952  * vrate accordingly so that the extra vtime generated in the current period
953  * matches what got lost.
954  */
ioc_refresh_vrate(struct ioc * ioc,struct ioc_now * now)955 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
956 {
957 	s64 pleft = ioc->period_at + ioc->period_us - now->now;
958 	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
959 	s64 vcomp, vcomp_min, vcomp_max;
960 
961 	lockdep_assert_held(&ioc->lock);
962 
963 	/* we need some time left in this period */
964 	if (pleft <= 0)
965 		goto done;
966 
967 	/*
968 	 * Calculate how much vrate should be adjusted to offset the error.
969 	 * Limit the amount of adjustment and deduct the adjusted amount from
970 	 * the error.
971 	 */
972 	vcomp = -div64_s64(ioc->vtime_err, pleft);
973 	vcomp_min = -(ioc->vtime_base_rate >> 1);
974 	vcomp_max = ioc->vtime_base_rate;
975 	vcomp = clamp(vcomp, vcomp_min, vcomp_max);
976 
977 	ioc->vtime_err += vcomp * pleft;
978 
979 	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
980 done:
981 	/* bound how much error can accumulate */
982 	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
983 }
984 
985 /* take a snapshot of the current [v]time and vrate */
ioc_now(struct ioc * ioc,struct ioc_now * now)986 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
987 {
988 	unsigned seq;
989 
990 	now->now_ns = ktime_get();
991 	now->now = ktime_to_us(now->now_ns);
992 	now->vrate = atomic64_read(&ioc->vtime_rate);
993 
994 	/*
995 	 * The current vtime is
996 	 *
997 	 *   vtime at period start + (wallclock time since the start) * vrate
998 	 *
999 	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1000 	 * needed, they're seqcount protected.
1001 	 */
1002 	do {
1003 		seq = read_seqcount_begin(&ioc->period_seqcount);
1004 		now->vnow = ioc->period_at_vtime +
1005 			(now->now - ioc->period_at) * now->vrate;
1006 	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
1007 }
1008 
ioc_start_period(struct ioc * ioc,struct ioc_now * now)1009 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1010 {
1011 	WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1012 
1013 	write_seqcount_begin(&ioc->period_seqcount);
1014 	ioc->period_at = now->now;
1015 	ioc->period_at_vtime = now->vnow;
1016 	write_seqcount_end(&ioc->period_seqcount);
1017 
1018 	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1019 	add_timer(&ioc->timer);
1020 }
1021 
1022 /*
1023  * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1024  * weight sums and propagate upwards accordingly. If @save, the current margin
1025  * is saved to be used as reference for later inuse in-period adjustments.
1026  */
__propagate_weights(struct ioc_gq * iocg,u32 active,u32 inuse,bool save,struct ioc_now * now)1027 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1028 				bool save, struct ioc_now *now)
1029 {
1030 	struct ioc *ioc = iocg->ioc;
1031 	int lvl;
1032 
1033 	lockdep_assert_held(&ioc->lock);
1034 
1035 	/*
1036 	 * For an active leaf node, its inuse shouldn't be zero or exceed
1037 	 * @active. An active internal node's inuse is solely determined by the
1038 	 * inuse to active ratio of its children regardless of @inuse.
1039 	 */
1040 	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1041 		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1042 					   iocg->child_active_sum);
1043 	} else {
1044 		inuse = clamp_t(u32, inuse, 1, active);
1045 	}
1046 
1047 	iocg->last_inuse = iocg->inuse;
1048 	if (save)
1049 		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1050 
1051 	if (active == iocg->active && inuse == iocg->inuse)
1052 		return;
1053 
1054 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1055 		struct ioc_gq *parent = iocg->ancestors[lvl];
1056 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1057 		u32 parent_active = 0, parent_inuse = 0;
1058 
1059 		/* update the level sums */
1060 		parent->child_active_sum += (s32)(active - child->active);
1061 		parent->child_inuse_sum += (s32)(inuse - child->inuse);
1062 		/* apply the updates */
1063 		child->active = active;
1064 		child->inuse = inuse;
1065 
1066 		/*
1067 		 * The delta between inuse and active sums indicates that
1068 		 * that much of weight is being given away.  Parent's inuse
1069 		 * and active should reflect the ratio.
1070 		 */
1071 		if (parent->child_active_sum) {
1072 			parent_active = parent->weight;
1073 			parent_inuse = DIV64_U64_ROUND_UP(
1074 				parent_active * parent->child_inuse_sum,
1075 				parent->child_active_sum);
1076 		}
1077 
1078 		/* do we need to keep walking up? */
1079 		if (parent_active == parent->active &&
1080 		    parent_inuse == parent->inuse)
1081 			break;
1082 
1083 		active = parent_active;
1084 		inuse = parent_inuse;
1085 	}
1086 
1087 	ioc->weights_updated = true;
1088 }
1089 
commit_weights(struct ioc * ioc)1090 static void commit_weights(struct ioc *ioc)
1091 {
1092 	lockdep_assert_held(&ioc->lock);
1093 
1094 	if (ioc->weights_updated) {
1095 		/* paired with rmb in current_hweight(), see there */
1096 		smp_wmb();
1097 		atomic_inc(&ioc->hweight_gen);
1098 		ioc->weights_updated = false;
1099 	}
1100 }
1101 
propagate_weights(struct ioc_gq * iocg,u32 active,u32 inuse,bool save,struct ioc_now * now)1102 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1103 			      bool save, struct ioc_now *now)
1104 {
1105 	__propagate_weights(iocg, active, inuse, save, now);
1106 	commit_weights(iocg->ioc);
1107 }
1108 
current_hweight(struct ioc_gq * iocg,u32 * hw_activep,u32 * hw_inusep)1109 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1110 {
1111 	struct ioc *ioc = iocg->ioc;
1112 	int lvl;
1113 	u32 hwa, hwi;
1114 	int ioc_gen;
1115 
1116 	/* hot path - if uptodate, use cached */
1117 	ioc_gen = atomic_read(&ioc->hweight_gen);
1118 	if (ioc_gen == iocg->hweight_gen)
1119 		goto out;
1120 
1121 	/*
1122 	 * Paired with wmb in commit_weights(). If we saw the updated
1123 	 * hweight_gen, all the weight updates from __propagate_weights() are
1124 	 * visible too.
1125 	 *
1126 	 * We can race with weight updates during calculation and get it
1127 	 * wrong.  However, hweight_gen would have changed and a future
1128 	 * reader will recalculate and we're guaranteed to discard the
1129 	 * wrong result soon.
1130 	 */
1131 	smp_rmb();
1132 
1133 	hwa = hwi = WEIGHT_ONE;
1134 	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1135 		struct ioc_gq *parent = iocg->ancestors[lvl];
1136 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1137 		u64 active_sum = READ_ONCE(parent->child_active_sum);
1138 		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1139 		u32 active = READ_ONCE(child->active);
1140 		u32 inuse = READ_ONCE(child->inuse);
1141 
1142 		/* we can race with deactivations and either may read as zero */
1143 		if (!active_sum || !inuse_sum)
1144 			continue;
1145 
1146 		active_sum = max_t(u64, active, active_sum);
1147 		hwa = div64_u64((u64)hwa * active, active_sum);
1148 
1149 		inuse_sum = max_t(u64, inuse, inuse_sum);
1150 		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1151 	}
1152 
1153 	iocg->hweight_active = max_t(u32, hwa, 1);
1154 	iocg->hweight_inuse = max_t(u32, hwi, 1);
1155 	iocg->hweight_gen = ioc_gen;
1156 out:
1157 	if (hw_activep)
1158 		*hw_activep = iocg->hweight_active;
1159 	if (hw_inusep)
1160 		*hw_inusep = iocg->hweight_inuse;
1161 }
1162 
1163 /*
1164  * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1165  * other weights stay unchanged.
1166  */
current_hweight_max(struct ioc_gq * iocg)1167 static u32 current_hweight_max(struct ioc_gq *iocg)
1168 {
1169 	u32 hwm = WEIGHT_ONE;
1170 	u32 inuse = iocg->active;
1171 	u64 child_inuse_sum;
1172 	int lvl;
1173 
1174 	lockdep_assert_held(&iocg->ioc->lock);
1175 
1176 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1177 		struct ioc_gq *parent = iocg->ancestors[lvl];
1178 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1179 
1180 		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1181 		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1182 		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1183 					   parent->child_active_sum);
1184 	}
1185 
1186 	return max_t(u32, hwm, 1);
1187 }
1188 
weight_updated(struct ioc_gq * iocg,struct ioc_now * now)1189 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1190 {
1191 	struct ioc *ioc = iocg->ioc;
1192 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1193 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1194 	u32 weight;
1195 
1196 	lockdep_assert_held(&ioc->lock);
1197 
1198 	weight = iocg->cfg_weight ?: iocc->dfl_weight;
1199 	if (weight != iocg->weight && iocg->active)
1200 		propagate_weights(iocg, weight, iocg->inuse, true, now);
1201 	iocg->weight = weight;
1202 }
1203 
iocg_activate(struct ioc_gq * iocg,struct ioc_now * now)1204 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1205 {
1206 	struct ioc *ioc = iocg->ioc;
1207 	u64 last_period, cur_period;
1208 	u64 vtime, vtarget;
1209 	int i;
1210 
1211 	/*
1212 	 * If seem to be already active, just update the stamp to tell the
1213 	 * timer that we're still active.  We don't mind occassional races.
1214 	 */
1215 	if (!list_empty(&iocg->active_list)) {
1216 		ioc_now(ioc, now);
1217 		cur_period = atomic64_read(&ioc->cur_period);
1218 		if (atomic64_read(&iocg->active_period) != cur_period)
1219 			atomic64_set(&iocg->active_period, cur_period);
1220 		return true;
1221 	}
1222 
1223 	/* racy check on internal node IOs, treat as root level IOs */
1224 	if (iocg->child_active_sum)
1225 		return false;
1226 
1227 	spin_lock_irq(&ioc->lock);
1228 
1229 	ioc_now(ioc, now);
1230 
1231 	/* update period */
1232 	cur_period = atomic64_read(&ioc->cur_period);
1233 	last_period = atomic64_read(&iocg->active_period);
1234 	atomic64_set(&iocg->active_period, cur_period);
1235 
1236 	/* already activated or breaking leaf-only constraint? */
1237 	if (!list_empty(&iocg->active_list))
1238 		goto succeed_unlock;
1239 	for (i = iocg->level - 1; i > 0; i--)
1240 		if (!list_empty(&iocg->ancestors[i]->active_list))
1241 			goto fail_unlock;
1242 
1243 	if (iocg->child_active_sum)
1244 		goto fail_unlock;
1245 
1246 	/*
1247 	 * Always start with the target budget. On deactivation, we throw away
1248 	 * anything above it.
1249 	 */
1250 	vtarget = now->vnow - ioc->margins.target;
1251 	vtime = atomic64_read(&iocg->vtime);
1252 
1253 	atomic64_add(vtarget - vtime, &iocg->vtime);
1254 	atomic64_add(vtarget - vtime, &iocg->done_vtime);
1255 	vtime = vtarget;
1256 
1257 	/*
1258 	 * Activate, propagate weight and start period timer if not
1259 	 * running.  Reset hweight_gen to avoid accidental match from
1260 	 * wrapping.
1261 	 */
1262 	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1263 	list_add(&iocg->active_list, &ioc->active_iocgs);
1264 
1265 	propagate_weights(iocg, iocg->weight,
1266 			  iocg->last_inuse ?: iocg->weight, true, now);
1267 
1268 	TRACE_IOCG_PATH(iocg_activate, iocg, now,
1269 			last_period, cur_period, vtime);
1270 
1271 	iocg->activated_at = now->now;
1272 
1273 	if (ioc->running == IOC_IDLE) {
1274 		ioc->running = IOC_RUNNING;
1275 		ioc->dfgv_period_at = now->now;
1276 		ioc->dfgv_period_rem = 0;
1277 		ioc_start_period(ioc, now);
1278 	}
1279 
1280 succeed_unlock:
1281 	spin_unlock_irq(&ioc->lock);
1282 	return true;
1283 
1284 fail_unlock:
1285 	spin_unlock_irq(&ioc->lock);
1286 	return false;
1287 }
1288 
iocg_kick_delay(struct ioc_gq * iocg,struct ioc_now * now)1289 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1290 {
1291 	struct ioc *ioc = iocg->ioc;
1292 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1293 	u64 tdelta, delay, new_delay;
1294 	s64 vover, vover_pct;
1295 	u32 hwa;
1296 
1297 	lockdep_assert_held(&iocg->waitq.lock);
1298 
1299 	/*
1300 	 * If the delay is set by another CPU, we may be in the past. No need to
1301 	 * change anything if so. This avoids decay calculation underflow.
1302 	 */
1303 	if (time_before64(now->now, iocg->delay_at))
1304 		return false;
1305 
1306 	/* calculate the current delay in effect - 1/2 every second */
1307 	tdelta = now->now - iocg->delay_at;
1308 	if (iocg->delay)
1309 		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1310 	else
1311 		delay = 0;
1312 
1313 	/* calculate the new delay from the debt amount */
1314 	current_hweight(iocg, &hwa, NULL);
1315 	vover = atomic64_read(&iocg->vtime) +
1316 		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1317 	vover_pct = div64_s64(100 * vover,
1318 			      ioc->period_us * ioc->vtime_base_rate);
1319 
1320 	if (vover_pct <= MIN_DELAY_THR_PCT)
1321 		new_delay = 0;
1322 	else if (vover_pct >= MAX_DELAY_THR_PCT)
1323 		new_delay = MAX_DELAY;
1324 	else
1325 		new_delay = MIN_DELAY +
1326 			div_u64((MAX_DELAY - MIN_DELAY) *
1327 				(vover_pct - MIN_DELAY_THR_PCT),
1328 				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1329 
1330 	/* pick the higher one and apply */
1331 	if (new_delay > delay) {
1332 		iocg->delay = new_delay;
1333 		iocg->delay_at = now->now;
1334 		delay = new_delay;
1335 	}
1336 
1337 	if (delay >= MIN_DELAY) {
1338 		if (!iocg->indelay_since)
1339 			iocg->indelay_since = now->now;
1340 		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1341 		return true;
1342 	} else {
1343 		if (iocg->indelay_since) {
1344 			iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
1345 			iocg->indelay_since = 0;
1346 		}
1347 		iocg->delay = 0;
1348 		blkcg_clear_delay(blkg);
1349 		return false;
1350 	}
1351 }
1352 
iocg_incur_debt(struct ioc_gq * iocg,u64 abs_cost,struct ioc_now * now)1353 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1354 			    struct ioc_now *now)
1355 {
1356 	struct iocg_pcpu_stat *gcs;
1357 
1358 	lockdep_assert_held(&iocg->ioc->lock);
1359 	lockdep_assert_held(&iocg->waitq.lock);
1360 	WARN_ON_ONCE(list_empty(&iocg->active_list));
1361 
1362 	/*
1363 	 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1364 	 * inuse donating all of it share to others until its debt is paid off.
1365 	 */
1366 	if (!iocg->abs_vdebt && abs_cost) {
1367 		iocg->indebt_since = now->now;
1368 		propagate_weights(iocg, iocg->active, 0, false, now);
1369 	}
1370 
1371 	iocg->abs_vdebt += abs_cost;
1372 
1373 	gcs = get_cpu_ptr(iocg->pcpu_stat);
1374 	local64_add(abs_cost, &gcs->abs_vusage);
1375 	put_cpu_ptr(gcs);
1376 }
1377 
iocg_pay_debt(struct ioc_gq * iocg,u64 abs_vpay,struct ioc_now * now)1378 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1379 			  struct ioc_now *now)
1380 {
1381 	lockdep_assert_held(&iocg->ioc->lock);
1382 	lockdep_assert_held(&iocg->waitq.lock);
1383 
1384 	/* make sure that nobody messed with @iocg */
1385 	WARN_ON_ONCE(list_empty(&iocg->active_list));
1386 	WARN_ON_ONCE(iocg->inuse > 1);
1387 
1388 	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1389 
1390 	/* if debt is paid in full, restore inuse */
1391 	if (!iocg->abs_vdebt) {
1392 		iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
1393 		iocg->indebt_since = 0;
1394 
1395 		propagate_weights(iocg, iocg->active, iocg->last_inuse,
1396 				  false, now);
1397 	}
1398 }
1399 
iocg_wake_fn(struct wait_queue_entry * wq_entry,unsigned mode,int flags,void * key)1400 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1401 			int flags, void *key)
1402 {
1403 	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1404 	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1405 	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1406 
1407 	ctx->vbudget -= cost;
1408 
1409 	if (ctx->vbudget < 0)
1410 		return -1;
1411 
1412 	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1413 	wait->committed = true;
1414 
1415 	/*
1416 	 * autoremove_wake_function() removes the wait entry only when it
1417 	 * actually changed the task state. We want the wait always removed.
1418 	 * Remove explicitly and use default_wake_function(). Note that the
1419 	 * order of operations is important as finish_wait() tests whether
1420 	 * @wq_entry is removed without grabbing the lock.
1421 	 */
1422 	default_wake_function(wq_entry, mode, flags, key);
1423 	list_del_init_careful(&wq_entry->entry);
1424 	return 0;
1425 }
1426 
1427 /*
1428  * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1429  * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1430  * addition to iocg->waitq.lock.
1431  */
iocg_kick_waitq(struct ioc_gq * iocg,bool pay_debt,struct ioc_now * now)1432 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1433 			    struct ioc_now *now)
1434 {
1435 	struct ioc *ioc = iocg->ioc;
1436 	struct iocg_wake_ctx ctx = { .iocg = iocg };
1437 	u64 vshortage, expires, oexpires;
1438 	s64 vbudget;
1439 	u32 hwa;
1440 
1441 	lockdep_assert_held(&iocg->waitq.lock);
1442 
1443 	current_hweight(iocg, &hwa, NULL);
1444 	vbudget = now->vnow - atomic64_read(&iocg->vtime);
1445 
1446 	/* pay off debt */
1447 	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1448 		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1449 		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1450 		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1451 
1452 		lockdep_assert_held(&ioc->lock);
1453 
1454 		atomic64_add(vpay, &iocg->vtime);
1455 		atomic64_add(vpay, &iocg->done_vtime);
1456 		iocg_pay_debt(iocg, abs_vpay, now);
1457 		vbudget -= vpay;
1458 	}
1459 
1460 	if (iocg->abs_vdebt || iocg->delay)
1461 		iocg_kick_delay(iocg, now);
1462 
1463 	/*
1464 	 * Debt can still be outstanding if we haven't paid all yet or the
1465 	 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1466 	 * under debt. Make sure @vbudget reflects the outstanding amount and is
1467 	 * not positive.
1468 	 */
1469 	if (iocg->abs_vdebt) {
1470 		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1471 		vbudget = min_t(s64, 0, vbudget - vdebt);
1472 	}
1473 
1474 	/*
1475 	 * Wake up the ones which are due and see how much vtime we'll need for
1476 	 * the next one. As paying off debt restores hw_inuse, it must be read
1477 	 * after the above debt payment.
1478 	 */
1479 	ctx.vbudget = vbudget;
1480 	current_hweight(iocg, NULL, &ctx.hw_inuse);
1481 
1482 	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1483 
1484 	if (!waitqueue_active(&iocg->waitq)) {
1485 		if (iocg->wait_since) {
1486 			iocg->local_stat.wait_us += now->now - iocg->wait_since;
1487 			iocg->wait_since = 0;
1488 		}
1489 		return;
1490 	}
1491 
1492 	if (!iocg->wait_since)
1493 		iocg->wait_since = now->now;
1494 
1495 	if (WARN_ON_ONCE(ctx.vbudget >= 0))
1496 		return;
1497 
1498 	/* determine next wakeup, add a timer margin to guarantee chunking */
1499 	vshortage = -ctx.vbudget;
1500 	expires = now->now_ns +
1501 		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1502 		NSEC_PER_USEC;
1503 	expires += ioc->timer_slack_ns;
1504 
1505 	/* if already active and close enough, don't bother */
1506 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1507 	if (hrtimer_is_queued(&iocg->waitq_timer) &&
1508 	    abs(oexpires - expires) <= ioc->timer_slack_ns)
1509 		return;
1510 
1511 	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1512 			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1513 }
1514 
iocg_waitq_timer_fn(struct hrtimer * timer)1515 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1516 {
1517 	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1518 	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1519 	struct ioc_now now;
1520 	unsigned long flags;
1521 
1522 	ioc_now(iocg->ioc, &now);
1523 
1524 	iocg_lock(iocg, pay_debt, &flags);
1525 	iocg_kick_waitq(iocg, pay_debt, &now);
1526 	iocg_unlock(iocg, pay_debt, &flags);
1527 
1528 	return HRTIMER_NORESTART;
1529 }
1530 
ioc_lat_stat(struct ioc * ioc,u32 * missed_ppm_ar,u32 * rq_wait_pct_p)1531 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1532 {
1533 	u32 nr_met[2] = { };
1534 	u32 nr_missed[2] = { };
1535 	u64 rq_wait_ns = 0;
1536 	int cpu, rw;
1537 
1538 	for_each_online_cpu(cpu) {
1539 		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1540 		u64 this_rq_wait_ns;
1541 
1542 		for (rw = READ; rw <= WRITE; rw++) {
1543 			u32 this_met = local_read(&stat->missed[rw].nr_met);
1544 			u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1545 
1546 			nr_met[rw] += this_met - stat->missed[rw].last_met;
1547 			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1548 			stat->missed[rw].last_met = this_met;
1549 			stat->missed[rw].last_missed = this_missed;
1550 		}
1551 
1552 		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1553 		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1554 		stat->last_rq_wait_ns = this_rq_wait_ns;
1555 	}
1556 
1557 	for (rw = READ; rw <= WRITE; rw++) {
1558 		if (nr_met[rw] + nr_missed[rw])
1559 			missed_ppm_ar[rw] =
1560 				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1561 						   nr_met[rw] + nr_missed[rw]);
1562 		else
1563 			missed_ppm_ar[rw] = 0;
1564 	}
1565 
1566 	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1567 				   ioc->period_us * NSEC_PER_USEC);
1568 }
1569 
1570 /* was iocg idle this period? */
iocg_is_idle(struct ioc_gq * iocg)1571 static bool iocg_is_idle(struct ioc_gq *iocg)
1572 {
1573 	struct ioc *ioc = iocg->ioc;
1574 
1575 	/* did something get issued this period? */
1576 	if (atomic64_read(&iocg->active_period) ==
1577 	    atomic64_read(&ioc->cur_period))
1578 		return false;
1579 
1580 	/* is something in flight? */
1581 	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1582 		return false;
1583 
1584 	return true;
1585 }
1586 
1587 /*
1588  * Call this function on the target leaf @iocg's to build pre-order traversal
1589  * list of all the ancestors in @inner_walk. The inner nodes are linked through
1590  * ->walk_list and the caller is responsible for dissolving the list after use.
1591  */
iocg_build_inner_walk(struct ioc_gq * iocg,struct list_head * inner_walk)1592 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1593 				  struct list_head *inner_walk)
1594 {
1595 	int lvl;
1596 
1597 	WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1598 
1599 	/* find the first ancestor which hasn't been visited yet */
1600 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1601 		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1602 			break;
1603 	}
1604 
1605 	/* walk down and visit the inner nodes to get pre-order traversal */
1606 	while (++lvl <= iocg->level - 1) {
1607 		struct ioc_gq *inner = iocg->ancestors[lvl];
1608 
1609 		/* record traversal order */
1610 		list_add_tail(&inner->walk_list, inner_walk);
1611 	}
1612 }
1613 
1614 /* collect per-cpu counters and propagate the deltas to the parent */
iocg_flush_stat_one(struct ioc_gq * iocg,struct ioc_now * now)1615 static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
1616 {
1617 	struct ioc *ioc = iocg->ioc;
1618 	struct iocg_stat new_stat;
1619 	u64 abs_vusage = 0;
1620 	u64 vusage_delta;
1621 	int cpu;
1622 
1623 	lockdep_assert_held(&iocg->ioc->lock);
1624 
1625 	/* collect per-cpu counters */
1626 	for_each_possible_cpu(cpu) {
1627 		abs_vusage += local64_read(
1628 				per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1629 	}
1630 	vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1631 	iocg->last_stat_abs_vusage = abs_vusage;
1632 
1633 	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1634 	iocg->local_stat.usage_us += iocg->usage_delta_us;
1635 
1636 	/* propagate upwards */
1637 	new_stat.usage_us =
1638 		iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
1639 	new_stat.wait_us =
1640 		iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
1641 	new_stat.indebt_us =
1642 		iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
1643 	new_stat.indelay_us =
1644 		iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;
1645 
1646 	/* propagate the deltas to the parent */
1647 	if (iocg->level > 0) {
1648 		struct iocg_stat *parent_stat =
1649 			&iocg->ancestors[iocg->level - 1]->desc_stat;
1650 
1651 		parent_stat->usage_us +=
1652 			new_stat.usage_us - iocg->last_stat.usage_us;
1653 		parent_stat->wait_us +=
1654 			new_stat.wait_us - iocg->last_stat.wait_us;
1655 		parent_stat->indebt_us +=
1656 			new_stat.indebt_us - iocg->last_stat.indebt_us;
1657 		parent_stat->indelay_us +=
1658 			new_stat.indelay_us - iocg->last_stat.indelay_us;
1659 	}
1660 
1661 	iocg->last_stat = new_stat;
1662 }
1663 
1664 /* get stat counters ready for reading on all active iocgs */
iocg_flush_stat(struct list_head * target_iocgs,struct ioc_now * now)1665 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1666 {
1667 	LIST_HEAD(inner_walk);
1668 	struct ioc_gq *iocg, *tiocg;
1669 
1670 	/* flush leaves and build inner node walk list */
1671 	list_for_each_entry(iocg, target_iocgs, active_list) {
1672 		iocg_flush_stat_one(iocg, now);
1673 		iocg_build_inner_walk(iocg, &inner_walk);
1674 	}
1675 
1676 	/* keep flushing upwards by walking the inner list backwards */
1677 	list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1678 		iocg_flush_stat_one(iocg, now);
1679 		list_del_init(&iocg->walk_list);
1680 	}
1681 }
1682 
1683 /*
1684  * Determine what @iocg's hweight_inuse should be after donating unused
1685  * capacity. @hwm is the upper bound and used to signal no donation. This
1686  * function also throws away @iocg's excess budget.
1687  */
hweight_after_donation(struct ioc_gq * iocg,u32 old_hwi,u32 hwm,u32 usage,struct ioc_now * now)1688 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1689 				  u32 usage, struct ioc_now *now)
1690 {
1691 	struct ioc *ioc = iocg->ioc;
1692 	u64 vtime = atomic64_read(&iocg->vtime);
1693 	s64 excess, delta, target, new_hwi;
1694 
1695 	/* debt handling owns inuse for debtors */
1696 	if (iocg->abs_vdebt)
1697 		return 1;
1698 
1699 	/* see whether minimum margin requirement is met */
1700 	if (waitqueue_active(&iocg->waitq) ||
1701 	    time_after64(vtime, now->vnow - ioc->margins.min))
1702 		return hwm;
1703 
1704 	/* throw away excess above target */
1705 	excess = now->vnow - vtime - ioc->margins.target;
1706 	if (excess > 0) {
1707 		atomic64_add(excess, &iocg->vtime);
1708 		atomic64_add(excess, &iocg->done_vtime);
1709 		vtime += excess;
1710 		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1711 	}
1712 
1713 	/*
1714 	 * Let's say the distance between iocg's and device's vtimes as a
1715 	 * fraction of period duration is delta. Assuming that the iocg will
1716 	 * consume the usage determined above, we want to determine new_hwi so
1717 	 * that delta equals MARGIN_TARGET at the end of the next period.
1718 	 *
1719 	 * We need to execute usage worth of IOs while spending the sum of the
1720 	 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1721 	 * (delta):
1722 	 *
1723 	 *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
1724 	 *
1725 	 * Therefore, the new_hwi is:
1726 	 *
1727 	 *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
1728 	 */
1729 	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1730 			  now->vnow - ioc->period_at_vtime);
1731 	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1732 	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1733 
1734 	return clamp_t(s64, new_hwi, 1, hwm);
1735 }
1736 
1737 /*
1738  * For work-conservation, an iocg which isn't using all of its share should
1739  * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1740  * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1741  *
1742  * #1 is mathematically simpler but has the drawback of requiring synchronous
1743  * global hweight_inuse updates when idle iocg's get activated or inuse weights
1744  * change due to donation snapbacks as it has the possibility of grossly
1745  * overshooting what's allowed by the model and vrate.
1746  *
1747  * #2 is inherently safe with local operations. The donating iocg can easily
1748  * snap back to higher weights when needed without worrying about impacts on
1749  * other nodes as the impacts will be inherently correct. This also makes idle
1750  * iocg activations safe. The only effect activations have is decreasing
1751  * hweight_inuse of others, the right solution to which is for those iocgs to
1752  * snap back to higher weights.
1753  *
1754  * So, we go with #2. The challenge is calculating how each donating iocg's
1755  * inuse should be adjusted to achieve the target donation amounts. This is done
1756  * using Andy's method described in the following pdf.
1757  *
1758  *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1759  *
1760  * Given the weights and target after-donation hweight_inuse values, Andy's
1761  * method determines how the proportional distribution should look like at each
1762  * sibling level to maintain the relative relationship between all non-donating
1763  * pairs. To roughly summarize, it divides the tree into donating and
1764  * non-donating parts, calculates global donation rate which is used to
1765  * determine the target hweight_inuse for each node, and then derives per-level
1766  * proportions.
1767  *
1768  * The following pdf shows that global distribution calculated this way can be
1769  * achieved by scaling inuse weights of donating leaves and propagating the
1770  * adjustments upwards proportionally.
1771  *
1772  *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1773  *
1774  * Combining the above two, we can determine how each leaf iocg's inuse should
1775  * be adjusted to achieve the target donation.
1776  *
1777  *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1778  *
1779  * The inline comments use symbols from the last pdf.
1780  *
1781  *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
1782  *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
1783  *   t is the sum of the absolute budgets of donating nodes in the subtree.
1784  *   w is the weight of the node. w = w_f + w_t
1785  *   w_f is the non-donating portion of w. w_f = w * f / b
1786  *   w_b is the donating portion of w. w_t = w * t / b
1787  *   s is the sum of all sibling weights. s = Sum(w) for siblings
1788  *   s_f and s_t are the non-donating and donating portions of s.
1789  *
1790  * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1791  * w_pt is the donating portion of the parent's weight and w'_pt the same value
1792  * after adjustments. Subscript r denotes the root node's values.
1793  */
transfer_surpluses(struct list_head * surpluses,struct ioc_now * now)1794 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1795 {
1796 	LIST_HEAD(over_hwa);
1797 	LIST_HEAD(inner_walk);
1798 	struct ioc_gq *iocg, *tiocg, *root_iocg;
1799 	u32 after_sum, over_sum, over_target, gamma;
1800 
1801 	/*
1802 	 * It's pretty unlikely but possible for the total sum of
1803 	 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1804 	 * confuse the following calculations. If such condition is detected,
1805 	 * scale down everyone over its full share equally to keep the sum below
1806 	 * WEIGHT_ONE.
1807 	 */
1808 	after_sum = 0;
1809 	over_sum = 0;
1810 	list_for_each_entry(iocg, surpluses, surplus_list) {
1811 		u32 hwa;
1812 
1813 		current_hweight(iocg, &hwa, NULL);
1814 		after_sum += iocg->hweight_after_donation;
1815 
1816 		if (iocg->hweight_after_donation > hwa) {
1817 			over_sum += iocg->hweight_after_donation;
1818 			list_add(&iocg->walk_list, &over_hwa);
1819 		}
1820 	}
1821 
1822 	if (after_sum >= WEIGHT_ONE) {
1823 		/*
1824 		 * The delta should be deducted from the over_sum, calculate
1825 		 * target over_sum value.
1826 		 */
1827 		u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1828 		WARN_ON_ONCE(over_sum <= over_delta);
1829 		over_target = over_sum - over_delta;
1830 	} else {
1831 		over_target = 0;
1832 	}
1833 
1834 	list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1835 		if (over_target)
1836 			iocg->hweight_after_donation =
1837 				div_u64((u64)iocg->hweight_after_donation *
1838 					over_target, over_sum);
1839 		list_del_init(&iocg->walk_list);
1840 	}
1841 
1842 	/*
1843 	 * Build pre-order inner node walk list and prepare for donation
1844 	 * adjustment calculations.
1845 	 */
1846 	list_for_each_entry(iocg, surpluses, surplus_list) {
1847 		iocg_build_inner_walk(iocg, &inner_walk);
1848 	}
1849 
1850 	root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1851 	WARN_ON_ONCE(root_iocg->level > 0);
1852 
1853 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1854 		iocg->child_adjusted_sum = 0;
1855 		iocg->hweight_donating = 0;
1856 		iocg->hweight_after_donation = 0;
1857 	}
1858 
1859 	/*
1860 	 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1861 	 * up the hierarchy.
1862 	 */
1863 	list_for_each_entry(iocg, surpluses, surplus_list) {
1864 		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1865 
1866 		parent->hweight_donating += iocg->hweight_donating;
1867 		parent->hweight_after_donation += iocg->hweight_after_donation;
1868 	}
1869 
1870 	list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1871 		if (iocg->level > 0) {
1872 			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1873 
1874 			parent->hweight_donating += iocg->hweight_donating;
1875 			parent->hweight_after_donation += iocg->hweight_after_donation;
1876 		}
1877 	}
1878 
1879 	/*
1880 	 * Calculate inner hwa's (b) and make sure the donation values are
1881 	 * within the accepted ranges as we're doing low res calculations with
1882 	 * roundups.
1883 	 */
1884 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1885 		if (iocg->level) {
1886 			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1887 
1888 			iocg->hweight_active = DIV64_U64_ROUND_UP(
1889 				(u64)parent->hweight_active * iocg->active,
1890 				parent->child_active_sum);
1891 
1892 		}
1893 
1894 		iocg->hweight_donating = min(iocg->hweight_donating,
1895 					     iocg->hweight_active);
1896 		iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1897 						   iocg->hweight_donating - 1);
1898 		if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1899 				 iocg->hweight_donating <= 1 ||
1900 				 iocg->hweight_after_donation == 0)) {
1901 			pr_warn("iocg: invalid donation weights in ");
1902 			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1903 			pr_cont(": active=%u donating=%u after=%u\n",
1904 				iocg->hweight_active, iocg->hweight_donating,
1905 				iocg->hweight_after_donation);
1906 		}
1907 	}
1908 
1909 	/*
1910 	 * Calculate the global donation rate (gamma) - the rate to adjust
1911 	 * non-donating budgets by.
1912 	 *
1913 	 * No need to use 64bit multiplication here as the first operand is
1914 	 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1915 	 *
1916 	 * We know that there are beneficiary nodes and the sum of the donating
1917 	 * hweights can't be whole; however, due to the round-ups during hweight
1918 	 * calculations, root_iocg->hweight_donating might still end up equal to
1919 	 * or greater than whole. Limit the range when calculating the divider.
1920 	 *
1921 	 * gamma = (1 - t_r') / (1 - t_r)
1922 	 */
1923 	gamma = DIV_ROUND_UP(
1924 		(WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1925 		WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1926 
1927 	/*
1928 	 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1929 	 * nodes.
1930 	 */
1931 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1932 		struct ioc_gq *parent;
1933 		u32 inuse, wpt, wptp;
1934 		u64 st, sf;
1935 
1936 		if (iocg->level == 0) {
1937 			/* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1938 			iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1939 				iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1940 				WEIGHT_ONE - iocg->hweight_after_donation);
1941 			continue;
1942 		}
1943 
1944 		parent = iocg->ancestors[iocg->level - 1];
1945 
1946 		/* b' = gamma * b_f + b_t' */
1947 		iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1948 			(u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1949 			WEIGHT_ONE) + iocg->hweight_after_donation;
1950 
1951 		/* w' = s' * b' / b'_p */
1952 		inuse = DIV64_U64_ROUND_UP(
1953 			(u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1954 			parent->hweight_inuse);
1955 
1956 		/* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1957 		st = DIV64_U64_ROUND_UP(
1958 			iocg->child_active_sum * iocg->hweight_donating,
1959 			iocg->hweight_active);
1960 		sf = iocg->child_active_sum - st;
1961 		wpt = DIV64_U64_ROUND_UP(
1962 			(u64)iocg->active * iocg->hweight_donating,
1963 			iocg->hweight_active);
1964 		wptp = DIV64_U64_ROUND_UP(
1965 			(u64)inuse * iocg->hweight_after_donation,
1966 			iocg->hweight_inuse);
1967 
1968 		iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1969 	}
1970 
1971 	/*
1972 	 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1973 	 * we can finally determine leaf adjustments.
1974 	 */
1975 	list_for_each_entry(iocg, surpluses, surplus_list) {
1976 		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1977 		u32 inuse;
1978 
1979 		/*
1980 		 * In-debt iocgs participated in the donation calculation with
1981 		 * the minimum target hweight_inuse. Configuring inuse
1982 		 * accordingly would work fine but debt handling expects
1983 		 * @iocg->inuse stay at the minimum and we don't wanna
1984 		 * interfere.
1985 		 */
1986 		if (iocg->abs_vdebt) {
1987 			WARN_ON_ONCE(iocg->inuse > 1);
1988 			continue;
1989 		}
1990 
1991 		/* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
1992 		inuse = DIV64_U64_ROUND_UP(
1993 			parent->child_adjusted_sum * iocg->hweight_after_donation,
1994 			parent->hweight_inuse);
1995 
1996 		TRACE_IOCG_PATH(inuse_transfer, iocg, now,
1997 				iocg->inuse, inuse,
1998 				iocg->hweight_inuse,
1999 				iocg->hweight_after_donation);
2000 
2001 		__propagate_weights(iocg, iocg->active, inuse, true, now);
2002 	}
2003 
2004 	/* walk list should be dissolved after use */
2005 	list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2006 		list_del_init(&iocg->walk_list);
2007 }
2008 
2009 /*
2010  * A low weight iocg can amass a large amount of debt, for example, when
2011  * anonymous memory gets reclaimed aggressively. If the system has a lot of
2012  * memory paired with a slow IO device, the debt can span multiple seconds or
2013  * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2014  * up blocked paying its debt while the IO device is idle.
2015  *
2016  * The following protects against such cases. If the device has been
2017  * sufficiently idle for a while, the debts are halved and delays are
2018  * recalculated.
2019  */
ioc_forgive_debts(struct ioc * ioc,u64 usage_us_sum,int nr_debtors,struct ioc_now * now)2020 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2021 			      struct ioc_now *now)
2022 {
2023 	struct ioc_gq *iocg;
2024 	u64 dur, usage_pct, nr_cycles;
2025 
2026 	/* if no debtor, reset the cycle */
2027 	if (!nr_debtors) {
2028 		ioc->dfgv_period_at = now->now;
2029 		ioc->dfgv_period_rem = 0;
2030 		ioc->dfgv_usage_us_sum = 0;
2031 		return;
2032 	}
2033 
2034 	/*
2035 	 * Debtors can pass through a lot of writes choking the device and we
2036 	 * don't want to be forgiving debts while the device is struggling from
2037 	 * write bursts. If we're missing latency targets, consider the device
2038 	 * fully utilized.
2039 	 */
2040 	if (ioc->busy_level > 0)
2041 		usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2042 
2043 	ioc->dfgv_usage_us_sum += usage_us_sum;
2044 	if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2045 		return;
2046 
2047 	/*
2048 	 * At least DFGV_PERIOD has passed since the last period. Calculate the
2049 	 * average usage and reset the period counters.
2050 	 */
2051 	dur = now->now - ioc->dfgv_period_at;
2052 	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2053 
2054 	ioc->dfgv_period_at = now->now;
2055 	ioc->dfgv_usage_us_sum = 0;
2056 
2057 	/* if was too busy, reset everything */
2058 	if (usage_pct > DFGV_USAGE_PCT) {
2059 		ioc->dfgv_period_rem = 0;
2060 		return;
2061 	}
2062 
2063 	/*
2064 	 * Usage is lower than threshold. Let's forgive some debts. Debt
2065 	 * forgiveness runs off of the usual ioc timer but its period usually
2066 	 * doesn't match ioc's. Compensate the difference by performing the
2067 	 * reduction as many times as would fit in the duration since the last
2068 	 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2069 	 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2070 	 * reductions is doubled.
2071 	 */
2072 	nr_cycles = dur + ioc->dfgv_period_rem;
2073 	ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2074 
2075 	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2076 		u64 __maybe_unused old_debt, __maybe_unused old_delay;
2077 
2078 		if (!iocg->abs_vdebt && !iocg->delay)
2079 			continue;
2080 
2081 		spin_lock(&iocg->waitq.lock);
2082 
2083 		old_debt = iocg->abs_vdebt;
2084 		old_delay = iocg->delay;
2085 
2086 		if (iocg->abs_vdebt)
2087 			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2088 		if (iocg->delay)
2089 			iocg->delay = iocg->delay >> nr_cycles ?: 1;
2090 
2091 		iocg_kick_waitq(iocg, true, now);
2092 
2093 		TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2094 				old_debt, iocg->abs_vdebt,
2095 				old_delay, iocg->delay);
2096 
2097 		spin_unlock(&iocg->waitq.lock);
2098 	}
2099 }
2100 
ioc_timer_fn(struct timer_list * timer)2101 static void ioc_timer_fn(struct timer_list *timer)
2102 {
2103 	struct ioc *ioc = container_of(timer, struct ioc, timer);
2104 	struct ioc_gq *iocg, *tiocg;
2105 	struct ioc_now now;
2106 	LIST_HEAD(surpluses);
2107 	int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
2108 	u64 usage_us_sum = 0;
2109 	u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2110 	u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2111 	u32 missed_ppm[2], rq_wait_pct;
2112 	u64 period_vtime;
2113 	int prev_busy_level;
2114 
2115 	/* how were the latencies during the period? */
2116 	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2117 
2118 	/* take care of active iocgs */
2119 	spin_lock_irq(&ioc->lock);
2120 
2121 	ioc_now(ioc, &now);
2122 
2123 	period_vtime = now.vnow - ioc->period_at_vtime;
2124 	if (WARN_ON_ONCE(!period_vtime)) {
2125 		spin_unlock_irq(&ioc->lock);
2126 		return;
2127 	}
2128 
2129 	/*
2130 	 * Waiters determine the sleep durations based on the vrate they
2131 	 * saw at the time of sleep.  If vrate has increased, some waiters
2132 	 * could be sleeping for too long.  Wake up tardy waiters which
2133 	 * should have woken up in the last period and expire idle iocgs.
2134 	 */
2135 	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2136 		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2137 		    !iocg->delay && !iocg_is_idle(iocg))
2138 			continue;
2139 
2140 		spin_lock(&iocg->waitq.lock);
2141 
2142 		/* flush wait and indebt stat deltas */
2143 		if (iocg->wait_since) {
2144 			iocg->local_stat.wait_us += now.now - iocg->wait_since;
2145 			iocg->wait_since = now.now;
2146 		}
2147 		if (iocg->indebt_since) {
2148 			iocg->local_stat.indebt_us +=
2149 				now.now - iocg->indebt_since;
2150 			iocg->indebt_since = now.now;
2151 		}
2152 		if (iocg->indelay_since) {
2153 			iocg->local_stat.indelay_us +=
2154 				now.now - iocg->indelay_since;
2155 			iocg->indelay_since = now.now;
2156 		}
2157 
2158 		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2159 		    iocg->delay) {
2160 			/* might be oversleeping vtime / hweight changes, kick */
2161 			iocg_kick_waitq(iocg, true, &now);
2162 			if (iocg->abs_vdebt || iocg->delay)
2163 				nr_debtors++;
2164 		} else if (iocg_is_idle(iocg)) {
2165 			/* no waiter and idle, deactivate */
2166 			u64 vtime = atomic64_read(&iocg->vtime);
2167 			s64 excess;
2168 
2169 			/*
2170 			 * @iocg has been inactive for a full duration and will
2171 			 * have a high budget. Account anything above target as
2172 			 * error and throw away. On reactivation, it'll start
2173 			 * with the target budget.
2174 			 */
2175 			excess = now.vnow - vtime - ioc->margins.target;
2176 			if (excess > 0) {
2177 				u32 old_hwi;
2178 
2179 				current_hweight(iocg, NULL, &old_hwi);
2180 				ioc->vtime_err -= div64_u64(excess * old_hwi,
2181 							    WEIGHT_ONE);
2182 			}
2183 
2184 			__propagate_weights(iocg, 0, 0, false, &now);
2185 			list_del_init(&iocg->active_list);
2186 		}
2187 
2188 		spin_unlock(&iocg->waitq.lock);
2189 	}
2190 	commit_weights(ioc);
2191 
2192 	/*
2193 	 * Wait and indebt stat are flushed above and the donation calculation
2194 	 * below needs updated usage stat. Let's bring stat up-to-date.
2195 	 */
2196 	iocg_flush_stat(&ioc->active_iocgs, &now);
2197 
2198 	/* calc usage and see whether some weights need to be moved around */
2199 	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2200 		u64 vdone, vtime, usage_us, usage_dur;
2201 		u32 usage, hw_active, hw_inuse;
2202 
2203 		/*
2204 		 * Collect unused and wind vtime closer to vnow to prevent
2205 		 * iocgs from accumulating a large amount of budget.
2206 		 */
2207 		vdone = atomic64_read(&iocg->done_vtime);
2208 		vtime = atomic64_read(&iocg->vtime);
2209 		current_hweight(iocg, &hw_active, &hw_inuse);
2210 
2211 		/*
2212 		 * Latency QoS detection doesn't account for IOs which are
2213 		 * in-flight for longer than a period.  Detect them by
2214 		 * comparing vdone against period start.  If lagging behind
2215 		 * IOs from past periods, don't increase vrate.
2216 		 */
2217 		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2218 		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2219 		    time_after64(vtime, vdone) &&
2220 		    time_after64(vtime, now.vnow -
2221 				 MAX_LAGGING_PERIODS * period_vtime) &&
2222 		    time_before64(vdone, now.vnow - period_vtime))
2223 			nr_lagging++;
2224 
2225 		/*
2226 		 * Determine absolute usage factoring in in-flight IOs to avoid
2227 		 * high-latency completions appearing as idle.
2228 		 */
2229 		usage_us = iocg->usage_delta_us;
2230 		usage_us_sum += usage_us;
2231 
2232 		if (vdone != vtime) {
2233 			u64 inflight_us = DIV64_U64_ROUND_UP(
2234 				cost_to_abs_cost(vtime - vdone, hw_inuse),
2235 				ioc->vtime_base_rate);
2236 			usage_us = max(usage_us, inflight_us);
2237 		}
2238 
2239 		/* convert to hweight based usage ratio */
2240 		if (time_after64(iocg->activated_at, ioc->period_at))
2241 			usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2242 		else
2243 			usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2244 
2245 		usage = clamp_t(u32,
2246 				DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2247 						   usage_dur),
2248 				1, WEIGHT_ONE);
2249 
2250 		/* see whether there's surplus vtime */
2251 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2252 		if (hw_inuse < hw_active ||
2253 		    (!waitqueue_active(&iocg->waitq) &&
2254 		     time_before64(vtime, now.vnow - ioc->margins.low))) {
2255 			u32 hwa, old_hwi, hwm, new_hwi;
2256 
2257 			/*
2258 			 * Already donating or accumulated enough to start.
2259 			 * Determine the donation amount.
2260 			 */
2261 			current_hweight(iocg, &hwa, &old_hwi);
2262 			hwm = current_hweight_max(iocg);
2263 			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2264 							 usage, &now);
2265 			/*
2266 			 * Donation calculation assumes hweight_after_donation
2267 			 * to be positive, a condition that a donor w/ hwa < 2
2268 			 * can't meet. Don't bother with donation if hwa is
2269 			 * below 2. It's not gonna make a meaningful difference
2270 			 * anyway.
2271 			 */
2272 			if (new_hwi < hwm && hwa >= 2) {
2273 				iocg->hweight_donating = hwa;
2274 				iocg->hweight_after_donation = new_hwi;
2275 				list_add(&iocg->surplus_list, &surpluses);
2276 			} else if (!iocg->abs_vdebt) {
2277 				/*
2278 				 * @iocg doesn't have enough to donate. Reset
2279 				 * its inuse to active.
2280 				 *
2281 				 * Don't reset debtors as their inuse's are
2282 				 * owned by debt handling. This shouldn't affect
2283 				 * donation calculuation in any meaningful way
2284 				 * as @iocg doesn't have a meaningful amount of
2285 				 * share anyway.
2286 				 */
2287 				TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2288 						iocg->inuse, iocg->active,
2289 						iocg->hweight_inuse, new_hwi);
2290 
2291 				__propagate_weights(iocg, iocg->active,
2292 						    iocg->active, true, &now);
2293 				nr_shortages++;
2294 			}
2295 		} else {
2296 			/* genuinely short on vtime */
2297 			nr_shortages++;
2298 		}
2299 	}
2300 
2301 	if (!list_empty(&surpluses) && nr_shortages)
2302 		transfer_surpluses(&surpluses, &now);
2303 
2304 	commit_weights(ioc);
2305 
2306 	/* surplus list should be dissolved after use */
2307 	list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2308 		list_del_init(&iocg->surplus_list);
2309 
2310 	/*
2311 	 * If q is getting clogged or we're missing too much, we're issuing
2312 	 * too much IO and should lower vtime rate.  If we're not missing
2313 	 * and experiencing shortages but not surpluses, we're too stingy
2314 	 * and should increase vtime rate.
2315 	 */
2316 	prev_busy_level = ioc->busy_level;
2317 	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2318 	    missed_ppm[READ] > ppm_rthr ||
2319 	    missed_ppm[WRITE] > ppm_wthr) {
2320 		/* clearly missing QoS targets, slow down vrate */
2321 		ioc->busy_level = max(ioc->busy_level, 0);
2322 		ioc->busy_level++;
2323 	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2324 		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2325 		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2326 		/* QoS targets are being met with >25% margin */
2327 		if (nr_shortages) {
2328 			/*
2329 			 * We're throttling while the device has spare
2330 			 * capacity.  If vrate was being slowed down, stop.
2331 			 */
2332 			ioc->busy_level = min(ioc->busy_level, 0);
2333 
2334 			/*
2335 			 * If there are IOs spanning multiple periods, wait
2336 			 * them out before pushing the device harder.
2337 			 */
2338 			if (!nr_lagging)
2339 				ioc->busy_level--;
2340 		} else {
2341 			/*
2342 			 * Nobody is being throttled and the users aren't
2343 			 * issuing enough IOs to saturate the device.  We
2344 			 * simply don't know how close the device is to
2345 			 * saturation.  Coast.
2346 			 */
2347 			ioc->busy_level = 0;
2348 		}
2349 	} else {
2350 		/* inside the hysterisis margin, we're good */
2351 		ioc->busy_level = 0;
2352 	}
2353 
2354 	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2355 
2356 	if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
2357 		u64 vrate = ioc->vtime_base_rate;
2358 		u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
2359 
2360 		/* rq_wait signal is always reliable, ignore user vrate_min */
2361 		if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
2362 			vrate_min = VRATE_MIN;
2363 
2364 		/*
2365 		 * If vrate is out of bounds, apply clamp gradually as the
2366 		 * bounds can change abruptly.  Otherwise, apply busy_level
2367 		 * based adjustment.
2368 		 */
2369 		if (vrate < vrate_min) {
2370 			vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
2371 					  100);
2372 			vrate = min(vrate, vrate_min);
2373 		} else if (vrate > vrate_max) {
2374 			vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
2375 					  100);
2376 			vrate = max(vrate, vrate_max);
2377 		} else {
2378 			int idx = min_t(int, abs(ioc->busy_level),
2379 					ARRAY_SIZE(vrate_adj_pct) - 1);
2380 			u32 adj_pct = vrate_adj_pct[idx];
2381 
2382 			if (ioc->busy_level > 0)
2383 				adj_pct = 100 - adj_pct;
2384 			else
2385 				adj_pct = 100 + adj_pct;
2386 
2387 			vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
2388 				      vrate_min, vrate_max);
2389 		}
2390 
2391 		trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
2392 					   nr_lagging, nr_shortages);
2393 
2394 		ioc->vtime_base_rate = vrate;
2395 		ioc_refresh_margins(ioc);
2396 	} else if (ioc->busy_level != prev_busy_level || nr_lagging) {
2397 		trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
2398 					   missed_ppm, rq_wait_pct, nr_lagging,
2399 					   nr_shortages);
2400 	}
2401 
2402 	ioc_refresh_params(ioc, false);
2403 
2404 	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2405 
2406 	/*
2407 	 * This period is done.  Move onto the next one.  If nothing's
2408 	 * going on with the device, stop the timer.
2409 	 */
2410 	atomic64_inc(&ioc->cur_period);
2411 
2412 	if (ioc->running != IOC_STOP) {
2413 		if (!list_empty(&ioc->active_iocgs)) {
2414 			ioc_start_period(ioc, &now);
2415 		} else {
2416 			ioc->busy_level = 0;
2417 			ioc->vtime_err = 0;
2418 			ioc->running = IOC_IDLE;
2419 		}
2420 
2421 		ioc_refresh_vrate(ioc, &now);
2422 	}
2423 
2424 	spin_unlock_irq(&ioc->lock);
2425 }
2426 
adjust_inuse_and_calc_cost(struct ioc_gq * iocg,u64 vtime,u64 abs_cost,struct ioc_now * now)2427 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2428 				      u64 abs_cost, struct ioc_now *now)
2429 {
2430 	struct ioc *ioc = iocg->ioc;
2431 	struct ioc_margins *margins = &ioc->margins;
2432 	u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2433 	u32 hwi, adj_step;
2434 	s64 margin;
2435 	u64 cost, new_inuse;
2436 	unsigned long flags;
2437 
2438 	current_hweight(iocg, NULL, &hwi);
2439 	old_hwi = hwi;
2440 	cost = abs_cost_to_cost(abs_cost, hwi);
2441 	margin = now->vnow - vtime - cost;
2442 
2443 	/* debt handling owns inuse for debtors */
2444 	if (iocg->abs_vdebt)
2445 		return cost;
2446 
2447 	/*
2448 	 * We only increase inuse during period and do so iff the margin has
2449 	 * deteriorated since the previous adjustment.
2450 	 */
2451 	if (margin >= iocg->saved_margin || margin >= margins->low ||
2452 	    iocg->inuse == iocg->active)
2453 		return cost;
2454 
2455 	spin_lock_irqsave(&ioc->lock, flags);
2456 
2457 	/* we own inuse only when @iocg is in the normal active state */
2458 	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2459 		spin_unlock_irqrestore(&ioc->lock, flags);
2460 		return cost;
2461 	}
2462 
2463 	/*
2464 	 * Bump up inuse till @abs_cost fits in the existing budget.
2465 	 * adj_step must be determined after acquiring ioc->lock - we might
2466 	 * have raced and lost to another thread for activation and could
2467 	 * be reading 0 iocg->active before ioc->lock which will lead to
2468 	 * infinite loop.
2469 	 */
2470 	new_inuse = iocg->inuse;
2471 	adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2472 	do {
2473 		new_inuse = new_inuse + adj_step;
2474 		propagate_weights(iocg, iocg->active, new_inuse, true, now);
2475 		current_hweight(iocg, NULL, &hwi);
2476 		cost = abs_cost_to_cost(abs_cost, hwi);
2477 	} while (time_after64(vtime + cost, now->vnow) &&
2478 		 iocg->inuse != iocg->active);
2479 
2480 	spin_unlock_irqrestore(&ioc->lock, flags);
2481 
2482 	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2483 			old_inuse, iocg->inuse, old_hwi, hwi);
2484 
2485 	return cost;
2486 }
2487 
calc_vtime_cost_builtin(struct bio * bio,struct ioc_gq * iocg,bool is_merge,u64 * costp)2488 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2489 				    bool is_merge, u64 *costp)
2490 {
2491 	struct ioc *ioc = iocg->ioc;
2492 	u64 coef_seqio, coef_randio, coef_page;
2493 	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2494 	u64 seek_pages = 0;
2495 	u64 cost = 0;
2496 
2497 	switch (bio_op(bio)) {
2498 	case REQ_OP_READ:
2499 		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
2500 		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
2501 		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
2502 		break;
2503 	case REQ_OP_WRITE:
2504 		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
2505 		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
2506 		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
2507 		break;
2508 	default:
2509 		goto out;
2510 	}
2511 
2512 	if (iocg->cursor) {
2513 		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2514 		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2515 	}
2516 
2517 	if (!is_merge) {
2518 		if (seek_pages > LCOEF_RANDIO_PAGES) {
2519 			cost += coef_randio;
2520 		} else {
2521 			cost += coef_seqio;
2522 		}
2523 	}
2524 	cost += pages * coef_page;
2525 out:
2526 	*costp = cost;
2527 }
2528 
calc_vtime_cost(struct bio * bio,struct ioc_gq * iocg,bool is_merge)2529 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2530 {
2531 	u64 cost;
2532 
2533 	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2534 	return cost;
2535 }
2536 
calc_size_vtime_cost_builtin(struct request * rq,struct ioc * ioc,u64 * costp)2537 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2538 					 u64 *costp)
2539 {
2540 	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2541 
2542 	switch (req_op(rq)) {
2543 	case REQ_OP_READ:
2544 		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2545 		break;
2546 	case REQ_OP_WRITE:
2547 		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2548 		break;
2549 	default:
2550 		*costp = 0;
2551 	}
2552 }
2553 
calc_size_vtime_cost(struct request * rq,struct ioc * ioc)2554 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2555 {
2556 	u64 cost;
2557 
2558 	calc_size_vtime_cost_builtin(rq, ioc, &cost);
2559 	return cost;
2560 }
2561 
ioc_rqos_throttle(struct rq_qos * rqos,struct bio * bio)2562 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2563 {
2564 	struct blkcg_gq *blkg = bio->bi_blkg;
2565 	struct ioc *ioc = rqos_to_ioc(rqos);
2566 	struct ioc_gq *iocg = blkg_to_iocg(blkg);
2567 	struct ioc_now now;
2568 	struct iocg_wait wait;
2569 	u64 abs_cost, cost, vtime;
2570 	bool use_debt, ioc_locked;
2571 	unsigned long flags;
2572 
2573 	/* bypass IOs if disabled, still initializing, or for root cgroup */
2574 	if (!ioc->enabled || !iocg || !iocg->level)
2575 		return;
2576 
2577 	/* calculate the absolute vtime cost */
2578 	abs_cost = calc_vtime_cost(bio, iocg, false);
2579 	if (!abs_cost)
2580 		return;
2581 
2582 	if (!iocg_activate(iocg, &now))
2583 		return;
2584 
2585 	iocg->cursor = bio_end_sector(bio);
2586 	vtime = atomic64_read(&iocg->vtime);
2587 	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2588 
2589 	/*
2590 	 * If no one's waiting and within budget, issue right away.  The
2591 	 * tests are racy but the races aren't systemic - we only miss once
2592 	 * in a while which is fine.
2593 	 */
2594 	if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2595 	    time_before_eq64(vtime + cost, now.vnow)) {
2596 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2597 		return;
2598 	}
2599 
2600 	/*
2601 	 * We're over budget. This can be handled in two ways. IOs which may
2602 	 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2603 	 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2604 	 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2605 	 * whether debt handling is needed and acquire locks accordingly.
2606 	 */
2607 	use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2608 	ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2609 retry_lock:
2610 	iocg_lock(iocg, ioc_locked, &flags);
2611 
2612 	/*
2613 	 * @iocg must stay activated for debt and waitq handling. Deactivation
2614 	 * is synchronized against both ioc->lock and waitq.lock and we won't
2615 	 * get deactivated as long as we're waiting or has debt, so we're good
2616 	 * if we're activated here. In the unlikely cases that we aren't, just
2617 	 * issue the IO.
2618 	 */
2619 	if (unlikely(list_empty(&iocg->active_list))) {
2620 		iocg_unlock(iocg, ioc_locked, &flags);
2621 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2622 		return;
2623 	}
2624 
2625 	/*
2626 	 * We're over budget. If @bio has to be issued regardless, remember
2627 	 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2628 	 * off the debt before waking more IOs.
2629 	 *
2630 	 * This way, the debt is continuously paid off each period with the
2631 	 * actual budget available to the cgroup. If we just wound vtime, we
2632 	 * would incorrectly use the current hw_inuse for the entire amount
2633 	 * which, for example, can lead to the cgroup staying blocked for a
2634 	 * long time even with substantially raised hw_inuse.
2635 	 *
2636 	 * An iocg with vdebt should stay online so that the timer can keep
2637 	 * deducting its vdebt and [de]activate use_delay mechanism
2638 	 * accordingly. We don't want to race against the timer trying to
2639 	 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2640 	 * penalizing the cgroup and its descendants.
2641 	 */
2642 	if (use_debt) {
2643 		iocg_incur_debt(iocg, abs_cost, &now);
2644 		if (iocg_kick_delay(iocg, &now))
2645 			blkcg_schedule_throttle(rqos->q,
2646 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2647 		iocg_unlock(iocg, ioc_locked, &flags);
2648 		return;
2649 	}
2650 
2651 	/* guarantee that iocgs w/ waiters have maximum inuse */
2652 	if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2653 		if (!ioc_locked) {
2654 			iocg_unlock(iocg, false, &flags);
2655 			ioc_locked = true;
2656 			goto retry_lock;
2657 		}
2658 		propagate_weights(iocg, iocg->active, iocg->active, true,
2659 				  &now);
2660 	}
2661 
2662 	/*
2663 	 * Append self to the waitq and schedule the wakeup timer if we're
2664 	 * the first waiter.  The timer duration is calculated based on the
2665 	 * current vrate.  vtime and hweight changes can make it too short
2666 	 * or too long.  Each wait entry records the absolute cost it's
2667 	 * waiting for to allow re-evaluation using a custom wait entry.
2668 	 *
2669 	 * If too short, the timer simply reschedules itself.  If too long,
2670 	 * the period timer will notice and trigger wakeups.
2671 	 *
2672 	 * All waiters are on iocg->waitq and the wait states are
2673 	 * synchronized using waitq.lock.
2674 	 */
2675 	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2676 	wait.wait.private = current;
2677 	wait.bio = bio;
2678 	wait.abs_cost = abs_cost;
2679 	wait.committed = false;	/* will be set true by waker */
2680 
2681 	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2682 	iocg_kick_waitq(iocg, ioc_locked, &now);
2683 
2684 	iocg_unlock(iocg, ioc_locked, &flags);
2685 
2686 	while (true) {
2687 		set_current_state(TASK_UNINTERRUPTIBLE);
2688 		if (wait.committed)
2689 			break;
2690 		io_schedule();
2691 	}
2692 
2693 	/* waker already committed us, proceed */
2694 	finish_wait(&iocg->waitq, &wait.wait);
2695 }
2696 
ioc_rqos_merge(struct rq_qos * rqos,struct request * rq,struct bio * bio)2697 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2698 			   struct bio *bio)
2699 {
2700 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2701 	struct ioc *ioc = rqos_to_ioc(rqos);
2702 	sector_t bio_end = bio_end_sector(bio);
2703 	struct ioc_now now;
2704 	u64 vtime, abs_cost, cost;
2705 	unsigned long flags;
2706 
2707 	/* bypass if disabled, still initializing, or for root cgroup */
2708 	if (!ioc->enabled || !iocg || !iocg->level)
2709 		return;
2710 
2711 	abs_cost = calc_vtime_cost(bio, iocg, true);
2712 	if (!abs_cost)
2713 		return;
2714 
2715 	ioc_now(ioc, &now);
2716 
2717 	vtime = atomic64_read(&iocg->vtime);
2718 	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2719 
2720 	/* update cursor if backmerging into the request at the cursor */
2721 	if (blk_rq_pos(rq) < bio_end &&
2722 	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2723 		iocg->cursor = bio_end;
2724 
2725 	/*
2726 	 * Charge if there's enough vtime budget and the existing request has
2727 	 * cost assigned.
2728 	 */
2729 	if (rq->bio && rq->bio->bi_iocost_cost &&
2730 	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2731 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2732 		return;
2733 	}
2734 
2735 	/*
2736 	 * Otherwise, account it as debt if @iocg is online, which it should
2737 	 * be for the vast majority of cases. See debt handling in
2738 	 * ioc_rqos_throttle() for details.
2739 	 */
2740 	spin_lock_irqsave(&ioc->lock, flags);
2741 	spin_lock(&iocg->waitq.lock);
2742 
2743 	if (likely(!list_empty(&iocg->active_list))) {
2744 		iocg_incur_debt(iocg, abs_cost, &now);
2745 		if (iocg_kick_delay(iocg, &now))
2746 			blkcg_schedule_throttle(rqos->q,
2747 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2748 	} else {
2749 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2750 	}
2751 
2752 	spin_unlock(&iocg->waitq.lock);
2753 	spin_unlock_irqrestore(&ioc->lock, flags);
2754 }
2755 
ioc_rqos_done_bio(struct rq_qos * rqos,struct bio * bio)2756 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2757 {
2758 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2759 
2760 	if (iocg && bio->bi_iocost_cost)
2761 		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2762 }
2763 
ioc_rqos_done(struct rq_qos * rqos,struct request * rq)2764 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2765 {
2766 	struct ioc *ioc = rqos_to_ioc(rqos);
2767 	struct ioc_pcpu_stat *ccs;
2768 	u64 on_q_ns, rq_wait_ns, size_nsec;
2769 	int pidx, rw;
2770 
2771 	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2772 		return;
2773 
2774 	switch (req_op(rq) & REQ_OP_MASK) {
2775 	case REQ_OP_READ:
2776 		pidx = QOS_RLAT;
2777 		rw = READ;
2778 		break;
2779 	case REQ_OP_WRITE:
2780 		pidx = QOS_WLAT;
2781 		rw = WRITE;
2782 		break;
2783 	default:
2784 		return;
2785 	}
2786 
2787 	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2788 	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2789 	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2790 
2791 	ccs = get_cpu_ptr(ioc->pcpu_stat);
2792 
2793 	if (on_q_ns <= size_nsec ||
2794 	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2795 		local_inc(&ccs->missed[rw].nr_met);
2796 	else
2797 		local_inc(&ccs->missed[rw].nr_missed);
2798 
2799 	local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2800 
2801 	put_cpu_ptr(ccs);
2802 }
2803 
ioc_rqos_queue_depth_changed(struct rq_qos * rqos)2804 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2805 {
2806 	struct ioc *ioc = rqos_to_ioc(rqos);
2807 
2808 	spin_lock_irq(&ioc->lock);
2809 	ioc_refresh_params(ioc, false);
2810 	spin_unlock_irq(&ioc->lock);
2811 }
2812 
ioc_rqos_exit(struct rq_qos * rqos)2813 static void ioc_rqos_exit(struct rq_qos *rqos)
2814 {
2815 	struct ioc *ioc = rqos_to_ioc(rqos);
2816 
2817 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2818 
2819 	spin_lock_irq(&ioc->lock);
2820 	ioc->running = IOC_STOP;
2821 	spin_unlock_irq(&ioc->lock);
2822 
2823 	del_timer_sync(&ioc->timer);
2824 	free_percpu(ioc->pcpu_stat);
2825 	kfree(ioc);
2826 }
2827 
2828 static struct rq_qos_ops ioc_rqos_ops = {
2829 	.throttle = ioc_rqos_throttle,
2830 	.merge = ioc_rqos_merge,
2831 	.done_bio = ioc_rqos_done_bio,
2832 	.done = ioc_rqos_done,
2833 	.queue_depth_changed = ioc_rqos_queue_depth_changed,
2834 	.exit = ioc_rqos_exit,
2835 };
2836 
blk_iocost_init(struct request_queue * q)2837 static int blk_iocost_init(struct request_queue *q)
2838 {
2839 	struct ioc *ioc;
2840 	struct rq_qos *rqos;
2841 	int i, cpu, ret;
2842 
2843 	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2844 	if (!ioc)
2845 		return -ENOMEM;
2846 
2847 	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2848 	if (!ioc->pcpu_stat) {
2849 		kfree(ioc);
2850 		return -ENOMEM;
2851 	}
2852 
2853 	for_each_possible_cpu(cpu) {
2854 		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2855 
2856 		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2857 			local_set(&ccs->missed[i].nr_met, 0);
2858 			local_set(&ccs->missed[i].nr_missed, 0);
2859 		}
2860 		local64_set(&ccs->rq_wait_ns, 0);
2861 	}
2862 
2863 	rqos = &ioc->rqos;
2864 	rqos->id = RQ_QOS_COST;
2865 	rqos->ops = &ioc_rqos_ops;
2866 	rqos->q = q;
2867 
2868 	spin_lock_init(&ioc->lock);
2869 	timer_setup(&ioc->timer, ioc_timer_fn, 0);
2870 	INIT_LIST_HEAD(&ioc->active_iocgs);
2871 
2872 	ioc->running = IOC_IDLE;
2873 	ioc->vtime_base_rate = VTIME_PER_USEC;
2874 	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2875 	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2876 	ioc->period_at = ktime_to_us(ktime_get());
2877 	atomic64_set(&ioc->cur_period, 0);
2878 	atomic_set(&ioc->hweight_gen, 0);
2879 
2880 	spin_lock_irq(&ioc->lock);
2881 	ioc->autop_idx = AUTOP_INVALID;
2882 	ioc_refresh_params(ioc, true);
2883 	spin_unlock_irq(&ioc->lock);
2884 
2885 	/*
2886 	 * rqos must be added before activation to allow iocg_pd_init() to
2887 	 * lookup the ioc from q. This means that the rqos methods may get
2888 	 * called before policy activation completion, can't assume that the
2889 	 * target bio has an iocg associated and need to test for NULL iocg.
2890 	 */
2891 	rq_qos_add(q, rqos);
2892 	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2893 	if (ret) {
2894 		rq_qos_del(q, rqos);
2895 		free_percpu(ioc->pcpu_stat);
2896 		kfree(ioc);
2897 		return ret;
2898 	}
2899 	return 0;
2900 }
2901 
ioc_cpd_alloc(gfp_t gfp)2902 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2903 {
2904 	struct ioc_cgrp *iocc;
2905 
2906 	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2907 	if (!iocc)
2908 		return NULL;
2909 
2910 	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2911 	return &iocc->cpd;
2912 }
2913 
ioc_cpd_free(struct blkcg_policy_data * cpd)2914 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2915 {
2916 	kfree(container_of(cpd, struct ioc_cgrp, cpd));
2917 }
2918 
ioc_pd_alloc(gfp_t gfp,struct request_queue * q,struct blkcg * blkcg)2919 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2920 					     struct blkcg *blkcg)
2921 {
2922 	int levels = blkcg->css.cgroup->level + 1;
2923 	struct ioc_gq *iocg;
2924 
2925 	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2926 	if (!iocg)
2927 		return NULL;
2928 
2929 	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2930 	if (!iocg->pcpu_stat) {
2931 		kfree(iocg);
2932 		return NULL;
2933 	}
2934 
2935 	return &iocg->pd;
2936 }
2937 
ioc_pd_init(struct blkg_policy_data * pd)2938 static void ioc_pd_init(struct blkg_policy_data *pd)
2939 {
2940 	struct ioc_gq *iocg = pd_to_iocg(pd);
2941 	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2942 	struct ioc *ioc = q_to_ioc(blkg->q);
2943 	struct ioc_now now;
2944 	struct blkcg_gq *tblkg;
2945 	unsigned long flags;
2946 
2947 	ioc_now(ioc, &now);
2948 
2949 	iocg->ioc = ioc;
2950 	atomic64_set(&iocg->vtime, now.vnow);
2951 	atomic64_set(&iocg->done_vtime, now.vnow);
2952 	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2953 	INIT_LIST_HEAD(&iocg->active_list);
2954 	INIT_LIST_HEAD(&iocg->walk_list);
2955 	INIT_LIST_HEAD(&iocg->surplus_list);
2956 	iocg->hweight_active = WEIGHT_ONE;
2957 	iocg->hweight_inuse = WEIGHT_ONE;
2958 
2959 	init_waitqueue_head(&iocg->waitq);
2960 	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2961 	iocg->waitq_timer.function = iocg_waitq_timer_fn;
2962 
2963 	iocg->level = blkg->blkcg->css.cgroup->level;
2964 
2965 	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2966 		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2967 		iocg->ancestors[tiocg->level] = tiocg;
2968 	}
2969 
2970 	spin_lock_irqsave(&ioc->lock, flags);
2971 	weight_updated(iocg, &now);
2972 	spin_unlock_irqrestore(&ioc->lock, flags);
2973 }
2974 
ioc_pd_free(struct blkg_policy_data * pd)2975 static void ioc_pd_free(struct blkg_policy_data *pd)
2976 {
2977 	struct ioc_gq *iocg = pd_to_iocg(pd);
2978 	struct ioc *ioc = iocg->ioc;
2979 	unsigned long flags;
2980 
2981 	if (ioc) {
2982 		spin_lock_irqsave(&ioc->lock, flags);
2983 
2984 		if (!list_empty(&iocg->active_list)) {
2985 			struct ioc_now now;
2986 
2987 			ioc_now(ioc, &now);
2988 			propagate_weights(iocg, 0, 0, false, &now);
2989 			list_del_init(&iocg->active_list);
2990 		}
2991 
2992 		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2993 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2994 
2995 		spin_unlock_irqrestore(&ioc->lock, flags);
2996 
2997 		hrtimer_cancel(&iocg->waitq_timer);
2998 	}
2999 	free_percpu(iocg->pcpu_stat);
3000 	kfree(iocg);
3001 }
3002 
ioc_pd_stat(struct blkg_policy_data * pd,char * buf,size_t size)3003 static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
3004 {
3005 	struct ioc_gq *iocg = pd_to_iocg(pd);
3006 	struct ioc *ioc = iocg->ioc;
3007 	size_t pos = 0;
3008 
3009 	if (!ioc->enabled)
3010 		return 0;
3011 
3012 	if (iocg->level == 0) {
3013 		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3014 			ioc->vtime_base_rate * 10000,
3015 			VTIME_PER_USEC);
3016 		pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
3017 				  vp10k / 100, vp10k % 100);
3018 	}
3019 
3020 	pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
3021 			 iocg->last_stat.usage_us);
3022 
3023 	if (blkcg_debug_stats)
3024 		pos += scnprintf(buf + pos, size - pos,
3025 				 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3026 				 iocg->last_stat.wait_us,
3027 				 iocg->last_stat.indebt_us,
3028 				 iocg->last_stat.indelay_us);
3029 
3030 	return pos;
3031 }
3032 
ioc_weight_prfill(struct seq_file * sf,struct blkg_policy_data * pd,int off)3033 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3034 			     int off)
3035 {
3036 	const char *dname = blkg_dev_name(pd->blkg);
3037 	struct ioc_gq *iocg = pd_to_iocg(pd);
3038 
3039 	if (dname && iocg->cfg_weight)
3040 		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3041 	return 0;
3042 }
3043 
3044 
ioc_weight_show(struct seq_file * sf,void * v)3045 static int ioc_weight_show(struct seq_file *sf, void *v)
3046 {
3047 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3048 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3049 
3050 	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3051 	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3052 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3053 	return 0;
3054 }
3055 
ioc_weight_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)3056 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3057 				size_t nbytes, loff_t off)
3058 {
3059 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
3060 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3061 	struct blkg_conf_ctx ctx;
3062 	struct ioc_now now;
3063 	struct ioc_gq *iocg;
3064 	u32 v;
3065 	int ret;
3066 
3067 	if (!strchr(buf, ':')) {
3068 		struct blkcg_gq *blkg;
3069 
3070 		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3071 			return -EINVAL;
3072 
3073 		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3074 			return -EINVAL;
3075 
3076 		spin_lock_irq(&blkcg->lock);
3077 		iocc->dfl_weight = v * WEIGHT_ONE;
3078 		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3079 			struct ioc_gq *iocg = blkg_to_iocg(blkg);
3080 
3081 			if (iocg) {
3082 				spin_lock(&iocg->ioc->lock);
3083 				ioc_now(iocg->ioc, &now);
3084 				weight_updated(iocg, &now);
3085 				spin_unlock(&iocg->ioc->lock);
3086 			}
3087 		}
3088 		spin_unlock_irq(&blkcg->lock);
3089 
3090 		return nbytes;
3091 	}
3092 
3093 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3094 	if (ret)
3095 		return ret;
3096 
3097 	iocg = blkg_to_iocg(ctx.blkg);
3098 
3099 	if (!strncmp(ctx.body, "default", 7)) {
3100 		v = 0;
3101 	} else {
3102 		if (!sscanf(ctx.body, "%u", &v))
3103 			goto einval;
3104 		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3105 			goto einval;
3106 	}
3107 
3108 	spin_lock(&iocg->ioc->lock);
3109 	iocg->cfg_weight = v * WEIGHT_ONE;
3110 	ioc_now(iocg->ioc, &now);
3111 	weight_updated(iocg, &now);
3112 	spin_unlock(&iocg->ioc->lock);
3113 
3114 	blkg_conf_finish(&ctx);
3115 	return nbytes;
3116 
3117 einval:
3118 	blkg_conf_finish(&ctx);
3119 	return -EINVAL;
3120 }
3121 
ioc_qos_prfill(struct seq_file * sf,struct blkg_policy_data * pd,int off)3122 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3123 			  int off)
3124 {
3125 	const char *dname = blkg_dev_name(pd->blkg);
3126 	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3127 
3128 	if (!dname)
3129 		return 0;
3130 
3131 	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3132 		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3133 		   ioc->params.qos[QOS_RPPM] / 10000,
3134 		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
3135 		   ioc->params.qos[QOS_RLAT],
3136 		   ioc->params.qos[QOS_WPPM] / 10000,
3137 		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
3138 		   ioc->params.qos[QOS_WLAT],
3139 		   ioc->params.qos[QOS_MIN] / 10000,
3140 		   ioc->params.qos[QOS_MIN] % 10000 / 100,
3141 		   ioc->params.qos[QOS_MAX] / 10000,
3142 		   ioc->params.qos[QOS_MAX] % 10000 / 100);
3143 	return 0;
3144 }
3145 
ioc_qos_show(struct seq_file * sf,void * v)3146 static int ioc_qos_show(struct seq_file *sf, void *v)
3147 {
3148 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3149 
3150 	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3151 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3152 	return 0;
3153 }
3154 
3155 static const match_table_t qos_ctrl_tokens = {
3156 	{ QOS_ENABLE,		"enable=%u"	},
3157 	{ QOS_CTRL,		"ctrl=%s"	},
3158 	{ NR_QOS_CTRL_PARAMS,	NULL		},
3159 };
3160 
3161 static const match_table_t qos_tokens = {
3162 	{ QOS_RPPM,		"rpct=%s"	},
3163 	{ QOS_RLAT,		"rlat=%u"	},
3164 	{ QOS_WPPM,		"wpct=%s"	},
3165 	{ QOS_WLAT,		"wlat=%u"	},
3166 	{ QOS_MIN,		"min=%s"	},
3167 	{ QOS_MAX,		"max=%s"	},
3168 	{ NR_QOS_PARAMS,	NULL		},
3169 };
3170 
ioc_qos_write(struct kernfs_open_file * of,char * input,size_t nbytes,loff_t off)3171 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3172 			     size_t nbytes, loff_t off)
3173 {
3174 	struct gendisk *disk;
3175 	struct ioc *ioc;
3176 	u32 qos[NR_QOS_PARAMS];
3177 	bool enable, user;
3178 	char *p;
3179 	int ret;
3180 
3181 	disk = blkcg_conf_get_disk(&input);
3182 	if (IS_ERR(disk))
3183 		return PTR_ERR(disk);
3184 
3185 	ioc = q_to_ioc(disk->queue);
3186 	if (!ioc) {
3187 		ret = blk_iocost_init(disk->queue);
3188 		if (ret)
3189 			goto err;
3190 		ioc = q_to_ioc(disk->queue);
3191 	}
3192 
3193 	spin_lock_irq(&ioc->lock);
3194 	memcpy(qos, ioc->params.qos, sizeof(qos));
3195 	enable = ioc->enabled;
3196 	user = ioc->user_qos_params;
3197 	spin_unlock_irq(&ioc->lock);
3198 
3199 	while ((p = strsep(&input, " \t\n"))) {
3200 		substring_t args[MAX_OPT_ARGS];
3201 		char buf[32];
3202 		int tok;
3203 		s64 v;
3204 
3205 		if (!*p)
3206 			continue;
3207 
3208 		switch (match_token(p, qos_ctrl_tokens, args)) {
3209 		case QOS_ENABLE:
3210 			match_u64(&args[0], &v);
3211 			enable = v;
3212 			continue;
3213 		case QOS_CTRL:
3214 			match_strlcpy(buf, &args[0], sizeof(buf));
3215 			if (!strcmp(buf, "auto"))
3216 				user = false;
3217 			else if (!strcmp(buf, "user"))
3218 				user = true;
3219 			else
3220 				goto einval;
3221 			continue;
3222 		}
3223 
3224 		tok = match_token(p, qos_tokens, args);
3225 		switch (tok) {
3226 		case QOS_RPPM:
3227 		case QOS_WPPM:
3228 			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3229 			    sizeof(buf))
3230 				goto einval;
3231 			if (cgroup_parse_float(buf, 2, &v))
3232 				goto einval;
3233 			if (v < 0 || v > 10000)
3234 				goto einval;
3235 			qos[tok] = v * 100;
3236 			break;
3237 		case QOS_RLAT:
3238 		case QOS_WLAT:
3239 			if (match_u64(&args[0], &v))
3240 				goto einval;
3241 			qos[tok] = v;
3242 			break;
3243 		case QOS_MIN:
3244 		case QOS_MAX:
3245 			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3246 			    sizeof(buf))
3247 				goto einval;
3248 			if (cgroup_parse_float(buf, 2, &v))
3249 				goto einval;
3250 			if (v < 0)
3251 				goto einval;
3252 			qos[tok] = clamp_t(s64, v * 100,
3253 					   VRATE_MIN_PPM, VRATE_MAX_PPM);
3254 			break;
3255 		default:
3256 			goto einval;
3257 		}
3258 		user = true;
3259 	}
3260 
3261 	if (qos[QOS_MIN] > qos[QOS_MAX])
3262 		goto einval;
3263 
3264 	spin_lock_irq(&ioc->lock);
3265 
3266 	if (enable) {
3267 		blk_stat_enable_accounting(ioc->rqos.q);
3268 		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3269 		ioc->enabled = true;
3270 	} else {
3271 		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3272 		ioc->enabled = false;
3273 	}
3274 
3275 	if (user) {
3276 		memcpy(ioc->params.qos, qos, sizeof(qos));
3277 		ioc->user_qos_params = true;
3278 	} else {
3279 		ioc->user_qos_params = false;
3280 	}
3281 
3282 	ioc_refresh_params(ioc, true);
3283 	spin_unlock_irq(&ioc->lock);
3284 
3285 	put_disk_and_module(disk);
3286 	return nbytes;
3287 einval:
3288 	ret = -EINVAL;
3289 err:
3290 	put_disk_and_module(disk);
3291 	return ret;
3292 }
3293 
ioc_cost_model_prfill(struct seq_file * sf,struct blkg_policy_data * pd,int off)3294 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3295 				 struct blkg_policy_data *pd, int off)
3296 {
3297 	const char *dname = blkg_dev_name(pd->blkg);
3298 	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3299 	u64 *u = ioc->params.i_lcoefs;
3300 
3301 	if (!dname)
3302 		return 0;
3303 
3304 	seq_printf(sf, "%s ctrl=%s model=linear "
3305 		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
3306 		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3307 		   dname, ioc->user_cost_model ? "user" : "auto",
3308 		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3309 		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3310 	return 0;
3311 }
3312 
ioc_cost_model_show(struct seq_file * sf,void * v)3313 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3314 {
3315 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3316 
3317 	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3318 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3319 	return 0;
3320 }
3321 
3322 static const match_table_t cost_ctrl_tokens = {
3323 	{ COST_CTRL,		"ctrl=%s"	},
3324 	{ COST_MODEL,		"model=%s"	},
3325 	{ NR_COST_CTRL_PARAMS,	NULL		},
3326 };
3327 
3328 static const match_table_t i_lcoef_tokens = {
3329 	{ I_LCOEF_RBPS,		"rbps=%u"	},
3330 	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
3331 	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
3332 	{ I_LCOEF_WBPS,		"wbps=%u"	},
3333 	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
3334 	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
3335 	{ NR_I_LCOEFS,		NULL		},
3336 };
3337 
ioc_cost_model_write(struct kernfs_open_file * of,char * input,size_t nbytes,loff_t off)3338 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3339 				    size_t nbytes, loff_t off)
3340 {
3341 	struct gendisk *disk;
3342 	struct ioc *ioc;
3343 	u64 u[NR_I_LCOEFS];
3344 	bool user;
3345 	char *p;
3346 	int ret;
3347 
3348 	disk = blkcg_conf_get_disk(&input);
3349 	if (IS_ERR(disk))
3350 		return PTR_ERR(disk);
3351 
3352 	ioc = q_to_ioc(disk->queue);
3353 	if (!ioc) {
3354 		ret = blk_iocost_init(disk->queue);
3355 		if (ret)
3356 			goto err;
3357 		ioc = q_to_ioc(disk->queue);
3358 	}
3359 
3360 	spin_lock_irq(&ioc->lock);
3361 	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3362 	user = ioc->user_cost_model;
3363 	spin_unlock_irq(&ioc->lock);
3364 
3365 	while ((p = strsep(&input, " \t\n"))) {
3366 		substring_t args[MAX_OPT_ARGS];
3367 		char buf[32];
3368 		int tok;
3369 		u64 v;
3370 
3371 		if (!*p)
3372 			continue;
3373 
3374 		switch (match_token(p, cost_ctrl_tokens, args)) {
3375 		case COST_CTRL:
3376 			match_strlcpy(buf, &args[0], sizeof(buf));
3377 			if (!strcmp(buf, "auto"))
3378 				user = false;
3379 			else if (!strcmp(buf, "user"))
3380 				user = true;
3381 			else
3382 				goto einval;
3383 			continue;
3384 		case COST_MODEL:
3385 			match_strlcpy(buf, &args[0], sizeof(buf));
3386 			if (strcmp(buf, "linear"))
3387 				goto einval;
3388 			continue;
3389 		}
3390 
3391 		tok = match_token(p, i_lcoef_tokens, args);
3392 		if (tok == NR_I_LCOEFS)
3393 			goto einval;
3394 		if (match_u64(&args[0], &v))
3395 			goto einval;
3396 		u[tok] = v;
3397 		user = true;
3398 	}
3399 
3400 	spin_lock_irq(&ioc->lock);
3401 	if (user) {
3402 		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3403 		ioc->user_cost_model = true;
3404 	} else {
3405 		ioc->user_cost_model = false;
3406 	}
3407 	ioc_refresh_params(ioc, true);
3408 	spin_unlock_irq(&ioc->lock);
3409 
3410 	put_disk_and_module(disk);
3411 	return nbytes;
3412 
3413 einval:
3414 	ret = -EINVAL;
3415 err:
3416 	put_disk_and_module(disk);
3417 	return ret;
3418 }
3419 
3420 static struct cftype ioc_files[] = {
3421 	{
3422 		.name = "weight",
3423 		.flags = CFTYPE_NOT_ON_ROOT,
3424 		.seq_show = ioc_weight_show,
3425 		.write = ioc_weight_write,
3426 	},
3427 	{
3428 		.name = "cost.qos",
3429 		.flags = CFTYPE_ONLY_ON_ROOT,
3430 		.seq_show = ioc_qos_show,
3431 		.write = ioc_qos_write,
3432 	},
3433 	{
3434 		.name = "cost.model",
3435 		.flags = CFTYPE_ONLY_ON_ROOT,
3436 		.seq_show = ioc_cost_model_show,
3437 		.write = ioc_cost_model_write,
3438 	},
3439 	{}
3440 };
3441 
3442 static struct blkcg_policy blkcg_policy_iocost = {
3443 	.dfl_cftypes	= ioc_files,
3444 	.cpd_alloc_fn	= ioc_cpd_alloc,
3445 	.cpd_free_fn	= ioc_cpd_free,
3446 	.pd_alloc_fn	= ioc_pd_alloc,
3447 	.pd_init_fn	= ioc_pd_init,
3448 	.pd_free_fn	= ioc_pd_free,
3449 	.pd_stat_fn	= ioc_pd_stat,
3450 };
3451 
ioc_init(void)3452 static int __init ioc_init(void)
3453 {
3454 	return blkcg_policy_register(&blkcg_policy_iocost);
3455 }
3456 
ioc_exit(void)3457 static void __exit ioc_exit(void)
3458 {
3459 	blkcg_policy_unregister(&blkcg_policy_iocost);
3460 }
3461 
3462 module_init(ioc_init);
3463 module_exit(ioc_exit);
3464