xref: /OK3568_Linux_fs/kernel/block/blk-iocost.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * IO cost model based controller.
4  *
5  * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6  * Copyright (C) 2019 Andy Newell <newella@fb.com>
7  * Copyright (C) 2019 Facebook
8  *
9  * One challenge of controlling IO resources is the lack of trivially
10  * observable cost metric.  This is distinguished from CPU and memory where
11  * wallclock time and the number of bytes can serve as accurate enough
12  * approximations.
13  *
14  * Bandwidth and iops are the most commonly used metrics for IO devices but
15  * depending on the type and specifics of the device, different IO patterns
16  * easily lead to multiple orders of magnitude variations rendering them
17  * useless for the purpose of IO capacity distribution.  While on-device
18  * time, with a lot of crutches, could serve as a useful approximation for
19  * non-queued rotational devices, this is no longer viable with modern
20  * devices, even the rotational ones.
21  *
22  * While there is no cost metric we can trivially observe, it isn't a
23  * complete mystery.  For example, on a rotational device, seek cost
24  * dominates while a contiguous transfer contributes a smaller amount
25  * proportional to the size.  If we can characterize at least the relative
26  * costs of these different types of IOs, it should be possible to
27  * implement a reasonable work-conserving proportional IO resource
28  * distribution.
29  *
30  * 1. IO Cost Model
31  *
32  * IO cost model estimates the cost of an IO given its basic parameters and
33  * history (e.g. the end sector of the last IO).  The cost is measured in
34  * device time.  If a given IO is estimated to cost 10ms, the device should
35  * be able to process ~100 of those IOs in a second.
36  *
37  * Currently, there's only one builtin cost model - linear.  Each IO is
38  * classified as sequential or random and given a base cost accordingly.
39  * On top of that, a size cost proportional to the length of the IO is
40  * added.  While simple, this model captures the operational
41  * characteristics of a wide varienty of devices well enough.  Default
42  * characteristics of a wide variety of devices well enough.  Default
43  * parameters for several different classes of devices are provided and the
44  * /sys/fs/cgroup/io.cost.model.
45  *
46  * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47  * device-specific coefficients.
48  *
49  * 2. Control Strategy
50  *
51  * The device virtual time (vtime) is used as the primary control metric.
52  * The control strategy is composed of the following three parts.
53  *
54  * 2-1. Vtime Distribution
55  *
56  * When a cgroup becomes active in terms of IOs, its hierarchical share is
57  * calculated.  Please consider the following hierarchy where the numbers
58  * inside parentheses denote the configured weights.
59  *
60  *           root
61  *         /       \
62  *      A (w:100)  B (w:300)
63  *      /       \
64  *  A0 (w:100)  A1 (w:100)
65  *
66  * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67  * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
68  * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69  * 12.5% each.  The distribution mechanism only cares about these flattened
70  * shares.  They're called hweights (hierarchical weights) and always add
71  * up to 1 (WEIGHT_ONE).
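 *
 * To make the arithmetic concrete, a cgroup's hweight is simply the
 * product of its share at each level.  For the example hierarchy with
 * all three leaf cgroups active:
 *
 *   hweight(A0) = 100/(100+300) * 100/(100+100) = 12.5%
 *   hweight(B)  = 300/(100+300)                 = 75%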
72  *
73  * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74  * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75  * against the device vtime - an IO which takes 10ms on the underlying
76  * device is considered to take 80ms on A0.
77  *
78  * This constitutes the basis of IO capacity distribution.  Each cgroup's
79  * vtime is running at a rate determined by its hweight.  A cgroup tracks
80  * the vtime consumed by past IOs and can issue a new IO iff doing so
81  * wouldn't outrun the current device vtime.  Otherwise, the IO is
82  * suspended until the vtime has progressed enough to cover it.
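 *
 * A minimal sketch of the issue-time check, ignoring the debt and margin
 * handling which the real code layers on top:
 *
 *   if (iocg_vtime + cost <= device_vnow)
 *           issue the bio and charge cost to iocg_vtime;
 *   else
 *           put the issuer to sleep until device_vnow catches up;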
83  *
84  * 2-2. Vrate Adjustment
85  *
86  * It's unrealistic to expect the cost model to be perfect.  There are too
87  * many devices and even on the same device the overall performance
88  * fluctuates depending on numerous factors such as IO mixture and device
89  * internal garbage collection.  The controller needs to adapt dynamically.
90  *
91  * This is achieved by adjusting the overall IO rate according to how busy
92  * the device is.  If the device becomes overloaded, we're sending down too
93  * many IOs and should generally slow down.  If there are waiting issuers
94  * but the device isn't saturated, we're issuing too few and should
95  * generally speed up.
96  *
97  * To slow down, we lower the vrate - the rate at which the device vtime
98  * passes compared to the wall clock.  For example, if the vtime is running
99  * at the vrate of 75%, all cgroups added up would only be able to issue
100  * 750ms worth of IOs per second, and vice-versa for speeding up.
101  *
102  * Device busyness is determined using two criteria - rq wait and
103  * completion latencies.
104  *
105  * When a device gets saturated, the on-device and then the request queues
106  * fill up and a bio which is ready to be issued has to wait for a request
107  * to become available.  When this delay becomes noticeable, it's a clear
108  * indication that the device is saturated and we lower the vrate.  This
109  * saturation signal is fairly conservative as it only triggers when both
110  * hardware and software queues are filled up, and is used as the default
111  * busy signal.
112  *
113  * As devices can have deep queues and be unfair in how the queued commands
114  * are executed, solely depending on rq wait may not result in satisfactory
115  * control quality.  For a better control quality, completion latency QoS
116  * parameters can be configured so that the device is considered saturated
117  * if N'th percentile completion latency rises above the set point.
118  *
119  * The completion latency requirements are a function of both the
120  * underlying device characteristics and the desired IO latency quality of
121  * service.  There is an inherent trade-off - the tighter the latency QoS,
122  * the higher the bandwidth lossage.  Latency QoS is disabled by default
123  * and can be set through /sys/fs/cgroup/io.cost.qos.
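 *
 * For example, a configuration line along the following lines (see
 * Documentation/admin-guide/cgroup-v2.rst for the exact syntax) enables
 * the controller on device 8:16 and declares it saturated whenever the
 * 95th percentile read or write completion latency exceeds 75ms or 150ms
 * respectively:
 *
 *   8:16 enable=1 ctrl=user rpct=95.00 rlat=75000 wpct=95.00 wlat=150000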
124  *
125  * 2-3. Work Conservation
126  *
127  * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
128  * periodically while B is sending out enough parallel IOs to saturate the
129  * device on its own.  Let's say A's usage amounts to 100ms worth of IO
130  * cost per second, i.e., 10% of the device capacity.  The naive
131  * distribution of half and half would lead to 60% utilization of the
132  * device, a significant reduction in the total amount of work done
133  * compared to free-for-all competition.  This is too high a cost to pay
134  * for IO control.
135  *
136  * To conserve the total amount of work done, we keep track of how much
137  * each active cgroup is actually using and yield part of its weight if
138  * there are other cgroups which can make use of it.  In the above case,
139  * A's weight will be lowered so that it hovers above the actual usage and
140  * B would be able to use the rest.
141  *
142  * As we don't want to penalize a cgroup for donating its weight, the
143  * surplus weight adjustment factors in a margin and has an immediate
144  * snapback mechanism in case the cgroup needs more IO vtime for itself.
145  *
146  * Note that adjusting down surplus weights has the same effects as
147  * accelerating vtime for other cgroups and work conservation can also be
148  * implemented by adjusting vrate dynamically.  However, working out who can
149  * donate and who should take back how much requires hweight propagation
150  * anyway, which makes it easier to implement and understand as a separate
151  * mechanism.
152  *
153  * 3. Monitoring
154  *
155  * Instead of debugfs or other clumsy monitoring mechanisms, this
156  * controller uses a drgn based monitoring script -
157  * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
158  * https://github.com/osandov/drgn.  The output looks like the following.
159  *
160  *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161  *                 active      weight      hweight% inflt% dbt  delay usages%
162  *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
163  *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
164  *
165  * - per	: Timer period
166  * - cur_per	: Internal wall and device vtime clock
167  * - vrate	: Device virtual time rate against wall clock
168  * - weight	: Surplus-adjusted and configured weights
169  * - hweight	: Surplus-adjusted and configured hierarchical weights
170  * - inflt	: The percentage of in-flight IO cost at the end of last period
171  * - dbt	: Outstanding debt
 * - delay	: Deferred issuer delay induction level and duration
172  * - usages	: Usage history
173  */
174 
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <linux/blk-cgroup.h>
182 #include <asm/local.h>
183 #include <asm/local64.h>
184 #include "blk-rq-qos.h"
185 #include "blk-stat.h"
186 #include "blk-wbt.h"
187 
188 #ifdef CONFIG_TRACEPOINTS
189 
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194 
195 #define TRACE_IOCG_PATH(type, iocg, ...)					\
196 	do {									\
197 		unsigned long flags;						\
198 		if (trace_iocost_##type##_enabled()) {				\
199 			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
200 			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
201 				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
202 			trace_iocost_##type(iocg, trace_iocg_path,		\
203 					      ##__VA_ARGS__);			\
204 			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
205 		}								\
206 	} while (0)
207 
208 #else	/* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
210 #endif	/* CONFIG_TRACEPOINTS */
211 
212 enum {
213 	MILLION			= 1000000,
214 
215 	/* timer period is calculated from latency requirements, bound it */
216 	MIN_PERIOD		= USEC_PER_MSEC,
217 	MAX_PERIOD		= USEC_PER_SEC,
218 
219 	/*
220 	 * iocg->vtime is targeted at 50% behind the device vtime, which
221 	 * serves as its IO credit buffer.  Surplus weight adjustment is
222 	 * immediately canceled if the vtime margin runs below 10%.
223 	 */
224 	MARGIN_MIN_PCT		= 10,
225 	MARGIN_LOW_PCT		= 20,
226 	MARGIN_TARGET_PCT	= 50,
227 
228 	INUSE_ADJ_STEP_PCT	= 25,
229 
230 	/* Have some play in timer operations */
231 	TIMER_SLACK_PCT		= 1,
232 
233 	/* 1/64k is granular enough and can easily be handled w/ u32 */
234 	WEIGHT_ONE		= 1 << 16,
235 
236 	/*
237 	 * As vtime is used to calculate the cost of each IO, it needs to
238 	 * be fairly high precision.  For example, it should be able to
239 	 * represent the cost of a single page worth of discard with
240 	 * sufficient accuracy.  At the same time, it should be able to
241 	 * represent reasonably long enough durations to be useful and
242 	 * convenient during operation.
243 	 *
244 	 * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
245 	 * granularity and days of wrap-around time even at extreme vrates.
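	 *
	 * For scale, VTIME_PER_USEC works out to roughly 137,439 and
	 * VTIME_PER_NSEC to roughly 137, so a single vtime unit is a small
	 * fraction of a nanosecond.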
246 	 */
247 	VTIME_PER_SEC_SHIFT	= 37,
248 	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
249 	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
250 	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,
251 
252 	/* bound vrate adjustments within two orders of magnitude */
253 	VRATE_MIN_PPM		= 10000,	/* 1% */
254 	VRATE_MAX_PPM		= 100000000,	/* 10000% */
255 
256 	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257 	VRATE_CLAMP_ADJ_PCT	= 4,
258 
259 	/* if IOs end up waiting for requests, issue less */
260 	RQ_WAIT_BUSY_PCT	= 5,
261 
262 	/* unbusy hysteresis */
263 	UNBUSY_THR_PCT		= 75,
264 
265 	/*
266 	 * The effect of delay is indirect and non-linear and a huge amount of
267 	 * future debt can accumulate abruptly while unthrottled. Linearly scale
268 	 * up delay as debt is going up and then let it decay exponentially.
269 	 * This gives us quick ramp ups while delay is accumulating and long
270 	 * tails which can help reduce the frequency of debt explosions on
271 	 * unthrottle. The parameters are experimentally determined.
272 	 *
273 	 * The delay mechanism provides adequate protection and behavior in many
274 	 * cases. However, this is far from ideal and falls short on both
275 	 * fronts. The debtors are often throttled too harshly, costing a
276 	 * significant level of fairness and possibly total work while the
277 	 * protection against their impacts on the system can be choppy and
278 	 * unreliable.
279 	 *
280 	 * The shortcoming primarily stems from the fact that, unlike for page
281 	 * cache, the kernel doesn't have a well-defined back-pressure propagation
282 	 * mechanism and policies for anonymous memory. Fully addressing this
283 	 * issue will likely require substantial improvements in the area.
284 	 */
285 	MIN_DELAY_THR_PCT	= 500,
286 	MAX_DELAY_THR_PCT	= 25000,
287 	MIN_DELAY		= 250,
288 	MAX_DELAY		= 250 * USEC_PER_MSEC,
289 
290 	/* halve debts if avg usage over 100ms is under 50% */
291 	DFGV_USAGE_PCT		= 50,
292 	DFGV_PERIOD		= 100 * USEC_PER_MSEC,
293 
294 	/* don't let cmds which take a very long time pin lagging for too long */
295 	MAX_LAGGING_PERIODS	= 10,
296 
297 	/* switch iff the conditions are met for longer than this */
298 	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
299 
300 	/*
301 	 * Count IO size in 4k pages.  The 12-bit shift helps keep the
302 	 * size-proportional components of the cost calculation within a
303 	 * similar number of digits to the per-IO cost components.
304 	 */
305 	IOC_PAGE_SHIFT		= 12,
306 	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
307 	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,
308 
309 	/* if apart further than 16M, consider randio for linear model */
310 	LCOEF_RANDIO_PAGES	= 4096,
311 };
312 
313 enum ioc_running {
314 	IOC_IDLE,
315 	IOC_RUNNING,
316 	IOC_STOP,
317 };
318 
319 /* io.cost.qos controls including per-dev enable of the whole controller */
320 enum {
321 	QOS_ENABLE,
322 	QOS_CTRL,
323 	NR_QOS_CTRL_PARAMS,
324 };
325 
326 /* io.cost.qos params */
327 enum {
328 	QOS_RPPM,
329 	QOS_RLAT,
330 	QOS_WPPM,
331 	QOS_WLAT,
332 	QOS_MIN,
333 	QOS_MAX,
334 	NR_QOS_PARAMS,
335 };
336 
337 /* io.cost.model controls */
338 enum {
339 	COST_CTRL,
340 	COST_MODEL,
341 	NR_COST_CTRL_PARAMS,
342 };
343 
344 /* builtin linear cost model coefficients */
345 enum {
346 	I_LCOEF_RBPS,
347 	I_LCOEF_RSEQIOPS,
348 	I_LCOEF_RRANDIOPS,
349 	I_LCOEF_WBPS,
350 	I_LCOEF_WSEQIOPS,
351 	I_LCOEF_WRANDIOPS,
352 	NR_I_LCOEFS,
353 };
354 
355 enum {
356 	LCOEF_RPAGE,
357 	LCOEF_RSEQIO,
358 	LCOEF_RRANDIO,
359 	LCOEF_WPAGE,
360 	LCOEF_WSEQIO,
361 	LCOEF_WRANDIO,
362 	NR_LCOEFS,
363 };
364 
365 enum {
366 	AUTOP_INVALID,
367 	AUTOP_HDD,
368 	AUTOP_SSD_QD1,
369 	AUTOP_SSD_DFL,
370 	AUTOP_SSD_FAST,
371 };
372 
373 struct ioc_gq;
374 
375 struct ioc_params {
376 	u32				qos[NR_QOS_PARAMS];
377 	u64				i_lcoefs[NR_I_LCOEFS];
378 	u64				lcoefs[NR_LCOEFS];
379 	u32				too_fast_vrate_pct;
380 	u32				too_slow_vrate_pct;
381 };
382 
383 struct ioc_margins {
384 	s64				min;
385 	s64				low;
386 	s64				target;
387 };
388 
389 struct ioc_missed {
390 	local_t				nr_met;
391 	local_t				nr_missed;
392 	u32				last_met;
393 	u32				last_missed;
394 };
395 
396 struct ioc_pcpu_stat {
397 	struct ioc_missed		missed[2];
398 
399 	local64_t			rq_wait_ns;
400 	u64				last_rq_wait_ns;
401 };
402 
403 /* per device */
404 struct ioc {
405 	struct rq_qos			rqos;
406 
407 	bool				enabled;
408 
409 	struct ioc_params		params;
410 	struct ioc_margins		margins;
411 	u32				period_us;
412 	u32				timer_slack_ns;
413 	u64				vrate_min;
414 	u64				vrate_max;
415 
416 	spinlock_t			lock;
417 	struct timer_list		timer;
418 	struct list_head		active_iocgs;	/* active cgroups */
419 	struct ioc_pcpu_stat __percpu	*pcpu_stat;
420 
421 	enum ioc_running		running;
422 	atomic64_t			vtime_rate;
423 	u64				vtime_base_rate;
424 	s64				vtime_err;
425 
426 	seqcount_spinlock_t		period_seqcount;
427 	u64				period_at;	/* wallclock starttime */
428 	u64				period_at_vtime; /* vtime starttime */
429 
430 	atomic64_t			cur_period;	/* inc'd each period */
431 	int				busy_level;	/* saturation history */
432 
433 	bool				weights_updated;
434 	atomic_t			hweight_gen;	/* for lazy hweights */
435 
436 	/* debt forgiveness */
437 	u64				dfgv_period_at;
438 	u64				dfgv_period_rem;
439 	u64				dfgv_usage_us_sum;
440 
441 	u64				autop_too_fast_at;
442 	u64				autop_too_slow_at;
443 	int				autop_idx;
444 	bool				user_qos_params:1;
445 	bool				user_cost_model:1;
446 };
447 
448 struct iocg_pcpu_stat {
449 	local64_t			abs_vusage;
450 };
451 
452 struct iocg_stat {
453 	u64				usage_us;
454 	u64				wait_us;
455 	u64				indebt_us;
456 	u64				indelay_us;
457 };
458 
459 /* per device-cgroup pair */
460 struct ioc_gq {
461 	struct blkg_policy_data		pd;
462 	struct ioc			*ioc;
463 
464 	/*
465 	 * An iocg can get its weight from two sources - an explicit
466 	 * per-device-cgroup configuration or the default weight of the
467 	 * cgroup.  `cfg_weight` is the explicit per-device-cgroup
468 	 * configuration.  `weight` is the effective weight considering both
469 	 * sources.
470 	 *
471 	 * When an idle cgroup becomes active its `active` goes from 0 to
472 	 * `weight`.  `inuse` is the surplus adjusted active weight.
473 	 * `active` and `inuse` are used to calculate `hweight_active` and
474 	 * `hweight_inuse`.
475 	 *
476 	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
477 	 * surplus adjustments.
478 	 *
479 	 * `inuse` may be adjusted dynamically during a period. `saved_*` are used
480 	 * to determine and track adjustments.
481 	 */
482 	u32				cfg_weight;
483 	u32				weight;
484 	u32				active;
485 	u32				inuse;
486 
487 	u32				last_inuse;
488 	s64				saved_margin;
489 
490 	sector_t			cursor;		/* to detect randio */
491 
492 	/*
493 	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
494 	 * issued.  If lagging behind device vtime, the delta represents
495 	 * the currently available IO budget.  If running ahead, the
496 	 * overage.
497 	 *
498 	 * `done_vtime` is the same but progressed on completion rather
499 	 * than issue.  The delta behind `vtime` represents the cost of
500 	 * currently in-flight IOs.
501 	 */
502 	atomic64_t			vtime;
503 	atomic64_t			done_vtime;
504 	u64				abs_vdebt;
505 
506 	/* current delay in effect and when it started */
507 	u64				delay;
508 	u64				delay_at;
509 
510 	/*
511 	 * The period this iocg was last active in.  Used for deactivation
512 	 * and invalidating `vtime`.
513 	 */
514 	atomic64_t			active_period;
515 	struct list_head		active_list;
516 
517 	/* see __propagate_weights() and current_hweight() for details */
518 	u64				child_active_sum;
519 	u64				child_inuse_sum;
520 	u64				child_adjusted_sum;
521 	int				hweight_gen;
522 	u32				hweight_active;
523 	u32				hweight_inuse;
524 	u32				hweight_donating;
525 	u32				hweight_after_donation;
526 
527 	struct list_head		walk_list;
528 	struct list_head		surplus_list;
529 
530 	struct wait_queue_head		waitq;
531 	struct hrtimer			waitq_timer;
532 
533 	/* timestamp at the latest activation */
534 	u64				activated_at;
535 
536 	/* statistics */
537 	struct iocg_pcpu_stat __percpu	*pcpu_stat;
538 	struct iocg_stat		local_stat;
539 	struct iocg_stat		desc_stat;
540 	struct iocg_stat		last_stat;
541 	u64				last_stat_abs_vusage;
542 	u64				usage_delta_us;
543 	u64				wait_since;
544 	u64				indebt_since;
545 	u64				indelay_since;
546 
547 	/* this iocg's depth in the hierarchy and ancestors including self */
548 	int				level;
549 	struct ioc_gq			*ancestors[];
550 };
551 
552 /* per cgroup */
553 struct ioc_cgrp {
554 	struct blkcg_policy_data	cpd;
555 	unsigned int			dfl_weight;
556 };
557 
558 struct ioc_now {
559 	u64				now_ns;
560 	u64				now;
561 	u64				vnow;
562 	u64				vrate;
563 };
564 
565 struct iocg_wait {
566 	struct wait_queue_entry		wait;
567 	struct bio			*bio;
568 	u64				abs_cost;
569 	bool				committed;
570 };
571 
572 struct iocg_wake_ctx {
573 	struct ioc_gq			*iocg;
574 	u32				hw_inuse;
575 	s64				vbudget;
576 };
577 
578 static const struct ioc_params autop[] = {
579 	[AUTOP_HDD] = {
580 		.qos				= {
581 			[QOS_RLAT]		=        250000, /* 250ms */
582 			[QOS_WLAT]		=        250000,
583 			[QOS_MIN]		= VRATE_MIN_PPM,
584 			[QOS_MAX]		= VRATE_MAX_PPM,
585 		},
586 		.i_lcoefs			= {
587 			[I_LCOEF_RBPS]		=     174019176,
588 			[I_LCOEF_RSEQIOPS]	=         41708,
589 			[I_LCOEF_RRANDIOPS]	=           370,
590 			[I_LCOEF_WBPS]		=     178075866,
591 			[I_LCOEF_WSEQIOPS]	=         42705,
592 			[I_LCOEF_WRANDIOPS]	=           378,
593 		},
594 	},
595 	[AUTOP_SSD_QD1] = {
596 		.qos				= {
597 			[QOS_RLAT]		=         25000, /* 25ms */
598 			[QOS_WLAT]		=         25000,
599 			[QOS_MIN]		= VRATE_MIN_PPM,
600 			[QOS_MAX]		= VRATE_MAX_PPM,
601 		},
602 		.i_lcoefs			= {
603 			[I_LCOEF_RBPS]		=     245855193,
604 			[I_LCOEF_RSEQIOPS]	=         61575,
605 			[I_LCOEF_RRANDIOPS]	=          6946,
606 			[I_LCOEF_WBPS]		=     141365009,
607 			[I_LCOEF_WSEQIOPS]	=         33716,
608 			[I_LCOEF_WRANDIOPS]	=         26796,
609 		},
610 	},
611 	[AUTOP_SSD_DFL] = {
612 		.qos				= {
613 			[QOS_RLAT]		=         25000, /* 25ms */
614 			[QOS_WLAT]		=         25000,
615 			[QOS_MIN]		= VRATE_MIN_PPM,
616 			[QOS_MAX]		= VRATE_MAX_PPM,
617 		},
618 		.i_lcoefs			= {
619 			[I_LCOEF_RBPS]		=     488636629,
620 			[I_LCOEF_RSEQIOPS]	=          8932,
621 			[I_LCOEF_RRANDIOPS]	=          8518,
622 			[I_LCOEF_WBPS]		=     427891549,
623 			[I_LCOEF_WSEQIOPS]	=         28755,
624 			[I_LCOEF_WRANDIOPS]	=         21940,
625 		},
626 		.too_fast_vrate_pct		=           500,
627 	},
628 	[AUTOP_SSD_FAST] = {
629 		.qos				= {
630 			[QOS_RLAT]		=          5000, /* 5ms */
631 			[QOS_WLAT]		=          5000,
632 			[QOS_MIN]		= VRATE_MIN_PPM,
633 			[QOS_MAX]		= VRATE_MAX_PPM,
634 		},
635 		.i_lcoefs			= {
636 			[I_LCOEF_RBPS]		=    3102524156LLU,
637 			[I_LCOEF_RSEQIOPS]	=        724816,
638 			[I_LCOEF_RRANDIOPS]	=        778122,
639 			[I_LCOEF_WBPS]		=    1742780862LLU,
640 			[I_LCOEF_WSEQIOPS]	=        425702,
641 			[I_LCOEF_WRANDIOPS]	=	 443193,
642 		},
643 		.too_slow_vrate_pct		=            10,
644 	},
645 };
646 
647 /*
648  * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
649  * vtime credit shortage and down on device saturation.
650  */
651 static u32 vrate_adj_pct[] =
652 	{ 0, 0, 0, 0,
653 	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
654 	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
655 	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
656 
657 static struct blkcg_policy blkcg_policy_iocost;
658 
659 /* accessors and helpers */
660 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
661 {
662 	return container_of(rqos, struct ioc, rqos);
663 }
664 
665 static struct ioc *q_to_ioc(struct request_queue *q)
666 {
667 	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
668 }
669 
670 static const char *q_name(struct request_queue *q)
671 {
672 	if (blk_queue_registered(q))
673 		return kobject_name(q->kobj.parent);
674 	else
675 		return "<unknown>";
676 }
677 
678 static const char __maybe_unused *ioc_name(struct ioc *ioc)
679 {
680 	return q_name(ioc->rqos.q);
681 }
682 
683 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
684 {
685 	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
686 }
687 
688 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
689 {
690 	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
691 }
692 
693 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
694 {
695 	return pd_to_blkg(&iocg->pd);
696 }
697 
698 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
699 {
700 	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
701 			    struct ioc_cgrp, cpd);
702 }
703 
704 /*
705  * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
706  * weight, the more expensive each IO.  Must round up.
707  */
708 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
709 {
710 	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
711 }
712 
713 /*
714  * The inverse of abs_cost_to_cost().  Must round up.
715  */
716 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
717 {
718 	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
719 }
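
/*
 * For example, with hw_inuse at WEIGHT_ONE / 2 (a 50% hierarchical share),
 * abs_cost_to_cost(100, hw_inuse) charges 200 vtime units against the iocg
 * while cost_to_abs_cost(200, hw_inuse) maps it back to an absolute cost
 * of 100.
 */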
720 
721 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
722 			    u64 abs_cost, u64 cost)
723 {
724 	struct iocg_pcpu_stat *gcs;
725 
726 	bio->bi_iocost_cost = cost;
727 	atomic64_add(cost, &iocg->vtime);
728 
729 	gcs = get_cpu_ptr(iocg->pcpu_stat);
730 	local64_add(abs_cost, &gcs->abs_vusage);
731 	put_cpu_ptr(gcs);
732 }
733 
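/*
 * iocg_lock/unlock() nest iocg->waitq.lock inside ioc->lock when the caller
 * also needs the per-device state (e.g. to pay down debt); otherwise only
 * the per-iocg waitq lock is taken.  @flags carries the saved irq state in
 * both cases.
 */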
734 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
735 {
736 	if (lock_ioc) {
737 		spin_lock_irqsave(&iocg->ioc->lock, *flags);
738 		spin_lock(&iocg->waitq.lock);
739 	} else {
740 		spin_lock_irqsave(&iocg->waitq.lock, *flags);
741 	}
742 }
743 
744 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
745 {
746 	if (unlock_ioc) {
747 		spin_unlock(&iocg->waitq.lock);
748 		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
749 	} else {
750 		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
751 	}
752 }
753 
754 #define CREATE_TRACE_POINTS
755 #include <trace/events/iocost.h>
756 
757 static void ioc_refresh_margins(struct ioc *ioc)
758 {
759 	struct ioc_margins *margins = &ioc->margins;
760 	u32 period_us = ioc->period_us;
761 	u64 vrate = ioc->vtime_base_rate;
762 
763 	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
764 	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
765 	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
766 }
767 
768 /* latency QoS params changed, update period_us and all the dependent params */
769 static void ioc_refresh_period_us(struct ioc *ioc)
770 {
771 	u32 ppm, lat, multi, period_us;
772 
773 	lockdep_assert_held(&ioc->lock);
774 
775 	/* pick the higher latency target */
776 	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
777 		ppm = ioc->params.qos[QOS_RPPM];
778 		lat = ioc->params.qos[QOS_RLAT];
779 	} else {
780 		ppm = ioc->params.qos[QOS_WPPM];
781 		lat = ioc->params.qos[QOS_WLAT];
782 	}
783 
784 	/*
785 	 * We want the period to be long enough to contain a healthy number
786 	 * of IOs while short enough for granular control.  Define it as a
787 	 * multiple of the latency target.  Ideally, the multiplier should
788 	 * be scaled according to the percentile so that it would nominally
789 	 * contain a certain number of requests.  Let's be simpler and
790 	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
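	 *
	 * For example, a 25ms target at the 95th percentile (lat = 25000,
	 * ppm = 950000) yields multi = max((1000000 - 950000) / 50000, 2) = 2
	 * and thus a 50ms period, subject to the clamping below.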
791 	 */
792 	if (ppm)
793 		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
794 	else
795 		multi = 2;
796 	period_us = multi * lat;
797 	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
798 
799 	/* calculate dependent params */
800 	ioc->period_us = period_us;
801 	ioc->timer_slack_ns = div64_u64(
802 		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
803 		100);
804 	ioc_refresh_margins(ioc);
805 }
806 
807 static int ioc_autop_idx(struct ioc *ioc)
808 {
809 	int idx = ioc->autop_idx;
810 	const struct ioc_params *p = &autop[idx];
811 	u32 vrate_pct;
812 	u64 now_ns;
813 
814 	/* rotational? */
815 	if (!blk_queue_nonrot(ioc->rqos.q))
816 		return AUTOP_HDD;
817 
818 	/* handle SATA SSDs w/ broken NCQ */
819 	if (blk_queue_depth(ioc->rqos.q) == 1)
820 		return AUTOP_SSD_QD1;
821 
822 	/* use one of the normal ssd sets */
823 	if (idx < AUTOP_SSD_DFL)
824 		return AUTOP_SSD_DFL;
825 
826 	/* if user is overriding anything, maintain what was there */
827 	if (ioc->user_qos_params || ioc->user_cost_model)
828 		return idx;
829 
830 	/* step up/down based on the vrate */
831 	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
832 	now_ns = ktime_get_ns();
833 
834 	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
835 		if (!ioc->autop_too_fast_at)
836 			ioc->autop_too_fast_at = now_ns;
837 		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
838 			return idx + 1;
839 	} else {
840 		ioc->autop_too_fast_at = 0;
841 	}
842 
843 	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
844 		if (!ioc->autop_too_slow_at)
845 			ioc->autop_too_slow_at = now_ns;
846 		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
847 			return idx - 1;
848 	} else {
849 		ioc->autop_too_slow_at = 0;
850 	}
851 
852 	return idx;
853 }
854 
855 /*
856  * Take the following as input
857  *
858  *  @bps	maximum sequential throughput
859  *  @seqiops	maximum sequential 4k iops
860  *  @randiops	maximum random 4k iops
861  *
862  * and calculate the linear model cost coefficients.
863  *
864  *  *@page	per-page cost		1s / (@bps / 4096)
865  *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
866  *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
867  */
868 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
869 			u64 *page, u64 *seqio, u64 *randio)
870 {
871 	u64 v;
872 
873 	*page = *seqio = *randio = 0;
874 
875 	if (bps)
876 		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
877 					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
878 
879 	if (seqiops) {
880 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
881 		if (v > *page)
882 			*seqio = v - *page;
883 	}
884 
885 	if (randiops) {
886 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
887 		if (v > *page)
888 			*randio = v - *page;
889 	}
890 }
891 
892 static void ioc_refresh_lcoefs(struct ioc *ioc)
893 {
894 	u64 *u = ioc->params.i_lcoefs;
895 	u64 *c = ioc->params.lcoefs;
896 
897 	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
898 		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
899 	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
900 		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
901 }
902 
903 static bool ioc_refresh_params(struct ioc *ioc, bool force)
904 {
905 	const struct ioc_params *p;
906 	int idx;
907 
908 	lockdep_assert_held(&ioc->lock);
909 
910 	idx = ioc_autop_idx(ioc);
911 	p = &autop[idx];
912 
913 	if (idx == ioc->autop_idx && !force)
914 		return false;
915 
916 	if (idx != ioc->autop_idx)
917 		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
918 
919 	ioc->autop_idx = idx;
920 	ioc->autop_too_fast_at = 0;
921 	ioc->autop_too_slow_at = 0;
922 
923 	if (!ioc->user_qos_params)
924 		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
925 	if (!ioc->user_cost_model)
926 		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
927 
928 	ioc_refresh_period_us(ioc);
929 	ioc_refresh_lcoefs(ioc);
930 
931 	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
932 					    VTIME_PER_USEC, MILLION);
933 	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
934 				   VTIME_PER_USEC, MILLION);
935 
936 	return true;
937 }
938 
939 /*
940  * When an iocg accumulates too much vtime or gets deactivated, we throw away
941  * some vtime, which lowers the overall device utilization. As the exact amount
942  * which is being thrown away is known, we can compensate by accelerating the
943  * vrate accordingly so that the extra vtime generated in the current period
944  * matches what got lost.
945  */
946 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
947 {
948 	s64 pleft = ioc->period_at + ioc->period_us - now->now;
949 	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
950 	s64 vcomp, vcomp_min, vcomp_max;
951 
952 	lockdep_assert_held(&ioc->lock);
953 
954 	/* we need some time left in this period */
955 	if (pleft <= 0)
956 		goto done;
957 
958 	/*
959 	 * Calculate how much vrate should be adjusted to offset the error.
960 	 * Limit the amount of adjustment and deduct the adjusted amount from
961 	 * the error.
962 	 */
963 	vcomp = -div64_s64(ioc->vtime_err, pleft);
964 	vcomp_min = -(ioc->vtime_base_rate >> 1);
965 	vcomp_max = ioc->vtime_base_rate;
966 	vcomp = clamp(vcomp, vcomp_min, vcomp_max);
967 
968 	ioc->vtime_err += vcomp * pleft;
969 
970 	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
971 done:
972 	/* bound how much error can accumulate */
973 	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
974 }
975 
976 /* take a snapshot of the current [v]time and vrate */
977 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
978 {
979 	unsigned seq;
980 
981 	now->now_ns = ktime_get();
982 	now->now = ktime_to_us(now->now_ns);
983 	now->vrate = atomic64_read(&ioc->vtime_rate);
984 
985 	/*
986 	 * The current vtime is
987 	 *
988 	 *   vtime at period start + (wallclock time since the start) * vrate
989 	 *
990 	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
991 	 * needed, they're seqcount protected.
992 	 */
993 	do {
994 		seq = read_seqcount_begin(&ioc->period_seqcount);
995 		now->vnow = ioc->period_at_vtime +
996 			(now->now - ioc->period_at) * now->vrate;
997 	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
998 }
999 
1000 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1001 {
1002 	WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1003 
1004 	write_seqcount_begin(&ioc->period_seqcount);
1005 	ioc->period_at = now->now;
1006 	ioc->period_at_vtime = now->vnow;
1007 	write_seqcount_end(&ioc->period_seqcount);
1008 
1009 	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1010 	add_timer(&ioc->timer);
1011 }
1012 
1013 /*
1014  * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1015  * weight sums and propagate upwards accordingly. If @save, the current margin
1016  * is saved to be used as reference for later inuse in-period adjustments.
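 *
 * As a sketch of the propagation step: if, after the update, a parent has
 * child_active_sum = 200 and child_inuse_sum = 100 and its own weight is
 * 300, its inuse becomes DIV64_U64_ROUND_UP(300 * 100, 200) = 150 - the
 * children's inuse/active ratio is applied to the parent's own weight.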
1017  */
1018 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1019 				bool save, struct ioc_now *now)
1020 {
1021 	struct ioc *ioc = iocg->ioc;
1022 	int lvl;
1023 
1024 	lockdep_assert_held(&ioc->lock);
1025 
1026 	/*
1027 	 * For an active leaf node, its inuse shouldn't be zero or exceed
1028 	 * @active. An active internal node's inuse is solely determined by the
1029 	 * inuse to active ratio of its children regardless of @inuse.
1030 	 */
1031 	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1032 		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1033 					   iocg->child_active_sum);
1034 	} else {
1035 		inuse = clamp_t(u32, inuse, 1, active);
1036 	}
1037 
1038 	iocg->last_inuse = iocg->inuse;
1039 	if (save)
1040 		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1041 
1042 	if (active == iocg->active && inuse == iocg->inuse)
1043 		return;
1044 
1045 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1046 		struct ioc_gq *parent = iocg->ancestors[lvl];
1047 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1048 		u32 parent_active = 0, parent_inuse = 0;
1049 
1050 		/* update the level sums */
1051 		parent->child_active_sum += (s32)(active - child->active);
1052 		parent->child_inuse_sum += (s32)(inuse - child->inuse);
1053 		/* apply the updates */
1054 		child->active = active;
1055 		child->inuse = inuse;
1056 
1057 		/*
1058 		 * The delta between the inuse and active sums indicates how
1059 		 * much weight is being given away.  Parent's inuse
1060 		 * and active should reflect the ratio.
1061 		 */
1062 		if (parent->child_active_sum) {
1063 			parent_active = parent->weight;
1064 			parent_inuse = DIV64_U64_ROUND_UP(
1065 				parent_active * parent->child_inuse_sum,
1066 				parent->child_active_sum);
1067 		}
1068 
1069 		/* do we need to keep walking up? */
1070 		if (parent_active == parent->active &&
1071 		    parent_inuse == parent->inuse)
1072 			break;
1073 
1074 		active = parent_active;
1075 		inuse = parent_inuse;
1076 	}
1077 
1078 	ioc->weights_updated = true;
1079 }
1080 
1081 static void commit_weights(struct ioc *ioc)
1082 {
1083 	lockdep_assert_held(&ioc->lock);
1084 
1085 	if (ioc->weights_updated) {
1086 		/* paired with rmb in current_hweight(), see there */
1087 		smp_wmb();
1088 		atomic_inc(&ioc->hweight_gen);
1089 		ioc->weights_updated = false;
1090 	}
1091 }
1092 
1093 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1094 			      bool save, struct ioc_now *now)
1095 {
1096 	__propagate_weights(iocg, active, inuse, save, now);
1097 	commit_weights(iocg->ioc);
1098 }
1099 
1100 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1101 {
1102 	struct ioc *ioc = iocg->ioc;
1103 	int lvl;
1104 	u32 hwa, hwi;
1105 	int ioc_gen;
1106 
1107 	/* hot path - if uptodate, use cached */
1108 	ioc_gen = atomic_read(&ioc->hweight_gen);
1109 	if (ioc_gen == iocg->hweight_gen)
1110 		goto out;
1111 
1112 	/*
1113 	 * Paired with wmb in commit_weights(). If we saw the updated
1114 	 * hweight_gen, all the weight updates from __propagate_weights() are
1115 	 * visible too.
1116 	 *
1117 	 * We can race with weight updates during calculation and get it
1118 	 * wrong.  However, hweight_gen would have changed and a future
1119 	 * reader will recalculate and we're guaranteed to discard the
1120 	 * wrong result soon.
1121 	 */
1122 	smp_rmb();
1123 
1124 	hwa = hwi = WEIGHT_ONE;
1125 	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1126 		struct ioc_gq *parent = iocg->ancestors[lvl];
1127 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1128 		u64 active_sum = READ_ONCE(parent->child_active_sum);
1129 		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1130 		u32 active = READ_ONCE(child->active);
1131 		u32 inuse = READ_ONCE(child->inuse);
1132 
1133 		/* we can race with deactivations and either may read as zero */
1134 		if (!active_sum || !inuse_sum)
1135 			continue;
1136 
1137 		active_sum = max_t(u64, active, active_sum);
1138 		hwa = div64_u64((u64)hwa * active, active_sum);
1139 
1140 		inuse_sum = max_t(u64, inuse, inuse_sum);
1141 		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1142 	}
1143 
1144 	iocg->hweight_active = max_t(u32, hwa, 1);
1145 	iocg->hweight_inuse = max_t(u32, hwi, 1);
1146 	iocg->hweight_gen = ioc_gen;
1147 out:
1148 	if (hw_activep)
1149 		*hw_activep = iocg->hweight_active;
1150 	if (hw_inusep)
1151 		*hw_inusep = iocg->hweight_inuse;
1152 }
1153 
1154 /*
1155  * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1156  * other weights stay unchanged.
1157  */
1158 static u32 current_hweight_max(struct ioc_gq *iocg)
1159 {
1160 	u32 hwm = WEIGHT_ONE;
1161 	u32 inuse = iocg->active;
1162 	u64 child_inuse_sum;
1163 	int lvl;
1164 
1165 	lockdep_assert_held(&iocg->ioc->lock);
1166 
1167 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1168 		struct ioc_gq *parent = iocg->ancestors[lvl];
1169 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1170 
1171 		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1172 		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1173 		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1174 					   parent->child_active_sum);
1175 	}
1176 
1177 	return max_t(u32, hwm, 1);
1178 }
1179 
1180 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1181 {
1182 	struct ioc *ioc = iocg->ioc;
1183 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1184 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1185 	u32 weight;
1186 
1187 	lockdep_assert_held(&ioc->lock);
1188 
1189 	weight = iocg->cfg_weight ?: iocc->dfl_weight;
1190 	if (weight != iocg->weight && iocg->active)
1191 		propagate_weights(iocg, weight, iocg->inuse, true, now);
1192 	iocg->weight = weight;
1193 }
1194 
1195 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1196 {
1197 	struct ioc *ioc = iocg->ioc;
1198 	u64 last_period, cur_period;
1199 	u64 vtime, vtarget;
1200 	int i;
1201 
1202 	/*
1203 	 * If we already seem to be active, just update the stamp to tell the
1204 	 * timer that we're still active.  We don't mind occasional races.
1205 	 */
1206 	if (!list_empty(&iocg->active_list)) {
1207 		ioc_now(ioc, now);
1208 		cur_period = atomic64_read(&ioc->cur_period);
1209 		if (atomic64_read(&iocg->active_period) != cur_period)
1210 			atomic64_set(&iocg->active_period, cur_period);
1211 		return true;
1212 	}
1213 
1214 	/* racy check on internal node IOs, treat as root level IOs */
1215 	if (iocg->child_active_sum)
1216 		return false;
1217 
1218 	spin_lock_irq(&ioc->lock);
1219 
1220 	ioc_now(ioc, now);
1221 
1222 	/* update period */
1223 	cur_period = atomic64_read(&ioc->cur_period);
1224 	last_period = atomic64_read(&iocg->active_period);
1225 	atomic64_set(&iocg->active_period, cur_period);
1226 
1227 	/* already activated or breaking leaf-only constraint? */
1228 	if (!list_empty(&iocg->active_list))
1229 		goto succeed_unlock;
1230 	for (i = iocg->level - 1; i > 0; i--)
1231 		if (!list_empty(&iocg->ancestors[i]->active_list))
1232 			goto fail_unlock;
1233 
1234 	if (iocg->child_active_sum)
1235 		goto fail_unlock;
1236 
1237 	/*
1238 	 * Always start with the target budget. On deactivation, we throw away
1239 	 * anything above it.
1240 	 */
1241 	vtarget = now->vnow - ioc->margins.target;
1242 	vtime = atomic64_read(&iocg->vtime);
1243 
1244 	atomic64_add(vtarget - vtime, &iocg->vtime);
1245 	atomic64_add(vtarget - vtime, &iocg->done_vtime);
1246 	vtime = vtarget;
1247 
1248 	/*
1249 	 * Activate, propagate weight and start period timer if not
1250 	 * running.  Reset hweight_gen to avoid accidental match from
1251 	 * wrapping.
1252 	 */
1253 	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1254 	list_add(&iocg->active_list, &ioc->active_iocgs);
1255 
1256 	propagate_weights(iocg, iocg->weight,
1257 			  iocg->last_inuse ?: iocg->weight, true, now);
1258 
1259 	TRACE_IOCG_PATH(iocg_activate, iocg, now,
1260 			last_period, cur_period, vtime);
1261 
1262 	iocg->activated_at = now->now;
1263 
1264 	if (ioc->running == IOC_IDLE) {
1265 		ioc->running = IOC_RUNNING;
1266 		ioc->dfgv_period_at = now->now;
1267 		ioc->dfgv_period_rem = 0;
1268 		ioc_start_period(ioc, now);
1269 	}
1270 
1271 succeed_unlock:
1272 	spin_unlock_irq(&ioc->lock);
1273 	return true;
1274 
1275 fail_unlock:
1276 	spin_unlock_irq(&ioc->lock);
1277 	return false;
1278 }
1279 
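/*
 * Convert @iocg's outstanding debt into an issue delay and publish it via
 * blkcg_set_delay().  The delay currently in effect halves every second and
 * is replaced whenever the debt-based estimate is higher: overage below
 * MIN_DELAY_THR_PCT of a period maps to no delay, above MAX_DELAY_THR_PCT
 * to MAX_DELAY, and everything in between is interpolated linearly.
 * Returns %true if a delay remains in effect.
 */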
1280 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1281 {
1282 	struct ioc *ioc = iocg->ioc;
1283 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1284 	u64 tdelta, delay, new_delay;
1285 	s64 vover, vover_pct;
1286 	u32 hwa;
1287 
1288 	lockdep_assert_held(&iocg->waitq.lock);
1289 
1290 	/* calculate the current delay in effect - 1/2 every second */
1291 	tdelta = now->now - iocg->delay_at;
1292 	if (iocg->delay)
1293 		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1294 	else
1295 		delay = 0;
1296 
1297 	/* calculate the new delay from the debt amount */
1298 	current_hweight(iocg, &hwa, NULL);
1299 	vover = atomic64_read(&iocg->vtime) +
1300 		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1301 	vover_pct = div64_s64(100 * vover,
1302 			      ioc->period_us * ioc->vtime_base_rate);
1303 
1304 	if (vover_pct <= MIN_DELAY_THR_PCT)
1305 		new_delay = 0;
1306 	else if (vover_pct >= MAX_DELAY_THR_PCT)
1307 		new_delay = MAX_DELAY;
1308 	else
1309 		new_delay = MIN_DELAY +
1310 			div_u64((MAX_DELAY - MIN_DELAY) *
1311 				(vover_pct - MIN_DELAY_THR_PCT),
1312 				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1313 
1314 	/* pick the higher one and apply */
1315 	if (new_delay > delay) {
1316 		iocg->delay = new_delay;
1317 		iocg->delay_at = now->now;
1318 		delay = new_delay;
1319 	}
1320 
1321 	if (delay >= MIN_DELAY) {
1322 		if (!iocg->indelay_since)
1323 			iocg->indelay_since = now->now;
1324 		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1325 		return true;
1326 	} else {
1327 		if (iocg->indelay_since) {
1328 			iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
1329 			iocg->indelay_since = 0;
1330 		}
1331 		iocg->delay = 0;
1332 		blkcg_clear_delay(blkg);
1333 		return false;
1334 	}
1335 }
1336 
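/*
 * Charge @abs_cost as debt to @iocg.  While in debt, the iocg is dropped to
 * the minimum inuse so that it donates all of its share; iocg_pay_debt()
 * restores inuse once the debt is paid off.  The cost is still accounted in
 * abs_vusage for stat purposes.
 */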
1337 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1338 			    struct ioc_now *now)
1339 {
1340 	struct iocg_pcpu_stat *gcs;
1341 
1342 	lockdep_assert_held(&iocg->ioc->lock);
1343 	lockdep_assert_held(&iocg->waitq.lock);
1344 	WARN_ON_ONCE(list_empty(&iocg->active_list));
1345 
1346 	/*
1347 	 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1348 	 * inuse, donating all of its share to others until its debt is paid off.
1349 	 */
1350 	if (!iocg->abs_vdebt && abs_cost) {
1351 		iocg->indebt_since = now->now;
1352 		propagate_weights(iocg, iocg->active, 0, false, now);
1353 	}
1354 
1355 	iocg->abs_vdebt += abs_cost;
1356 
1357 	gcs = get_cpu_ptr(iocg->pcpu_stat);
1358 	local64_add(abs_cost, &gcs->abs_vusage);
1359 	put_cpu_ptr(gcs);
1360 }
1361 
1362 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1363 			  struct ioc_now *now)
1364 {
1365 	lockdep_assert_held(&iocg->ioc->lock);
1366 	lockdep_assert_held(&iocg->waitq.lock);
1367 
1368 	/* make sure that nobody messed with @iocg */
1369 	WARN_ON_ONCE(list_empty(&iocg->active_list));
1370 	WARN_ON_ONCE(iocg->inuse > 1);
1371 
1372 	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1373 
1374 	/* if debt is paid in full, restore inuse */
1375 	if (!iocg->abs_vdebt) {
1376 		iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
1377 		iocg->indebt_since = 0;
1378 
1379 		propagate_weights(iocg, iocg->active, iocg->last_inuse,
1380 				  false, now);
1381 	}
1382 }
1383 
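/*
 * Wait queue callback used by iocg_kick_waitq().  @key points to the shared
 * iocg_wake_ctx; each waiter's cost is charged against ctx->vbudget and the
 * walk stops (by returning -1) once the budget runs out, leaving the
 * remaining waiters queued.
 */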
1384 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1385 			int flags, void *key)
1386 {
1387 	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1388 	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1389 	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1390 
1391 	ctx->vbudget -= cost;
1392 
1393 	if (ctx->vbudget < 0)
1394 		return -1;
1395 
1396 	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1397 	wait->committed = true;
1398 
1399 	/*
1400 	 * autoremove_wake_function() removes the wait entry only when it
1401 	 * actually changed the task state. We want the wait always removed.
1402 	 * Remove explicitly and use default_wake_function(). Note that the
1403 	 * order of operations is important as finish_wait() tests whether
1404 	 * @wq_entry is removed without grabbing the lock.
1405 	 */
1406 	default_wake_function(wq_entry, mode, flags, key);
1407 	list_del_init_careful(&wq_entry->entry);
1408 	return 0;
1409 }
1410 
1411 /*
1412  * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1413  * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1414  * addition to iocg->waitq.lock.
1415  */
1416 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1417 			    struct ioc_now *now)
1418 {
1419 	struct ioc *ioc = iocg->ioc;
1420 	struct iocg_wake_ctx ctx = { .iocg = iocg };
1421 	u64 vshortage, expires, oexpires;
1422 	s64 vbudget;
1423 	u32 hwa;
1424 
1425 	lockdep_assert_held(&iocg->waitq.lock);
1426 
1427 	current_hweight(iocg, &hwa, NULL);
1428 	vbudget = now->vnow - atomic64_read(&iocg->vtime);
1429 
1430 	/* pay off debt */
1431 	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1432 		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1433 		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1434 		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1435 
1436 		lockdep_assert_held(&ioc->lock);
1437 
1438 		atomic64_add(vpay, &iocg->vtime);
1439 		atomic64_add(vpay, &iocg->done_vtime);
1440 		iocg_pay_debt(iocg, abs_vpay, now);
1441 		vbudget -= vpay;
1442 	}
1443 
1444 	if (iocg->abs_vdebt || iocg->delay)
1445 		iocg_kick_delay(iocg, now);
1446 
1447 	/*
1448 	 * Debt can still be outstanding if we haven't paid all yet or the
1449 	 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1450 	 * under debt. Make sure @vbudget reflects the outstanding amount and is
1451 	 * not positive.
1452 	 */
1453 	if (iocg->abs_vdebt) {
1454 		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1455 		vbudget = min_t(s64, 0, vbudget - vdebt);
1456 	}
1457 
1458 	/*
1459 	 * Wake up the ones which are due and see how much vtime we'll need for
1460 	 * the next one. As paying off debt restores hw_inuse, it must be read
1461 	 * after the above debt payment.
1462 	 */
1463 	ctx.vbudget = vbudget;
1464 	current_hweight(iocg, NULL, &ctx.hw_inuse);
1465 
1466 	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1467 
1468 	if (!waitqueue_active(&iocg->waitq)) {
1469 		if (iocg->wait_since) {
1470 			iocg->local_stat.wait_us += now->now - iocg->wait_since;
1471 			iocg->wait_since = 0;
1472 		}
1473 		return;
1474 	}
1475 
1476 	if (!iocg->wait_since)
1477 		iocg->wait_since = now->now;
1478 
1479 	if (WARN_ON_ONCE(ctx.vbudget >= 0))
1480 		return;
1481 
1482 	/* determine next wakeup, add a timer margin to guarantee chunking */
1483 	vshortage = -ctx.vbudget;
1484 	expires = now->now_ns +
1485 		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1486 		NSEC_PER_USEC;
1487 	expires += ioc->timer_slack_ns;
1488 
1489 	/* if already active and close enough, don't bother */
1490 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1491 	if (hrtimer_is_queued(&iocg->waitq_timer) &&
1492 	    abs(oexpires - expires) <= ioc->timer_slack_ns)
1493 		return;
1494 
1495 	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1496 			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1497 }
1498 
1499 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1500 {
1501 	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1502 	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1503 	struct ioc_now now;
1504 	unsigned long flags;
1505 
1506 	ioc_now(iocg->ioc, &now);
1507 
1508 	iocg_lock(iocg, pay_debt, &flags);
1509 	iocg_kick_waitq(iocg, pay_debt, &now);
1510 	iocg_unlock(iocg, pay_debt, &flags);
1511 
1512 	return HRTIMER_NORESTART;
1513 }
1514 
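/*
 * Aggregate the per-cpu latency and rq-wait statistics accumulated since the
 * last invocation.  On return, missed_ppm_ar[READ/WRITE] holds the fraction
 * of IOs which missed their completion latency target in parts per million
 * and *rq_wait_pct_p the total rq wait time as a percentage of the period.
 */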
1515 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1516 {
1517 	u32 nr_met[2] = { };
1518 	u32 nr_missed[2] = { };
1519 	u64 rq_wait_ns = 0;
1520 	int cpu, rw;
1521 
1522 	for_each_online_cpu(cpu) {
1523 		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1524 		u64 this_rq_wait_ns;
1525 
1526 		for (rw = READ; rw <= WRITE; rw++) {
1527 			u32 this_met = local_read(&stat->missed[rw].nr_met);
1528 			u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1529 
1530 			nr_met[rw] += this_met - stat->missed[rw].last_met;
1531 			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1532 			stat->missed[rw].last_met = this_met;
1533 			stat->missed[rw].last_missed = this_missed;
1534 		}
1535 
1536 		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1537 		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1538 		stat->last_rq_wait_ns = this_rq_wait_ns;
1539 	}
1540 
1541 	for (rw = READ; rw <= WRITE; rw++) {
1542 		if (nr_met[rw] + nr_missed[rw])
1543 			missed_ppm_ar[rw] =
1544 				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1545 						   nr_met[rw] + nr_missed[rw]);
1546 		else
1547 			missed_ppm_ar[rw] = 0;
1548 	}
1549 
1550 	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1551 				   ioc->period_us * NSEC_PER_USEC);
1552 }
1553 
1554 /* was iocg idle this period? */
1555 static bool iocg_is_idle(struct ioc_gq *iocg)
1556 {
1557 	struct ioc *ioc = iocg->ioc;
1558 
1559 	/* did something get issued this period? */
1560 	if (atomic64_read(&iocg->active_period) ==
1561 	    atomic64_read(&ioc->cur_period))
1562 		return false;
1563 
1564 	/* is something in flight? */
1565 	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1566 		return false;
1567 
1568 	return true;
1569 }
1570 
1571 /*
1572  * Call this function on the target leaf @iocg's to build pre-order traversal
1573  * list of all the ancestors in @inner_walk. The inner nodes are linked through
1574  * ->walk_list and the caller is responsible for dissolving the list after use.
1575  */
1576 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1577 				  struct list_head *inner_walk)
1578 {
1579 	int lvl;
1580 
1581 	WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1582 
1583 	/* find the first ancestor which hasn't been visited yet */
1584 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1585 		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1586 			break;
1587 	}
1588 
1589 	/* walk down and visit the inner nodes to get pre-order traversal */
1590 	while (++lvl <= iocg->level - 1) {
1591 		struct ioc_gq *inner = iocg->ancestors[lvl];
1592 
1593 		/* record traversal order */
1594 		list_add_tail(&inner->walk_list, inner_walk);
1595 	}
1596 }
1597 
1598 /* collect per-cpu counters and propagate the deltas to the parent */
1599 static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
1600 {
1601 	struct ioc *ioc = iocg->ioc;
1602 	struct iocg_stat new_stat;
1603 	u64 abs_vusage = 0;
1604 	u64 vusage_delta;
1605 	int cpu;
1606 
1607 	lockdep_assert_held(&iocg->ioc->lock);
1608 
1609 	/* collect per-cpu counters */
1610 	for_each_possible_cpu(cpu) {
1611 		abs_vusage += local64_read(
1612 				per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1613 	}
1614 	vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1615 	iocg->last_stat_abs_vusage = abs_vusage;
1616 
1617 	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1618 	iocg->local_stat.usage_us += iocg->usage_delta_us;
1619 
1620 	/* propagate upwards */
1621 	new_stat.usage_us =
1622 		iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
1623 	new_stat.wait_us =
1624 		iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
1625 	new_stat.indebt_us =
1626 		iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
1627 	new_stat.indelay_us =
1628 		iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;
1629 
1630 	/* propagate the deltas to the parent */
1631 	if (iocg->level > 0) {
1632 		struct iocg_stat *parent_stat =
1633 			&iocg->ancestors[iocg->level - 1]->desc_stat;
1634 
1635 		parent_stat->usage_us +=
1636 			new_stat.usage_us - iocg->last_stat.usage_us;
1637 		parent_stat->wait_us +=
1638 			new_stat.wait_us - iocg->last_stat.wait_us;
1639 		parent_stat->indebt_us +=
1640 			new_stat.indebt_us - iocg->last_stat.indebt_us;
1641 		parent_stat->indelay_us +=
1642 			new_stat.indelay_us - iocg->last_stat.indelay_us;
1643 	}
1644 
1645 	iocg->last_stat = new_stat;
1646 }
1647 
1648 /* get stat counters ready for reading on all active iocgs */
1649 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1650 {
1651 	LIST_HEAD(inner_walk);
1652 	struct ioc_gq *iocg, *tiocg;
1653 
1654 	/* flush leaves and build inner node walk list */
1655 	list_for_each_entry(iocg, target_iocgs, active_list) {
1656 		iocg_flush_stat_one(iocg, now);
1657 		iocg_build_inner_walk(iocg, &inner_walk);
1658 	}
1659 
1660 	/* keep flushing upwards by walking the inner list backwards */
1661 	list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1662 		iocg_flush_stat_one(iocg, now);
1663 		list_del_init(&iocg->walk_list);
1664 	}
1665 }
1666 
1667 /*
1668  * Determine what @iocg's hweight_inuse should be after donating unused
1669  * capacity. @hwm is the upper bound and used to signal no donation. This
1670  * function also throws away @iocg's excess budget.
1671  */
1672 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1673 				  u32 usage, struct ioc_now *now)
1674 {
1675 	struct ioc *ioc = iocg->ioc;
1676 	u64 vtime = atomic64_read(&iocg->vtime);
1677 	s64 excess, delta, target, new_hwi;
1678 
1679 	/* debt handling owns inuse for debtors */
1680 	if (iocg->abs_vdebt)
1681 		return 1;
1682 
1683 	/* see whether minimum margin requirement is met */
1684 	if (waitqueue_active(&iocg->waitq) ||
1685 	    time_after64(vtime, now->vnow - ioc->margins.min))
1686 		return hwm;
1687 
1688 	/* throw away excess above target */
1689 	excess = now->vnow - vtime - ioc->margins.target;
1690 	if (excess > 0) {
1691 		atomic64_add(excess, &iocg->vtime);
1692 		atomic64_add(excess, &iocg->done_vtime);
1693 		vtime += excess;
1694 		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1695 	}
1696 
1697 	/*
1698 	 * Let delta be the distance between the iocg's and the device's vtimes
1699 	 * as a fraction of the period duration. Assuming that the iocg will
1700 	 * consume the usage determined above, we want to determine new_hwi so
1701 	 * that delta equals MARGIN_TARGET at the end of the next period.
1702 	 *
1703 	 * We need to execute usage worth of IOs while spending the sum of the
1704 	 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1705 	 * (delta):
1706 	 *
1707 	 *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
1708 	 *
1709 	 * Therefore, the new_hwi is:
1710 	 *
1711 	 *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
1712 	 */
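	/*
	 * A worked example with made-up numbers: if usage is 30% of
	 * WEIGHT_ONE, delta works out to 0.2 and the target margin is 50%,
	 * then new_hwi = 0.3 / (1 - 0.5 + 0.2) ~= 0.43, i.e. the iocg keeps
	 * roughly 43% hweight_inuse and donates the rest.
	 */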
1713 	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1714 			  now->vnow - ioc->period_at_vtime);
1715 	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1716 	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1717 
1718 	return clamp_t(s64, new_hwi, 1, hwm);
1719 }
1720 
1721 /*
1722  * For work-conservation, an iocg which isn't using all of its share should
1723  * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1724  * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1725  *
1726  * #1 is mathematically simpler but has the drawback of requiring synchronous
1727  * global hweight_inuse updates when idle iocg's get activated or inuse weights
1728  * change due to donation snapbacks as it has the possibility of grossly
1729  * overshooting what's allowed by the model and vrate.
1730  *
1731  * #2 is inherently safe with local operations. The donating iocg can easily
1732  * snap back to higher weights when needed without worrying about impacts on
1733  * other nodes as the impacts will be inherently correct. This also makes idle
1734  * iocg activations safe. The only effect activations have is decreasing
1735  * hweight_inuse of others, the right solution to which is for those iocgs to
1736  * snap back to higher weights.
1737  *
1738  * So, we go with #2. The challenge is calculating how each donating iocg's
1739  * inuse should be adjusted to achieve the target donation amounts. This is done
1740  * using Andy's method described in the following pdf.
1741  *
1742  *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1743  *
1744  * Given the weights and target after-donation hweight_inuse values, Andy's
1745  * method determines what the proportional distribution should look like at each
1746  * sibling level to maintain the relative relationship between all non-donating
1747  * pairs. To roughly summarize, it divides the tree into donating and
1748  * non-donating parts, calculates global donation rate which is used to
1749  * determine the target hweight_inuse for each node, and then derives per-level
1750  * proportions.
1751  *
1752  * The following pdf shows that global distribution calculated this way can be
1753  * achieved by scaling inuse weights of donating leaves and propagating the
1754  * adjustments upwards proportionally.
1755  *
1756  *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1757  *
1758  * Combining the above two, we can determine how each leaf iocg's inuse should
1759  * be adjusted to achieve the target donation.
1760  *
1761  *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1762  *
1763  * The inline comments use symbols from the last pdf.
1764  *
1765  *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
1766  *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
1767  *   t is the sum of the absolute budgets of donating nodes in the subtree.
1768  *   w is the weight of the node. w = w_f + w_t
1769  *   w_f is the non-donating portion of w. w_f = w * f / b
1770  *   w_t is the donating portion of w. w_t = w * t / b
1771  *   s is the sum of all sibling weights. s = Sum(w) for siblings
1772  *   s_f and s_t are the non-donating and donating portions of s.
1773  *
1774  * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1775  * w_pt is the donating portion of the parent's weight and w'_pt the same value
1776  * after adjustments. Subscript r denotes the root node's values.
1777  */
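/*
 * A small numeric illustration of the symbols above (values are made up): for
 * a node with weight w = 100 whose subtree has b = 0.4, f = 0.3 and t = 0.1,
 * the split is w_f = w * f / b = 75 and w_t = w * t / b = 25; s_f and s_t are
 * simply the sums of those portions across the siblings.
 */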
1778 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1779 {
1780 	LIST_HEAD(over_hwa);
1781 	LIST_HEAD(inner_walk);
1782 	struct ioc_gq *iocg, *tiocg, *root_iocg;
1783 	u32 after_sum, over_sum, over_target, gamma;
1784 
1785 	/*
1786 	 * It's pretty unlikely but possible for the total sum of
1787 	 * hweight_after_donation values to be higher than WEIGHT_ONE, which will
1788 	 * confuse the following calculations. If such a condition is detected,
1789 	 * scale down everyone over their full share equally to keep the sum below
1790 	 * WEIGHT_ONE.
1791 	 */
1792 	after_sum = 0;
1793 	over_sum = 0;
1794 	list_for_each_entry(iocg, surpluses, surplus_list) {
1795 		u32 hwa;
1796 
1797 		current_hweight(iocg, &hwa, NULL);
1798 		after_sum += iocg->hweight_after_donation;
1799 
1800 		if (iocg->hweight_after_donation > hwa) {
1801 			over_sum += iocg->hweight_after_donation;
1802 			list_add(&iocg->walk_list, &over_hwa);
1803 		}
1804 	}
1805 
1806 	if (after_sum >= WEIGHT_ONE) {
1807 		/*
1808 		 * The delta should be deducted from the over_sum, calculate
1809 		 * target over_sum value.
1810 		 */
1811 		u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1812 		WARN_ON_ONCE(over_sum <= over_delta);
1813 		over_target = over_sum - over_delta;
1814 	} else {
1815 		over_target = 0;
1816 	}
1817 
1818 	list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1819 		if (over_target)
1820 			iocg->hweight_after_donation =
1821 				div_u64((u64)iocg->hweight_after_donation *
1822 					over_target, over_sum);
1823 		list_del_init(&iocg->walk_list);
1824 	}
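	/*
	 * Illustrative numbers for the scaling above: with WEIGHT_ONE being
	 * 1 << 16, after_sum = 70000 and over_sum = 30000, over_delta is
	 * 70000 - 65535 = 4465 and over_target = 25535, so each
	 * over-committed iocg's hweight_after_donation is scaled by roughly
	 * 25535 / 30000 ~= 0.85.
	 */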
1825 
1826 	/*
1827 	 * Build pre-order inner node walk list and prepare for donation
1828 	 * adjustment calculations.
1829 	 */
1830 	list_for_each_entry(iocg, surpluses, surplus_list) {
1831 		iocg_build_inner_walk(iocg, &inner_walk);
1832 	}
1833 
1834 	root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1835 	WARN_ON_ONCE(root_iocg->level > 0);
1836 
1837 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1838 		iocg->child_adjusted_sum = 0;
1839 		iocg->hweight_donating = 0;
1840 		iocg->hweight_after_donation = 0;
1841 	}
1842 
1843 	/*
1844 	 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1845 	 * up the hierarchy.
1846 	 */
1847 	list_for_each_entry(iocg, surpluses, surplus_list) {
1848 		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1849 
1850 		parent->hweight_donating += iocg->hweight_donating;
1851 		parent->hweight_after_donation += iocg->hweight_after_donation;
1852 	}
1853 
1854 	list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1855 		if (iocg->level > 0) {
1856 			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1857 
1858 			parent->hweight_donating += iocg->hweight_donating;
1859 			parent->hweight_after_donation += iocg->hweight_after_donation;
1860 		}
1861 	}
1862 
1863 	/*
1864 	 * Calculate inner hwa's (b) and make sure the donation values are
1865 	 * within the accepted ranges as we're doing low res calculations with
1866 	 * roundups.
1867 	 */
1868 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1869 		if (iocg->level) {
1870 			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1871 
1872 			iocg->hweight_active = DIV64_U64_ROUND_UP(
1873 				(u64)parent->hweight_active * iocg->active,
1874 				parent->child_active_sum);
1875 
1876 		}
1877 
1878 		iocg->hweight_donating = min(iocg->hweight_donating,
1879 					     iocg->hweight_active);
1880 		iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1881 						   iocg->hweight_donating - 1);
1882 		if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1883 				 iocg->hweight_donating <= 1 ||
1884 				 iocg->hweight_after_donation == 0)) {
1885 			pr_warn("iocg: invalid donation weights in ");
1886 			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1887 			pr_cont(": active=%u donating=%u after=%u\n",
1888 				iocg->hweight_active, iocg->hweight_donating,
1889 				iocg->hweight_after_donation);
1890 		}
1891 	}
1892 
1893 	/*
1894 	 * Calculate the global donation rate (gamma) - the rate to adjust
1895 	 * non-donating budgets by.
1896 	 *
1897 	 * No need to use 64bit multiplication here as the first operand is
1898 	 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1899 	 *
1900 	 * We know that there are beneficiary nodes and the sum of the donating
1901 	 * hweights can't be whole; however, due to the round-ups during hweight
1902 	 * calculations, root_iocg->hweight_donating might still end up equal to
1903 	 * or greater than whole. Limit the range when calculating the divider.
1904 	 *
1905 	 * gamma = (1 - t_r') / (1 - t_r)
1906 	 */
1907 	gamma = DIV_ROUND_UP(
1908 		(WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1909 		WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
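	/*
	 * Illustrative numbers: if the root's donating fraction t_r is 25%
	 * and its after-donation fraction t_r' is 10%, gamma comes out to
	 * about (1 - 0.10) / (1 - 0.25) = 1.2, i.e. non-donating budgets get
	 * scaled up by roughly 20%.
	 */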
1910 
1911 	/*
1912 	 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1913 	 * nodes.
1914 	 */
1915 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1916 		struct ioc_gq *parent;
1917 		u32 inuse, wpt, wptp;
1918 		u64 st, sf;
1919 
1920 		if (iocg->level == 0) {
1921 			/* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1922 			iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1923 				iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1924 				WEIGHT_ONE - iocg->hweight_after_donation);
1925 			continue;
1926 		}
1927 
1928 		parent = iocg->ancestors[iocg->level - 1];
1929 
1930 		/* b' = gamma * b_f + b_t' */
1931 		iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1932 			(u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1933 			WEIGHT_ONE) + iocg->hweight_after_donation;
1934 
1935 		/* w' = s' * b' / b'_p */
1936 		inuse = DIV64_U64_ROUND_UP(
1937 			(u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1938 			parent->hweight_inuse);
1939 
1940 		/* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1941 		st = DIV64_U64_ROUND_UP(
1942 			iocg->child_active_sum * iocg->hweight_donating,
1943 			iocg->hweight_active);
1944 		sf = iocg->child_active_sum - st;
1945 		wpt = DIV64_U64_ROUND_UP(
1946 			(u64)iocg->active * iocg->hweight_donating,
1947 			iocg->hweight_active);
1948 		wptp = DIV64_U64_ROUND_UP(
1949 			(u64)inuse * iocg->hweight_after_donation,
1950 			iocg->hweight_inuse);
1951 
1952 		iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1953 	}
1954 
1955 	/*
1956 	 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1957 	 * we can finally determine leaf adjustments.
1958 	 */
1959 	list_for_each_entry(iocg, surpluses, surplus_list) {
1960 		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1961 		u32 inuse;
1962 
1963 		/*
1964 		 * In-debt iocgs participated in the donation calculation with
1965 		 * the minimum target hweight_inuse. Configuring inuse
1966 		 * accordingly would work fine but debt handling expects
1967 		 * @iocg->inuse to stay at the minimum and we don't want to
1968 		 * interfere.
1969 		 */
1970 		if (iocg->abs_vdebt) {
1971 			WARN_ON_ONCE(iocg->inuse > 1);
1972 			continue;
1973 		}
1974 
1975 		/* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
1976 		inuse = DIV64_U64_ROUND_UP(
1977 			parent->child_adjusted_sum * iocg->hweight_after_donation,
1978 			parent->hweight_inuse);
1979 
1980 		TRACE_IOCG_PATH(inuse_transfer, iocg, now,
1981 				iocg->inuse, inuse,
1982 				iocg->hweight_inuse,
1983 				iocg->hweight_after_donation);
1984 
1985 		__propagate_weights(iocg, iocg->active, inuse, true, now);
1986 	}
1987 
1988 	/* walk list should be dissolved after use */
1989 	list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
1990 		list_del_init(&iocg->walk_list);
1991 }
1992 
1993 /*
1994  * A low weight iocg can amass a large amount of debt, for example, when
1995  * anonymous memory gets reclaimed aggressively. If the system has a lot of
1996  * memory paired with a slow IO device, the debt can span multiple seconds or
1997  * more. If there are no other subsequent IO issuers, the in-debt iocg may end
1998  * up blocked paying its debt while the IO device is idle.
1999  *
2000  * The following protects against such cases. If the device has been
2001  * sufficiently idle for a while, the debts are halved and delays are
2002  * recalculated.
2003  */
2004 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2005 			      struct ioc_now *now)
2006 {
2007 	struct ioc_gq *iocg;
2008 	u64 dur, usage_pct, nr_cycles;
2009 
2010 	/* if no debtor, reset the cycle */
2011 	if (!nr_debtors) {
2012 		ioc->dfgv_period_at = now->now;
2013 		ioc->dfgv_period_rem = 0;
2014 		ioc->dfgv_usage_us_sum = 0;
2015 		return;
2016 	}
2017 
2018 	/*
2019 	 * Debtors can pass through a lot of writes choking the device and we
2020 	 * don't want to be forgiving debts while the device is struggling from
2021 	 * write bursts. If we're missing latency targets, consider the device
2022 	 * fully utilized.
2023 	 */
2024 	if (ioc->busy_level > 0)
2025 		usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2026 
2027 	ioc->dfgv_usage_us_sum += usage_us_sum;
2028 	if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2029 		return;
2030 
2031 	/*
2032 	 * At least DFGV_PERIOD has passed since the last period. Calculate the
2033 	 * average usage and reset the period counters.
2034 	 */
2035 	dur = now->now - ioc->dfgv_period_at;
2036 	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2037 
2038 	ioc->dfgv_period_at = now->now;
2039 	ioc->dfgv_usage_us_sum = 0;
2040 
2041 	/* if was too busy, reset everything */
2042 	if (usage_pct > DFGV_USAGE_PCT) {
2043 		ioc->dfgv_period_rem = 0;
2044 		return;
2045 	}
2046 
2047 	/*
2048 	 * Usage is lower than threshold. Let's forgive some debts. Debt
2049 	 * forgiveness runs off of the usual ioc timer but its period usually
2050 	 * doesn't match ioc's. Compensate the difference by performing the
2051 	 * reduction as many times as would fit in the duration since the last
2052 	 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2053 	 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2054 	 * reductions is doubled.
2055 	 */
2056 	nr_cycles = dur + ioc->dfgv_period_rem;
2057 	ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2058 
2059 	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2060 		u64 __maybe_unused old_debt, __maybe_unused old_delay;
2061 
2062 		if (!iocg->abs_vdebt && !iocg->delay)
2063 			continue;
2064 
2065 		spin_lock(&iocg->waitq.lock);
2066 
2067 		old_debt = iocg->abs_vdebt;
2068 		old_delay = iocg->delay;
2069 
2070 		if (iocg->abs_vdebt)
2071 			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2072 		if (iocg->delay)
2073 			iocg->delay = iocg->delay >> nr_cycles ?: 1;
2074 
2075 		iocg_kick_waitq(iocg, true, now);
2076 
2077 		TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2078 				old_debt, iocg->abs_vdebt,
2079 				old_delay, iocg->delay);
2080 
2081 		spin_unlock(&iocg->waitq.lock);
2082 	}
2083 }
2084 
2085 static void ioc_timer_fn(struct timer_list *timer)
2086 {
2087 	struct ioc *ioc = container_of(timer, struct ioc, timer);
2088 	struct ioc_gq *iocg, *tiocg;
2089 	struct ioc_now now;
2090 	LIST_HEAD(surpluses);
2091 	int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
2092 	u64 usage_us_sum = 0;
2093 	u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2094 	u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2095 	u32 missed_ppm[2], rq_wait_pct;
2096 	u64 period_vtime;
2097 	int prev_busy_level;
2098 
2099 	/* how were the latencies during the period? */
2100 	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2101 
2102 	/* take care of active iocgs */
2103 	spin_lock_irq(&ioc->lock);
2104 
2105 	ioc_now(ioc, &now);
2106 
2107 	period_vtime = now.vnow - ioc->period_at_vtime;
2108 	if (WARN_ON_ONCE(!period_vtime)) {
2109 		spin_unlock_irq(&ioc->lock);
2110 		return;
2111 	}
2112 
2113 	/*
2114 	 * Waiters determine the sleep durations based on the vrate they
2115 	 * saw at the time of sleep.  If vrate has increased, some waiters
2116 	 * could be sleeping for too long.  Wake up tardy waiters which
2117 	 * should have woken up in the last period and expire idle iocgs.
2118 	 */
2119 	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2120 		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2121 		    !iocg->delay && !iocg_is_idle(iocg))
2122 			continue;
2123 
2124 		spin_lock(&iocg->waitq.lock);
2125 
2126 		/* flush wait and indebt stat deltas */
2127 		if (iocg->wait_since) {
2128 			iocg->local_stat.wait_us += now.now - iocg->wait_since;
2129 			iocg->wait_since = now.now;
2130 		}
2131 		if (iocg->indebt_since) {
2132 			iocg->local_stat.indebt_us +=
2133 				now.now - iocg->indebt_since;
2134 			iocg->indebt_since = now.now;
2135 		}
2136 		if (iocg->indelay_since) {
2137 			iocg->local_stat.indelay_us +=
2138 				now.now - iocg->indelay_since;
2139 			iocg->indelay_since = now.now;
2140 		}
2141 
2142 		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2143 		    iocg->delay) {
2144 			/* might be oversleeping vtime / hweight changes, kick */
2145 			iocg_kick_waitq(iocg, true, &now);
2146 			if (iocg->abs_vdebt || iocg->delay)
2147 				nr_debtors++;
2148 		} else if (iocg_is_idle(iocg)) {
2149 			/* no waiter and idle, deactivate */
2150 			u64 vtime = atomic64_read(&iocg->vtime);
2151 			s64 excess;
2152 
2153 			/*
2154 			 * @iocg has been inactive for a full duration and will
2155 			 * have a high budget. Account anything above target as
2156 			 * error and throw away. On reactivation, it'll start
2157 			 * with the target budget.
2158 			 */
2159 			excess = now.vnow - vtime - ioc->margins.target;
2160 			if (excess > 0) {
2161 				u32 old_hwi;
2162 
2163 				current_hweight(iocg, NULL, &old_hwi);
2164 				ioc->vtime_err -= div64_u64(excess * old_hwi,
2165 							    WEIGHT_ONE);
2166 			}
2167 
2168 			__propagate_weights(iocg, 0, 0, false, &now);
2169 			list_del_init(&iocg->active_list);
2170 		}
2171 
2172 		spin_unlock(&iocg->waitq.lock);
2173 	}
2174 	commit_weights(ioc);
2175 
2176 	/*
2177 	 * Wait and indebt stat are flushed above and the donation calculation
2178 	 * below needs updated usage stat. Let's bring stat up-to-date.
2179 	 */
2180 	iocg_flush_stat(&ioc->active_iocgs, &now);
2181 
2182 	/* calc usage and see whether some weights need to be moved around */
2183 	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2184 		u64 vdone, vtime, usage_us, usage_dur;
2185 		u32 usage, hw_active, hw_inuse;
2186 
2187 		/*
2188 		 * Collect unused and wind vtime closer to vnow to prevent
2189 		 * iocgs from accumulating a large amount of budget.
2190 		 */
2191 		vdone = atomic64_read(&iocg->done_vtime);
2192 		vtime = atomic64_read(&iocg->vtime);
2193 		current_hweight(iocg, &hw_active, &hw_inuse);
2194 
2195 		/*
2196 		 * Latency QoS detection doesn't account for IOs which are
2197 		 * in-flight for longer than a period.  Detect them by
2198 		 * comparing vdone against period start.  If lagging behind
2199 		 * IOs from past periods, don't increase vrate.
2200 		 */
2201 		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2202 		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2203 		    time_after64(vtime, vdone) &&
2204 		    time_after64(vtime, now.vnow -
2205 				 MAX_LAGGING_PERIODS * period_vtime) &&
2206 		    time_before64(vdone, now.vnow - period_vtime))
2207 			nr_lagging++;
2208 
2209 		/*
2210 		 * Determine absolute usage factoring in in-flight IOs to avoid
2211 		 * high-latency completions appearing as idle.
2212 		 */
2213 		usage_us = iocg->usage_delta_us;
2214 		usage_us_sum += usage_us;
2215 
2216 		if (vdone != vtime) {
2217 			u64 inflight_us = DIV64_U64_ROUND_UP(
2218 				cost_to_abs_cost(vtime - vdone, hw_inuse),
2219 				ioc->vtime_base_rate);
2220 			usage_us = max(usage_us, inflight_us);
2221 		}
2222 
2223 		/* convert to hweight based usage ratio */
2224 		if (time_after64(iocg->activated_at, ioc->period_at))
2225 			usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2226 		else
2227 			usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2228 
2229 		usage = clamp_t(u32,
2230 				DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2231 						   usage_dur),
2232 				1, WEIGHT_ONE);
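		/*
		 * For example (made-up numbers): 25ms of absolute usage over
		 * a 50ms window yields usage = WEIGHT_ONE / 2, i.e. the iocg
		 * kept the device busy for about half of the duration.
		 */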
2233 
2234 		/* see whether there's surplus vtime */
2235 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2236 		if (hw_inuse < hw_active ||
2237 		    (!waitqueue_active(&iocg->waitq) &&
2238 		     time_before64(vtime, now.vnow - ioc->margins.low))) {
2239 			u32 hwa, old_hwi, hwm, new_hwi;
2240 
2241 			/*
2242 			 * Already donating or accumulated enough to start.
2243 			 * Determine the donation amount.
2244 			 */
2245 			current_hweight(iocg, &hwa, &old_hwi);
2246 			hwm = current_hweight_max(iocg);
2247 			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2248 							 usage, &now);
2249 			/*
2250 			 * Donation calculation assumes hweight_after_donation
2251 			 * to be positive, a condition that a donor w/ hwa < 2
2252 			 * can't meet. Don't bother with donation if hwa is
2253 			 * below 2. It's not gonna make a meaningful difference
2254 			 * anyway.
2255 			 */
2256 			if (new_hwi < hwm && hwa >= 2) {
2257 				iocg->hweight_donating = hwa;
2258 				iocg->hweight_after_donation = new_hwi;
2259 				list_add(&iocg->surplus_list, &surpluses);
2260 			} else if (!iocg->abs_vdebt) {
2261 				/*
2262 				 * @iocg doesn't have enough to donate. Reset
2263 				 * its inuse to active.
2264 				 *
2265 				 * Don't reset debtors as their inuse's are
2266 				 * owned by debt handling. This shouldn't affect
2267 				 * donation calculation in any meaningful way
2268 				 * as @iocg doesn't have a meaningful amount of
2269 				 * share anyway.
2270 				 */
2271 				TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2272 						iocg->inuse, iocg->active,
2273 						iocg->hweight_inuse, new_hwi);
2274 
2275 				__propagate_weights(iocg, iocg->active,
2276 						    iocg->active, true, &now);
2277 				nr_shortages++;
2278 			}
2279 		} else {
2280 			/* genuinely short on vtime */
2281 			nr_shortages++;
2282 		}
2283 	}
2284 
2285 	if (!list_empty(&surpluses) && nr_shortages)
2286 		transfer_surpluses(&surpluses, &now);
2287 
2288 	commit_weights(ioc);
2289 
2290 	/* surplus list should be dissolved after use */
2291 	list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2292 		list_del_init(&iocg->surplus_list);
2293 
2294 	/*
2295 	 * If the queue is getting clogged or we're missing too many latency
2296 	 * targets, we're issuing too much IO and should lower the vtime rate.
2297 	 * If we're meeting the targets but seeing shortages without surpluses,
2298 	 * we're too stingy and should increase the vtime rate.
2299 	 */
2300 	prev_busy_level = ioc->busy_level;
2301 	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2302 	    missed_ppm[READ] > ppm_rthr ||
2303 	    missed_ppm[WRITE] > ppm_wthr) {
2304 		/* clearly missing QoS targets, slow down vrate */
2305 		ioc->busy_level = max(ioc->busy_level, 0);
2306 		ioc->busy_level++;
2307 	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2308 		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2309 		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2310 		/* QoS targets are being met with >25% margin */
2311 		if (nr_shortages) {
2312 			/*
2313 			 * We're throttling while the device has spare
2314 			 * capacity.  If vrate was being slowed down, stop.
2315 			 */
2316 			ioc->busy_level = min(ioc->busy_level, 0);
2317 
2318 			/*
2319 			 * If there are IOs spanning multiple periods, wait
2320 			 * them out before pushing the device harder.
2321 			 */
2322 			if (!nr_lagging)
2323 				ioc->busy_level--;
2324 		} else {
2325 			/*
2326 			 * Nobody is being throttled and the users aren't
2327 			 * issuing enough IOs to saturate the device.  We
2328 			 * simply don't know how close the device is to
2329 			 * saturation.  Coast.
2330 			 */
2331 			ioc->busy_level = 0;
2332 		}
2333 	} else {
2334 		/* inside the hysteresis margin, we're good */
2335 		ioc->busy_level = 0;
2336 	}
2337 
2338 	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2339 
2340 	if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
2341 		u64 vrate = ioc->vtime_base_rate;
2342 		u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
2343 
2344 		/* rq_wait signal is always reliable, ignore user vrate_min */
2345 		if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
2346 			vrate_min = VRATE_MIN;
2347 
2348 		/*
2349 		 * If vrate is out of bounds, apply clamp gradually as the
2350 		 * bounds can change abruptly.  Otherwise, apply busy_level
2351 		 * based adjustment.
2352 		 */
2353 		if (vrate < vrate_min) {
2354 			vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
2355 					  100);
2356 			vrate = min(vrate, vrate_min);
2357 		} else if (vrate > vrate_max) {
2358 			vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
2359 					  100);
2360 			vrate = max(vrate, vrate_max);
2361 		} else {
2362 			int idx = min_t(int, abs(ioc->busy_level),
2363 					ARRAY_SIZE(vrate_adj_pct) - 1);
2364 			u32 adj_pct = vrate_adj_pct[idx];
2365 
2366 			if (ioc->busy_level > 0)
2367 				adj_pct = 100 - adj_pct;
2368 			else
2369 				adj_pct = 100 + adj_pct;
2370 
2371 			vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
2372 				      vrate_min, vrate_max);
2373 		}
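		/*
		 * For example (hypothetical table entry): with busy_level at
		 * +3 and vrate_adj_pct[3] being, say, 2, adj_pct becomes 98
		 * and vrate is scaled to 98% of its previous value, clamped
		 * to [vrate_min, vrate_max]; a negative busy_level would
		 * scale it up to 102% instead.
		 */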
2374 
2375 		trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
2376 					   nr_lagging, nr_shortages);
2377 
2378 		ioc->vtime_base_rate = vrate;
2379 		ioc_refresh_margins(ioc);
2380 	} else if (ioc->busy_level != prev_busy_level || nr_lagging) {
2381 		trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
2382 					   missed_ppm, rq_wait_pct, nr_lagging,
2383 					   nr_shortages);
2384 	}
2385 
2386 	ioc_refresh_params(ioc, false);
2387 
2388 	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2389 
2390 	/*
2391 	 * This period is done.  Move onto the next one.  If nothing's
2392 	 * going on with the device, stop the timer.
2393 	 */
2394 	atomic64_inc(&ioc->cur_period);
2395 
2396 	if (ioc->running != IOC_STOP) {
2397 		if (!list_empty(&ioc->active_iocgs)) {
2398 			ioc_start_period(ioc, &now);
2399 		} else {
2400 			ioc->busy_level = 0;
2401 			ioc->vtime_err = 0;
2402 			ioc->running = IOC_IDLE;
2403 		}
2404 
2405 		ioc_refresh_vrate(ioc, &now);
2406 	}
2407 
2408 	spin_unlock_irq(&ioc->lock);
2409 }
2410 
2411 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2412 				      u64 abs_cost, struct ioc_now *now)
2413 {
2414 	struct ioc *ioc = iocg->ioc;
2415 	struct ioc_margins *margins = &ioc->margins;
2416 	u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2417 	u32 hwi, adj_step;
2418 	s64 margin;
2419 	u64 cost, new_inuse;
2420 
2421 	current_hweight(iocg, NULL, &hwi);
2422 	old_hwi = hwi;
2423 	cost = abs_cost_to_cost(abs_cost, hwi);
2424 	margin = now->vnow - vtime - cost;
2425 
2426 	/* debt handling owns inuse for debtors */
2427 	if (iocg->abs_vdebt)
2428 		return cost;
2429 
2430 	/*
2431 	 * We only increase inuse during period and do so iff the margin has
2432 	 * deteriorated since the previous adjustment.
2433 	 */
2434 	if (margin >= iocg->saved_margin || margin >= margins->low ||
2435 	    iocg->inuse == iocg->active)
2436 		return cost;
2437 
2438 	spin_lock_irq(&ioc->lock);
2439 
2440 	/* we own inuse only when @iocg is in the normal active state */
2441 	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2442 		spin_unlock_irq(&ioc->lock);
2443 		return cost;
2444 	}
2445 
2446 	/*
2447 	 * Bump up inuse till @abs_cost fits in the existing budget.
2448 	 * adj_step must be determined after acquiring ioc->lock - we might
2449 	 * have raced and lost to another thread for activation and could
2450 	 * be reading a zero iocg->active before taking ioc->lock, which would
2451 	 * lead to an infinite loop.
2452 	 */
2453 	new_inuse = iocg->inuse;
2454 	adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2455 	do {
2456 		new_inuse = new_inuse + adj_step;
2457 		propagate_weights(iocg, iocg->active, new_inuse, true, now);
2458 		current_hweight(iocg, NULL, &hwi);
2459 		cost = abs_cost_to_cost(abs_cost, hwi);
2460 	} while (time_after64(vtime + cost, now->vnow) &&
2461 		 iocg->inuse != iocg->active);
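	/*
	 * For example (assuming INUSE_ADJ_STEP_PCT of 25): inuse grows in
	 * steps of 25% of @iocg->active, re-propagating the weights each
	 * time, until either the cost fits within the budget or inuse
	 * reaches active.
	 */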
2462 
2463 	spin_unlock_irq(&ioc->lock);
2464 
2465 	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2466 			old_inuse, iocg->inuse, old_hwi, hwi);
2467 
2468 	return cost;
2469 }
2470 
2471 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2472 				    bool is_merge, u64 *costp)
2473 {
2474 	struct ioc *ioc = iocg->ioc;
2475 	u64 coef_seqio, coef_randio, coef_page;
2476 	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2477 	u64 seek_pages = 0;
2478 	u64 cost = 0;
2479 
2480 	switch (bio_op(bio)) {
2481 	case REQ_OP_READ:
2482 		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
2483 		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
2484 		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
2485 		break;
2486 	case REQ_OP_WRITE:
2487 		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
2488 		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
2489 		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
2490 		break;
2491 	default:
2492 		goto out;
2493 	}
2494 
2495 	if (iocg->cursor) {
2496 		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2497 		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2498 	}
2499 
2500 	if (!is_merge) {
2501 		if (seek_pages > LCOEF_RANDIO_PAGES) {
2502 			cost += coef_randio;
2503 		} else {
2504 			cost += coef_seqio;
2505 		}
2506 	}
2507 	cost += pages * coef_page;
2508 out:
2509 	*costp = cost;
2510 }
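/*
 * A sketch of the linear model above with made-up coefficients: with
 * coef_seqio = 10000, coef_randio = 80000 and coef_page = 1000 (in vtime
 * units), a non-merge 64-page (256KiB) IO that seeks farther than
 * LCOEF_RANDIO_PAGES costs 80000 + 64 * 1000 = 144000, while the same
 * transfer continuing from the cursor costs only 10000 + 64000 = 74000.
 */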
2511 
2512 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2513 {
2514 	u64 cost;
2515 
2516 	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2517 	return cost;
2518 }
2519 
2520 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2521 					 u64 *costp)
2522 {
2523 	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2524 
2525 	switch (req_op(rq)) {
2526 	case REQ_OP_READ:
2527 		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2528 		break;
2529 	case REQ_OP_WRITE:
2530 		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2531 		break;
2532 	default:
2533 		*costp = 0;
2534 	}
2535 }
2536 
2537 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2538 {
2539 	u64 cost;
2540 
2541 	calc_size_vtime_cost_builtin(rq, ioc, &cost);
2542 	return cost;
2543 }
2544 
2545 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2546 {
2547 	struct blkcg_gq *blkg = bio->bi_blkg;
2548 	struct ioc *ioc = rqos_to_ioc(rqos);
2549 	struct ioc_gq *iocg = blkg_to_iocg(blkg);
2550 	struct ioc_now now;
2551 	struct iocg_wait wait;
2552 	u64 abs_cost, cost, vtime;
2553 	bool use_debt, ioc_locked;
2554 	unsigned long flags;
2555 
2556 	/* bypass IOs if disabled, still initializing, or for root cgroup */
2557 	if (!ioc->enabled || !iocg || !iocg->level)
2558 		return;
2559 
2560 	/* calculate the absolute vtime cost */
2561 	abs_cost = calc_vtime_cost(bio, iocg, false);
2562 	if (!abs_cost)
2563 		return;
2564 
2565 	if (!iocg_activate(iocg, &now))
2566 		return;
2567 
2568 	iocg->cursor = bio_end_sector(bio);
2569 	vtime = atomic64_read(&iocg->vtime);
2570 	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2571 
2572 	/*
2573 	 * If no one's waiting and within budget, issue right away.  The
2574 	 * tests are racy but the races aren't systemic - we only miss once
2575 	 * in a while which is fine.
2576 	 */
2577 	if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2578 	    time_before_eq64(vtime + cost, now.vnow)) {
2579 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2580 		return;
2581 	}
2582 
2583 	/*
2584 	 * We're over budget. This can be handled in two ways. IOs which may
2585 	 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2586 	 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2587 	 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2588 	 * whether debt handling is needed and acquire locks accordingly.
2589 	 */
2590 	use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2591 	ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2592 retry_lock:
2593 	iocg_lock(iocg, ioc_locked, &flags);
2594 
2595 	/*
2596 	 * @iocg must stay activated for debt and waitq handling. Deactivation
2597 	 * is synchronized against both ioc->lock and waitq.lock and we won't
2598 	 * get deactivated as long as we're waiting or has debt, so we're good
2599 	 * get deactivated as long as we're waiting or have debt, so we're good
2600 	 * issue the IO.
2601 	 */
2602 	if (unlikely(list_empty(&iocg->active_list))) {
2603 		iocg_unlock(iocg, ioc_locked, &flags);
2604 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2605 		return;
2606 	}
2607 
2608 	/*
2609 	 * We're over budget. If @bio has to be issued regardless, remember
2610 	 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2611 	 * off the debt before waking more IOs.
2612 	 *
2613 	 * This way, the debt is continuously paid off each period with the
2614 	 * actual budget available to the cgroup. If we just wound vtime, we
2615 	 * would incorrectly use the current hw_inuse for the entire amount
2616 	 * which, for example, can lead to the cgroup staying blocked for a
2617 	 * long time even with substantially raised hw_inuse.
2618 	 *
2619 	 * An iocg with vdebt should stay online so that the timer can keep
2620 	 * deducting its vdebt and [de]activate use_delay mechanism
2621 	 * accordingly. We don't want to race against the timer trying to
2622 	 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2623 	 * penalizing the cgroup and its descendants.
2624 	 */
2625 	if (use_debt) {
2626 		iocg_incur_debt(iocg, abs_cost, &now);
2627 		if (iocg_kick_delay(iocg, &now))
2628 			blkcg_schedule_throttle(rqos->q,
2629 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2630 		iocg_unlock(iocg, ioc_locked, &flags);
2631 		return;
2632 	}
2633 
2634 	/* guarantee that iocgs w/ waiters have maximum inuse */
2635 	if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2636 		if (!ioc_locked) {
2637 			iocg_unlock(iocg, false, &flags);
2638 			ioc_locked = true;
2639 			goto retry_lock;
2640 		}
2641 		propagate_weights(iocg, iocg->active, iocg->active, true,
2642 				  &now);
2643 	}
2644 
2645 	/*
2646 	 * Append self to the waitq and schedule the wakeup timer if we're
2647 	 * the first waiter.  The timer duration is calculated based on the
2648 	 * current vrate.  vtime and hweight changes can make it too short
2649 	 * or too long.  Each wait entry records the absolute cost it's
2650 	 * waiting for to allow re-evaluation using a custom wait entry.
2651 	 *
2652 	 * If too short, the timer simply reschedules itself.  If too long,
2653 	 * the period timer will notice and trigger wakeups.
2654 	 *
2655 	 * All waiters are on iocg->waitq and the wait states are
2656 	 * synchronized using waitq.lock.
2657 	 */
2658 	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2659 	wait.wait.private = current;
2660 	wait.bio = bio;
2661 	wait.abs_cost = abs_cost;
2662 	wait.committed = false;	/* will be set true by waker */
2663 
2664 	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2665 	iocg_kick_waitq(iocg, ioc_locked, &now);
2666 
2667 	iocg_unlock(iocg, ioc_locked, &flags);
2668 
2669 	while (true) {
2670 		set_current_state(TASK_UNINTERRUPTIBLE);
2671 		if (wait.committed)
2672 			break;
2673 		io_schedule();
2674 	}
2675 
2676 	/* waker already committed us, proceed */
2677 	finish_wait(&iocg->waitq, &wait.wait);
2678 }
2679 
2680 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2681 			   struct bio *bio)
2682 {
2683 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2684 	struct ioc *ioc = rqos_to_ioc(rqos);
2685 	sector_t bio_end = bio_end_sector(bio);
2686 	struct ioc_now now;
2687 	u64 vtime, abs_cost, cost;
2688 	unsigned long flags;
2689 
2690 	/* bypass if disabled, still initializing, or for root cgroup */
2691 	if (!ioc->enabled || !iocg || !iocg->level)
2692 		return;
2693 
2694 	abs_cost = calc_vtime_cost(bio, iocg, true);
2695 	if (!abs_cost)
2696 		return;
2697 
2698 	ioc_now(ioc, &now);
2699 
2700 	vtime = atomic64_read(&iocg->vtime);
2701 	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2702 
2703 	/* update cursor if backmerging into the request at the cursor */
2704 	if (blk_rq_pos(rq) < bio_end &&
2705 	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2706 		iocg->cursor = bio_end;
2707 
2708 	/*
2709 	 * Charge if there's enough vtime budget and the existing request has
2710 	 * cost assigned.
2711 	 */
2712 	if (rq->bio && rq->bio->bi_iocost_cost &&
2713 	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2714 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2715 		return;
2716 	}
2717 
2718 	/*
2719 	 * Otherwise, account it as debt if @iocg is online, which it should
2720 	 * be for the vast majority of cases. See debt handling in
2721 	 * ioc_rqos_throttle() for details.
2722 	 */
2723 	spin_lock_irqsave(&ioc->lock, flags);
2724 	spin_lock(&iocg->waitq.lock);
2725 
2726 	if (likely(!list_empty(&iocg->active_list))) {
2727 		iocg_incur_debt(iocg, abs_cost, &now);
2728 		if (iocg_kick_delay(iocg, &now))
2729 			blkcg_schedule_throttle(rqos->q,
2730 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2731 	} else {
2732 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2733 	}
2734 
2735 	spin_unlock(&iocg->waitq.lock);
2736 	spin_unlock_irqrestore(&ioc->lock, flags);
2737 }
2738 
2739 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2740 {
2741 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2742 
2743 	if (iocg && bio->bi_iocost_cost)
2744 		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2745 }
2746 
2747 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2748 {
2749 	struct ioc *ioc = rqos_to_ioc(rqos);
2750 	struct ioc_pcpu_stat *ccs;
2751 	u64 on_q_ns, rq_wait_ns, size_nsec;
2752 	int pidx, rw;
2753 
2754 	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2755 		return;
2756 
2757 	switch (req_op(rq) & REQ_OP_MASK) {
2758 	case REQ_OP_READ:
2759 		pidx = QOS_RLAT;
2760 		rw = READ;
2761 		break;
2762 	case REQ_OP_WRITE:
2763 		pidx = QOS_WLAT;
2764 		rw = WRITE;
2765 		break;
2766 	default:
2767 		return;
2768 	}
2769 
2770 	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2771 	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2772 	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2773 
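	/*
	 * Illustrative numbers: a read that spent 7ms from allocation to
	 * completion with a 2ms size cost has 5ms of "pure" latency and
	 * counts as met only if that is within the configured rlat target.
	 */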
2774 	ccs = get_cpu_ptr(ioc->pcpu_stat);
2775 
2776 	if (on_q_ns <= size_nsec ||
2777 	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2778 		local_inc(&ccs->missed[rw].nr_met);
2779 	else
2780 		local_inc(&ccs->missed[rw].nr_missed);
2781 
2782 	local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2783 
2784 	put_cpu_ptr(ccs);
2785 }
2786 
2787 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2788 {
2789 	struct ioc *ioc = rqos_to_ioc(rqos);
2790 
2791 	spin_lock_irq(&ioc->lock);
2792 	ioc_refresh_params(ioc, false);
2793 	spin_unlock_irq(&ioc->lock);
2794 }
2795 
2796 static void ioc_rqos_exit(struct rq_qos *rqos)
2797 {
2798 	struct ioc *ioc = rqos_to_ioc(rqos);
2799 
2800 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2801 
2802 	spin_lock_irq(&ioc->lock);
2803 	ioc->running = IOC_STOP;
2804 	spin_unlock_irq(&ioc->lock);
2805 
2806 	del_timer_sync(&ioc->timer);
2807 	free_percpu(ioc->pcpu_stat);
2808 	kfree(ioc);
2809 }
2810 
2811 static struct rq_qos_ops ioc_rqos_ops = {
2812 	.throttle = ioc_rqos_throttle,
2813 	.merge = ioc_rqos_merge,
2814 	.done_bio = ioc_rqos_done_bio,
2815 	.done = ioc_rqos_done,
2816 	.queue_depth_changed = ioc_rqos_queue_depth_changed,
2817 	.exit = ioc_rqos_exit,
2818 };
2819 
2820 static int blk_iocost_init(struct request_queue *q)
2821 {
2822 	struct ioc *ioc;
2823 	struct rq_qos *rqos;
2824 	int i, cpu, ret;
2825 
2826 	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2827 	if (!ioc)
2828 		return -ENOMEM;
2829 
2830 	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2831 	if (!ioc->pcpu_stat) {
2832 		kfree(ioc);
2833 		return -ENOMEM;
2834 	}
2835 
2836 	for_each_possible_cpu(cpu) {
2837 		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2838 
2839 		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2840 			local_set(&ccs->missed[i].nr_met, 0);
2841 			local_set(&ccs->missed[i].nr_missed, 0);
2842 		}
2843 		local64_set(&ccs->rq_wait_ns, 0);
2844 	}
2845 
2846 	rqos = &ioc->rqos;
2847 	rqos->id = RQ_QOS_COST;
2848 	rqos->ops = &ioc_rqos_ops;
2849 	rqos->q = q;
2850 
2851 	spin_lock_init(&ioc->lock);
2852 	timer_setup(&ioc->timer, ioc_timer_fn, 0);
2853 	INIT_LIST_HEAD(&ioc->active_iocgs);
2854 
2855 	ioc->running = IOC_IDLE;
2856 	ioc->vtime_base_rate = VTIME_PER_USEC;
2857 	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2858 	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2859 	ioc->period_at = ktime_to_us(ktime_get());
2860 	atomic64_set(&ioc->cur_period, 0);
2861 	atomic_set(&ioc->hweight_gen, 0);
2862 
2863 	spin_lock_irq(&ioc->lock);
2864 	ioc->autop_idx = AUTOP_INVALID;
2865 	ioc_refresh_params(ioc, true);
2866 	spin_unlock_irq(&ioc->lock);
2867 
2868 	/*
2869 	 * rqos must be added before activation to allow iocg_pd_init() to
2870 	 * look up the ioc from q. This means that the rqos methods may get
2871 	 * called before policy activation completes, so they can't assume that
2872 	 * the target bio has an associated iocg and must test for NULL iocg.
2873 	 */
2874 	rq_qos_add(q, rqos);
2875 	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2876 	if (ret) {
2877 		rq_qos_del(q, rqos);
2878 		free_percpu(ioc->pcpu_stat);
2879 		kfree(ioc);
2880 		return ret;
2881 	}
2882 	return 0;
2883 }
2884 
2885 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2886 {
2887 	struct ioc_cgrp *iocc;
2888 
2889 	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2890 	if (!iocc)
2891 		return NULL;
2892 
2893 	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2894 	return &iocc->cpd;
2895 }
2896 
2897 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2898 {
2899 	kfree(container_of(cpd, struct ioc_cgrp, cpd));
2900 }
2901 
2902 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2903 					     struct blkcg *blkcg)
2904 {
2905 	int levels = blkcg->css.cgroup->level + 1;
2906 	struct ioc_gq *iocg;
2907 
2908 	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2909 	if (!iocg)
2910 		return NULL;
2911 
2912 	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2913 	if (!iocg->pcpu_stat) {
2914 		kfree(iocg);
2915 		return NULL;
2916 	}
2917 
2918 	return &iocg->pd;
2919 }
2920 
2921 static void ioc_pd_init(struct blkg_policy_data *pd)
2922 {
2923 	struct ioc_gq *iocg = pd_to_iocg(pd);
2924 	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2925 	struct ioc *ioc = q_to_ioc(blkg->q);
2926 	struct ioc_now now;
2927 	struct blkcg_gq *tblkg;
2928 	unsigned long flags;
2929 
2930 	ioc_now(ioc, &now);
2931 
2932 	iocg->ioc = ioc;
2933 	atomic64_set(&iocg->vtime, now.vnow);
2934 	atomic64_set(&iocg->done_vtime, now.vnow);
2935 	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2936 	INIT_LIST_HEAD(&iocg->active_list);
2937 	INIT_LIST_HEAD(&iocg->walk_list);
2938 	INIT_LIST_HEAD(&iocg->surplus_list);
2939 	iocg->hweight_active = WEIGHT_ONE;
2940 	iocg->hweight_inuse = WEIGHT_ONE;
2941 
2942 	init_waitqueue_head(&iocg->waitq);
2943 	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2944 	iocg->waitq_timer.function = iocg_waitq_timer_fn;
2945 
2946 	iocg->level = blkg->blkcg->css.cgroup->level;
2947 
2948 	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2949 		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2950 		iocg->ancestors[tiocg->level] = tiocg;
2951 	}
2952 
2953 	spin_lock_irqsave(&ioc->lock, flags);
2954 	weight_updated(iocg, &now);
2955 	spin_unlock_irqrestore(&ioc->lock, flags);
2956 }
2957 
2958 static void ioc_pd_free(struct blkg_policy_data *pd)
2959 {
2960 	struct ioc_gq *iocg = pd_to_iocg(pd);
2961 	struct ioc *ioc = iocg->ioc;
2962 	unsigned long flags;
2963 
2964 	if (ioc) {
2965 		spin_lock_irqsave(&ioc->lock, flags);
2966 
2967 		if (!list_empty(&iocg->active_list)) {
2968 			struct ioc_now now;
2969 
2970 			ioc_now(ioc, &now);
2971 			propagate_weights(iocg, 0, 0, false, &now);
2972 			list_del_init(&iocg->active_list);
2973 		}
2974 
2975 		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2976 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2977 
2978 		spin_unlock_irqrestore(&ioc->lock, flags);
2979 
2980 		hrtimer_cancel(&iocg->waitq_timer);
2981 	}
2982 	free_percpu(iocg->pcpu_stat);
2983 	kfree(iocg);
2984 }
2985 
2986 static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
2987 {
2988 	struct ioc_gq *iocg = pd_to_iocg(pd);
2989 	struct ioc *ioc = iocg->ioc;
2990 	size_t pos = 0;
2991 
2992 	if (!ioc->enabled)
2993 		return 0;
2994 
2995 	if (iocg->level == 0) {
2996 		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
2997 			ioc->vtime_base_rate * 10000,
2998 			VTIME_PER_USEC);
2999 		pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
3000 				  vp10k / 100, vp10k % 100);
3001 	}
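	/*
	 * For example: a vtime_base_rate of 1.25 * VTIME_PER_USEC gives
	 * vp10k = 12500 and is reported as "cost.vrate=125.00".
	 */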
3002 
3003 	pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
3004 			 iocg->last_stat.usage_us);
3005 
3006 	if (blkcg_debug_stats)
3007 		pos += scnprintf(buf + pos, size - pos,
3008 				 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3009 				 iocg->last_stat.wait_us,
3010 				 iocg->last_stat.indebt_us,
3011 				 iocg->last_stat.indelay_us);
3012 
3013 	return pos;
3014 }
3015 
3016 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3017 			     int off)
3018 {
3019 	const char *dname = blkg_dev_name(pd->blkg);
3020 	struct ioc_gq *iocg = pd_to_iocg(pd);
3021 
3022 	if (dname && iocg->cfg_weight)
3023 		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3024 	return 0;
3025 }
3026 
3027 
3028 static int ioc_weight_show(struct seq_file *sf, void *v)
3029 {
3030 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3031 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3032 
3033 	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3034 	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3035 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3036 	return 0;
3037 }
3038 
3039 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3040 				size_t nbytes, loff_t off)
3041 {
3042 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
3043 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3044 	struct blkg_conf_ctx ctx;
3045 	struct ioc_now now;
3046 	struct ioc_gq *iocg;
3047 	u32 v;
3048 	int ret;
3049 
3050 	if (!strchr(buf, ':')) {
3051 		struct blkcg_gq *blkg;
3052 
3053 		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3054 			return -EINVAL;
3055 
3056 		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3057 			return -EINVAL;
3058 
3059 		spin_lock_irq(&blkcg->lock);
3060 		iocc->dfl_weight = v * WEIGHT_ONE;
3061 		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3062 			struct ioc_gq *iocg = blkg_to_iocg(blkg);
3063 
3064 			if (iocg) {
3065 				spin_lock(&iocg->ioc->lock);
3066 				ioc_now(iocg->ioc, &now);
3067 				weight_updated(iocg, &now);
3068 				spin_unlock(&iocg->ioc->lock);
3069 			}
3070 		}
3071 		spin_unlock_irq(&blkcg->lock);
3072 
3073 		return nbytes;
3074 	}
3075 
3076 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3077 	if (ret)
3078 		return ret;
3079 
3080 	iocg = blkg_to_iocg(ctx.blkg);
3081 
3082 	if (!strncmp(ctx.body, "default", 7)) {
3083 		v = 0;
3084 	} else {
3085 		if (!sscanf(ctx.body, "%u", &v))
3086 			goto einval;
3087 		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3088 			goto einval;
3089 	}
3090 
3091 	spin_lock(&iocg->ioc->lock);
3092 	iocg->cfg_weight = v * WEIGHT_ONE;
3093 	ioc_now(iocg->ioc, &now);
3094 	weight_updated(iocg, &now);
3095 	spin_unlock(&iocg->ioc->lock);
3096 
3097 	blkg_conf_finish(&ctx);
3098 	return nbytes;
3099 
3100 einval:
3101 	blkg_conf_finish(&ctx);
3102 	return -EINVAL;
3103 }
3104 
3105 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3106 			  int off)
3107 {
3108 	const char *dname = blkg_dev_name(pd->blkg);
3109 	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3110 
3111 	if (!dname)
3112 		return 0;
3113 
3114 	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3115 		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3116 		   ioc->params.qos[QOS_RPPM] / 10000,
3117 		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
3118 		   ioc->params.qos[QOS_RLAT],
3119 		   ioc->params.qos[QOS_WPPM] / 10000,
3120 		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
3121 		   ioc->params.qos[QOS_WLAT],
3122 		   ioc->params.qos[QOS_MIN] / 10000,
3123 		   ioc->params.qos[QOS_MIN] % 10000 / 100,
3124 		   ioc->params.qos[QOS_MAX] / 10000,
3125 		   ioc->params.qos[QOS_MAX] % 10000 / 100);
3126 	return 0;
3127 }
3128 
3129 static int ioc_qos_show(struct seq_file *sf, void *v)
3130 {
3131 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3132 
3133 	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3134 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3135 	return 0;
3136 }
3137 
3138 static const match_table_t qos_ctrl_tokens = {
3139 	{ QOS_ENABLE,		"enable=%u"	},
3140 	{ QOS_CTRL,		"ctrl=%s"	},
3141 	{ NR_QOS_CTRL_PARAMS,	NULL		},
3142 };
3143 
3144 static const match_table_t qos_tokens = {
3145 	{ QOS_RPPM,		"rpct=%s"	},
3146 	{ QOS_RLAT,		"rlat=%u"	},
3147 	{ QOS_WPPM,		"wpct=%s"	},
3148 	{ QOS_WLAT,		"wlat=%u"	},
3149 	{ QOS_MIN,		"min=%s"	},
3150 	{ QOS_MAX,		"max=%s"	},
3151 	{ NR_QOS_PARAMS,	NULL		},
3152 };
3153 
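/*
 * An illustrative parameter line accepted by the writer below (the device
 * number is a placeholder):
 *
 *   8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 wlat=10000
 *        min=50.00 max=150.00
 */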
3154 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3155 			     size_t nbytes, loff_t off)
3156 {
3157 	struct gendisk *disk;
3158 	struct ioc *ioc;
3159 	u32 qos[NR_QOS_PARAMS];
3160 	bool enable, user;
3161 	char *p;
3162 	int ret;
3163 
3164 	disk = blkcg_conf_get_disk(&input);
3165 	if (IS_ERR(disk))
3166 		return PTR_ERR(disk);
3167 
3168 	ioc = q_to_ioc(disk->queue);
3169 	if (!ioc) {
3170 		ret = blk_iocost_init(disk->queue);
3171 		if (ret)
3172 			goto err;
3173 		ioc = q_to_ioc(disk->queue);
3174 	}
3175 
3176 	spin_lock_irq(&ioc->lock);
3177 	memcpy(qos, ioc->params.qos, sizeof(qos));
3178 	enable = ioc->enabled;
3179 	user = ioc->user_qos_params;
3180 	spin_unlock_irq(&ioc->lock);
3181 
3182 	while ((p = strsep(&input, " \t\n"))) {
3183 		substring_t args[MAX_OPT_ARGS];
3184 		char buf[32];
3185 		int tok;
3186 		s64 v;
3187 
3188 		if (!*p)
3189 			continue;
3190 
3191 		switch (match_token(p, qos_ctrl_tokens, args)) {
3192 		case QOS_ENABLE:
3193 			match_u64(&args[0], &v);
3194 			enable = v;
3195 			continue;
3196 		case QOS_CTRL:
3197 			match_strlcpy(buf, &args[0], sizeof(buf));
3198 			if (!strcmp(buf, "auto"))
3199 				user = false;
3200 			else if (!strcmp(buf, "user"))
3201 				user = true;
3202 			else
3203 				goto einval;
3204 			continue;
3205 		}
3206 
3207 		tok = match_token(p, qos_tokens, args);
3208 		switch (tok) {
3209 		case QOS_RPPM:
3210 		case QOS_WPPM:
3211 			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3212 			    sizeof(buf))
3213 				goto einval;
3214 			if (cgroup_parse_float(buf, 2, &v))
3215 				goto einval;
3216 			if (v < 0 || v > 10000)
3217 				goto einval;
3218 			qos[tok] = v * 100;
3219 			break;
3220 		case QOS_RLAT:
3221 		case QOS_WLAT:
3222 			if (match_u64(&args[0], &v))
3223 				goto einval;
3224 			qos[tok] = v;
3225 			break;
3226 		case QOS_MIN:
3227 		case QOS_MAX:
3228 			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3229 			    sizeof(buf))
3230 				goto einval;
3231 			if (cgroup_parse_float(buf, 2, &v))
3232 				goto einval;
3233 			if (v < 0)
3234 				goto einval;
3235 			qos[tok] = clamp_t(s64, v * 100,
3236 					   VRATE_MIN_PPM, VRATE_MAX_PPM);
3237 			break;
3238 		default:
3239 			goto einval;
3240 		}
3241 		user = true;
3242 	}
3243 
3244 	if (qos[QOS_MIN] > qos[QOS_MAX])
3245 		goto einval;
3246 
3247 	spin_lock_irq(&ioc->lock);
3248 
3249 	if (enable) {
3250 		blk_stat_enable_accounting(ioc->rqos.q);
3251 		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3252 		ioc->enabled = true;
3253 	} else {
3254 		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3255 		ioc->enabled = false;
3256 	}
3257 
3258 	if (user) {
3259 		memcpy(ioc->params.qos, qos, sizeof(qos));
3260 		ioc->user_qos_params = true;
3261 	} else {
3262 		ioc->user_qos_params = false;
3263 	}
3264 
3265 	ioc_refresh_params(ioc, true);
3266 	spin_unlock_irq(&ioc->lock);
3267 
3268 	put_disk_and_module(disk);
3269 	return nbytes;
3270 einval:
3271 	ret = -EINVAL;
3272 err:
3273 	put_disk_and_module(disk);
3274 	return ret;
3275 }
3276 
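/* print a device's linear cost model coefficients for io.cost.model */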
3277 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3278 				 struct blkg_policy_data *pd, int off)
3279 {
3280 	const char *dname = blkg_dev_name(pd->blkg);
3281 	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3282 	u64 *u = ioc->params.i_lcoefs;
3283 
3284 	if (!dname)
3285 		return 0;
3286 
3287 	seq_printf(sf, "%s ctrl=%s model=linear "
3288 		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
3289 		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3290 		   dname, ioc->user_cost_model ? "user" : "auto",
3291 		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3292 		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3293 	return 0;
3294 }
3295 
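/* print each device's cost model, one line per blkg, via ioc_cost_model_prfill() */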
3296 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3297 {
3298 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3299 
3300 	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3301 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3302 	return 0;
3303 }
3304 
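/*
 * Keyword tables for io.cost.model writes: cost_ctrl_tokens selects the
 * control mode and model type, i_lcoef_tokens the per-direction linear
 * coefficients (bytes per second, sequential and random IOs per second).
 */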
3305 static const match_table_t cost_ctrl_tokens = {
3306 	{ COST_CTRL,		"ctrl=%s"	},
3307 	{ COST_MODEL,		"model=%s"	},
3308 	{ NR_COST_CTRL_PARAMS,	NULL		},
3309 };
3310 
3311 static const match_table_t i_lcoef_tokens = {
3312 	{ I_LCOEF_RBPS,		"rbps=%u"	},
3313 	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
3314 	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
3315 	{ I_LCOEF_WBPS,		"wbps=%u"	},
3316 	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
3317 	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
3318 	{ NR_I_LCOEFS,		NULL		},
3319 };
3320 
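/*
 * Apply a cost model written to io.cost.model.  As with io.cost.qos, the
 * input is "MAJ:MIN" followed by key=value tokens; only the "linear" model
 * is accepted and setting any coefficient switches the device to
 * user-supplied parameters.  An illustrative write (coefficient values are
 * examples only):
 *
 *   echo "8:16 ctrl=user model=linear rbps=2000000000 rseqiops=40000" \
 *       > /sys/fs/cgroup/io.cost.model
 */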
3321 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3322 				    size_t nbytes, loff_t off)
3323 {
3324 	struct gendisk *disk;
3325 	struct ioc *ioc;
3326 	u64 u[NR_I_LCOEFS];
3327 	bool user;
3328 	char *p;
3329 	int ret;
3330 
3331 	disk = blkcg_conf_get_disk(&input);
3332 	if (IS_ERR(disk))
3333 		return PTR_ERR(disk);
3334 
3335 	ioc = q_to_ioc(disk->queue);
3336 	if (!ioc) {
3337 		ret = blk_iocost_init(disk->queue);
3338 		if (ret)
3339 			goto err;
3340 		ioc = q_to_ioc(disk->queue);
3341 	}
3342 
3343 	spin_lock_irq(&ioc->lock);
3344 	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3345 	user = ioc->user_cost_model;
3346 	spin_unlock_irq(&ioc->lock);
3347 
3348 	while ((p = strsep(&input, " \t\n"))) {
3349 		substring_t args[MAX_OPT_ARGS];
3350 		char buf[32];
3351 		int tok;
3352 		u64 v;
3353 
3354 		if (!*p)
3355 			continue;
3356 
3357 		switch (match_token(p, cost_ctrl_tokens, args)) {
3358 		case COST_CTRL:
3359 			match_strlcpy(buf, &args[0], sizeof(buf));
3360 			if (!strcmp(buf, "auto"))
3361 				user = false;
3362 			else if (!strcmp(buf, "user"))
3363 				user = true;
3364 			else
3365 				goto einval;
3366 			continue;
3367 		case COST_MODEL:
3368 			match_strlcpy(buf, &args[0], sizeof(buf));
3369 			if (strcmp(buf, "linear"))
3370 				goto einval;
3371 			continue;
3372 		}
3373 
3374 		tok = match_token(p, i_lcoef_tokens, args);
3375 		if (tok == NR_I_LCOEFS)
3376 			goto einval;
3377 		if (match_u64(&args[0], &v))
3378 			goto einval;
3379 		u[tok] = v;
3380 		user = true;
3381 	}
3382 
3383 	spin_lock_irq(&ioc->lock);
3384 	if (user) {
3385 		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3386 		ioc->user_cost_model = true;
3387 	} else {
3388 		ioc->user_cost_model = false;
3389 	}
3390 	ioc_refresh_params(ioc, true);
3391 	spin_unlock_irq(&ioc->lock);
3392 
3393 	put_disk_and_module(disk);
3394 	return nbytes;
3395 
3396 einval:
3397 	ret = -EINVAL;
3398 err:
3399 	put_disk_and_module(disk);
3400 	return ret;
3401 }
3402 
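/*
 * cgroup interface files: "weight" is exposed on every non-root cgroup while
 * the device-wide "cost.qos" and "cost.model" knobs exist only on the root.
 */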
3403 static struct cftype ioc_files[] = {
3404 	{
3405 		.name = "weight",
3406 		.flags = CFTYPE_NOT_ON_ROOT,
3407 		.seq_show = ioc_weight_show,
3408 		.write = ioc_weight_write,
3409 	},
3410 	{
3411 		.name = "cost.qos",
3412 		.flags = CFTYPE_ONLY_ON_ROOT,
3413 		.seq_show = ioc_qos_show,
3414 		.write = ioc_qos_write,
3415 	},
3416 	{
3417 		.name = "cost.model",
3418 		.flags = CFTYPE_ONLY_ON_ROOT,
3419 		.seq_show = ioc_cost_model_show,
3420 		.write = ioc_cost_model_write,
3421 	},
3422 	{}
3423 };
3424 
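/* blkcg policy hooks wiring up iocost's per-cgroup (cpd) and per-blkg (pd) data */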
3425 static struct blkcg_policy blkcg_policy_iocost = {
3426 	.dfl_cftypes	= ioc_files,
3427 	.cpd_alloc_fn	= ioc_cpd_alloc,
3428 	.cpd_free_fn	= ioc_cpd_free,
3429 	.pd_alloc_fn	= ioc_pd_alloc,
3430 	.pd_init_fn	= ioc_pd_init,
3431 	.pd_free_fn	= ioc_pd_free,
3432 	.pd_stat_fn	= ioc_pd_stat,
3433 };
3434 
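/* module init/exit simply register and unregister the blkcg policy */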
3435 static int __init ioc_init(void)
3436 {
3437 	return blkcg_policy_register(&blkcg_policy_iocost);
3438 }
3439 
3440 static void __exit ioc_exit(void)
3441 {
3442 	blkcg_policy_unregister(&blkcg_policy_iocost);
3443 }
3444 
3445 module_init(ioc_init);
3446 module_exit(ioc_exit);
3447