xref: /OK3568_Linux_fs/kernel/block/blk-iolatency.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similar to wbt with a few exceptions
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                   root blkg
 *             /                     \
 *        fast (target=5ms)     slow (target=10ms)
 *         /     \                  /        \
 *       a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight.  This starts at (u64)-1 down
 * to 1.  If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more then we induce a latency at userspace return.  We accumulate the
 * total amount of time we need to be punished by doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
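/*
 * Illustrative numbers for the induced delay path (a hand-worked sketch, not
 * taken from the code): with min_lat_nsec = 5ms, a root-issued IO that
 * completes in 2ms while we are pinned at qd == 1 accumulates
 * 5ms - 2ms = 3ms of delay.  Several hundred such IOs build up more than a
 * second of debt, but a single throttle event still sleeps at most
 * min(total_time, NSEC_PER_SEC) = 1s.
 */
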
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;

	/*
	 * ->enabled is the master enable switch gating the throttling logic and
	 * inflight tracking. The number of cgroups which have iolat enabled is
	 * tracked in ->enable_cnt, and ->enabled is flipped on/off accordingly
	 * from ->enable_work with the request_queue frozen. For details, see
	 * blkiolatency_enable_work_fn().
	 */
	bool enabled;
	atomic_t enable_cnt;
	struct work_struct enable_work;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total io's from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};

struct percentile_stats {
	u64 total;
	u64 missed;
};

struct latency_stat {
	union {
		struct percentile_stats ps;
		struct blk_rq_stat rqs;
	};
};

struct iolatency_grp {
	struct blkg_policy_data pd;
	struct latency_stat __percpu *stats;
	struct latency_stat cur_stat;
	struct blk_iolatency *blkiolat;
	struct rq_depth rq_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IO's for the last summation. */
	u64 nr_samples;

	bool ssd;
	struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80)  - 80 samples
	2014, // exp(1/60)  - 60 samples
};
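/*
 * Worked example (illustrative, not from the code): the bucket size is
 * NSEC_PER_SEC / 4 = 250ms, so a 100ms window maps to exp_idx 0 (factor
 * 2045, ~600 windows of decay == ~1 min of busy time), while a full 1s
 * window maps to exp_idx 4 (factor 2014, ~60 windows == again ~1 min).
 * Either way the decay horizon works out to roughly a minute of IO time.
 */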

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
				     struct latency_stat *stat)
{
	if (iolat->ssd) {
		stat->ps.total = 0;
		stat->ps.missed = 0;
	} else
		blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
				    struct latency_stat *sum,
				    struct latency_stat *stat)
{
	if (iolat->ssd) {
		sum->ps.total += stat->ps.total;
		sum->ps.missed += stat->ps.missed;
	} else
		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
					    u64 req_time)
{
	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
	if (iolat->ssd) {
		if (req_time >= iolat->min_lat_nsec)
			stat->ps.missed++;
		stat->ps.total++;
	} else
		blk_rq_stat_add(&stat->rqs, req_time);
	put_cpu_ptr(stat);
}

static inline bool latency_sum_ok(struct iolatency_grp *iolat,
				  struct latency_stat *stat)
{
	if (iolat->ssd) {
		u64 thresh = div64_u64(stat->ps.total, 10);
		thresh = max(thresh, 1ULL);
		return stat->ps.missed < thresh;
	}
	return stat->rqs.mean <= iolat->min_lat_nsec;
}

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
				       struct latency_stat *stat)
{
	if (iolat->ssd)
		return stat->ps.total;
	return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
					      struct latency_stat *stat)
{
	int exp_idx;

	if (iolat->ssd)
		return;

	/*
	 * calc_load() takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	iolat->lat_avg = calc_load(iolat->lat_avg,
				   iolatency_exp_factors[exp_idx],
				   stat->rqs.mean);
}

static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	atomic_dec(&rqw->inflight);
	wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
	struct iolatency_grp *iolat = private_data;
	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       bool issue_as_root,
				       bool use_memdelay)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

	if (use_delay)
		blkcg_schedule_throttle(rqos->q, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root.  If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IO's through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
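/*
 * For example (numbers illustrative): with qd == 128 this returns
 * 128 >> 4 == 8 when scaling up and 128 >> 2 == 32 when scaling down, so we
 * back off roughly 4x faster than we recover, with a floor of 1.
 */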

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down it knows which way it needs to go
 * based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it.  Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}
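/*
 * Worked example (illustrative): with qd == 128 the cookie starts at
 * DEFAULT_SCALE_COOKIE (1000000).  Scale-down events subtract 32 each until
 * the cookie is more than 128 below the default, then creep down by 1 per
 * event, and stop entirely once it sits 2 * qd == 256 below.  Scale-up
 * events mirror this: +1 per event while deep in the hole, +8 once close,
 * snapping back to the default when they would overshoot it.
 */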

/*
 * Change the queue depth of the iolatency_grp.  We add 1/16th of the queue
 * depth when scaling up and halve the current max depth when scaling down,
 * so we don't get wild swings and hopefully dial in to fairer distribution
 * of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->rq_depth.max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->rq_depth.max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->rq_depth.max_depth = max(old, 1UL);
	}
}
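/*
 * For example (numbers illustrative): with qd == 128 and max_depth currently
 * at 64, a scale-up moves it to 64 + (128 >> 4) == 72, while a scale-down
 * halves it to 32.  Repeated scale-downs bottom out at 1, at which point we
 * fall back to the induced delay mechanism described at the top of the file.
 */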

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	unsigned int old;
	int direction = 0;

	if (lat_to_blkg(iolat)->parent == NULL)
		return;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

	/* Somebody beat us to the punch, just bail. */
	if (old != our_cookie)
		return;

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did 5%
		 * or less of the IO's for the last summation just skip this
		 * scale down event.
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->rq_depth.max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}

static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg_gq *blkg = bio->bi_blkg;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blkiolat->enabled)
		return;

	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}
	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Have to do this so we are truncated to the correct time that our
	 * issue is truncated to.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bio's in the cgroups latency
	 * statistics as it could skew the numbers downwards.
	 */
	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;
		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct latency_stat stat;
	unsigned long flags;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
		latency_stat_init(iolat, s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	iolat_update_total_lat_avg(iolat, &stat);

	/* Everything is ok and we don't need to adjust the scale. */
	if (latency_sum_ok(iolat, &stat) &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);

	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

	if ((lat_info->last_scale_event >= now ||
	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
		goto out;

	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
	    latency_sum_ok(iolat, &stat)) {
		if (latency_stat_samples(iolat, &iolat->cur_stat) <
		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
			goto out;
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (lat_info->scale_lat == 0 ||
		   lat_info->scale_lat >= iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
	latency_stat_init(iolat, &iolat->cur_stat);
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}

static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now;
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	int inflight = 0;

	blkg = bio->bi_blkg;
	if (!blkg || !bio_flagged(bio, BIO_TRACKED))
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	if (!iolat->blkiolat->enabled)
		return;

	now = ktime_to_ns(ktime_get());
	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		inflight = atomic_dec_return(&rqw->inflight);
		WARN_ON_ONCE(inflight < 0);
		/*
		 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
		 * submitted, so do not account for it.
		 */
		if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
			iolatency_record_time(iolat, &bio->bi_issue, now,
					      issue_as_root);
			window_start = atomic64_read(&iolat->window_start);
			if (now > window_start &&
			    (now - window_start) >= iolat->cur_win_nsec) {
				if (atomic64_cmpxchg(&iolat->window_start,
					     window_start, now) == window_start)
					iolatency_check_latencies(iolat, now);
			}
		}
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	flush_work(&blkiolat->enable_work);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};

static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.q->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_tryget(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and carry
		 * on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}

/**
 * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
 * @work: enable_work of the blk_iolatency of interest
 *
 * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
 * is relatively expensive as it involves walking up the hierarchy twice for
 * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
 * want to disable the in-flight tracking.
 *
 * We have to make sure that the counting is balanced - we don't want to leak
 * the in-flight counts by disabling accounting in the completion path while IOs
 * are in flight. This is achieved by ensuring that no IO is in flight by
 * freezing the queue while flipping ->enabled. As this requires a sleepable
 * context, ->enabled flipping is punted to this work function.
 */
static void blkiolatency_enable_work_fn(struct work_struct *work)
{
	struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
						      enable_work);
	bool enabled;

	/*
	 * There can only be one instance of this function running for @blkiolat
	 * and it's guaranteed to be executed at least once after the latest
	 * ->enable_cnt modification. Acting on the latest ->enable_cnt is
	 * sufficient.
	 *
	 * Also, we know @blkiolat is safe to access as ->enable_work is flushed
	 * in blkcg_iolatency_exit().
	 */
	enabled = atomic_read(&blkiolat->enable_cnt);
	if (enabled != blkiolat->enabled) {
		blk_mq_freeze_queue(blkiolat->rqos.q);
		blkiolat->enabled = enabled;
		blk_mq_unfreeze_queue(blkiolat->rqos.q);
	}
}

int blk_iolatency_init(struct request_queue *q)
{
	struct blk_iolatency *blkiolat;
	struct rq_qos *rqos;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	rqos = &blkiolat->rqos;
	rqos->id = RQ_QOS_LATENCY;
	rqos->ops = &blkcg_iolatency_ops;
	rqos->q = q;

	rq_qos_add(q, rqos);

	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
	if (ret) {
		rq_qos_del(q, rqos);
		kfree(blkiolat);
		return ret;
	}

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
	INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);

	return 0;
}

static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);
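	/*
	 * Worked example (illustrative): a 5ms target gives val << 4 == 80ms,
	 * which the clamps above round up to the 100ms minimum window; a
	 * 100ms target would ask for 1.6s and gets capped at the 1s maximum.
	 */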

	if (!oldval && val) {
		if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
			schedule_work(&blkiolat->enable_work);
	}
	if (oldval && !val) {
		blkcg_clear_delay(blkg);
		if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
			schedule_work(&blkiolat->enable_work);
	}
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;
		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}

static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
	if (ret)
		return ret;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551616 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	iolatency_set_min_lat_nsec(blkg, lat_val);
	if (oldval != iolat->min_lat_nsec)
		iolatency_clear_scaling(blkg);
	ret = 0;
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
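/*
 * Userspace usage sketch (device numbers illustrative): writing
 * "8:16 target=500" to a cgroup's io.latency file sets a 500 usec latency
 * target for device 8:16, while "8:16 target=max" clears it again, which
 * drops the cgroup out of the throttling accounting for that device.
 */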

static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}

static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
				 size_t size)
{
	struct latency_stat stat;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
	}
	preempt_enable();

	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
				 (unsigned long long)stat.ps.missed,
				 (unsigned long long)stat.ps.total);
	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
			 (unsigned long long)stat.ps.missed,
			 (unsigned long long)stat.ps.total,
			 iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
				size_t size)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat;
	unsigned long long cur_win;

	if (!blkcg_debug_stats)
		return 0;

	if (iolat->ssd)
		return iolatency_ssd_stat(iolat, buf, size);

	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
				 avg_lat, cur_win);

	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
			 iolat->rq_depth.max_depth, avg_lat, cur_win);
}


static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
						   struct request_queue *q,
						   struct blkcg *blkcg)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
				       __alignof__(struct latency_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	if (blk_queue_nonrot(blkg->q))
		iolat->ssd = true;
	else
		iolat->ssd = false;

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;
		stat = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_init(iolat, stat);
	}

	latency_stat_init(iolat, &iolat->cur_stat);
	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
	iolat->rq_depth.max_depth = UINT_MAX;
	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);

	iolatency_set_min_lat_nsec(blkg, 0);
	iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	free_percpu(iolat->stats);
	kfree(iolat);
}

static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes	= iolatency_files,
	.pd_alloc_fn	= iolatency_pd_alloc,
	.pd_init_fn	= iolatency_pd_init,
	.pd_offline_fn	= iolatency_pd_offline,
	.pd_free_fn	= iolatency_pd_free,
	.pd_stat_fn	= iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);