xref: /OK3568_Linux_fs/kernel/block/blk-rq-qos.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef RQ_QOS_H
3*4882a593Smuzhiyun #define RQ_QOS_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/kernel.h>
6*4882a593Smuzhiyun #include <linux/blkdev.h>
7*4882a593Smuzhiyun #include <linux/blk_types.h>
8*4882a593Smuzhiyun #include <linux/atomic.h>
9*4882a593Smuzhiyun #include <linux/wait.h>
10*4882a593Smuzhiyun #include <linux/blk-mq.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "blk-mq-debugfs.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun struct blk_mq_debugfs_attr;
15*4882a593Smuzhiyun 
/*
 * Identity of an rq_qos policy instance.  At most one instance per id may
 * be attached to a request_queue (see rq_qos_id() lookup below).
 */
enum rq_qos_id {
	RQ_QOS_WBT,		/* writeback throttling */
	RQ_QOS_LATENCY,		/* blkcg latency controller (see blkcg_rq_qos()) */
	RQ_QOS_COST,		/* cost-model based controller */
	RQ_QOS_IOPRIO,		/* I/O-priority based policy */
};
22*4882a593Smuzhiyun 
/*
 * Waitqueue plus in-flight counter: the primitive rq_qos policies use to
 * bound the number of outstanding requests (see rq_qos_wait() and
 * rq_wait_inc_below() below).
 */
struct rq_wait {
	wait_queue_head_t wait;	/* throttled submitters sleep here */
	atomic_t inflight;	/* currently outstanding requests */
};
27*4882a593Smuzhiyun 
/*
 * One attached QoS policy instance.  Instances form a singly linked list
 * headed at q->rq_qos; list mutation is done under queue freeze plus
 * ->queue_lock (see rq_qos_add()/rq_qos_del()).
 */
struct rq_qos {
	struct rq_qos_ops *ops;		/* policy callbacks, never NULL once added */
	struct request_queue *q;	/* owning queue */
	enum rq_qos_id id;		/* which policy this instance implements */
	struct rq_qos *next;		/* next policy on the queue's chain */
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;	/* per-instance debugfs directory */
#endif
};
37*4882a593Smuzhiyun 
/*
 * Policy callbacks.  All hooks are optional; the rq_qos_*() wrappers below
 * dispatch to them only when a policy chain is attached.
 */
struct rq_qos_ops {
	/* may block/throttle a bio before it is turned into a request */
	void (*throttle)(struct rq_qos *, struct bio *);
	/* associate a bio with a newly allocated request */
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	/* a bio was merged into an existing request */
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	/* request dispatched to the driver */
	void (*issue)(struct rq_qos *, struct request *);
	/* request was requeued after a failed/aborted dispatch */
	void (*requeue)(struct rq_qos *, struct request *);
	/* request completed */
	void (*done)(struct rq_qos *, struct request *);
	/* bio completed */
	void (*done_bio)(struct rq_qos *, struct bio *);
	/* undo throttle accounting for a bio that won't be submitted */
	void (*cleanup)(struct rq_qos *, struct bio *);
	/* device queue depth changed; policy may rescale its limits */
	void (*queue_depth_changed)(struct rq_qos *);
	/* tear down the policy instance */
	void (*exit)(struct rq_qos *);
	/* optional debugfs attributes, registered by rq_qos_add() */
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
51*4882a593Smuzhiyun 
/*
 * Adaptive queue-depth state shared by depth-scaling policies (see
 * rq_depth_scale_up()/rq_depth_scale_down() below).
 */
struct rq_depth {
	unsigned int max_depth;		/* current effective depth limit */

	/* scaling position; sign convention lives in blk-rq-qos.c — TODO confirm */
	int scale_step;
	bool scaled_max;		/* presumably: limit already at its maximum — verify in .c */

	unsigned int queue_depth;	/* depth reported by the device/queue */
	unsigned int default_depth;	/* depth to fall back to */
};
61*4882a593Smuzhiyun 
rq_qos_id(struct request_queue * q,enum rq_qos_id id)62*4882a593Smuzhiyun static inline struct rq_qos *rq_qos_id(struct request_queue *q,
63*4882a593Smuzhiyun 				       enum rq_qos_id id)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun 	struct rq_qos *rqos;
66*4882a593Smuzhiyun 	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
67*4882a593Smuzhiyun 		if (rqos->id == id)
68*4882a593Smuzhiyun 			break;
69*4882a593Smuzhiyun 	}
70*4882a593Smuzhiyun 	return rqos;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
wbt_rq_qos(struct request_queue * q)73*4882a593Smuzhiyun static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun 	return rq_qos_id(q, RQ_QOS_WBT);
76*4882a593Smuzhiyun }
77*4882a593Smuzhiyun 
blkcg_rq_qos(struct request_queue * q)78*4882a593Smuzhiyun static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	return rq_qos_id(q, RQ_QOS_LATENCY);
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun 
rq_wait_init(struct rq_wait * rq_wait)83*4882a593Smuzhiyun static inline void rq_wait_init(struct rq_wait *rq_wait)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun 	atomic_set(&rq_wait->inflight, 0);
86*4882a593Smuzhiyun 	init_waitqueue_head(&rq_wait->wait);
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun 
/*
 * Attach @rqos at the head of @q's policy chain and register its debugfs
 * attributes (if any).  Caller provides a fully initialised rqos with
 * ->ops set.
 */
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in-flight when adding rqos, so freeze queue, which
	 * is fine since we only support rq_qos for blk-mq queue.
	 *
	 * Reuse ->queue_lock for protecting against other concurrent
	 * rq_qos adding/deleting
	 */
	blk_mq_freeze_queue(q);

	/* push onto the head of the singly linked chain */
	spin_lock_irq(&q->queue_lock);
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	/* debugfs registration can sleep, so do it outside lock/freeze */
	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}
110*4882a593Smuzhiyun 
/*
 * Unlink @rqos from @q's policy chain and remove its debugfs entries.
 * Silently does nothing to the chain if @rqos is not on it.
 */
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing queue & using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	/* walk via pointer-to-pointer so head removal needs no special case */
	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	/* may sleep; must happen outside lock/freeze */
	blk_mq_debugfs_unregister_rqos(rqos);
}
134*4882a593Smuzhiyun 
/* Callback: try to take an inflight slot; return true on success. */
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
/* Callback: undo side effects when the wait is abandoned. */
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

/* Sleep on @rqw until acquire_inflight_cb succeeds (see blk-rq-qos.c). */
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
/* Atomically bump ->inflight if it stays below @limit; true on success. */
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
/* Depth-scaling helpers; return whether the limit actually changed. */
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
145*4882a593Smuzhiyun 
/*
 * Out-of-line dispatchers, implemented in blk-rq-qos.c; presumably each
 * walks the rqos chain invoking the matching ops hook.  The inline
 * rq_qos_*() wrappers below call these only when q->rq_qos is non-NULL,
 * keeping the common no-policy case branch-cheap.
 */
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
155*4882a593Smuzhiyun 
rq_qos_cleanup(struct request_queue * q,struct bio * bio)156*4882a593Smuzhiyun static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	if (q->rq_qos)
159*4882a593Smuzhiyun 		__rq_qos_cleanup(q->rq_qos, bio);
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun 
rq_qos_done(struct request_queue * q,struct request * rq)162*4882a593Smuzhiyun static inline void rq_qos_done(struct request_queue *q, struct request *rq)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun 	if (q->rq_qos)
165*4882a593Smuzhiyun 		__rq_qos_done(q->rq_qos, rq);
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun 
rq_qos_issue(struct request_queue * q,struct request * rq)168*4882a593Smuzhiyun static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun 	if (q->rq_qos)
171*4882a593Smuzhiyun 		__rq_qos_issue(q->rq_qos, rq);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun 
rq_qos_requeue(struct request_queue * q,struct request * rq)174*4882a593Smuzhiyun static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun 	if (q->rq_qos)
177*4882a593Smuzhiyun 		__rq_qos_requeue(q->rq_qos, rq);
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun 
rq_qos_done_bio(struct request_queue * q,struct bio * bio)180*4882a593Smuzhiyun static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun 	if (q->rq_qos)
183*4882a593Smuzhiyun 		__rq_qos_done_bio(q->rq_qos, bio);
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun 
rq_qos_throttle(struct request_queue * q,struct bio * bio)186*4882a593Smuzhiyun static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun 	/*
189*4882a593Smuzhiyun 	 * BIO_TRACKED lets controllers know that a bio went through the
190*4882a593Smuzhiyun 	 * normal rq_qos path.
191*4882a593Smuzhiyun 	 */
192*4882a593Smuzhiyun 	bio_set_flag(bio, BIO_TRACKED);
193*4882a593Smuzhiyun 	if (q->rq_qos)
194*4882a593Smuzhiyun 		__rq_qos_throttle(q->rq_qos, bio);
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun 
rq_qos_track(struct request_queue * q,struct request * rq,struct bio * bio)197*4882a593Smuzhiyun static inline void rq_qos_track(struct request_queue *q, struct request *rq,
198*4882a593Smuzhiyun 				struct bio *bio)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun 	if (q->rq_qos)
201*4882a593Smuzhiyun 		__rq_qos_track(q->rq_qos, rq, bio);
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun 
rq_qos_merge(struct request_queue * q,struct request * rq,struct bio * bio)204*4882a593Smuzhiyun static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
205*4882a593Smuzhiyun 				struct bio *bio)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun 	if (q->rq_qos)
208*4882a593Smuzhiyun 		__rq_qos_merge(q->rq_qos, rq, bio);
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun 
rq_qos_queue_depth_changed(struct request_queue * q)211*4882a593Smuzhiyun static inline void rq_qos_queue_depth_changed(struct request_queue *q)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	if (q->rq_qos)
214*4882a593Smuzhiyun 		__rq_qos_queue_depth_changed(q->rq_qos);
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun void rq_qos_exit(struct request_queue *);
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun #endif
220