xref: /OK3568_Linux_fs/kernel/block/mq-deadline-main.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "mq-deadline-cgroup.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int aging_expire = 10 * HZ;
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */
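
/*
 * All of the above defaults are runtime-tunable through the sysfs attributes
 * defined in deadline_attrs[] near the end of this file; see the usage
 * example next to that table.
 */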

enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
	struct io_stats_per_prio stats[DD_PRIO_COUNT];
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in FIFO order. Read, write or both are NULL. */
	struct request *next_rq[DD_DIR_COUNT];
};

struct deadline_data {
	/*
	 * run time data
	 */

	/* Request queue that owns this data structure. */
	struct request_queue *queue;

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	struct io_stats __percpu *stats;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {				\
	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	local_inc(&io_stats->stats[(prio)].event_type);			\
	put_cpu_ptr(io_stats);						\
} while (0)
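
/*
 * Usage example (as in dd_insert_request() below): dd_count(dd, inserted, prio)
 * increments the per-CPU "inserted" counter for the given priority level.
 */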

/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 * outdated.
 */
#define dd_sum(dd, event_type, prio) ({					\
	unsigned int cpu;						\
	u32 sum = 0;							\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	for_each_present_cpu(cpu)					\
		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
				  stats[(prio)].event_type);		\
	sum;								\
})
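
/*
 * dd_queued() and dd_owned_by_driver() below derive queue occupancy from
 * differences of these sums, e.g. inserted - completed = requests queued.
 */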

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};
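
/*
 * E.g. a request submitted with I/O priority class IOPRIO_CLASS_RT is queued
 * on dd->per_prio[DD_RT_PRIO]; requests without an explicit class
 * (IOPRIO_CLASS_NONE) are treated as best-effort.
 */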

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_blkcg *blkcg = next->elv.priv[0];

	dd_count(dd, merged, prio);
	ddcg_count(blkcg, merged, ioprio_class);

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}
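
/*
 * Expiry example: with the default read_expire of HZ / 2, a read inserted at
 * jiffies J gets rq->fifo_time = J + HZ / 2 (see dd_insert_request()), so
 * deadline_check_fifo() reports it as expired once jiffies >= J + HZ / 2.
 */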

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc. and with a start time <= @latest_start_ns.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     u64 latest_start_ns)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	struct dd_blkcg *blkcg;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (rq->start_time_ns > latest_start_ns)
			return NULL;
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (rq->start_time_ns > latest_start_ns)
		return NULL;
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, dispatched, prio);
	blkcg = rq->elv.priv[0];
	ddcg_count(blkcg, dispatched, ioprio_class);
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const u64 now_ns = ktime_get_ns();
	struct request *rq = NULL;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	/*
	 * Start with dispatching requests whose deadline expired more than
	 * aging_expire jiffies ago.
	 */
	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
					   jiffies_to_nsecs(dd->aging_expire));
		if (rq)
			goto unlock;
	}
	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}
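
/*
 * Dispatch order, concretely: the first (aging) pass scans DD_BE_PRIO and
 * DD_IDLE_PRIO for requests that started more than aging_expire ago, so the
 * lower priority levels cannot be starved forever. The second pass scans
 * RT -> BE -> IDLE and stops at the first level that still has requests
 * queued, even if none of them could be dispatched right now (e.g. writes
 * to locked zones).
 */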

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
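
/*
 * E.g. with q->nr_requests == 64 this sets async_depth to 48: asynchronous
 * requests and writes may consume at most 75% of the scheduler tags, leaving
 * headroom for synchronous reads, which dd_limit_depth() does not throttle.
 */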

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	dd_deactivate_policy(dd->queue);

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
	}

	free_percpu(dd->stats);

	kfree(dd);
}

/*
 * Initialize elevator private data (deadline_data) and associate with blkcg.
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	/*
	 * Initialization would be very tricky if the queue is not frozen,
	 * hence the warning statement below.
	 */
	WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
				     GFP_KERNEL | __GFP_ZERO);
	if (!dd->stats)
		goto free_dd;

	dd->queue = q;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->aging_expire = aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	ret = dd_activate_policy(q);
	if (ret)
		goto free_stats;

	ret = 0;
	q->elevator = eq;
	return 0;

free_stats:
	free_percpu(dd->stats);

free_dd:
	kfree(dd);

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	struct dd_blkcg *blkcg;

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	/*
	 * If a block cgroup has been associated with the submitter and if an
	 * I/O priority has been set in the associated block cgroup, use the
	 * lowest of the cgroup priority and the request priority for the
	 * request. If no priority has been set in the request, use the cgroup
	 * priority.
	 */
	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, inserted, prio);
	blkcg = dd_blkcg_from_bio(rq->bio);
	ddcg_count(blkcg, inserted, ioprio_class);
	rq->elv.priv[0] = blkcg;

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	blk_mq_sched_request_inserted(rq);

	per_prio = &dd->per_prio[prio];
	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
		rq->fifo_time = jiffies;
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct dd_blkcg *blkcg = rq->elv.priv[0];
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	dd_count(dd, completed, prio);
	ddcg_count(blkcg, completed, ioprio_class);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES
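
/*
 * Note on units: the *_JIFFIES attributes are exposed in milliseconds.
 * SHOW_JIFFIES converts the stored jiffies value with jiffies_to_msecs() and
 * STORE_JIFFIES converts user input back with msecs_to_jiffies(), so e.g.
 * writing 500 to read_expire stores msecs_to_jiffies(500) == HZ / 2 jiffies.
 */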

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(aging_expire),
	__ATTR_NULL
};
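
/*
 * A minimal tuning session from the shell, assuming a disk named "sda" (the
 * attribute directory follows the standard elevator sysfs layout):
 *
 *   echo mq-deadline > /sys/block/sda/queue/scheduler
 *   cat /sys/block/sda/queue/iosched/read_expire     # 500 (ms) by default
 *   echo 100 > /sys/block/sda/queue/iosched/read_expire
 */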

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq = per_prio->next_rq[data_dir];		\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
		   dd_queued(dd, DD_BE_PRIO),
		   dd_queued(dd, DD_IDLE_PRIO));
	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
		- dd_sum(dd, completed, prio);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
		   dd_owned_by_driver(dd, DD_BE_PRIO),
		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
			.seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
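
/*
 * The scheduler can be selected under either "mq-deadline" or the legacy
 * alias "deadline". ELEVATOR_F_ZBD_SEQ_WRITE advertises that this scheduler
 * serializes sequential writes to zoned block devices (see the zone_lock
 * handling above).
 */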

MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	int ret;

	ret = elv_register(&mq_deadline);
	if (ret)
		goto out;
	ret = dd_blkcg_init();
	if (ret)
		goto unreg;

out:
	return ret;

unreg:
	elv_unregister(&mq_deadline);
	goto out;
}

static void __exit deadline_exit(void)
{
	dd_blkcg_exit();
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");