xref: /OK3568_Linux_fs/kernel/block/blk-mq-sched.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

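/*
 * Attach the io_cq linking the current task's io_context to this queue to
 * @rq (looking it up, or creating it, under q->queue_lock), taking a
 * reference on the io_context so icq-based schedulers can account the
 * request to the submitting process. Passthrough requests may have no
 * io_context; nothing is done in that case.
 */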
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart; a later blk_mq_sched_restart()
 * will test-and-clear the flag and re-run the queue.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

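/*
 * If the hardware queue was marked for restart, clear BLK_MQ_S_SCHED_RESTART
 * and re-run the queue asynchronously so stalled requests get another chance
 * to dispatch.
 */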
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the
	 * list_empty_careful(&hctx->dispatch) check in blk_mq_run_hw_queue().
	 * Its pair is the barrier in blk_mq_dispatch_rq_list(). Without the
	 * barrier, the dispatch side might not see SCHED_RESTART while, at
	 * the same time, a request newly added to hctx->dispatch is missed
	 * by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

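/*
 * list_sort() comparator: order requests by their hctx pointer so that
 * requests destined for the same hardware queue become adjacent.
 */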
static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

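/*
 * Cut the leading run of requests that share the same hctx off @rq_list and
 * dispatch them as one batch; requests for other hardware queues remain on
 * @rq_list for subsequent calls.
 */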
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;
	} while (++count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests belonging to different hctxs may be dequeued from
		 * some schedulers, such as bfq and deadline. Sort the list by
		 * hctx and dispatch the requests in batches, one hctx at a
		 * time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

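/*
 * Keep pulling requests from the scheduler while progress is being made, but
 * bound each invocation to roughly one second (or a pending reschedule) and
 * then defer further dispatching to a queue re-run, so a single hctx cannot
 * monopolise the CPU.
 */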
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

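/*
 * Return the software queue that follows @ctx in this hardware queue's ctx
 * map, wrapping around at the end; used for round-robin dispatch.
 */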
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

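/*
 * Dispatch requests for one hardware queue: anything stranded on
 * hctx->dispatch is issued first, then requests are pulled from the IO
 * scheduler (if one is attached) or from the software queues. Returns
 * -EAGAIN if hctx->dispatch turned out to be non-empty and another run is
 * needed.
 */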
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched_dispatch)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

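/*
 * Entry point for running a hardware queue: skip stopped or quiesced queues,
 * then dispatch, re-running the queue if -EAGAIN keeps being returned so
 * that flushes stranded on hctx->dispatch are not starved.
 */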
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

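/*
 * Try to merge @bio into an already queued request. If the elevator provides
 * a ->bio_merge hook, defer to it; otherwise do a bounded reverse scan of the
 * software queue for this context.
 */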
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(q, bio, nr_segs);

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		return false;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		ret = true;
	}

	spin_unlock(&ctx->lock);

	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * the device may be in a state where it cannot handle FS requests,
	 * so STS_RESOURCE keeps being returned and the FS requests pile up
	 * on hctx->dispatch, while a passthrough request may be exactly what
	 * is needed to get the device out of that state. If the passthrough
	 * request were added to the scheduler queue instead, it would never
	 * get a chance to dispatch, since requests on hctx->dispatch are
	 * always preferred.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

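/*
 * Insert a single request: flush and passthrough requests go straight to
 * hctx->dispatch, everything else goes to the elevator (if one is attached)
 * or to the software queue. Optionally kick the hardware queue afterwards.
 */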
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
		/*
		 * First, a normal IO request is inserted into the scheduler
		 * queue or the sw queue, while a flush request is added to
		 * the dispatch queue (hctx->dispatch) directly. Since there
		 * is at most one in-flight flush request per hw queue, it
		 * does not matter whether the flush request goes to the tail
		 * or the front of the dispatch queue.
		 *
		 * Second, with NCQ a flush request is a non-NCQ command, so
		 * queueing it fails while any normal IO request (NCQ command)
		 * is in flight. Adding the flush rq to the front of
		 * hctx->dispatch tends to add some extra latency to it
		 * (because of S_SCHED_RESTART) compared with adding it to the
		 * tail, which increases the chance of flush merging, so fewer
		 * flush requests are issued to the controller. It is observed
		 * that ~10% of the runtime is saved in blktests block/004 on
		 * a disk attached via AHCI/NCQ when the flush rq is added to
		 * the front of hctx->dispatch.
		 *
		 * So simply queue the flush rq at the front of hctx->dispatch,
		 * so that flush-intensive workloads benefit on NCQ hardware.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

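/*
 * Insert a list of requests that all map to the same (ctx, hctx), typically
 * when flushing a plug. With no elevator and an idle hardware queue, try to
 * issue the requests directly and fall back to the software queue for
 * whatever is left.
 */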
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug context
	 * only; take one usage counter to prevent the queue from being
	 * released underneath us.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * try to issue requests directly if the hw queue isn't
		 * busy in case of 'none' scheduler, and this way may save
		 * us one extra enqueue & dequeue to sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
	percpu_ref_put(&q->q_usage_counter);
}

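/* Free the scheduler requests and the sched_tags map of one hardware queue. */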
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags, flags);
		hctx->sched_tags = NULL;
	}
}

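/*
 * Allocate a scheduler tag map sized to q->nr_requests for one hardware queue
 * and populate it with requests; on failure everything is freed again.
 */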
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	/* Clear HCTX_SHARED so tags are init'ed */
	unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags, flags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		/* Clear HCTX_SHARED so tags are freed */
		unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags, flags);
			hctx->sched_tags = NULL;
		}
	}
}

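/*
 * Attach elevator @e to queue @q: size q->nr_requests, allocate per-hctx
 * scheduler tags, then call the elevator's init_sched and per-hctx init_hctx
 * hooks, registering the debugfs entries along the way. Everything is undone
 * on failure. A NULL @e means "none": the queue runs without a scheduler.
 */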
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

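/*
 * Detach elevator @e from queue @q: run the per-hctx exit hooks, unregister
 * the debugfs entries, call the elevator's exit_sched hook, free the
 * scheduler tag maps and clear q->elevator.
 */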
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}