// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

15*4882a593Smuzhiyun /**
16*4882a593Smuzhiyun * blk_end_sync_rq - executes a completion event on a request
17*4882a593Smuzhiyun * @rq: request to complete
18*4882a593Smuzhiyun * @error: end I/O status of the request
19*4882a593Smuzhiyun */
blk_end_sync_rq(struct request * rq,blk_status_t error)20*4882a593Smuzhiyun static void blk_end_sync_rq(struct request *rq, blk_status_t error)
21*4882a593Smuzhiyun {
22*4882a593Smuzhiyun struct completion *waiting = rq->end_io_data;
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun rq->end_io_data = NULL;
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /*
27*4882a593Smuzhiyun * complete last, if this is a stack request the process (and thus
28*4882a593Smuzhiyun * the rq pointer) could be invalid right after this complete()
29*4882a593Smuzhiyun */
30*4882a593Smuzhiyun complete(waiting);
31*4882a593Smuzhiyun }
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun /**
34*4882a593Smuzhiyun * blk_execute_rq_nowait - insert a request into queue for execution
35*4882a593Smuzhiyun * @q: queue to insert the request in
36*4882a593Smuzhiyun * @bd_disk: matching gendisk
37*4882a593Smuzhiyun * @rq: request to insert
38*4882a593Smuzhiyun * @at_head: insert request at head or tail of queue
39*4882a593Smuzhiyun * @done: I/O completion handler
40*4882a593Smuzhiyun *
41*4882a593Smuzhiyun * Description:
42*4882a593Smuzhiyun * Insert a fully prepared request at the back of the I/O scheduler queue
43*4882a593Smuzhiyun * for execution. Don't wait for completion.
44*4882a593Smuzhiyun *
45*4882a593Smuzhiyun * Note:
46*4882a593Smuzhiyun * This function will invoke @done directly if the queue is dead.
47*4882a593Smuzhiyun */
blk_execute_rq_nowait(struct request_queue * q,struct gendisk * bd_disk,struct request * rq,int at_head,rq_end_io_fn * done)48*4882a593Smuzhiyun void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
49*4882a593Smuzhiyun struct request *rq, int at_head,
50*4882a593Smuzhiyun rq_end_io_fn *done)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun WARN_ON(irqs_disabled());
53*4882a593Smuzhiyun WARN_ON(!blk_rq_is_passthrough(rq));
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun rq->rq_disk = bd_disk;
56*4882a593Smuzhiyun rq->end_io = done;
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun blk_account_io_start(rq);
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun /*
61*4882a593Smuzhiyun * don't check dying flag for MQ because the request won't
62*4882a593Smuzhiyun * be reused after dying flag is set
63*4882a593Smuzhiyun */
64*4882a593Smuzhiyun blk_mq_sched_insert_request(rq, at_head, true, false);
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /**
69*4882a593Smuzhiyun * blk_execute_rq - insert a request into queue for execution
70*4882a593Smuzhiyun * @q: queue to insert the request in
71*4882a593Smuzhiyun * @bd_disk: matching gendisk
72*4882a593Smuzhiyun * @rq: request to insert
73*4882a593Smuzhiyun * @at_head: insert request at head or tail of queue
74*4882a593Smuzhiyun *
75*4882a593Smuzhiyun * Description:
76*4882a593Smuzhiyun * Insert a fully prepared request at the back of the I/O scheduler queue
77*4882a593Smuzhiyun * for execution and wait for completion.
78*4882a593Smuzhiyun */
blk_execute_rq(struct request_queue * q,struct gendisk * bd_disk,struct request * rq,int at_head)79*4882a593Smuzhiyun void blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
80*4882a593Smuzhiyun struct request *rq, int at_head)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun DECLARE_COMPLETION_ONSTACK(wait);
83*4882a593Smuzhiyun unsigned long hang_check;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun rq->end_io_data = &wait;
86*4882a593Smuzhiyun blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun /* Prevent hang_check timer from firing at us during very long I/O */
89*4882a593Smuzhiyun hang_check = sysctl_hung_task_timeout_secs;
90*4882a593Smuzhiyun if (hang_check)
91*4882a593Smuzhiyun while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
92*4882a593Smuzhiyun else
93*4882a593Smuzhiyun wait_for_completion_io(&wait);
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun EXPORT_SYMBOL(blk_execute_rq);