xref: /OK3568_Linux_fs/kernel/block/blk-core.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_getrq);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_issue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
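
/*
 * Illustrative usage sketch, not part of the upstream file: a driver that
 * wants bio merging disabled could toggle queue flags with the helpers
 * above. The function and the queue it receives are hypothetical.
 */
static __maybe_unused void example_disable_merges(struct request_queue *q)
{
	/* Atomically set/clear bits in q->queue_flags. */
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);

	/* test_and_set returns the previous value of the flag. */
	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NOXMERGES, q))
		pr_debug("NOXMERGES was not set before\n");

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
}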

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX matching the REQ_OP_XXX operation
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful when debugging and tracing a bio or request. For
 * an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
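
/*
 * Illustrative usage sketch, not part of the upstream file: print_req_error()
 * below is the in-file consumer of blk_op_str(); a driver-side debug print
 * would look much the same. The request "rq" here is hypothetical.
 */
static __maybe_unused void example_trace_op(struct request *rq)
{
	/* req_op() masks the REQ_OP_* bits out of rq->cmd_flags. */
	pr_debug("dispatching %s at sector %llu\n",
		 blk_op_str(req_op(rq)),
		 (unsigned long long)blk_rq_pos(rq));
}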

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
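
/*
 * Illustrative conversion sketch, not part of the upstream file: the two
 * helpers above are inverses over the blk_errors table, so an errno from a
 * lower layer can be carried as a blk_status_t and mapped back at completion
 * time. Errnos without a table entry collapse to BLK_STS_IOERR / -EIO.
 */
static __maybe_unused int example_status_round_trip(void)
{
	blk_status_t status = errno_to_blk_status(-ENOSPC);

	/* status == BLK_STS_NOSPC here; map it back for the caller. */
	return blk_status_to_errno(status);	/* yields -ENOSPC */
}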

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);
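
/*
 * Illustrative usage sketch, not part of the upstream file: runtime-PM code
 * raises pm_only while a device is suspended so that only BLK_MQ_REQ_PM
 * requests are admitted by blk_queue_enter(), and drops it on resume. The
 * function here is a hypothetical stand-in for a driver's suspend/resume
 * callbacks.
 */
static __maybe_unused void example_pm_gate(struct request_queue *q, bool suspend)
{
	if (suspend)
		blk_set_pm_only(q);
	else
		blk_clear_pm_only(q);	/* wakes mq_freeze_wq waiters at zero */
}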

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new requests or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before the DYING marking. Set the DEAD
	 * flag to prevent blk_mq_run_hw_queues() from accessing the hardware
	 * queues after draining has finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, the request pool of sched_tags belongs to the request
	 * queue. However, the current implementation requires the tag_set
	 * for freeing requests, so free the pool now.
	 *
	 * The queue has become frozen, there can't be any in-queue requests,
	 * so it is safe to free the requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if (pm || !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This read pairs with the barrier in
		 * blk_freeze_queue_start(); we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    (pm || (blk_pm_request_resume(q),
				    !blk_queue_pm_only(q)))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
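
/*
 * Illustrative usage sketch, not part of the upstream file: callers outside
 * the bio path bracket direct queue access with blk_queue_enter() and
 * blk_queue_exit() so the queue cannot be frozen or torn down underneath
 * them. The work done in the middle is hypothetical.
 */
static __maybe_unused int example_enter_exit(struct request_queue *q)
{
	int ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);

	if (ret)
		return ret;	/* -EBUSY: frozen/pm_only and we won't wait */

	/* ... safely touch the queue here ... */

	blk_queue_exit(q);
	return 0;
}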

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_MAX_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue);
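
/*
 * Illustrative allocation sketch, not part of the upstream file: in this
 * kernel a bio-based driver allocates its queue with blk_alloc_queue() and
 * services I/O through its fops->submit_bio, while blk-mq drivers use
 * blk_mq_init_queue() instead. "my_driver_private" is hypothetical.
 */
static __maybe_unused struct request_queue *example_alloc(void *my_driver_private)
{
	struct request_queue *q = blk_alloc_queue(NUMA_NO_NODE);

	if (!q)
		return NULL;
	q->queuedata = my_driver_private;	/* retrievable in ->submit_bio */
	return q;
}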

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
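
/*
 * Illustrative passthrough sketch, not part of the upstream file:
 * blk_get_request() pairs with blk_put_request() around blk_execute_rq()
 * for driver-private commands. REQ_OP_DRV_IN stands in for whatever
 * operation the driver actually defines.
 */
static __maybe_unused int example_passthrough(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);

	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in the driver-specific payload here ... */
	blk_execute_rq(q, NULL, rq, 0);	/* synchronous dispatch */

	blk_put_request(rq);
	return 0;
}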

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	const int op = bio_op(bio);

	if (part->policy && op_is_write(op)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	if (bio_sectors(bio)) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	bio->bi_partno = 0;
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
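
/*
 * Worked example with illustrative numbers, not part of the upstream file:
 * if partition p starts at sector 2048 (p->start_sect == 2048) and a bio
 * targets sector 100 of p, the remap above turns it into sector 2148 on the
 * whole disk, clears bi_partno, and the trace event records the pre-remap
 * sector (2148 - 2048 == 100).
 */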

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}
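
/*
 * Illustrative constraint sketch, not part of the upstream file: a
 * zone-append bio must start at a zone boundary and fit within both
 * chunk_sectors and max_zone_append_sectors, mirroring the arithmetic of
 * the checks above. The bio is hypothetical and assumed to carry
 * REQ_OP_ZONE_APPEND.
 */
static __maybe_unused bool example_zone_append_fits(struct request_queue *q,
						    struct bio *bio)
{
	return blk_queue_is_zoned(q) &&
	       !(bio->bi_iter.bi_sector & (blk_queue_zone_sectors(q) - 1)) &&
	       bio_sectors(bio) <= q->limits.chunk_sectors &&
	       bio_sectors(bio) <= q->limits.max_zone_append_sectors;
}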

static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if the queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio-based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio))
		return false;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}
static blk_qc_t __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!disk->fops->submit_bio)
			return blk_mq_submit_bio(bio);
		ret = disk->fops->submit_bio(bio);
	}
	blk_queue_exit(disk->queue);
	return ret;
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_disk->queue;
		struct bio_list lower, same;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
	blk_qc_t ret = BLK_QC_T_NONE;

	current->bio_list = bio_list;

	do {
		struct gendisk *disk = bio->bi_disk;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		if (!blk_crypto_bio_prep(&bio)) {
			blk_queue_exit(disk->queue);
			ret = BLK_QC_T_NONE;
			continue;
		}

		ret = blk_mq_submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
	return ret;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t submit_bio_noacct(struct bio *bio)
{
	if (!submit_bio_checks(bio))
		return BLK_QC_T_NONE;

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returns.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	if (!bio->bi_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
	return __submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);
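
/*
 * Illustrative stacking sketch, not part of the upstream file: a stacking
 * driver (dm/md style) redirects a bio it owns to a lower device and
 * re-submits it with submit_bio_noacct(), the exact path the comment above
 * describes. "lower_bdev" and the sector offset are hypothetical.
 */
static __maybe_unused blk_qc_t example_restack(struct bio *bio,
					       struct block_device *lower_bdev,
					       sector_t offset)
{
	bio_set_dev(bio, lower_bdev);
	bio->bi_iter.bi_sector += offset;
	return submit_bio_noacct(bio);
}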

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_disk and bi_partno fields.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
submit_bio(struct bio * bio)1081*4882a593Smuzhiyun blk_qc_t submit_bio(struct bio *bio)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun 	if (blkcg_punt_bio_submit(bio))
1084*4882a593Smuzhiyun 		return BLK_QC_T_NONE;
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	/*
1087*4882a593Smuzhiyun 	 * If it's a regular read/write or a barrier with data attached,
1088*4882a593Smuzhiyun 	 * go through the normal accounting stuff before submission.
1089*4882a593Smuzhiyun 	 */
1090*4882a593Smuzhiyun 	if (bio_has_data(bio)) {
1091*4882a593Smuzhiyun 		unsigned int count;
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1094*4882a593Smuzhiyun 			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
1095*4882a593Smuzhiyun 		else
1096*4882a593Smuzhiyun 			count = bio_sectors(bio);
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 		if (op_is_write(bio_op(bio))) {
1099*4882a593Smuzhiyun 			count_vm_events(PGPGOUT, count);
1100*4882a593Smuzhiyun 		} else {
1101*4882a593Smuzhiyun 			task_io_account_read(bio->bi_iter.bi_size);
1102*4882a593Smuzhiyun 			count_vm_events(PGPGIN, count);
1103*4882a593Smuzhiyun 		}
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 		if (unlikely(block_dump)) {
1106*4882a593Smuzhiyun 			char b[BDEVNAME_SIZE];
1107*4882a593Smuzhiyun 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1108*4882a593Smuzhiyun 			current->comm, task_pid_nr(current),
1109*4882a593Smuzhiyun 				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
1110*4882a593Smuzhiyun 				(unsigned long long)bio->bi_iter.bi_sector,
1111*4882a593Smuzhiyun 				bio_devname(bio, b), count);
1112*4882a593Smuzhiyun 		}
1113*4882a593Smuzhiyun 	}
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	/*
1116*4882a593Smuzhiyun 	 * If we're reading data that is part of the userspace workingset, count
1117*4882a593Smuzhiyun 	 * submission time as memory stall.  When the device is congested, or
1118*4882a593Smuzhiyun 	 * the submitting cgroup IO-throttled, submission can be a significant
1119*4882a593Smuzhiyun 	 * part of overall IO time.
1120*4882a593Smuzhiyun 	 */
1121*4882a593Smuzhiyun 	if (unlikely(bio_op(bio) == REQ_OP_READ &&
1122*4882a593Smuzhiyun 	    bio_flagged(bio, BIO_WORKINGSET))) {
1123*4882a593Smuzhiyun 		unsigned long pflags;
1124*4882a593Smuzhiyun 		blk_qc_t ret;
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 		psi_memstall_enter(&pflags);
1127*4882a593Smuzhiyun 		ret = submit_bio_noacct(bio);
1128*4882a593Smuzhiyun 		psi_memstall_leave(&pflags);
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 		return ret;
1131*4882a593Smuzhiyun 	}
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	return submit_bio_noacct(bio);
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun EXPORT_SYMBOL(submit_bio);
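/*
 * A minimal sketch of how a caller might drive submit_bio().  The ex_*
 * names are hypothetical and the block is compiled out on purpose.
 */
#if 0
static void ex_read_end_io(struct bio *bio)
{
	/* Runs asynchronously; only now may the submitter touch the bio. */
	if (bio->bi_status)
		pr_err("example read failed: %d\n",
		       blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

static void ex_submit_read(struct block_device *bdev, struct page *page,
			   sector_t sector)
{
	/* GFP_NOIO allocations from fs_bio_set do not fail. */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio->bi_end_io = ex_read_end_io;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);	/* completion is reported via ->bi_end_io() */
}
#endif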
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun /**
1138*4882a593Smuzhiyun  * blk_cloned_rq_check_limits - Helper function to check a cloned request
1139*4882a593Smuzhiyun  *                              for the new queue limits
1140*4882a593Smuzhiyun  * @q:  the queue
1141*4882a593Smuzhiyun  * @rq: the request being checked
1142*4882a593Smuzhiyun  *
1143*4882a593Smuzhiyun  * Description:
1144*4882a593Smuzhiyun  *    @rq may have been made based on weaker limitations of upper-level queues
1145*4882a593Smuzhiyun  *    in request stacking drivers, and it may violate the limitation of @q.
1146*4882a593Smuzhiyun  *    Since the block layer and the underlying device driver trust @rq
1147*4882a593Smuzhiyun  *    after it is inserted into @q, it should be checked against @q before
1148*4882a593Smuzhiyun  *    the insertion using this generic function.
1149*4882a593Smuzhiyun  *
1150*4882a593Smuzhiyun  *    Request stacking drivers like request-based dm may change the queue
1151*4882a593Smuzhiyun  *    limits when retrying requests on other queues. Those requests need
1152*4882a593Smuzhiyun  *    to be checked against the new queue limits again during dispatch.
1153*4882a593Smuzhiyun  */
1154*4882a593Smuzhiyun static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
1155*4882a593Smuzhiyun 				      struct request *rq)
1156*4882a593Smuzhiyun {
1157*4882a593Smuzhiyun 	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	if (blk_rq_sectors(rq) > max_sectors) {
1160*4882a593Smuzhiyun 		/*
1161*4882a593Smuzhiyun 		 * A SCSI device does not have a good way to report whether
1162*4882a593Smuzhiyun 		 * Write Same/Zero is actually supported. If a device rejects
1163*4882a593Smuzhiyun 		 * a non-read/write command (discard, write same, etc.) the
1164*4882a593Smuzhiyun 		 * low-level device driver will set the relevant queue limit to
1165*4882a593Smuzhiyun 		 * 0 to prevent blk-lib from issuing more of the offending
1166*4882a593Smuzhiyun 		 * operations. Commands queued prior to the queue limit being
1167*4882a593Smuzhiyun 		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
1168*4882a593Smuzhiyun 		 * errors being propagated to upper layers.
1169*4882a593Smuzhiyun 		 */
1170*4882a593Smuzhiyun 		if (max_sectors == 0)
1171*4882a593Smuzhiyun 			return BLK_STS_NOTSUPP;
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
1174*4882a593Smuzhiyun 			__func__, blk_rq_sectors(rq), max_sectors);
1175*4882a593Smuzhiyun 		return BLK_STS_IOERR;
1176*4882a593Smuzhiyun 	}
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	/*
1179*4882a593Smuzhiyun 	 * The queue's settings related to segment counting, such as
1180*4882a593Smuzhiyun 	 * q->bounce_pfn, may differ from those of other stacking queues.
1181*4882a593Smuzhiyun 	 * Recalculate the segment count to check the request correctly
1182*4882a593Smuzhiyun 	 * against this queue's limits.
1183*4882a593Smuzhiyun 	 */
1184*4882a593Smuzhiyun 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
1185*4882a593Smuzhiyun 	if (rq->nr_phys_segments > queue_max_segments(q)) {
1186*4882a593Smuzhiyun 		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
1187*4882a593Smuzhiyun 			__func__, rq->nr_phys_segments, queue_max_segments(q));
1188*4882a593Smuzhiyun 		return BLK_STS_IOERR;
1189*4882a593Smuzhiyun 	}
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	return BLK_STS_OK;
1192*4882a593Smuzhiyun }
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun /**
1195*4882a593Smuzhiyun  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1196*4882a593Smuzhiyun  * @q:  the queue to submit the request
1197*4882a593Smuzhiyun  * @rq: the request being queued
1198*4882a593Smuzhiyun  */
1199*4882a593Smuzhiyun blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1200*4882a593Smuzhiyun {
1201*4882a593Smuzhiyun 	blk_status_t ret;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	ret = blk_cloned_rq_check_limits(q, rq);
1204*4882a593Smuzhiyun 	if (ret != BLK_STS_OK)
1205*4882a593Smuzhiyun 		return ret;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	if (rq->rq_disk &&
1208*4882a593Smuzhiyun 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1209*4882a593Smuzhiyun 		return BLK_STS_IOERR;
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	if (blk_crypto_insert_cloned_request(rq))
1212*4882a593Smuzhiyun 		return BLK_STS_IOERR;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	if (blk_queue_io_stat(q))
1215*4882a593Smuzhiyun 		blk_account_io_start(rq);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	/*
1218*4882a593Smuzhiyun 	 * Since we have a scheduler attached on the top device,
1219*4882a593Smuzhiyun 	 * bypass a potential scheduler on the bottom device for
1220*4882a593Smuzhiyun 	 * insert.
1221*4882a593Smuzhiyun 	 */
1222*4882a593Smuzhiyun 	return blk_mq_request_issue_directly(rq, true);
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
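/*
 * A sketch of how a request-stacking driver might dispatch a previously
 * prepared clone to a bottom queue.  ex_* names are hypothetical; the block
 * is compiled out on purpose.
 */
#if 0
static blk_status_t ex_dispatch_clone(struct request_queue *bottom_q,
				      struct request *clone)
{
	/* Limits are re-checked against bottom_q inside this call. */
	return blk_insert_cloned_request(bottom_q, clone);
}
#endif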
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun /**
1227*4882a593Smuzhiyun  * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1228*4882a593Smuzhiyun  * @rq: request to examine
1229*4882a593Smuzhiyun  *
1230*4882a593Smuzhiyun  * Description:
1231*4882a593Smuzhiyun  *     A request could be a merge of IOs which require different failure
1232*4882a593Smuzhiyun  *     handling.  This function determines the number of bytes which
1233*4882a593Smuzhiyun  *     can be failed from the beginning of the request without
1234*4882a593Smuzhiyun  *     crossing into an area which needs to be retried further.
1235*4882a593Smuzhiyun  *
1236*4882a593Smuzhiyun  * Return:
1237*4882a593Smuzhiyun  *     The number of bytes to fail.
1238*4882a593Smuzhiyun  */
1239*4882a593Smuzhiyun unsigned int blk_rq_err_bytes(const struct request *rq)
1240*4882a593Smuzhiyun {
1241*4882a593Smuzhiyun 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1242*4882a593Smuzhiyun 	unsigned int bytes = 0;
1243*4882a593Smuzhiyun 	struct bio *bio;
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	if (!(rq->rq_flags & RQF_MIXED_MERGE))
1246*4882a593Smuzhiyun 		return blk_rq_bytes(rq);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	/*
1249*4882a593Smuzhiyun 	 * Currently the only 'mixing' which can happen is between
1250*4882a593Smuzhiyun 	 * different failfast types.  We can safely fail portions
1251*4882a593Smuzhiyun 	 * which have all the failfast bits that the first one has -
1252*4882a593Smuzhiyun 	 * the ones which are at least as eager to fail as the first
1253*4882a593Smuzhiyun 	 * one.
1254*4882a593Smuzhiyun 	 */
1255*4882a593Smuzhiyun 	for (bio = rq->bio; bio; bio = bio->bi_next) {
1256*4882a593Smuzhiyun 		if ((bio->bi_opf & ff) != ff)
1257*4882a593Smuzhiyun 			break;
1258*4882a593Smuzhiyun 		bytes += bio->bi_iter.bi_size;
1259*4882a593Smuzhiyun 	}
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	/* this could lead to an infinite loop */
1262*4882a593Smuzhiyun 	BUG_ON(blk_rq_bytes(rq) && !bytes);
1263*4882a593Smuzhiyun 	return bytes;
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
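/*
 * A sketch of the usual pairing of blk_rq_err_bytes() with
 * blk_update_request(): fail only the prefix that shares the first bio's
 * failfast policy and keep the rest for retry.  Hypothetical, compiled out.
 */
#if 0
static bool ex_fail_failfast_prefix(struct request *rq, blk_status_t error)
{
	/* Returns true while the request still has bytes left to retry. */
	return blk_update_request(rq, error, blk_rq_err_bytes(rq));
}
#endif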
1266*4882a593Smuzhiyun 
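/*
 * A note on the accounting below (inferred from the code): io_ticks is kept
 * at jiffy granularity.  On I/O start a stale stamp adds a single tick; on
 * I/O end the whole elapsed window is credited.  The cmpxchg() lets
 * concurrent updaters race benignly - only one of them advances the stamp.
 */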
1267*4882a593Smuzhiyun static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun 	unsigned long stamp;
1270*4882a593Smuzhiyun again:
1271*4882a593Smuzhiyun 	stamp = READ_ONCE(part->stamp);
1272*4882a593Smuzhiyun 	if (unlikely(stamp != now)) {
1273*4882a593Smuzhiyun 		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
1274*4882a593Smuzhiyun 			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
1275*4882a593Smuzhiyun 	}
1276*4882a593Smuzhiyun 	if (part->partno) {
1277*4882a593Smuzhiyun 		part = &part_to_disk(part)->part0;
1278*4882a593Smuzhiyun 		goto again;
1279*4882a593Smuzhiyun 	}
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun static void blk_account_io_completion(struct request *req, unsigned int bytes)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	if (req->part && blk_do_io_stat(req)) {
1285*4882a593Smuzhiyun 		const int sgrp = op_stat_group(req_op(req));
1286*4882a593Smuzhiyun 		struct hd_struct *part;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 		part_stat_lock();
1289*4882a593Smuzhiyun 		part = req->part;
1290*4882a593Smuzhiyun 		part_stat_add(part, sectors[sgrp], bytes >> 9);
1291*4882a593Smuzhiyun 		part_stat_unlock();
1292*4882a593Smuzhiyun 	}
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun void blk_account_io_done(struct request *req, u64 now)
1296*4882a593Smuzhiyun {
1297*4882a593Smuzhiyun 	/*
1298*4882a593Smuzhiyun 	 * Account IO completion.  flush_rq isn't accounted as a
1299*4882a593Smuzhiyun 	 * normal IO on queueing or completion.  Accounting the
1300*4882a593Smuzhiyun 	 * containing request is enough.
1301*4882a593Smuzhiyun 	 */
1302*4882a593Smuzhiyun 	if (req->part && blk_do_io_stat(req) &&
1303*4882a593Smuzhiyun 	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
1304*4882a593Smuzhiyun 		const int sgrp = op_stat_group(req_op(req));
1305*4882a593Smuzhiyun 		struct hd_struct *part;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 		part_stat_lock();
1308*4882a593Smuzhiyun 		part = req->part;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 		update_io_ticks(part, jiffies, true);
1311*4882a593Smuzhiyun 		part_stat_inc(part, ios[sgrp]);
1312*4882a593Smuzhiyun 		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
1313*4882a593Smuzhiyun 		part_stat_unlock();
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 		hd_struct_put(part);
1316*4882a593Smuzhiyun 	}
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun void blk_account_io_start(struct request *rq)
1320*4882a593Smuzhiyun {
1321*4882a593Smuzhiyun 	if (!blk_do_io_stat(rq))
1322*4882a593Smuzhiyun 		return;
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	part_stat_lock();
1327*4882a593Smuzhiyun 	update_io_ticks(rq->part, jiffies, false);
1328*4882a593Smuzhiyun 	part_stat_unlock();
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun static unsigned long __part_start_io_acct(struct hd_struct *part,
1332*4882a593Smuzhiyun 					  unsigned int sectors, unsigned int op)
1333*4882a593Smuzhiyun {
1334*4882a593Smuzhiyun 	const int sgrp = op_stat_group(op);
1335*4882a593Smuzhiyun 	unsigned long now = READ_ONCE(jiffies);
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	part_stat_lock();
1338*4882a593Smuzhiyun 	update_io_ticks(part, now, false);
1339*4882a593Smuzhiyun 	part_stat_inc(part, ios[sgrp]);
1340*4882a593Smuzhiyun 	part_stat_add(part, sectors[sgrp], sectors);
1341*4882a593Smuzhiyun 	part_stat_local_inc(part, in_flight[op_is_write(op)]);
1342*4882a593Smuzhiyun 	part_stat_unlock();
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 	return now;
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
1348*4882a593Smuzhiyun 				 struct bio *bio)
1349*4882a593Smuzhiyun {
1350*4882a593Smuzhiyun 	*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
1353*4882a593Smuzhiyun }
1354*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(part_start_io_acct);
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1357*4882a593Smuzhiyun 				 unsigned int op)
1358*4882a593Smuzhiyun {
1359*4882a593Smuzhiyun 	return __part_start_io_acct(&disk->part0, sectors, op);
1360*4882a593Smuzhiyun }
1361*4882a593Smuzhiyun EXPORT_SYMBOL(disk_start_io_acct);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
1364*4882a593Smuzhiyun 			       unsigned long start_time)
1365*4882a593Smuzhiyun {
1366*4882a593Smuzhiyun 	const int sgrp = op_stat_group(op);
1367*4882a593Smuzhiyun 	unsigned long now = READ_ONCE(jiffies);
1368*4882a593Smuzhiyun 	unsigned long duration = now - start_time;
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 	part_stat_lock();
1371*4882a593Smuzhiyun 	update_io_ticks(part, now, true);
1372*4882a593Smuzhiyun 	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1373*4882a593Smuzhiyun 	part_stat_local_dec(part, in_flight[op_is_write(op)]);
1374*4882a593Smuzhiyun 	part_stat_unlock();
1375*4882a593Smuzhiyun }
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun void part_end_io_acct(struct hd_struct *part, struct bio *bio,
1378*4882a593Smuzhiyun 		      unsigned long start_time)
1379*4882a593Smuzhiyun {
1380*4882a593Smuzhiyun 	__part_end_io_acct(part, bio_op(bio), start_time);
1381*4882a593Smuzhiyun 	hd_struct_put(part);
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(part_end_io_acct);
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1386*4882a593Smuzhiyun 		      unsigned long start_time)
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun 	__part_end_io_acct(&disk->part0, op, start_time);
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun EXPORT_SYMBOL(disk_end_io_acct);
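/*
 * A sketch of the start/end accounting pattern for a bio-based driver that
 * completes I/O inline.  ex_* is hypothetical; compiled out on purpose.
 */
#if 0
static blk_qc_t ex_make_request(struct bio *bio)
{
	struct gendisk *disk = bio->bi_disk;
	unsigned int op = bio_op(bio);
	unsigned long start;

	start = disk_start_io_acct(disk, bio_sectors(bio), op);
	/* ... actually move the data here ... */
	disk_end_io_acct(disk, op, start);
	bio_endio(bio);
	return BLK_QC_T_NONE;
}
#endif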
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun /*
1393*4882a593Smuzhiyun  * Steal bios from a request and add them to a bio list.
1394*4882a593Smuzhiyun  * The request must not have been partially completed before.
1395*4882a593Smuzhiyun  */
1396*4882a593Smuzhiyun void blk_steal_bios(struct bio_list *list, struct request *rq)
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun 	if (rq->bio) {
1399*4882a593Smuzhiyun 		if (list->tail)
1400*4882a593Smuzhiyun 			list->tail->bi_next = rq->bio;
1401*4882a593Smuzhiyun 		else
1402*4882a593Smuzhiyun 			list->head = rq->bio;
1403*4882a593Smuzhiyun 		list->tail = rq->biotail;
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 		rq->bio = NULL;
1406*4882a593Smuzhiyun 		rq->biotail = NULL;
1407*4882a593Smuzhiyun 	}
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	rq->__data_len = 0;
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_steal_bios);
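/*
 * A sketch of a failover-style user of blk_steal_bios(): steal the bios,
 * end the now-empty request, and resubmit the bios elsewhere (loosely
 * modelled on multipath failover).  Hypothetical, compiled out.
 */
#if 0
static void ex_failover_request(struct request *rq)
{
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;

	blk_steal_bios(&bios, rq);
	blk_mq_end_request(rq, BLK_STS_OK);	/* no bytes left to account */

	while ((bio = bio_list_pop(&bios)))
		submit_bio_noacct(bio);	/* e.g. down an alternate path */
}
#endif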
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun /**
1414*4882a593Smuzhiyun  * blk_update_request - Special helper function for request stacking drivers
1415*4882a593Smuzhiyun  * @req:      the request being processed
1416*4882a593Smuzhiyun  * @error:    block status code
1417*4882a593Smuzhiyun  * @nr_bytes: number of bytes to complete @req
1418*4882a593Smuzhiyun  *
1419*4882a593Smuzhiyun  * Description:
1420*4882a593Smuzhiyun  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
1421*4882a593Smuzhiyun  *     the request structure even if @req doesn't have leftover data.
1422*4882a593Smuzhiyun  *     If @req has leftover data, it is set up for the next range of segments.
1423*4882a593Smuzhiyun  *
1424*4882a593Smuzhiyun  *     This special helper function is only for request stacking drivers
1425*4882a593Smuzhiyun  *     (e.g. request-based dm) so that they can handle partial completion.
1426*4882a593Smuzhiyun  *     Actual device drivers should use blk_mq_end_request instead.
1427*4882a593Smuzhiyun  *
1428*4882a593Smuzhiyun  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1429*4882a593Smuzhiyun  *     a %false return from this function.
1430*4882a593Smuzhiyun  *
1431*4882a593Smuzhiyun  * Note:
1432*4882a593Smuzhiyun  *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
1433*4882a593Smuzhiyun  *	blk_rq_bytes() and in blk_update_request().
1434*4882a593Smuzhiyun  *
1435*4882a593Smuzhiyun  * Return:
1436*4882a593Smuzhiyun  *     %false - this request doesn't have any more data
1437*4882a593Smuzhiyun  *     %true  - this request has more data
1438*4882a593Smuzhiyun  **/
1439*4882a593Smuzhiyun bool blk_update_request(struct request *req, blk_status_t error,
1440*4882a593Smuzhiyun 		unsigned int nr_bytes)
1441*4882a593Smuzhiyun {
1442*4882a593Smuzhiyun 	int total_bytes;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	if (!req->bio)
1447*4882a593Smuzhiyun 		return false;
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_INTEGRITY
1450*4882a593Smuzhiyun 	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
1451*4882a593Smuzhiyun 	    error == BLK_STS_OK)
1452*4882a593Smuzhiyun 		req->q->integrity.profile->complete_fn(req, nr_bytes);
1453*4882a593Smuzhiyun #endif
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
1456*4882a593Smuzhiyun 		     !(req->rq_flags & RQF_QUIET)))
1457*4882a593Smuzhiyun 		print_req_error(req, error, __func__);
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	blk_account_io_completion(req, nr_bytes);
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	total_bytes = 0;
1462*4882a593Smuzhiyun 	while (req->bio) {
1463*4882a593Smuzhiyun 		struct bio *bio = req->bio;
1464*4882a593Smuzhiyun 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 		if (bio_bytes == bio->bi_iter.bi_size)
1467*4882a593Smuzhiyun 			req->bio = bio->bi_next;
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 		/* Completion has already been traced */
1470*4882a593Smuzhiyun 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1471*4882a593Smuzhiyun 		req_bio_endio(req, bio, bio_bytes, error);
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun 		total_bytes += bio_bytes;
1474*4882a593Smuzhiyun 		nr_bytes -= bio_bytes;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 		if (!nr_bytes)
1477*4882a593Smuzhiyun 			break;
1478*4882a593Smuzhiyun 	}
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	/*
1481*4882a593Smuzhiyun 	 * completely done
1482*4882a593Smuzhiyun 	 */
1483*4882a593Smuzhiyun 	if (!req->bio) {
1484*4882a593Smuzhiyun 		/*
1485*4882a593Smuzhiyun 		 * Reset counters so that the request stacking driver
1486*4882a593Smuzhiyun 		 * can find how many bytes remain in the request
1487*4882a593Smuzhiyun 		 * later.
1488*4882a593Smuzhiyun 		 */
1489*4882a593Smuzhiyun 		req->__data_len = 0;
1490*4882a593Smuzhiyun 		return false;
1491*4882a593Smuzhiyun 	}
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	req->__data_len -= total_bytes;
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	/* update sector only for requests with clear definition of sector */
1496*4882a593Smuzhiyun 	if (!blk_rq_is_passthrough(req))
1497*4882a593Smuzhiyun 		req->__sector += total_bytes >> 9;
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	/* mixed attributes always follow the first bio */
1500*4882a593Smuzhiyun 	if (req->rq_flags & RQF_MIXED_MERGE) {
1501*4882a593Smuzhiyun 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
1502*4882a593Smuzhiyun 		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
1503*4882a593Smuzhiyun 	}
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
1506*4882a593Smuzhiyun 		/*
1507*4882a593Smuzhiyun 		 * If total number of sectors is less than the first segment
1508*4882a593Smuzhiyun 		 * size, something has gone terribly wrong.
1509*4882a593Smuzhiyun 		 */
1510*4882a593Smuzhiyun 		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
1511*4882a593Smuzhiyun 			blk_dump_rq_flags(req, "request botched");
1512*4882a593Smuzhiyun 			req->__data_len = blk_rq_cur_bytes(req);
1513*4882a593Smuzhiyun 		}
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 		/* recalculate the number of segments */
1516*4882a593Smuzhiyun 		req->nr_phys_segments = blk_recalc_rq_segments(req);
1517*4882a593Smuzhiyun 	}
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	return true;
1520*4882a593Smuzhiyun }
1521*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_update_request);
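/*
 * A sketch of the partial-completion pattern described above: a stacking
 * driver completes nr_bytes and only releases the request once
 * blk_update_request() reports no data left.  Hypothetical, compiled out.
 */
#if 0
static void ex_complete_bytes(struct request *rq, blk_status_t error,
			      unsigned int nr_bytes)
{
	if (blk_update_request(rq, error, nr_bytes))
		return;	/* leftover data; the request stays alive */

	__blk_mq_end_request(rq, error);
}
#endif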
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1524*4882a593Smuzhiyun /**
1525*4882a593Smuzhiyun  * rq_flush_dcache_pages - Helper function to flush all pages in a request
1526*4882a593Smuzhiyun  * @rq: the request to be flushed
1527*4882a593Smuzhiyun  *
1528*4882a593Smuzhiyun  * Description:
1529*4882a593Smuzhiyun  *     Flush all pages in @rq.
1530*4882a593Smuzhiyun  */
1531*4882a593Smuzhiyun void rq_flush_dcache_pages(struct request *rq)
1532*4882a593Smuzhiyun {
1533*4882a593Smuzhiyun 	struct req_iterator iter;
1534*4882a593Smuzhiyun 	struct bio_vec bvec;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	rq_for_each_segment(bvec, rq, iter)
1537*4882a593Smuzhiyun 		flush_dcache_page(bvec.bv_page);
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
1540*4882a593Smuzhiyun #endif
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun /**
1543*4882a593Smuzhiyun  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1544*4882a593Smuzhiyun  * @q : the queue of the device being checked
1545*4882a593Smuzhiyun  *
1546*4882a593Smuzhiyun  * Description:
1547*4882a593Smuzhiyun  *    Check if underlying low-level drivers of a device are busy.
1548*4882a593Smuzhiyun  *    If a driver wants to export its busy state, it must provide its own
1549*4882a593Smuzhiyun  *    ->busy() callback in the queue's struct blk_mq_ops.
1550*4882a593Smuzhiyun  *
1551*4882a593Smuzhiyun  *    Basically, this function is used only by request stacking drivers
1552*4882a593Smuzhiyun  *    to stop dispatching requests to underlying devices when underlying
1553*4882a593Smuzhiyun  *    devices are busy.  This behavior allows more I/O merging on the queue
1554*4882a593Smuzhiyun  *    of the request stacking driver and prevents I/O throughput regressions
1555*4882a593Smuzhiyun  *    under bursty I/O load.
1556*4882a593Smuzhiyun  *
1557*4882a593Smuzhiyun  * Return:
1558*4882a593Smuzhiyun  *    0 - Not busy (The request stacking driver should dispatch request)
1559*4882a593Smuzhiyun  *    1 - Busy (The request stacking driver should stop dispatching request)
1560*4882a593Smuzhiyun  */
1561*4882a593Smuzhiyun int blk_lld_busy(struct request_queue *q)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun 	if (queue_is_mq(q) && q->mq_ops->busy)
1564*4882a593Smuzhiyun 		return q->mq_ops->busy(q);
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	return 0;
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_lld_busy);
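/*
 * A sketch of exporting a busy state through blk_mq_ops.  ex_* names are
 * hypothetical; compiled out on purpose.
 */
#if 0
static bool ex_mq_busy(struct request_queue *q)
{
	return false;	/* report device-specific congestion here */
}

static const struct blk_mq_ops ex_mq_ops = {
	.busy	= ex_mq_busy,
	/* .queue_rq and friends elided */
};
#endif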
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun /**
1571*4882a593Smuzhiyun  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
1572*4882a593Smuzhiyun  * @rq: the clone request to be cleaned up
1573*4882a593Smuzhiyun  *
1574*4882a593Smuzhiyun  * Description:
1575*4882a593Smuzhiyun  *     Free all bios in @rq for a cloned request.
1576*4882a593Smuzhiyun  */
1577*4882a593Smuzhiyun void blk_rq_unprep_clone(struct request *rq)
1578*4882a593Smuzhiyun {
1579*4882a593Smuzhiyun 	struct bio *bio;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	while ((bio = rq->bio) != NULL) {
1582*4882a593Smuzhiyun 		rq->bio = bio->bi_next;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 		bio_put(bio);
1585*4882a593Smuzhiyun 	}
1586*4882a593Smuzhiyun }
1587*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun /**
1590*4882a593Smuzhiyun  * blk_rq_prep_clone - Helper function to setup clone request
1591*4882a593Smuzhiyun  * @rq: the request to be setup
1592*4882a593Smuzhiyun  * @rq_src: original request to be cloned
1593*4882a593Smuzhiyun  * @bs: bio_set that bios for clone are allocated from
1594*4882a593Smuzhiyun  * @gfp_mask: memory allocation mask for bio
1595*4882a593Smuzhiyun  * @bio_ctr: setup function to be called for each clone bio.
1596*4882a593Smuzhiyun  *           Returns %0 for success, non-%0 for failure.
1597*4882a593Smuzhiyun  * @data: private data to be passed to @bio_ctr
1598*4882a593Smuzhiyun  *
1599*4882a593Smuzhiyun  * Description:
1600*4882a593Smuzhiyun  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
1601*4882a593Smuzhiyun  *     Also, the pages which the original bios point to are not copied;
1602*4882a593Smuzhiyun  *     the cloned bios just point to the same pages.
1603*4882a593Smuzhiyun  *     So the cloned bios must be completed before the original bios, which
1604*4882a593Smuzhiyun  *     means the caller must complete @rq before @rq_src.
1605*4882a593Smuzhiyun  */
1606*4882a593Smuzhiyun int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1607*4882a593Smuzhiyun 		      struct bio_set *bs, gfp_t gfp_mask,
1608*4882a593Smuzhiyun 		      int (*bio_ctr)(struct bio *, struct bio *, void *),
1609*4882a593Smuzhiyun 		      void *data)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun 	struct bio *bio, *bio_src;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	if (!bs)
1614*4882a593Smuzhiyun 		bs = &fs_bio_set;
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	__rq_for_each_bio(bio_src, rq_src) {
1617*4882a593Smuzhiyun 		bio = bio_clone_fast(bio_src, gfp_mask, bs);
1618*4882a593Smuzhiyun 		if (!bio)
1619*4882a593Smuzhiyun 			goto free_and_out;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 		if (bio_ctr && bio_ctr(bio, bio_src, data))
1622*4882a593Smuzhiyun 			goto free_and_out;
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 		if (rq->bio) {
1625*4882a593Smuzhiyun 			rq->biotail->bi_next = bio;
1626*4882a593Smuzhiyun 			rq->biotail = bio;
1627*4882a593Smuzhiyun 		} else {
1628*4882a593Smuzhiyun 			rq->bio = rq->biotail = bio;
1629*4882a593Smuzhiyun 		}
1630*4882a593Smuzhiyun 		bio = NULL;
1631*4882a593Smuzhiyun 	}
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	/* Copy attributes of the original request to the clone request. */
1634*4882a593Smuzhiyun 	rq->__sector = blk_rq_pos(rq_src);
1635*4882a593Smuzhiyun 	rq->__data_len = blk_rq_bytes(rq_src);
1636*4882a593Smuzhiyun 	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
1637*4882a593Smuzhiyun 		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1638*4882a593Smuzhiyun 		rq->special_vec = rq_src->special_vec;
1639*4882a593Smuzhiyun 	}
1640*4882a593Smuzhiyun 	rq->nr_phys_segments = rq_src->nr_phys_segments;
1641*4882a593Smuzhiyun 	rq->ioprio = rq_src->ioprio;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
1644*4882a593Smuzhiyun 		goto free_and_out;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	return 0;
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun free_and_out:
1649*4882a593Smuzhiyun 	if (bio)
1650*4882a593Smuzhiyun 		bio_put(bio);
1651*4882a593Smuzhiyun 	blk_rq_unprep_clone(rq);
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 	return -ENOMEM;
1654*4882a593Smuzhiyun }
1655*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
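/*
 * A sketch of clone-and-dispatch, loosely following request-based dm.
 * NULL for @bs falls back to fs_bio_set; NULL for @bio_ctr skips the
 * per-bio setup hook.  ex_* names are hypothetical; compiled out.
 */
#if 0
static int ex_clone_and_issue(struct request *clone, struct request *rq_src,
			      struct request_queue *bottom_q)
{
	int ret;

	ret = blk_rq_prep_clone(clone, rq_src, NULL, GFP_NOIO, NULL, NULL);
	if (ret)
		return ret;

	if (blk_insert_cloned_request(bottom_q, clone) != BLK_STS_OK) {
		blk_rq_unprep_clone(clone);
		return -EIO;
	}
	return 0;
}
#endif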
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun int kblockd_schedule_work(struct work_struct *work)
1658*4882a593Smuzhiyun {
1659*4882a593Smuzhiyun 	return queue_work(kblockd_workqueue, work);
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun EXPORT_SYMBOL(kblockd_schedule_work);
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1664*4882a593Smuzhiyun 				unsigned long delay)
1665*4882a593Smuzhiyun {
1666*4882a593Smuzhiyun 	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
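/*
 * A sketch of deferring driver work onto the kblockd workqueue.  The
 * delayed_work is assumed to be initialized elsewhere; compiled out.
 */
#if 0
static void ex_kick_later(struct delayed_work *dwork, unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, dwork,
				    msecs_to_jiffies(msecs));
}
#endif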
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun /**
1671*4882a593Smuzhiyun  * blk_start_plug - initialize blk_plug and track it inside the task_struct
1672*4882a593Smuzhiyun  * @plug:	The &struct blk_plug that needs to be initialized
1673*4882a593Smuzhiyun  *
1674*4882a593Smuzhiyun  * Description:
1675*4882a593Smuzhiyun  *   blk_start_plug() indicates to the block layer an intent by the caller
1676*4882a593Smuzhiyun  *   to submit multiple I/O requests in a batch.  The block layer may use
1677*4882a593Smuzhiyun  *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
1678*4882a593Smuzhiyun  *   is called.  However, the block layer may choose to submit requests
1679*4882a593Smuzhiyun  *   before a call to blk_finish_plug() if the number of queued I/Os
1680*4882a593Smuzhiyun  *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1681*4882a593Smuzhiyun  *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
1682*4882a593Smuzhiyun  *   the task schedules (see below).
1683*4882a593Smuzhiyun  *
1684*4882a593Smuzhiyun  *   Tracking blk_plug inside the task_struct will help with auto-flushing the
1685*4882a593Smuzhiyun  *   pending I/O should the task end up blocking between blk_start_plug() and
1686*4882a593Smuzhiyun  *   blk_finish_plug(). This is important from a performance perspective, but
1687*4882a593Smuzhiyun  *   also ensures that we don't deadlock. For instance, if the task is blocking
1688*4882a593Smuzhiyun  *   for a memory allocation, memory reclaim could end up wanting to free a
1689*4882a593Smuzhiyun  *   page belonging to that request that is currently residing in our private
1690*4882a593Smuzhiyun  *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
1691*4882a593Smuzhiyun  *   this kind of deadlock.
1692*4882a593Smuzhiyun  */
1693*4882a593Smuzhiyun void blk_start_plug(struct blk_plug *plug)
1694*4882a593Smuzhiyun {
1695*4882a593Smuzhiyun 	struct task_struct *tsk = current;
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	/*
1698*4882a593Smuzhiyun 	 * If this is a nested plug, don't actually assign it.
1699*4882a593Smuzhiyun 	 */
1700*4882a593Smuzhiyun 	if (tsk->plug)
1701*4882a593Smuzhiyun 		return;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	INIT_LIST_HEAD(&plug->mq_list);
1704*4882a593Smuzhiyun 	INIT_LIST_HEAD(&plug->cb_list);
1705*4882a593Smuzhiyun 	plug->rq_count = 0;
1706*4882a593Smuzhiyun 	plug->multiple_queues = false;
1707*4882a593Smuzhiyun 	plug->nowait = false;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	/*
1710*4882a593Smuzhiyun 	 * Store ordering should not be needed here, since a potential
1711*4882a593Smuzhiyun 	 * preempt will imply a full memory barrier
1712*4882a593Smuzhiyun 	 */
1713*4882a593Smuzhiyun 	tsk->plug = plug;
1714*4882a593Smuzhiyun }
1715*4882a593Smuzhiyun EXPORT_SYMBOL(blk_start_plug);
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1718*4882a593Smuzhiyun {
1719*4882a593Smuzhiyun 	LIST_HEAD(callbacks);
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	while (!list_empty(&plug->cb_list)) {
1722*4882a593Smuzhiyun 		list_splice_init(&plug->cb_list, &callbacks);
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 		while (!list_empty(&callbacks)) {
1725*4882a593Smuzhiyun 			struct blk_plug_cb *cb = list_first_entry(&callbacks,
1726*4882a593Smuzhiyun 							  struct blk_plug_cb,
1727*4882a593Smuzhiyun 							  list);
1728*4882a593Smuzhiyun 			list_del(&cb->list);
1729*4882a593Smuzhiyun 			cb->callback(cb, from_schedule);
1730*4882a593Smuzhiyun 		}
1731*4882a593Smuzhiyun 	}
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1735*4882a593Smuzhiyun 				      int size)
1736*4882a593Smuzhiyun {
1737*4882a593Smuzhiyun 	struct blk_plug *plug = current->plug;
1738*4882a593Smuzhiyun 	struct blk_plug_cb *cb;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	if (!plug)
1741*4882a593Smuzhiyun 		return NULL;
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	list_for_each_entry(cb, &plug->cb_list, list)
1744*4882a593Smuzhiyun 		if (cb->callback == unplug && cb->data == data)
1745*4882a593Smuzhiyun 			return cb;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	/* Not currently on the callback list */
1748*4882a593Smuzhiyun 	BUG_ON(size < sizeof(*cb));
1749*4882a593Smuzhiyun 	cb = kzalloc(size, GFP_ATOMIC);
1750*4882a593Smuzhiyun 	if (cb) {
1751*4882a593Smuzhiyun 		cb->data = data;
1752*4882a593Smuzhiyun 		cb->callback = unplug;
1753*4882a593Smuzhiyun 		list_add(&cb->list, &plug->cb_list);
1754*4882a593Smuzhiyun 	}
1755*4882a593Smuzhiyun 	return cb;
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun EXPORT_SYMBOL(blk_check_plugged);
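/*
 * A sketch of a per-plug callback, loosely following how md uses
 * blk_check_plugged().  The callback owns the allocation and must free it.
 * ex_* names are hypothetical; compiled out on purpose.
 */
#if 0
struct ex_plug_cb {
	struct blk_plug_cb cb;	/* embedded; freed by the callback */
	int pending;
};

static void ex_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct ex_plug_cb *ex = container_of(cb, struct ex_plug_cb, cb);

	/* flush ex->pending worth of batched driver state here */
	kfree(ex);
}

static void ex_note_plugged_io(void *data)
{
	struct blk_plug_cb *cb;

	cb = blk_check_plugged(ex_unplug, data, sizeof(struct ex_plug_cb));
	if (cb)
		container_of(cb, struct ex_plug_cb, cb)->pending++;
}
#endif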
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1760*4882a593Smuzhiyun {
1761*4882a593Smuzhiyun 	flush_plug_callbacks(plug, from_schedule);
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	if (!list_empty(&plug->mq_list))
1764*4882a593Smuzhiyun 		blk_mq_flush_plug_list(plug, from_schedule);
1765*4882a593Smuzhiyun }
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun /**
1768*4882a593Smuzhiyun  * blk_finish_plug - mark the end of a batch of submitted I/O
1769*4882a593Smuzhiyun  * @plug:	The &struct blk_plug passed to blk_start_plug()
1770*4882a593Smuzhiyun  *
1771*4882a593Smuzhiyun  * Description:
1772*4882a593Smuzhiyun  * Indicate that a batch of I/O submissions is complete.  This function
1773*4882a593Smuzhiyun  * must be paired with an initial call to blk_start_plug().  The intent
1774*4882a593Smuzhiyun  * is to allow the block layer to optimize I/O submission.  See the
1775*4882a593Smuzhiyun  * documentation for blk_start_plug() for more information.
1776*4882a593Smuzhiyun  */
1777*4882a593Smuzhiyun void blk_finish_plug(struct blk_plug *plug)
1778*4882a593Smuzhiyun {
1779*4882a593Smuzhiyun 	if (plug != current->plug)
1780*4882a593Smuzhiyun 		return;
1781*4882a593Smuzhiyun 	blk_flush_plug_list(plug, false);
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	current->plug = NULL;
1784*4882a593Smuzhiyun }
1785*4882a593Smuzhiyun EXPORT_SYMBOL(blk_finish_plug);
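/*
 * A sketch of batching a burst of submissions under one plug so the block
 * layer can merge and dispatch them together.  Hypothetical, compiled out.
 */
#if 0
static void ex_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);	/* flushes anything still held in the plug */
}
#endif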
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun void blk_io_schedule(void)
1788*4882a593Smuzhiyun {
1789*4882a593Smuzhiyun 	/* Prevent hang_check timer from firing at us during very long I/O */
1790*4882a593Smuzhiyun 	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1791*4882a593Smuzhiyun 
1792*4882a593Smuzhiyun 	if (timeout)
1793*4882a593Smuzhiyun 		io_schedule_timeout(timeout);
1794*4882a593Smuzhiyun 	else
1795*4882a593Smuzhiyun 		io_schedule();
1796*4882a593Smuzhiyun }
1797*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(blk_io_schedule);
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun int __init blk_dev_init(void)
1800*4882a593Smuzhiyun {
1801*4882a593Smuzhiyun 	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
1802*4882a593Smuzhiyun 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1803*4882a593Smuzhiyun 			sizeof_field(struct request, cmd_flags));
1804*4882a593Smuzhiyun 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1805*4882a593Smuzhiyun 			sizeof_field(struct bio, bi_opf));
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun 	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
1808*4882a593Smuzhiyun 	kblockd_workqueue = alloc_workqueue("kblockd",
1809*4882a593Smuzhiyun 					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1810*4882a593Smuzhiyun 	if (!kblockd_workqueue)
1811*4882a593Smuzhiyun 		panic("Failed to create kblockd\n");
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	blk_requestq_cachep = kmem_cache_create("request_queue",
1814*4882a593Smuzhiyun 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	blk_debugfs_root = debugfs_create_dir("block", NULL);
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	return 0;
1819*4882a593Smuzhiyun }
1820