xref: /OK3568_Linux_fs/kernel/block/blk-core.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1991, 1992 Linus Torvalds
4  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
5  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
6  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
8  *	-  July 2000
9  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
10  */
11 
12 /*
13  * This handles all read/write requests to block devices
14  */
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/backing-dev.h>
18 #include <linux/bio.h>
19 #include <linux/blkdev.h>
20 #include <linux/blk-mq.h>
21 #include <linux/highmem.h>
22 #include <linux/mm.h>
23 #include <linux/pagemap.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/string.h>
26 #include <linux/init.h>
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/fault-inject.h>
33 #include <linux/list_sort.h>
34 #include <linux/delay.h>
35 #include <linux/ratelimit.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/blk-cgroup.h>
38 #include <linux/t10-pi.h>
39 #include <linux/debugfs.h>
40 #include <linux/bpf.h>
41 #include <linux/psi.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/blk-crypto.h>
44 
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/block.h>
47 
48 #include "blk.h"
49 #include "blk-mq.h"
50 #include "blk-mq-sched.h"
51 #include "blk-pm.h"
52 #include "blk-rq-qos.h"
53 
54 struct dentry *blk_debugfs_root;
55 
56 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
57 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
58 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
59 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
60 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
61 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_queue);
62 EXPORT_TRACEPOINT_SYMBOL_GPL(block_getrq);
63 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
64 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_issue);
65 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
66 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
67 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);
68 
69 DEFINE_IDA(blk_queue_ida);
70 
71 /*
72  * For queue allocation
73  */
74 struct kmem_cache *blk_requestq_cachep;
75 
76 /*
77  * Controlling structure to kblockd
78  */
79 static struct workqueue_struct *kblockd_workqueue;
80 
81 /**
82  * blk_queue_flag_set - atomically set a queue flag
83  * @flag: flag to be set
84  * @q: request queue
85  */
86 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
87 {
88 	set_bit(flag, &q->queue_flags);
89 }
90 EXPORT_SYMBOL(blk_queue_flag_set);
91 
92 /**
93  * blk_queue_flag_clear - atomically clear a queue flag
94  * @flag: flag to be cleared
95  * @q: request queue
96  */
97 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
98 {
99 	clear_bit(flag, &q->queue_flags);
100 }
101 EXPORT_SYMBOL(blk_queue_flag_clear);
102 
103 /**
104  * blk_queue_flag_test_and_set - atomically test and set a queue flag
105  * @flag: flag to be set
106  * @q: request queue
107  *
108  * Returns the previous value of @flag - 0 if the flag was not set and 1 if
109  * the flag was already set.
110  */
111 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
112 {
113 	return test_and_set_bit(flag, &q->queue_flags);
114 }
115 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
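/*
 * Illustrative sketch (not part of this file): a driver would typically use
 * these helpers right after allocating its queue, e.g. to mark a solid-state
 * device as non-rotational and enable I/O statistics:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_IO_STAT, q))
 *		pr_debug("io_stat was previously off\n");
 */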
116 
117 void blk_rq_init(struct request_queue *q, struct request *rq)
118 {
119 	memset(rq, 0, sizeof(*rq));
120 
121 	INIT_LIST_HEAD(&rq->queuelist);
122 	rq->q = q;
123 	rq->__sector = (sector_t) -1;
124 	INIT_HLIST_NODE(&rq->hash);
125 	RB_CLEAR_NODE(&rq->rb_node);
126 	rq->tag = BLK_MQ_NO_TAG;
127 	rq->internal_tag = BLK_MQ_NO_TAG;
128 	rq->start_time_ns = ktime_get_ns();
129 	rq->part = NULL;
130 	blk_crypto_rq_set_defaults(rq);
131 }
132 EXPORT_SYMBOL(blk_rq_init);
133 
134 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
135 static const char *const blk_op_name[] = {
136 	REQ_OP_NAME(READ),
137 	REQ_OP_NAME(WRITE),
138 	REQ_OP_NAME(FLUSH),
139 	REQ_OP_NAME(DISCARD),
140 	REQ_OP_NAME(SECURE_ERASE),
141 	REQ_OP_NAME(ZONE_RESET),
142 	REQ_OP_NAME(ZONE_RESET_ALL),
143 	REQ_OP_NAME(ZONE_OPEN),
144 	REQ_OP_NAME(ZONE_CLOSE),
145 	REQ_OP_NAME(ZONE_FINISH),
146 	REQ_OP_NAME(ZONE_APPEND),
147 	REQ_OP_NAME(WRITE_SAME),
148 	REQ_OP_NAME(WRITE_ZEROES),
149 	REQ_OP_NAME(SCSI_IN),
150 	REQ_OP_NAME(SCSI_OUT),
151 	REQ_OP_NAME(DRV_IN),
152 	REQ_OP_NAME(DRV_OUT),
153 };
154 #undef REQ_OP_NAME
155 
156 /**
157  * blk_op_str - Return the string XXX matching a REQ_OP_XXX opcode.
158  * @op: REQ_OP_XXX.
159  *
160  * Description: Centralized block layer function to convert REQ_OP_XXX into
161  * string format. Useful when debugging and tracing a bio or request. For an
162  * invalid REQ_OP_XXX it returns the string "UNKNOWN".
163  */
164 inline const char *blk_op_str(unsigned int op)
165 {
166 	const char *op_str = "UNKNOWN";
167 
168 	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
169 		op_str = blk_op_name[op];
170 
171 	return op_str;
172 }
173 EXPORT_SYMBOL_GPL(blk_op_str);
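/*
 * Illustrative sketch (not part of this file): blk_op_str() is handy in
 * driver debug output, e.g.:
 *
 *	pr_debug("completing %s request at sector %llu\n",
 *		 blk_op_str(req_op(rq)),
 *		 (unsigned long long)blk_rq_pos(rq));
 */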
174 
175 static const struct {
176 	int		errno;
177 	const char	*name;
178 } blk_errors[] = {
179 	[BLK_STS_OK]		= { 0,		"" },
180 	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
181 	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
182 	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
183 	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
184 	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
185 	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
186 	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
187 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
188 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
189 	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
190 	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
191 
192 	/* device mapper special case, should not leak out: */
193 	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
194 
195 	/* zone device specific errors */
196 	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
197 	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },
198 
199 	/* everything else not covered above: */
200 	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
201 };
202 
203 blk_status_t errno_to_blk_status(int errno)
204 {
205 	int i;
206 
207 	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
208 		if (blk_errors[i].errno == errno)
209 			return (__force blk_status_t)i;
210 	}
211 
212 	return BLK_STS_IOERR;
213 }
214 EXPORT_SYMBOL_GPL(errno_to_blk_status);
215 
216 int blk_status_to_errno(blk_status_t status)
217 {
218 	int idx = (__force int)status;
219 
220 	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
221 		return -EIO;
222 	return blk_errors[idx].errno;
223 }
224 EXPORT_SYMBOL_GPL(blk_status_to_errno);
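/*
 * Illustrative sketch (not part of this file): the two converters mirror
 * each other through the blk_errors[] table, so a transport error can
 * round-trip through the block layer:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOLINK); (BLK_STS_TRANSPORT)
 *	int err = blk_status_to_errno(sts);               (back to -ENOLINK)
 *
 * Any errno without a dedicated entry in blk_errors[] collapses to
 * BLK_STS_IOERR / -EIO.
 */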
225 
226 static void print_req_error(struct request *req, blk_status_t status,
227 		const char *caller)
228 {
229 	int idx = (__force int)status;
230 
231 	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
232 		return;
233 
234 	printk_ratelimited(KERN_ERR
235 		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
236 		"phys_seg %u prio class %u\n",
237 		caller, blk_errors[idx].name,
238 		req->rq_disk ? req->rq_disk->disk_name : "?",
239 		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
240 		req->cmd_flags & ~REQ_OP_MASK,
241 		req->nr_phys_segments,
242 		IOPRIO_PRIO_CLASS(req->ioprio));
243 }
244 
245 static void req_bio_endio(struct request *rq, struct bio *bio,
246 			  unsigned int nbytes, blk_status_t error)
247 {
248 	if (error)
249 		bio->bi_status = error;
250 
251 	if (unlikely(rq->rq_flags & RQF_QUIET))
252 		bio_set_flag(bio, BIO_QUIET);
253 
254 	bio_advance(bio, nbytes);
255 
256 	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
257 		/*
258 		 * Partial zone append completions cannot be supported as the
259 		 * BIO fragments may end up not being written sequentially.
260 		 */
261 		if (bio->bi_iter.bi_size)
262 			bio->bi_status = BLK_STS_IOERR;
263 		else
264 			bio->bi_iter.bi_sector = rq->__sector;
265 	}
266 
267 	/* don't actually finish bio if it's part of flush sequence */
268 	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
269 		bio_endio(bio);
270 }
271 
272 void blk_dump_rq_flags(struct request *rq, char *msg)
273 {
274 	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
275 		rq->rq_disk ? rq->rq_disk->disk_name : "?",
276 		(unsigned long long) rq->cmd_flags);
277 
278 	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
279 	       (unsigned long long)blk_rq_pos(rq),
280 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
281 	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
282 	       rq->bio, rq->biotail, blk_rq_bytes(rq));
283 }
284 EXPORT_SYMBOL(blk_dump_rq_flags);
285 
286 /**
287  * blk_sync_queue - cancel any pending callbacks on a queue
288  * @q: the queue
289  *
290  * Description:
291  *     The block layer may perform asynchronous callback activity
292  *     on a queue, such as calling the unplug function after a timeout.
293  *     A block device may call blk_sync_queue to ensure that any
294  *     such activity is cancelled, thus allowing it to release resources
295  *     that the callbacks might use. The caller must already have made sure
296  *     that its ->submit_bio will not re-add plugging prior to calling
297  *     this function.
298  *
299  *     This function does not cancel any asynchronous activity arising
300  *     out of elevator or throttling code. That would require elevator_exit()
301  *     and blkcg_exit_queue() to be called with queue lock initialized.
302  *
303  */
304 void blk_sync_queue(struct request_queue *q)
305 {
306 	del_timer_sync(&q->timeout);
307 	cancel_work_sync(&q->timeout_work);
308 }
309 EXPORT_SYMBOL(blk_sync_queue);
310 
311 /**
312  * blk_set_pm_only - increment pm_only counter
313  * @q: request queue pointer
314  */
315 void blk_set_pm_only(struct request_queue *q)
316 {
317 	atomic_inc(&q->pm_only);
318 }
319 EXPORT_SYMBOL_GPL(blk_set_pm_only);
320 
321 void blk_clear_pm_only(struct request_queue *q)
322 {
323 	int pm_only;
324 
325 	pm_only = atomic_dec_return(&q->pm_only);
326 	WARN_ON_ONCE(pm_only < 0);
327 	if (pm_only == 0)
328 		wake_up_all(&q->mq_freeze_wq);
329 }
330 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
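/*
 * Illustrative sketch (not part of this file): a runtime-suspend path would
 * bracket the suspended period with the pm_only counter, so that only
 * BLK_MQ_REQ_PM requests may pass blk_queue_enter() in the meantime:
 *
 *	blk_set_pm_only(q);
 *	... issue power-management commands allocated with BLK_MQ_REQ_PM ...
 *	blk_clear_pm_only(q);	(wakes waiters in blk_queue_enter())
 */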
331 
332 /**
333  * blk_put_queue - decrement the request_queue refcount
334  * @q: the request_queue structure to decrement the refcount for
335  *
336  * Decrements the refcount of the request_queue kobject. When this reaches 0
337  * we'll have blk_release_queue() called.
338  *
339  * Context: Any context, but the last reference must not be dropped from
340  *          atomic context.
341  */
342 void blk_put_queue(struct request_queue *q)
343 {
344 	kobject_put(&q->kobj);
345 }
346 EXPORT_SYMBOL(blk_put_queue);
347 
348 void blk_set_queue_dying(struct request_queue *q)
349 {
350 	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
351 
352 	/*
353 	 * When the queue DYING flag is set, we need to block new requests
354 	 * from entering the queue, so we call blk_freeze_queue_start() to
355 	 * prevent I/O from crossing blk_queue_enter().
356 	 */
357 	blk_freeze_queue_start(q);
358 
359 	if (queue_is_mq(q))
360 		blk_mq_wake_waiters(q);
361 
362 	/* Make blk_queue_enter() reexamine the DYING flag. */
363 	wake_up_all(&q->mq_freeze_wq);
364 }
365 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
366 
367 /**
368  * blk_cleanup_queue - shutdown a request queue
369  * @q: request queue to shutdown
370  *
371  * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
372  * put it.  All future requests will be failed immediately with -ENODEV.
373  *
374  * Context: can sleep
375  */
376 void blk_cleanup_queue(struct request_queue *q)
377 {
378 	/* cannot be called from atomic context */
379 	might_sleep();
380 
381 	WARN_ON_ONCE(blk_queue_registered(q));
382 
383 	/* mark @q DYING, no new request or merges will be allowed afterwards */
384 	blk_set_queue_dying(q);
385 
386 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
387 	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
388 
389 	/*
390 	 * Drain all requests queued before DYING marking. Set DEAD flag to
391 	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
392 	 * after draining finished.
393 	 */
394 	blk_freeze_queue(q);
395 
396 	rq_qos_exit(q);
397 
398 	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
399 
400 	/* for synchronous bio-based driver finish in-flight integrity i/o */
401 	blk_flush_integrity();
402 
403 	/* @q won't process any more requests, flush async actions */
404 	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
405 	blk_sync_queue(q);
406 
407 	if (queue_is_mq(q))
408 		blk_mq_exit_queue(q);
409 
410 	/*
411 	 * In theory, the request pool of sched_tags belongs to the request
412 	 * queue. However, the current implementation requires the tag_set
413 	 * for freeing requests, so free the pool now.
414 	 *
415 	 * Queue has become frozen, there can't be any in-queue requests, so
416 	 * it is safe to free requests now.
417 	 */
418 	mutex_lock(&q->sysfs_lock);
419 	if (q->elevator)
420 		blk_mq_sched_free_requests(q);
421 	mutex_unlock(&q->sysfs_lock);
422 
423 	percpu_ref_exit(&q->q_usage_counter);
424 
425 	/* @q is and will stay empty, shutdown and put */
426 	blk_put_queue(q);
427 }
428 EXPORT_SYMBOL(blk_cleanup_queue);
429 
430 /**
431  * blk_queue_enter() - try to increase q->q_usage_counter
432  * @q: request queue pointer
433  * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
434  */
435 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
436 {
437 	const bool pm = flags & BLK_MQ_REQ_PM;
438 
439 	while (true) {
440 		bool success = false;
441 
442 		rcu_read_lock();
443 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
444 			/*
445 			 * The code that increments the pm_only counter is
446 			 * responsible for ensuring that that counter is
447 			 * globally visible before the queue is unfrozen.
448 			 */
449 			if (pm || !blk_queue_pm_only(q)) {
450 				success = true;
451 			} else {
452 				percpu_ref_put(&q->q_usage_counter);
453 			}
454 		}
455 		rcu_read_unlock();
456 
457 		if (success)
458 			return 0;
459 
460 		if (flags & BLK_MQ_REQ_NOWAIT)
461 			return -EBUSY;
462 
463 		/*
464 		 * This pairs with the barrier in blk_freeze_queue_start():
465 		 * we need to order reading the __PERCPU_REF_DEAD flag of
466 		 * .q_usage_counter against reading .mq_freeze_depth or the
467 		 * queue dying flag, otherwise the following wait may
468 		 * never return if the two reads are reordered.
469 		 */
470 		smp_rmb();
471 
472 		wait_event(q->mq_freeze_wq,
473 			   (!q->mq_freeze_depth &&
474 			    (pm || (blk_pm_request_resume(q),
475 				    !blk_queue_pm_only(q)))) ||
476 			   blk_queue_dying(q));
477 		if (blk_queue_dying(q))
478 			return -ENODEV;
479 	}
480 }
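/*
 * Illustrative sketch (not part of this file): callers outside the normal
 * bio path bracket queue access with blk_queue_enter()/blk_queue_exit() so
 * a concurrent freeze or teardown cannot race with them:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;	(frozen, pm_only, or dying)
 *	... safely touch the queue ...
 *	blk_queue_exit(q);
 */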
481 
482 static inline int bio_queue_enter(struct bio *bio)
483 {
484 	struct request_queue *q = bio->bi_disk->queue;
485 	bool nowait = bio->bi_opf & REQ_NOWAIT;
486 	int ret;
487 
488 	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
489 	if (unlikely(ret)) {
490 		if (nowait && !blk_queue_dying(q))
491 			bio_wouldblock_error(bio);
492 		else
493 			bio_io_error(bio);
494 	}
495 
496 	return ret;
497 }
498 
499 void blk_queue_exit(struct request_queue *q)
500 {
501 	percpu_ref_put(&q->q_usage_counter);
502 }
503 
504 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
505 {
506 	struct request_queue *q =
507 		container_of(ref, struct request_queue, q_usage_counter);
508 
509 	wake_up_all(&q->mq_freeze_wq);
510 }
511 
512 static void blk_rq_timed_out_timer(struct timer_list *t)
513 {
514 	struct request_queue *q = from_timer(q, t, timeout);
515 
516 	kblockd_schedule_work(&q->timeout_work);
517 }
518 
519 static void blk_timeout_work(struct work_struct *work)
520 {
521 }
522 
523 struct request_queue *blk_alloc_queue(int node_id)
524 {
525 	struct request_queue *q;
526 	int ret;
527 
528 	q = kmem_cache_alloc_node(blk_requestq_cachep,
529 				GFP_KERNEL | __GFP_ZERO, node_id);
530 	if (!q)
531 		return NULL;
532 
533 	q->last_merge = NULL;
534 
535 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
536 	if (q->id < 0)
537 		goto fail_q;
538 
539 	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
540 	if (ret)
541 		goto fail_id;
542 
543 	q->backing_dev_info = bdi_alloc(node_id);
544 	if (!q->backing_dev_info)
545 		goto fail_split;
546 
547 	q->stats = blk_alloc_queue_stats();
548 	if (!q->stats)
549 		goto fail_stats;
550 
551 	q->node = node_id;
552 
553 	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
554 
555 	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
556 		    laptop_mode_timer_fn, 0);
557 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
558 	INIT_WORK(&q->timeout_work, blk_timeout_work);
559 	INIT_LIST_HEAD(&q->icq_list);
560 #ifdef CONFIG_BLK_CGROUP
561 	INIT_LIST_HEAD(&q->blkg_list);
562 #endif
563 
564 	kobject_init(&q->kobj, &blk_queue_ktype);
565 
566 	mutex_init(&q->debugfs_mutex);
567 	mutex_init(&q->sysfs_lock);
568 	mutex_init(&q->sysfs_dir_lock);
569 	spin_lock_init(&q->queue_lock);
570 
571 	init_waitqueue_head(&q->mq_freeze_wq);
572 	mutex_init(&q->mq_freeze_lock);
573 
574 	/*
575 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
576 	 * See blk_register_queue() for details.
577 	 */
578 	if (percpu_ref_init(&q->q_usage_counter,
579 				blk_queue_usage_counter_release,
580 				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
581 		goto fail_bdi;
582 
583 	if (blkcg_init_queue(q))
584 		goto fail_ref;
585 
586 	blk_queue_dma_alignment(q, 511);
587 	blk_set_default_limits(&q->limits);
588 	q->nr_requests = BLKDEV_MAX_RQ;
589 
590 	return q;
591 
592 fail_ref:
593 	percpu_ref_exit(&q->q_usage_counter);
594 fail_bdi:
595 	blk_free_queue_stats(q->stats);
596 fail_stats:
597 	bdi_put(q->backing_dev_info);
598 fail_split:
599 	bioset_exit(&q->bio_split);
600 fail_id:
601 	ida_simple_remove(&blk_queue_ida, q->id);
602 fail_q:
603 	kmem_cache_free(blk_requestq_cachep, q);
604 	return NULL;
605 }
606 EXPORT_SYMBOL(blk_alloc_queue);
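/*
 * Illustrative sketch (not part of this file): a simple bio-based driver
 * would pair blk_alloc_queue() with blk_cleanup_queue() and handle bios
 * through its gendisk's ->submit_bio method, e.g. in its probe path:
 *
 *	q = blk_alloc_queue(NUMA_NO_NODE);
 *	if (!q)
 *		return -ENOMEM;
 *	disk->queue = q;	(disk->fops->submit_bio handles the bios)
 *	...
 *	blk_cleanup_queue(q);	(on teardown)
 */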
607 
608 /**
609  * blk_get_queue - increment the request_queue refcount
610  * @q: the request_queue structure to increment the refcount for
611  *
612  * Increment the refcount of the request_queue kobject.
613  *
614  * Context: Any context.
615  */
616 bool blk_get_queue(struct request_queue *q)
617 {
618 	if (likely(!blk_queue_dying(q))) {
619 		__blk_get_queue(q);
620 		return true;
621 	}
622 
623 	return false;
624 }
625 EXPORT_SYMBOL(blk_get_queue);
626 
627 /**
628  * blk_get_request - allocate a request
629  * @q: request queue to allocate a request for
630  * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
631  * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
632  */
633 struct request *blk_get_request(struct request_queue *q, unsigned int op,
634 				blk_mq_req_flags_t flags)
635 {
636 	struct request *req;
637 
638 	WARN_ON_ONCE(op & REQ_NOWAIT);
639 	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
640 
641 	req = blk_mq_alloc_request(q, op, flags);
642 	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
643 		q->mq_ops->initialize_rq_fn(req);
644 
645 	return req;
646 }
647 EXPORT_SYMBOL(blk_get_request);
648 
649 void blk_put_request(struct request *req)
650 {
651 	blk_mq_free_request(req);
652 }
653 EXPORT_SYMBOL(blk_put_request);
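/*
 * Illustrative sketch (not part of this file): passthrough users allocate a
 * request, execute it synchronously, and drop it again:
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_execute_rq(q, NULL, rq, 0);	(wait for completion)
 *	blk_put_request(rq);
 */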
654 
655 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
656 {
657 	char b[BDEVNAME_SIZE];
658 
659 	pr_info_ratelimited("attempt to access beyond end of device\n"
660 			    "%s: rw=%d, want=%llu, limit=%llu\n",
661 			    bio_devname(bio, b), bio->bi_opf,
662 			    bio_end_sector(bio), maxsector);
663 }
664 
665 #ifdef CONFIG_FAIL_MAKE_REQUEST
666 
667 static DECLARE_FAULT_ATTR(fail_make_request);
668 
669 static int __init setup_fail_make_request(char *str)
670 {
671 	return setup_fault_attr(&fail_make_request, str);
672 }
673 __setup("fail_make_request=", setup_fail_make_request);
674 
675 static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
676 {
677 	return part->make_it_fail && should_fail(&fail_make_request, bytes);
678 }
679 
680 static int __init fail_make_request_debugfs(void)
681 {
682 	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
683 						NULL, &fail_make_request);
684 
685 	return PTR_ERR_OR_ZERO(dir);
686 }
687 
688 late_initcall(fail_make_request_debugfs);
689 
690 #else /* CONFIG_FAIL_MAKE_REQUEST */
691 
692 static inline bool should_fail_request(struct hd_struct *part,
693 					unsigned int bytes)
694 {
695 	return false;
696 }
697 
698 #endif /* CONFIG_FAIL_MAKE_REQUEST */
699 
700 static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
701 {
702 	const int op = bio_op(bio);
703 
704 	if (part->policy && op_is_write(op)) {
705 		char b[BDEVNAME_SIZE];
706 
707 		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
708 			return false;
709 
710 		WARN_ONCE(1,
711 		       "Trying to write to read-only block-device %s (partno %d)\n",
712 			bio_devname(bio, b), part->partno);
713 		/* Older lvm-tools actually trigger this */
714 		return false;
715 	}
716 
717 	return false;
718 }
719 
720 static noinline int should_fail_bio(struct bio *bio)
721 {
722 	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
723 		return -EIO;
724 	return 0;
725 }
726 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
727 
728 /*
729  * Check whether this bio extends beyond the end of the device or partition.
730  * This may well happen - the kernel calls bread() without checking the size of
731  * the device, e.g., when mounting a file system.
732  */
733 static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
734 {
735 	unsigned int nr_sectors = bio_sectors(bio);
736 
737 	if (nr_sectors && maxsector &&
738 	    (nr_sectors > maxsector ||
739 	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
740 		handle_bad_sector(bio, maxsector);
741 		return -EIO;
742 	}
743 	return 0;
744 }
745 
746 /*
747  * Remap block n of partition p to block n+start(p) of the disk.
748  */
749 static inline int blk_partition_remap(struct bio *bio)
750 {
751 	struct hd_struct *p;
752 	int ret = -EIO;
753 
754 	rcu_read_lock();
755 	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
756 	if (unlikely(!p))
757 		goto out;
758 	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
759 		goto out;
760 	if (unlikely(bio_check_ro(bio, p)))
761 		goto out;
762 
763 	if (bio_sectors(bio)) {
764 		if (bio_check_eod(bio, part_nr_sects_read(p)))
765 			goto out;
766 		bio->bi_iter.bi_sector += p->start_sect;
767 		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
768 				      bio->bi_iter.bi_sector - p->start_sect);
769 	}
770 	bio->bi_partno = 0;
771 	ret = 0;
772 out:
773 	rcu_read_unlock();
774 	return ret;
775 }
776 
777 /*
778  * Check write append to a zoned block device.
779  */
780 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
781 						 struct bio *bio)
782 {
783 	sector_t pos = bio->bi_iter.bi_sector;
784 	int nr_sectors = bio_sectors(bio);
785 
786 	/* Only applicable to zoned block devices */
787 	if (!blk_queue_is_zoned(q))
788 		return BLK_STS_NOTSUPP;
789 
790 	/* The bio sector must point to the start of a sequential zone */
791 	if (pos & (blk_queue_zone_sectors(q) - 1) ||
792 	    !blk_queue_zone_is_seq(q, pos))
793 		return BLK_STS_IOERR;
794 
795 	/*
796 	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
797 	 * split and could result in non-contiguous sectors being written in
798 	 * different zones.
799 	 */
800 	if (nr_sectors > q->limits.chunk_sectors)
801 		return BLK_STS_IOERR;
802 
803 	/* Make sure the BIO is small enough and will not get split */
804 	if (nr_sectors > q->limits.max_zone_append_sectors)
805 		return BLK_STS_IOERR;
806 
807 	bio->bi_opf |= REQ_NOMERGE;
808 
809 	return BLK_STS_OK;
810 }
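/*
 * Illustrative sketch (not part of this file): a zone-append writer does not
 * pick the write position itself; it targets the zone start and reads the
 * actual sector back on completion (see req_bio_endio() above):
 *
 *	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC;
 *	bio->bi_iter.bi_sector = zone_start_sector; (start of a seq zone)
 *	submit_bio(bio);
 *	(in ->bi_end_io: bio->bi_iter.bi_sector is where the data landed)
 */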
811 
812 static noinline_for_stack bool submit_bio_checks(struct bio *bio)
813 {
814 	struct request_queue *q = bio->bi_disk->queue;
815 	blk_status_t status = BLK_STS_IOERR;
816 	struct blk_plug *plug;
817 
818 	might_sleep();
819 
820 	plug = blk_mq_plug(q, bio);
821 	if (plug && plug->nowait)
822 		bio->bi_opf |= REQ_NOWAIT;
823 
824 	/*
825 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
826 	 * if queue does not support NOWAIT.
827 	 */
828 	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
829 		goto not_supported;
830 
831 	if (should_fail_bio(bio))
832 		goto end_io;
833 
834 	if (bio->bi_partno) {
835 		if (unlikely(blk_partition_remap(bio)))
836 			goto end_io;
837 	} else {
838 		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
839 			goto end_io;
840 		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
841 			goto end_io;
842 	}
843 
844 	/*
845 	 * Filter flush bios early so that bio-based drivers without flush
846 	 * support don't have to worry about them.
847 	 */
848 	if (op_is_flush(bio->bi_opf) &&
849 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
850 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
851 		if (!bio_sectors(bio)) {
852 			status = BLK_STS_OK;
853 			goto end_io;
854 		}
855 	}
856 
857 	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
858 		bio->bi_opf &= ~REQ_HIPRI;
859 
860 	switch (bio_op(bio)) {
861 	case REQ_OP_DISCARD:
862 		if (!blk_queue_discard(q))
863 			goto not_supported;
864 		break;
865 	case REQ_OP_SECURE_ERASE:
866 		if (!blk_queue_secure_erase(q))
867 			goto not_supported;
868 		break;
869 	case REQ_OP_WRITE_SAME:
870 		if (!q->limits.max_write_same_sectors)
871 			goto not_supported;
872 		break;
873 	case REQ_OP_ZONE_APPEND:
874 		status = blk_check_zone_append(q, bio);
875 		if (status != BLK_STS_OK)
876 			goto end_io;
877 		break;
878 	case REQ_OP_ZONE_RESET:
879 	case REQ_OP_ZONE_OPEN:
880 	case REQ_OP_ZONE_CLOSE:
881 	case REQ_OP_ZONE_FINISH:
882 		if (!blk_queue_is_zoned(q))
883 			goto not_supported;
884 		break;
885 	case REQ_OP_ZONE_RESET_ALL:
886 		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
887 			goto not_supported;
888 		break;
889 	case REQ_OP_WRITE_ZEROES:
890 		if (!q->limits.max_write_zeroes_sectors)
891 			goto not_supported;
892 		break;
893 	default:
894 		break;
895 	}
896 
897 	/*
898 	 * Various block parts want %current->io_context, so allocate it up
899 	 * front rather than dealing with lots of pain to allocate it only
900 	 * where needed. This may fail and the block layer knows how to live
901 	 * with it.
902 	 */
903 	if (unlikely(!current->io_context))
904 		create_task_io_context(current, GFP_ATOMIC, q->node);
905 
906 	if (blk_throtl_bio(bio))
907 		return false;
908 
909 	blk_cgroup_bio_start(bio);
910 	blkcg_bio_issue_init(bio);
911 
912 	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
913 		trace_block_bio_queue(q, bio);
914 		/* Now that enqueuing has been traced, we need to trace
915 		 * completion as well.
916 		 */
917 		bio_set_flag(bio, BIO_TRACE_COMPLETION);
918 	}
919 	return true;
920 
921 not_supported:
922 	status = BLK_STS_NOTSUPP;
923 end_io:
924 	bio->bi_status = status;
925 	bio_endio(bio);
926 	return false;
927 }
928 
929 static blk_qc_t __submit_bio(struct bio *bio)
930 {
931 	struct gendisk *disk = bio->bi_disk;
932 	blk_qc_t ret = BLK_QC_T_NONE;
933 
934 	if (blk_crypto_bio_prep(&bio)) {
935 		if (!disk->fops->submit_bio)
936 			return blk_mq_submit_bio(bio);
937 		ret = disk->fops->submit_bio(bio);
938 	}
939 	blk_queue_exit(disk->queue);
940 	return ret;
941 }
942 
943 /*
944  * The loop in this function may be a bit non-obvious, and so deserves some
945  * explanation:
946  *
947  *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
948  *    that), so we have a list with a single bio.
949  *  - We pretend that we have just taken it off a longer list, so we assign
950  *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
951  *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
952  *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
953  *    non-NULL value in bio_list and re-enter the loop from the top.
954  *  - In this case we really did just take the bio of the top of the list (no
955  *    pretending) and so remove it from bio_list, and call into ->submit_bio()
956  *    again.
957  *
958  * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
959  * bio_list_on_stack[1] contains bios that were submitted before the current
960  *	->submit_bio, but that haven't been processed yet.
961  */
962 static blk_qc_t __submit_bio_noacct(struct bio *bio)
963 {
964 	struct bio_list bio_list_on_stack[2];
965 	blk_qc_t ret = BLK_QC_T_NONE;
966 
967 	BUG_ON(bio->bi_next);
968 
969 	bio_list_init(&bio_list_on_stack[0]);
970 	current->bio_list = bio_list_on_stack;
971 
972 	do {
973 		struct request_queue *q = bio->bi_disk->queue;
974 		struct bio_list lower, same;
975 
976 		if (unlikely(bio_queue_enter(bio) != 0))
977 			continue;
978 
979 		/*
980 		 * Create a fresh bio_list for all subordinate requests.
981 		 */
982 		bio_list_on_stack[1] = bio_list_on_stack[0];
983 		bio_list_init(&bio_list_on_stack[0]);
984 
985 		ret = __submit_bio(bio);
986 
987 		/*
988 		 * Sort new bios into those for a lower level and those for the
989 		 * same level.
990 		 */
991 		bio_list_init(&lower);
992 		bio_list_init(&same);
993 		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
994 			if (q == bio->bi_disk->queue)
995 				bio_list_add(&same, bio);
996 			else
997 				bio_list_add(&lower, bio);
998 
999 		/*
1000 		 * Now assemble so we handle the lowest level first.
1001 		 */
1002 		bio_list_merge(&bio_list_on_stack[0], &lower);
1003 		bio_list_merge(&bio_list_on_stack[0], &same);
1004 		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
1005 	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
1006 
1007 	current->bio_list = NULL;
1008 	return ret;
1009 }
1010 
1011 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
1012 {
1013 	struct bio_list bio_list[2] = { };
1014 	blk_qc_t ret = BLK_QC_T_NONE;
1015 
1016 	current->bio_list = bio_list;
1017 
1018 	do {
1019 		struct gendisk *disk = bio->bi_disk;
1020 
1021 		if (unlikely(bio_queue_enter(bio) != 0))
1022 			continue;
1023 
1024 		if (!blk_crypto_bio_prep(&bio)) {
1025 			blk_queue_exit(disk->queue);
1026 			ret = BLK_QC_T_NONE;
1027 			continue;
1028 		}
1029 
1030 		ret = blk_mq_submit_bio(bio);
1031 	} while ((bio = bio_list_pop(&bio_list[0])));
1032 
1033 	current->bio_list = NULL;
1034 	return ret;
1035 }
1036 
1037 /**
1038  * submit_bio_noacct - re-submit a bio to the block device layer for I/O
1039  * @bio:  The bio describing the location in memory and on the device.
1040  *
1041  * This is a version of submit_bio() that shall only be used for I/O that is
1042  * resubmitted to lower level drivers by stacking block drivers.  All file
1043  * systems and other upper level users of the block layer should use
1044  * submit_bio() instead.
1045  */
1046 blk_qc_t submit_bio_noacct(struct bio *bio)
1047 {
1048 	if (!submit_bio_checks(bio))
1049 		return BLK_QC_T_NONE;
1050 
1051 	/*
1052 	 * We only want one ->submit_bio to be active at a time, else stack
1053 	 * usage with stacked devices could be a problem.  Use current->bio_list
1054 	 * to collect a list of requests submitted by a ->submit_bio method while
1055 	 * it is active, and then process them after it returned.
1056 	 */
1057 	if (current->bio_list) {
1058 		bio_list_add(&current->bio_list[0], bio);
1059 		return BLK_QC_T_NONE;
1060 	}
1061 
1062 	if (!bio->bi_disk->fops->submit_bio)
1063 		return __submit_bio_noacct_mq(bio);
1064 	return __submit_bio_noacct(bio);
1065 }
1066 EXPORT_SYMBOL(submit_bio_noacct);
1067 
1068 /**
1069  * submit_bio - submit a bio to the block device layer for I/O
1070  * @bio: The &struct bio which describes the I/O
1071  *
1072  * submit_bio() is used to submit I/O requests to block devices.  It is passed a
1073  * fully set up &struct bio that describes the I/O that needs to be done.  The
1074  * bio will be sent to the device described by the bi_disk and bi_partno fields.
1075  *
1076  * The success/failure status of the request, along with notification of
1077  * completion, is delivered asynchronously through the ->bi_end_io() callback
1078  * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
1079  * been called.
1080  */
1081 blk_qc_t submit_bio(struct bio *bio)
1082 {
1083 	if (blkcg_punt_bio_submit(bio))
1084 		return BLK_QC_T_NONE;
1085 
1086 	/*
1087 	 * If it's a regular read/write or a barrier with data attached,
1088 	 * go through the normal accounting stuff before submission.
1089 	 */
1090 	if (bio_has_data(bio)) {
1091 		unsigned int count;
1092 
1093 		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1094 			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
1095 		else
1096 			count = bio_sectors(bio);
1097 
1098 		if (op_is_write(bio_op(bio))) {
1099 			count_vm_events(PGPGOUT, count);
1100 		} else {
1101 			task_io_account_read(bio->bi_iter.bi_size);
1102 			count_vm_events(PGPGIN, count);
1103 		}
1104 
1105 		if (unlikely(block_dump)) {
1106 			char b[BDEVNAME_SIZE];
1107 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1108 			current->comm, task_pid_nr(current),
1109 				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
1110 				(unsigned long long)bio->bi_iter.bi_sector,
1111 				bio_devname(bio, b), count);
1112 		}
1113 	}
1114 
1115 	/*
1116 	 * If we're reading data that is part of the userspace workingset, count
1117 	 * submission time as memory stall.  When the device is congested, or
1118 	 * the submitting cgroup is IO-throttled, submission can be a significant
1119 	 * part of overall IO time.
1120 	 */
1121 	if (unlikely(bio_op(bio) == REQ_OP_READ &&
1122 	    bio_flagged(bio, BIO_WORKINGSET))) {
1123 		unsigned long pflags;
1124 		blk_qc_t ret;
1125 
1126 		psi_memstall_enter(&pflags);
1127 		ret = submit_bio_noacct(bio);
1128 		psi_memstall_leave(&pflags);
1129 
1130 		return ret;
1131 	}
1132 
1133 	return submit_bio_noacct(bio);
1134 }
1135 EXPORT_SYMBOL(submit_bio);
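/*
 * Illustrative sketch (not part of this file): a minimal filesystem-style
 * read of one page, assuming @bdev, @sector and @page are already set up and
 * my_end_io is a hypothetical completion handler:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);	(status arrives in ->bi_end_io)
 */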
1136 
1137 /**
1138  * blk_cloned_rq_check_limits - Helper function to check a cloned request
1139  *                              for the new queue limits
1140  * @q:  the queue
1141  * @rq: the request being checked
1142  *
1143  * Description:
1144  *    @rq may have been made based on weaker limitations of upper-level queues
1145  *    in request stacking drivers, and it may violate the limitation of @q.
1146  *    Since the block layer and the underlying device driver trust @rq
1147  *    after it is inserted to @q, it should be checked against @q before
1148  *    the insertion using this generic function.
1149  *
1150  *    Request stacking drivers like request-based dm may change the queue
1151  *    limits when retrying requests on other queues. Those requests need
1152  *    to be checked against the new queue limits again during dispatch.
1153  */
1154 static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
1155 				      struct request *rq)
1156 {
1157 	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
1158 
1159 	if (blk_rq_sectors(rq) > max_sectors) {
1160 		/*
1161 		 * SCSI device does not have a good way to return if
1162 		 * Write Same/Zero is actually supported. If a device rejects
1163 		 * a non-read/write command (discard, write same, etc.), the
1164 		 * low-level device driver will set the relevant queue limit to
1165 		 * 0 to prevent blk-lib from issuing more of the offending
1166 		 * operations. Commands queued prior to the queue limit being
1167 		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
1168 		 * errors being propagated to upper layers.
1169 		 */
1170 		if (max_sectors == 0)
1171 			return BLK_STS_NOTSUPP;
1172 
1173 		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
1174 			__func__, blk_rq_sectors(rq), max_sectors);
1175 		return BLK_STS_IOERR;
1176 	}
1177 
1178 	/*
1179 	 * queue's settings related to segment counting like q->bounce_pfn
1180 	 * may differ from that of other stacking queues.
1181 	 * Recalculate it to check the request correctly on this queue's
1182 	 * limitation.
1183 	 */
1184 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
1185 	if (rq->nr_phys_segments > queue_max_segments(q)) {
1186 		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
1187 			__func__, rq->nr_phys_segments, queue_max_segments(q));
1188 		return BLK_STS_IOERR;
1189 	}
1190 
1191 	return BLK_STS_OK;
1192 }
1193 
1194 /**
1195  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1196  * @q:  the queue to submit the request
1197  * @rq: the request being queued
1198  */
1199 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1200 {
1201 	blk_status_t ret;
1202 
1203 	ret = blk_cloned_rq_check_limits(q, rq);
1204 	if (ret != BLK_STS_OK)
1205 		return ret;
1206 
1207 	if (rq->rq_disk &&
1208 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1209 		return BLK_STS_IOERR;
1210 
1211 	if (blk_crypto_insert_cloned_request(rq))
1212 		return BLK_STS_IOERR;
1213 
1214 	if (blk_queue_io_stat(q))
1215 		blk_account_io_start(rq);
1216 
1217 	/*
1218 	 * Since we have a scheduler attached on the top device,
1219 	 * bypass a potential scheduler on the bottom device for
1220 	 * insert.
1221 	 */
1222 	return blk_mq_request_issue_directly(rq, true);
1223 }
1224 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1225 
1226 /**
1227  * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1228  * @rq: request to examine
1229  *
1230  * Description:
1231  *     A request could be a merge of IOs which require different failure
1232  *     handling.  This function determines the number of bytes which
1233  *     can be failed from the beginning of the request without
1234  *     crossing into an area which needs to be retried further.
1235  *
1236  * Return:
1237  *     The number of bytes to fail.
1238  */
1239 unsigned int blk_rq_err_bytes(const struct request *rq)
1240 {
1241 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1242 	unsigned int bytes = 0;
1243 	struct bio *bio;
1244 
1245 	if (!(rq->rq_flags & RQF_MIXED_MERGE))
1246 		return blk_rq_bytes(rq);
1247 
1248 	/*
1249 	 * Currently the only 'mixing' which can happen is between
1250 	 * different failfast types.  We can safely fail portions
1251 	 * which have all the failfast bits that the first one has -
1252 	 * the ones which are at least as eager to fail as the first
1253 	 * one.
1254 	 */
1255 	for (bio = rq->bio; bio; bio = bio->bi_next) {
1256 		if ((bio->bi_opf & ff) != ff)
1257 			break;
1258 		bytes += bio->bi_iter.bi_size;
1259 	}
1260 
1261 	/* this could lead to infinite loop */
1262 	BUG_ON(blk_rq_bytes(rq) && !bytes);
1263 	return bytes;
1264 }
1265 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1266 
1267 static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
1268 {
1269 	unsigned long stamp;
1270 again:
1271 	stamp = READ_ONCE(part->stamp);
1272 	if (unlikely(stamp != now)) {
1273 		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
1274 			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
1275 	}
1276 	if (part->partno) {
1277 		part = &part_to_disk(part)->part0;
1278 		goto again;
1279 	}
1280 }
1281 
1282 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1283 {
1284 	if (req->part && blk_do_io_stat(req)) {
1285 		const int sgrp = op_stat_group(req_op(req));
1286 		struct hd_struct *part;
1287 
1288 		part_stat_lock();
1289 		part = req->part;
1290 		part_stat_add(part, sectors[sgrp], bytes >> 9);
1291 		part_stat_unlock();
1292 	}
1293 }
1294 
1295 void blk_account_io_done(struct request *req, u64 now)
1296 {
1297 	/*
1298 	 * Account IO completion.  flush_rq isn't accounted as a
1299 	 * normal IO on either queueing or completion.  Accounting the
1300 	 * containing request is enough.
1301 	 */
1302 	if (req->part && blk_do_io_stat(req) &&
1303 	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
1304 		const int sgrp = op_stat_group(req_op(req));
1305 		struct hd_struct *part;
1306 
1307 		part_stat_lock();
1308 		part = req->part;
1309 
1310 		update_io_ticks(part, jiffies, true);
1311 		part_stat_inc(part, ios[sgrp]);
1312 		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
1313 		part_stat_unlock();
1314 
1315 		hd_struct_put(part);
1316 	}
1317 }
1318 
1319 void blk_account_io_start(struct request *rq)
1320 {
1321 	if (!blk_do_io_stat(rq))
1322 		return;
1323 
1324 	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
1325 
1326 	part_stat_lock();
1327 	update_io_ticks(rq->part, jiffies, false);
1328 	part_stat_unlock();
1329 }
1330 
1331 static unsigned long __part_start_io_acct(struct hd_struct *part,
1332 					  unsigned int sectors, unsigned int op)
1333 {
1334 	const int sgrp = op_stat_group(op);
1335 	unsigned long now = READ_ONCE(jiffies);
1336 
1337 	part_stat_lock();
1338 	update_io_ticks(part, now, false);
1339 	part_stat_inc(part, ios[sgrp]);
1340 	part_stat_add(part, sectors[sgrp], sectors);
1341 	part_stat_local_inc(part, in_flight[op_is_write(op)]);
1342 	part_stat_unlock();
1343 
1344 	return now;
1345 }
1346 
1347 unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
1348 				 struct bio *bio)
1349 {
1350 	*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
1351 
1352 	return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
1353 }
1354 EXPORT_SYMBOL_GPL(part_start_io_acct);
1355 
1356 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1357 				 unsigned int op)
1358 {
1359 	return __part_start_io_acct(&disk->part0, sectors, op);
1360 }
1361 EXPORT_SYMBOL(disk_start_io_acct);
1362 
1363 static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
1364 			       unsigned long start_time)
1365 {
1366 	const int sgrp = op_stat_group(op);
1367 	unsigned long now = READ_ONCE(jiffies);
1368 	unsigned long duration = now - start_time;
1369 
1370 	part_stat_lock();
1371 	update_io_ticks(part, now, true);
1372 	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1373 	part_stat_local_dec(part, in_flight[op_is_write(op)]);
1374 	part_stat_unlock();
1375 }
1376 
1377 void part_end_io_acct(struct hd_struct *part, struct bio *bio,
1378 		      unsigned long start_time)
1379 {
1380 	__part_end_io_acct(part, bio_op(bio), start_time);
1381 	hd_struct_put(part);
1382 }
1383 EXPORT_SYMBOL_GPL(part_end_io_acct);
1384 
1385 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1386 		      unsigned long start_time)
1387 {
1388 	__part_end_io_acct(&disk->part0, op, start_time);
1389 }
1390 EXPORT_SYMBOL(disk_end_io_acct);
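/*
 * Illustrative sketch (not part of this file): bio-based drivers that want
 * to show up in /proc/diskstats wrap each bio with the start/end pair:
 *
 *	unsigned long start = disk_start_io_acct(disk, bio_sectors(bio),
 *						 bio_op(bio));
 *	... perform the transfer ...
 *	disk_end_io_acct(disk, bio_op(bio), start);
 */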
1391 
1392 /*
1393  * Steal bios from a request and add them to a bio list.
1394  * The request must not have been partially completed before.
1395  */
1396 void blk_steal_bios(struct bio_list *list, struct request *rq)
1397 {
1398 	if (rq->bio) {
1399 		if (list->tail)
1400 			list->tail->bi_next = rq->bio;
1401 		else
1402 			list->head = rq->bio;
1403 		list->tail = rq->biotail;
1404 
1405 		rq->bio = NULL;
1406 		rq->biotail = NULL;
1407 	}
1408 
1409 	rq->__data_len = 0;
1410 }
1411 EXPORT_SYMBOL_GPL(blk_steal_bios);
1412 
1413 /**
1414  * blk_update_request - Special helper function for request stacking drivers
1415  * @req:      the request being processed
1416  * @error:    block status code
1417  * @nr_bytes: number of bytes to complete @req
1418  *
1419  * Description:
1420  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
1421  *     the request structure even if @req doesn't have leftover.
1422  *     If @req has leftover, sets it up for the next range of segments.
1423  *
1424  *     This special helper function is only for request stacking drivers
1425  *     (e.g. request-based dm) so that they can handle partial completion.
1426  *     Actual device drivers should use blk_mq_end_request instead.
1427  *
1428  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1429  *     %false return from this function.
1430  *
1431  * Note:
1432  *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
1433  *	blk_rq_bytes() and in blk_update_request().
1434  *
1435  * Return:
1436  *     %false - this request doesn't have any more data
1437  *     %true  - this request has more data
1438  **/
1439 bool blk_update_request(struct request *req, blk_status_t error,
1440 		unsigned int nr_bytes)
1441 {
1442 	int total_bytes;
1443 
1444 	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
1445 
1446 	if (!req->bio)
1447 		return false;
1448 
1449 #ifdef CONFIG_BLK_DEV_INTEGRITY
1450 	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
1451 	    error == BLK_STS_OK)
1452 		req->q->integrity.profile->complete_fn(req, nr_bytes);
1453 #endif
1454 
1455 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
1456 		     !(req->rq_flags & RQF_QUIET)))
1457 		print_req_error(req, error, __func__);
1458 
1459 	blk_account_io_completion(req, nr_bytes);
1460 
1461 	total_bytes = 0;
1462 	while (req->bio) {
1463 		struct bio *bio = req->bio;
1464 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1465 
1466 		if (bio_bytes == bio->bi_iter.bi_size)
1467 			req->bio = bio->bi_next;
1468 
1469 		/* Completion has already been traced */
1470 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1471 		req_bio_endio(req, bio, bio_bytes, error);
1472 
1473 		total_bytes += bio_bytes;
1474 		nr_bytes -= bio_bytes;
1475 
1476 		if (!nr_bytes)
1477 			break;
1478 	}
1479 
1480 	/*
1481 	 * completely done
1482 	 */
1483 	if (!req->bio) {
1484 		/*
1485 		 * Reset counters so that the request stacking driver
1486 		 * can find how many bytes remain in the request
1487 		 * later.
1488 		 */
1489 		req->__data_len = 0;
1490 		return false;
1491 	}
1492 
1493 	req->__data_len -= total_bytes;
1494 
1495 	/* update sector only for requests with clear definition of sector */
1496 	if (!blk_rq_is_passthrough(req))
1497 		req->__sector += total_bytes >> 9;
1498 
1499 	/* mixed attributes always follow the first bio */
1500 	if (req->rq_flags & RQF_MIXED_MERGE) {
1501 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
1502 		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
1503 	}
1504 
1505 	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
1506 		/*
1507 		 * If total number of sectors is less than the first segment
1508 		 * size, something has gone terribly wrong.
1509 		 */
1510 		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
1511 			blk_dump_rq_flags(req, "request botched");
1512 			req->__data_len = blk_rq_cur_bytes(req);
1513 		}
1514 
1515 		/* recalculate the number of segments */
1516 		req->nr_phys_segments = blk_recalc_rq_segments(req);
1517 	}
1518 
1519 	return true;
1520 }
1521 EXPORT_SYMBOL_GPL(blk_update_request);
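/*
 * Illustrative sketch (not part of this file): a request-stacking driver
 * completing @rq piecewise from its own completion handler might do:
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, done_bytes)) {
 *		(all bios finished, release the request)
 *		__blk_mq_end_request(rq, BLK_STS_OK);
 *	}
 *	(otherwise rq has been set up again for the remaining range)
 */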
1522 
1523 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1524 /**
1525  * rq_flush_dcache_pages - Helper function to flush all pages in a request
1526  * @rq: the request to be flushed
1527  *
1528  * Description:
1529  *     Flush all pages in @rq.
1530  */
1531 void rq_flush_dcache_pages(struct request *rq)
1532 {
1533 	struct req_iterator iter;
1534 	struct bio_vec bvec;
1535 
1536 	rq_for_each_segment(bvec, rq, iter)
1537 		flush_dcache_page(bvec.bv_page);
1538 }
1539 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
1540 #endif
1541 
1542 /**
1543  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1544  * @q : the queue of the device being checked
1545  *
1546  * Description:
1547  *    Check if underlying low-level drivers of a device are busy.
1548  *    If the drivers want to export their busy state, they must set their own
1549  *    exporting function using blk_queue_lld_busy() first.
1550  *
1551  *    Basically, this function is used only by request stacking drivers
1552  *    to stop dispatching requests to underlying devices when underlying
1553  *    devices are busy.  This behavior helps more I/O merging on the queue
1554  *    of the request stacking driver and prevents I/O throughput regression
1555  *    on burst I/O load.
1556  *
1557  * Return:
1558  *    0 - Not busy (The request stacking driver should dispatch request)
1559  *    1 - Busy (The request stacking driver should stop dispatching request)
1560  */
1561 int blk_lld_busy(struct request_queue *q)
1562 {
1563 	if (queue_is_mq(q) && q->mq_ops->busy)
1564 		return q->mq_ops->busy(q);
1565 
1566 	return 0;
1567 }
1568 EXPORT_SYMBOL_GPL(blk_lld_busy);
1569 
1570 /**
1571  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
1572  * @rq: the clone request to be cleaned up
1573  *
1574  * Description:
1575  *     Free all bios in @rq for a cloned request.
1576  */
1577 void blk_rq_unprep_clone(struct request *rq)
1578 {
1579 	struct bio *bio;
1580 
1581 	while ((bio = rq->bio) != NULL) {
1582 		rq->bio = bio->bi_next;
1583 
1584 		bio_put(bio);
1585 	}
1586 }
1587 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
1588 
1589 /**
1590  * blk_rq_prep_clone - Helper function to setup clone request
1591  * @rq: the request to be setup
1592  * @rq_src: original request to be cloned
1593  * @bs: bio_set that bios for clone are allocated from
1594  * @gfp_mask: memory allocation mask for bio
1595  * @bio_ctr: setup function to be called for each clone bio.
1596  *           Returns %0 for success, non %0 for failure.
1597  * @data: private data to be passed to @bio_ctr
1598  *
1599  * Description:
1600  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
1601  *     Also, pages which the original bios are pointing to are not copied
1602  *     and the cloned bios just point to the same pages.
1603  *     So cloned bios must be completed before original bios, which means
1604  *     the caller must complete @rq before @rq_src.
1605  */
1606 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1607 		      struct bio_set *bs, gfp_t gfp_mask,
1608 		      int (*bio_ctr)(struct bio *, struct bio *, void *),
1609 		      void *data)
1610 {
1611 	struct bio *bio, *bio_src;
1612 
1613 	if (!bs)
1614 		bs = &fs_bio_set;
1615 
1616 	__rq_for_each_bio(bio_src, rq_src) {
1617 		bio = bio_clone_fast(bio_src, gfp_mask, bs);
1618 		if (!bio)
1619 			goto free_and_out;
1620 
1621 		if (bio_ctr && bio_ctr(bio, bio_src, data))
1622 			goto free_and_out;
1623 
1624 		if (rq->bio) {
1625 			rq->biotail->bi_next = bio;
1626 			rq->biotail = bio;
1627 		} else {
1628 			rq->bio = rq->biotail = bio;
1629 		}
1630 		bio = NULL;
1631 	}
1632 
1633 	/* Copy attributes of the original request to the clone request. */
1634 	rq->__sector = blk_rq_pos(rq_src);
1635 	rq->__data_len = blk_rq_bytes(rq_src);
1636 	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
1637 		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1638 		rq->special_vec = rq_src->special_vec;
1639 	}
1640 	rq->nr_phys_segments = rq_src->nr_phys_segments;
1641 	rq->ioprio = rq_src->ioprio;
1642 
1643 	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
1644 		goto free_and_out;
1645 
1646 	return 0;
1647 
1648 free_and_out:
1649 	if (bio)
1650 		bio_put(bio);
1651 	blk_rq_unprep_clone(rq);
1652 
1653 	return -ENOMEM;
1654 }
1655 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
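/*
 * Illustrative sketch (not part of this file): a request-based stacking
 * driver (dm-mpath style) clones an incoming request before dispatching it
 * to a lower device:
 *
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL))
 *		return -ENOMEM;
 *	ret = blk_insert_cloned_request(lower_q, clone);
 *	(on failure or teardown: blk_rq_unprep_clone(clone))
 */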
1656 
1657 int kblockd_schedule_work(struct work_struct *work)
1658 {
1659 	return queue_work(kblockd_workqueue, work);
1660 }
1661 EXPORT_SYMBOL(kblockd_schedule_work);
1662 
1663 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1664 				unsigned long delay)
1665 {
1666 	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1667 }
1668 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1669 
1670 /**
1671  * blk_start_plug - initialize blk_plug and track it inside the task_struct
1672  * @plug:	The &struct blk_plug that needs to be initialized
1673  *
1674  * Description:
1675  *   blk_start_plug() indicates to the block layer an intent by the caller
1676  *   to submit multiple I/O requests in a batch.  The block layer may use
1677  *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
1678  *   is called.  However, the block layer may choose to submit requests
1679  *   before a call to blk_finish_plug() if the number of queued I/Os
1680  *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1681  *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
1682  *   the task schedules (see below).
1683  *
1684  *   Tracking blk_plug inside the task_struct will help with auto-flushing the
1685  *   pending I/O should the task end up blocking between blk_start_plug() and
1686  *   blk_finish_plug(). This is important from a performance perspective, but
1687  *   also ensures that we don't deadlock. For instance, if the task is blocking
1688  *   for a memory allocation, memory reclaim could end up wanting to free a
1689  *   page belonging to that request that is currently residing in our private
1690  *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
1691  *   this kind of deadlock.
1692  */
1693 void blk_start_plug(struct blk_plug *plug)
1694 {
1695 	struct task_struct *tsk = current;
1696 
1697 	/*
1698 	 * If this is a nested plug, don't actually assign it.
1699 	 */
1700 	if (tsk->plug)
1701 		return;
1702 
1703 	INIT_LIST_HEAD(&plug->mq_list);
1704 	INIT_LIST_HEAD(&plug->cb_list);
1705 	plug->rq_count = 0;
1706 	plug->multiple_queues = false;
1707 	plug->nowait = false;
1708 
1709 	/*
1710 	 * Store ordering should not be needed here, since a potential
1711 	 * preempt will imply a full memory barrier
1712 	 */
1713 	tsk->plug = plug;
1714 }
1715 EXPORT_SYMBOL(blk_start_plug);
1716 
1717 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1718 {
1719 	LIST_HEAD(callbacks);
1720 
1721 	while (!list_empty(&plug->cb_list)) {
1722 		list_splice_init(&plug->cb_list, &callbacks);
1723 
1724 		while (!list_empty(&callbacks)) {
1725 			struct blk_plug_cb *cb = list_first_entry(&callbacks,
1726 							  struct blk_plug_cb,
1727 							  list);
1728 			list_del(&cb->list);
1729 			cb->callback(cb, from_schedule);
1730 		}
1731 	}
1732 }
1733 
1734 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1735 				      int size)
1736 {
1737 	struct blk_plug *plug = current->plug;
1738 	struct blk_plug_cb *cb;
1739 
1740 	if (!plug)
1741 		return NULL;
1742 
1743 	list_for_each_entry(cb, &plug->cb_list, list)
1744 		if (cb->callback == unplug && cb->data == data)
1745 			return cb;
1746 
1747 	/* Not currently on the callback list */
1748 	BUG_ON(size < sizeof(*cb));
1749 	cb = kzalloc(size, GFP_ATOMIC);
1750 	if (cb) {
1751 		cb->data = data;
1752 		cb->callback = unplug;
1753 		list_add(&cb->list, &plug->cb_list);
1754 	}
1755 	return cb;
1756 }
1757 EXPORT_SYMBOL(blk_check_plugged);
1758 
1759 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1760 {
1761 	flush_plug_callbacks(plug, from_schedule);
1762 
1763 	if (!list_empty(&plug->mq_list))
1764 		blk_mq_flush_plug_list(plug, from_schedule);
1765 }
1766 
1767 /**
1768  * blk_finish_plug - mark the end of a batch of submitted I/O
1769  * @plug:	The &struct blk_plug passed to blk_start_plug()
1770  *
1771  * Description:
1772  * Indicate that a batch of I/O submissions is complete.  This function
1773  * must be paired with an initial call to blk_start_plug().  The intent
1774  * is to allow the block layer to optimize I/O submission.  See the
1775  * documentation for blk_start_plug() for more information.
1776  */
1777 void blk_finish_plug(struct blk_plug *plug)
1778 {
1779 	if (plug != current->plug)
1780 		return;
1781 	blk_flush_plug_list(plug, false);
1782 
1783 	current->plug = NULL;
1784 }
1785 EXPORT_SYMBOL(blk_finish_plug);
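/*
 * Illustrative sketch (not part of this file): batching several submissions
 * under one plug lets the block layer merge and dispatch them together:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);	(queued in the per-task plug)
 *	blk_finish_plug(&plug);		(flushes the batch to the device)
 */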
1786 
1787 void blk_io_schedule(void)
1788 {
1789 	/* Prevent hang_check timer from firing at us during very long I/O */
1790 	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1791 
1792 	if (timeout)
1793 		io_schedule_timeout(timeout);
1794 	else
1795 		io_schedule();
1796 }
1797 EXPORT_SYMBOL_GPL(blk_io_schedule);
1798 
1799 int __init blk_dev_init(void)
1800 {
1801 	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
1802 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1803 			sizeof_field(struct request, cmd_flags));
1804 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1805 			sizeof_field(struct bio, bi_opf));
1806 
1807 	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
1808 	kblockd_workqueue = alloc_workqueue("kblockd",
1809 					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1810 	if (!kblockd_workqueue)
1811 		panic("Failed to create kblockd\n");
1812 
1813 	blk_requestq_cachep = kmem_cache_create("request_queue",
1814 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1815 
1816 	blk_debugfs_root = debugfs_create_dir("block", NULL);
1817 
1818 	return 0;
1819 }
1820