/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
        struct {
                /** @lock: Protects the dispatch list. */
                spinlock_t lock;
                /**
                 * @dispatch: Used for requests that are ready to be
                 * dispatched to the hardware but for some reason (e.g. lack of
                 * resources) could not be sent to the hardware. As soon as the
                 * driver can send new requests, requests on this list are
                 * sent first, for fairer dispatch.
                 */
                struct list_head dispatch;
                /**
                 * @state: BLK_MQ_S_* flags. Defines the state of the hw
                 * queue (active, scheduled to restart, stopped).
                 */
                unsigned long state;
        } ____cacheline_aligned_in_smp;

        /**
         * @run_work: Used for scheduling a hardware queue run at a later time.
         */
        struct delayed_work run_work;
        /** @cpumask: Map of available CPUs where this hctx can run. */
        cpumask_var_t cpumask;
        /**
         * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
         * selection from @cpumask.
         */
        int next_cpu;
        /**
         * @next_cpu_batch: Counter of how many work items are left in the
         * batch before switching to the next CPU.
         */
        int next_cpu_batch;

        /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
        unsigned long flags;

        /**
         * @sched_data: Pointer owned by the IO scheduler attached to a request
         * queue. It's up to the IO scheduler how to use this pointer.
         */
        void *sched_data;
        /**
         * @queue: Pointer to the request queue that owns this hardware context.
         */
        struct request_queue *queue;
        /** @fq: Queue of requests that need to perform a flush operation. */
        struct blk_flush_queue *fq;

        /**
         * @driver_data: Pointer to data owned by the block driver that created
         * this hctx
         */
        void *driver_data;

        /**
         * @ctx_map: Bitmap for each software queue. If a bit is set, there is
         * a pending request in that software queue.
         */
        struct sbitmap ctx_map;

        /**
         * @dispatch_from: Software queue to be used when no scheduler was
         * selected.
         */
        struct blk_mq_ctx *dispatch_from;
        /**
         * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
         * decide if the hw queue is busy, using an Exponentially Weighted
         * Moving Average (EWMA) algorithm.
         */
        unsigned int dispatch_busy;

        /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
        unsigned short type;
        /** @nr_ctx: Number of software queues. */
        unsigned short nr_ctx;
        /** @ctxs: Array of software queues. */
        struct blk_mq_ctx **ctxs;

        /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
        spinlock_t dispatch_wait_lock;
        /**
         * @dispatch_wait: Waitqueue entry on which this hctx waits when no
         * tag is available, so that dispatch is retried once a tag is freed.
         */
        wait_queue_entry_t dispatch_wait;

        /**
         * @wait_index: Index of next available dispatch_wait queue to insert
         * requests.
         */
        atomic_t wait_index;

        /**
         * @tags: Tags owned by the block driver. A tag in this set is only
         * assigned when a request is dispatched from a hardware queue.
         */
        struct blk_mq_tags *tags;
        /**
         * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
         * scheduler associated with a request queue, a tag is assigned when
         * that request is allocated. Else, this member is not used.
         */
        struct blk_mq_tags *sched_tags;

        /** @queued: Number of queued requests. */
        unsigned long queued;
        /** @run: Number of dispatched requests. */
        unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 7
        /**
         * @dispatched: Histogram of dispatched requests, bucketed by log2 of
         * the number of requests dispatched per queue run.
         */
        unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

        /** @numa_node: NUMA node the storage adapter has been connected to. */
        unsigned int numa_node;
        /** @queue_num: Index of this hardware queue. */
        unsigned int queue_num;

        /**
         * @nr_active: Number of active requests. Only used when a tag set is
         * shared across request queues.
         */
        atomic_t nr_active;

        /** @cpuhp_online: CPU hotplug node for the "online" state callback. */
        struct hlist_node cpuhp_online;
        /** @cpuhp_dead: CPU hotplug node for the "dead" state callback. */
        struct hlist_node cpuhp_dead;
        /** @kobj: Kernel object for sysfs. */
        struct kobject kobj;

        /** @poll_considered: Count times blk_poll() was called. */
        unsigned long poll_considered;
        /** @poll_invoked: Count how many requests blk_poll() polled. */
        unsigned long poll_invoked;
        /** @poll_success: Count how many polled requests were completed. */
        unsigned long poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
        /**
         * @debugfs_dir: debugfs directory for this hardware queue, named
         * hctx<queue_num>.
         */
        struct dentry *debugfs_dir;
        /** @sched_debugfs_dir: debugfs directory for the scheduler. */
        struct dentry *sched_debugfs_dir;
#endif

        /**
         * @hctx_list: if this hctx is not in use, this is an entry in
         * q->unused_hctx_list.
         */
        struct list_head hctx_list;

        /**
         * @srcu: Sleepable RCU. Used as a lock when the hardware queue type
         * is blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
         * blk_mq_hw_ctx_size().
         */
        struct srcu_struct srcu[];
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *      with nr_cpu_ids elements. Each element has a value in the range
 *      [@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *      driver to map each hardware queue type (enum hctx_type) onto a distinct
 *      set of hardware queues.
 */
struct blk_mq_queue_map {
        unsigned int *mq_map;
        unsigned int nr_queues;
        unsigned int queue_offset;
};
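
/*
 * Example (a sketch, not part of this header): a driver's ->map_queues()
 * callback typically either calls blk_mq_map_queues() for a default spread
 * of CPUs over hardware queues, or fills @mq_map itself. The helper below
 * is hypothetical and only illustrates the invariant that every mq_map[cpu]
 * must fall in [queue_offset, queue_offset + nr_queues):
 *
 *      static void example_fill_map(struct blk_mq_queue_map *qmap)
 *      {
 *              unsigned int cpu;
 *
 *              for_each_possible_cpu(cpu)
 *                      qmap->mq_map[cpu] = qmap->queue_offset +
 *                                          cpu % qmap->nr_queues;
 *      }
 */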

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ: Just for READ I/O.
 * @HCTX_TYPE_POLL: Polled I/O of any kind.
 * @HCTX_MAX_TYPES: Number of types of hctx.
 */
enum hctx_type {
        HCTX_TYPE_DEFAULT,
        HCTX_TYPE_READ,
        HCTX_TYPE_POLL,

        HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map: One or more ctx -> hctx mappings. One map exists for each
 *      hardware queue type (enum hctx_type) that the driver wishes
 *      to support. There are no restrictions on maps being of the
 *      same size, and it's perfectly legal to share maps between
 *      types.
 * @nr_maps: Number of elements in the @map array. A number in the range
 *      [1, HCTX_MAX_TYPES].
 * @ops: Pointers to functions that implement block driver behavior.
 * @nr_hw_queues: Number of hardware queues supported by the block driver that
 *      owns this data structure.
 * @queue_depth: Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *      allocations.
 * @cmd_size: Number of additional bytes to allocate per request. The block
 *      driver owns these additional bytes.
 * @numa_node: NUMA node the storage adapter has been connected to.
 * @timeout: Request processing timeout in jiffies.
 * @flags: Zero or more BLK_MQ_F_* flags.
 * @driver_data: Pointer to data owned by the block driver that created this
 *      tag set.
 * @active_queues_shared_sbitmap:
 *      number of active request queues per tag set.
 * @__bitmap_tags: A shared tags sbitmap, used across all hctxs.
 * @__breserved_tags:
 *      A shared reserved tags sbitmap, used across all hctxs.
 * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *      elements.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list: List of the request queues that use this tag set. See also
 *      request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
        struct blk_mq_queue_map map[HCTX_MAX_TYPES];
        unsigned int nr_maps;
        const struct blk_mq_ops *ops;
        unsigned int nr_hw_queues;
        unsigned int queue_depth;
        unsigned int reserved_tags;
        unsigned int cmd_size;
        int numa_node;
        unsigned int timeout;
        unsigned int flags;
        void *driver_data;
        atomic_t active_queues_shared_sbitmap;

        struct sbitmap_queue __bitmap_tags;
        struct sbitmap_queue __breserved_tags;
        struct blk_mq_tags **tags;

        struct mutex tag_list_lock;
        struct list_head tag_list;
};
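
/*
 * Minimal field-filling sketch (example_mq_ops and struct example_cmd are
 * hypothetical driver names; error handling elided):
 *
 *      static struct blk_mq_tag_set set = {
 *              .ops            = &example_mq_ops,
 *              .nr_hw_queues   = 1,
 *              .nr_maps        = 1,
 *              .queue_depth    = 64,
 *              .numa_node      = NUMA_NO_NODE,
 *              .cmd_size       = sizeof(struct example_cmd),
 *              .flags          = BLK_MQ_F_SHOULD_MERGE,
 *      };
 */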

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq: Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
        struct request *rq;
        bool last;
};

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
                bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
        /**
         * @queue_rq: Queue a new request from block IO.
         */
        blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
                                 const struct blk_mq_queue_data *);

        /**
         * @commit_rqs: If a driver uses bd->last to judge when to submit
         * requests to hardware, it must define this function. In case of errors
         * that make us stop issuing further requests, this hook serves the
         * purpose of kicking the hardware (which the last request otherwise
         * would have done).
         */
        void (*commit_rqs)(struct blk_mq_hw_ctx *);

        /**
         * @get_budget: Reserve budget before queueing a request. Once
         * .queue_rq is run, it is the driver's responsibility to release the
         * reserved budget. Failure of .get_budget must also be handled to
         * avoid I/O deadlock.
         */
        bool (*get_budget)(struct request_queue *);

        /**
         * @put_budget: Release the reserved budget.
         */
        void (*put_budget)(struct request_queue *);

        /**
         * @timeout: Called on request timeout.
         */
        enum blk_eh_timer_return (*timeout)(struct request *, bool);

        /**
         * @poll: Called to poll for completion of a specific tag.
         */
        int (*poll)(struct blk_mq_hw_ctx *);

        /**
         * @complete: Mark the request as complete.
         */
        void (*complete)(struct request *);

        /**
         * @init_hctx: Called when the block layer side of a hardware queue has
         * been set up, allowing the driver to allocate/init matching
         * structures.
         */
        int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
        /**
         * @exit_hctx: Ditto for exit/teardown.
         */
        void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

        /**
         * @init_request: Called for every command allocated by the block layer
         * to allow the driver to set up driver specific data.
         *
         * Tags greater than or equal to queue_depth are reserved for setting
         * up flush requests.
         */
        int (*init_request)(struct blk_mq_tag_set *set, struct request *,
                            unsigned int, unsigned int);
        /**
         * @exit_request: Ditto for exit/teardown.
         */
        void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
                             unsigned int);

        /**
         * @initialize_rq_fn: Called from inside blk_get_request().
         */
        void (*initialize_rq_fn)(struct request *rq);

        /**
         * @cleanup_rq: Called before freeing a request that has not completed,
         * typically to free driver-private data.
         */
        void (*cleanup_rq)(struct request *);

        /**
         * @busy: If set, returns whether or not this queue currently is busy.
         */
        bool (*busy)(struct request_queue *);

        /**
         * @map_queues: Allows drivers to specify their own queue mapping by
         * overriding the setup-time function that builds the mq_map.
         */
        int (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
        /**
         * @show_rq: Used by the debugfs implementation to show driver-specific
         * information about a request.
         */
        void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
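
/*
 * Sketch of a minimal ->queue_rq() implementation (example_queue_rq() and
 * example_issue() are hypothetical; the latter stands in for actual
 * hardware submission):
 *
 *      static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *                                           const struct blk_mq_queue_data *bd)
 *      {
 *              struct request *rq = bd->rq;
 *
 *              blk_mq_start_request(rq);
 *              if (!example_issue(hctx->driver_data, rq))
 *                      return BLK_STS_RESOURCE;        // dispatch retried later
 *              return BLK_STS_OK;
 *      }
 *
 * Completion then typically happens from IRQ context via
 * blk_mq_complete_request(), which invokes ->complete(), which in turn
 * calls blk_mq_end_request().
 */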

enum {
        BLK_MQ_F_SHOULD_MERGE = 1 << 0,
        BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
        /*
         * Set when this device requires an underlying blk-mq device for
         * completing IO.
         */
        BLK_MQ_F_STACKING = 1 << 2,
        BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
        BLK_MQ_F_BLOCKING = 1 << 5,
        /* Do not allow an I/O scheduler to be configured. */
        BLK_MQ_F_NO_SCHED = 1 << 6,
        /*
         * Select 'none' instead of 'mq-deadline' during queue registration
         * in case of a single hwq or shared hwqs.
         */
        BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,

        BLK_MQ_S_STOPPED = 0,
        BLK_MQ_S_TAG_ACTIVE = 1,
        BLK_MQ_S_SCHED_RESTART = 2,

        /* hw queue is inactive after all its CPUs become offline */
        BLK_MQ_S_INACTIVE = 3,

        BLK_MQ_MAX_DEPTH = 10240,

        BLK_MQ_CPU_WORK_BATCH = 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
        ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
                ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
                << BLK_MQ_F_ALLOC_POLICY_START_BIT)

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
                void *queuedata);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                                                  struct request_queue *q,
                                                  bool elevator_init);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
                                           const struct blk_mq_ops *ops,
                                           unsigned int queue_depth,
                                           unsigned int set_flags);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
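
/*
 * Typical lifecycle sketch: allocate the tag set once, create a request
 * queue on top of it, and tear down in reverse order (blk_cleanup_queue()
 * is declared in blkdev.h; error handling elided):
 *
 *      ret = blk_mq_alloc_tag_set(&set);
 *      q = blk_mq_init_queue(&set);
 *      ...
 *      blk_cleanup_queue(q);
 *      blk_mq_free_tag_set(&set);
 */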

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
        /* return when out of requests */
        BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
        /* allocate from reserved pool */
        BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
        /* set RQF_PM */
        BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
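
/*
 * Example sketch: allocate a driver-internal command without sleeping,
 * drawing on the pool set aside via blk_mq_tag_set.reserved_tags:
 *
 *      struct request *rq;
 *
 *      rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN,
 *                                BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 *      if (IS_ERR(rq))
 *              return PTR_ERR(rq);
 */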

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
                unsigned int op, blk_mq_req_flags_t flags,
                unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {
        BLK_MQ_UNIQUE_TAG_BITS = 16,
        BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
        return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
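
/*
 * Example: SCSI LLDs use the unique tag to locate a command from interrupt
 * context; decomposing one is the inverse of blk_mq_unique_tag():
 *
 *      u32 unique = blk_mq_unique_tag(rq);
 *      u16 hwq = blk_mq_unique_tag_to_hwq(unique);     // hctx->queue_num
 *      u16 tag = blk_mq_unique_tag_to_tag(unique);     // per-hctx tag
 */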

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
        return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout);

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
        if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
            test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
                return __blk_should_fake_timeout(q);
        return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
        return rq + 1;
}
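
/*
 * Example: with blk_mq_tag_set.cmd_size = sizeof(struct example_cmd)
 * (a hypothetical per-request PDU type), the two casts are inverses:
 *
 *      struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *      struct request *back = blk_mq_rq_from_pdu(cmd); // back == rq
 */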

#define queue_for_each_hw_ctx(q, hctx, i)                               \
        for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
             ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)                                 \
        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
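
/*
 * Example: walk every hardware queue of a request queue, e.g. to restart
 * any that were stopped (uses only symbols from this header):
 *
 *      struct blk_mq_hw_ctx *hctx;
 *      unsigned int i;
 *
 *      queue_for_each_hw_ctx(q, hctx, i)
 *              if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 *                      blk_mq_start_hw_queue(hctx);
 */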

static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
                struct request *rq)
{
        if (rq->tag != -1)
                return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

        return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
                        BLK_QC_T_INTERNAL;
}

static inline void blk_mq_cleanup_rq(struct request *rq)
{
        if (rq->q->mq_ops->cleanup_rq)
                rq->q->mq_ops->cleanup_rq(rq);
}

blk_qc_t blk_mq_submit_bio(struct bio *bio);

#endif