Lines matching refs: rq (block/blk-mq.c)

51 static int blk_mq_poll_stats_bkt(const struct request *rq)  in blk_mq_poll_stats_bkt()  argument
55 ddir = rq_data_dir(rq); in blk_mq_poll_stats_bkt()
56 sectors = blk_rq_stats_sectors(rq); in blk_mq_poll_stats_bkt()
105 struct request *rq, void *priv, in blk_mq_check_inflight() argument
110 if ((!mi->part->partno || rq->part == mi->part) && in blk_mq_check_inflight()
111 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_inflight()
112 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_inflight()
273 static inline bool blk_mq_need_time_stamp(struct request *rq) in blk_mq_need_time_stamp() argument
275 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
282 struct request *rq = tags->static_rqs[tag]; in blk_mq_rq_ctx_init() local
285 rq->tag = BLK_MQ_NO_TAG; in blk_mq_rq_ctx_init()
286 rq->internal_tag = tag; in blk_mq_rq_ctx_init()
288 rq->tag = tag; in blk_mq_rq_ctx_init()
289 rq->internal_tag = BLK_MQ_NO_TAG; in blk_mq_rq_ctx_init()
293 rq->q = data->q; in blk_mq_rq_ctx_init()
294 rq->mq_ctx = data->ctx; in blk_mq_rq_ctx_init()
295 rq->mq_hctx = data->hctx; in blk_mq_rq_ctx_init()
296 rq->rq_flags = 0; in blk_mq_rq_ctx_init()
297 rq->cmd_flags = data->cmd_flags; in blk_mq_rq_ctx_init()
299 rq->rq_flags |= RQF_PM; in blk_mq_rq_ctx_init()
301 rq->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
302 INIT_LIST_HEAD(&rq->queuelist); in blk_mq_rq_ctx_init()
303 INIT_HLIST_NODE(&rq->hash); in blk_mq_rq_ctx_init()
304 RB_CLEAR_NODE(&rq->rb_node); in blk_mq_rq_ctx_init()
305 rq->rq_disk = NULL; in blk_mq_rq_ctx_init()
306 rq->part = NULL; in blk_mq_rq_ctx_init()
308 rq->alloc_time_ns = alloc_time_ns; in blk_mq_rq_ctx_init()
310 if (blk_mq_need_time_stamp(rq)) in blk_mq_rq_ctx_init()
311 rq->start_time_ns = ktime_get_ns(); in blk_mq_rq_ctx_init()
313 rq->start_time_ns = 0; in blk_mq_rq_ctx_init()
314 rq->io_start_time_ns = 0; in blk_mq_rq_ctx_init()
315 rq->stats_sectors = 0; in blk_mq_rq_ctx_init()
316 rq->nr_phys_segments = 0; in blk_mq_rq_ctx_init()
318 rq->nr_integrity_segments = 0; in blk_mq_rq_ctx_init()
320 blk_crypto_rq_set_defaults(rq); in blk_mq_rq_ctx_init()
322 WRITE_ONCE(rq->deadline, 0); in blk_mq_rq_ctx_init()
324 rq->timeout = 0; in blk_mq_rq_ctx_init()
326 rq->end_io = NULL; in blk_mq_rq_ctx_init()
327 rq->end_io_data = NULL; in blk_mq_rq_ctx_init()
330 refcount_set(&rq->ref, 1); in blk_mq_rq_ctx_init()
335 rq->elv.icq = NULL; in blk_mq_rq_ctx_init()
338 blk_mq_sched_assign_ioc(rq); in blk_mq_rq_ctx_init()
340 e->type->ops.prepare_request(rq); in blk_mq_rq_ctx_init()
341 rq->rq_flags |= RQF_ELVPRIV; in blk_mq_rq_ctx_init()
346 trace_android_vh_blk_rq_ctx_init(rq, tags, data, alloc_time_ns); in blk_mq_rq_ctx_init()
347 return rq; in blk_mq_rq_ctx_init()
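
With an I/O scheduler attached, the tag allocated above becomes rq->internal_tag and rq->tag stays BLK_MQ_NO_TAG until dispatch later grabs a driver tag; without a scheduler the roles are swapped. A hedged debug sketch making that split visible (my_dump_tags is hypothetical, not part of this file):

#include <linux/blk-mq.h>

static void my_dump_tags(struct request *rq)
{
        /* internal_tag: scheduler tag set in blk_mq_rq_ctx_init();
         * tag: driver tag, BLK_MQ_NO_TAG until __blk_mq_get_driver_tag() runs */
        pr_debug("rq %p tag=%d internal_tag=%d\n",
                 rq, rq->tag, rq->internal_tag);
}
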
411 struct request *rq; in blk_mq_alloc_request() local
418 rq = __blk_mq_alloc_request(&data); in blk_mq_alloc_request()
419 if (!rq) in blk_mq_alloc_request()
421 rq->__data_len = 0; in blk_mq_alloc_request()
422 rq->__sector = (sector_t) -1; in blk_mq_alloc_request()
423 rq->bio = rq->biotail = NULL; in blk_mq_alloc_request()
424 return rq; in blk_mq_alloc_request()
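
blk_mq_alloc_request() above hands back a request with __data_len cleared and __sector set to -1, ready for the caller to fill in. A minimal hedged sketch of a driver using it for an internal (passthrough) command; my_send_internal_cmd and the payload step are assumptions, not code from this file:

#include <linux/blk-mq.h>

static int my_send_internal_cmd(struct request_queue *q)
{
        struct request *rq;

        /* may sleep; pass BLK_MQ_REQ_NOWAIT to fail fast instead */
        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* ... fill in the driver-specific payload here ... */

        blk_execute_rq(q, NULL, rq, 0);         /* run it synchronously */
        blk_mq_free_request(rq);
        return 0;
}
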
492 static void __blk_mq_free_request(struct request *rq) in __blk_mq_free_request() argument
494 struct request_queue *q = rq->q; in __blk_mq_free_request()
495 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_free_request()
496 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request()
497 const int sched_tag = rq->internal_tag; in __blk_mq_free_request()
499 blk_crypto_free_request(rq); in __blk_mq_free_request()
500 blk_pm_mark_last_busy(rq); in __blk_mq_free_request()
501 rq->mq_hctx = NULL; in __blk_mq_free_request()
502 if (rq->tag != BLK_MQ_NO_TAG) in __blk_mq_free_request()
503 blk_mq_put_tag(hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
510 void blk_mq_free_request(struct request *rq) in blk_mq_free_request() argument
512 struct request_queue *q = rq->q; in blk_mq_free_request()
514 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_free_request()
515 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request()
517 if (rq->rq_flags & RQF_ELVPRIV) { in blk_mq_free_request()
519 e->type->ops.finish_request(rq); in blk_mq_free_request()
520 if (rq->elv.icq) { in blk_mq_free_request()
521 put_io_context(rq->elv.icq->ioc); in blk_mq_free_request()
522 rq->elv.icq = NULL; in blk_mq_free_request()
526 ctx->rq_completed[rq_is_sync(rq)]++; in blk_mq_free_request()
527 if (rq->rq_flags & RQF_MQ_INFLIGHT) in blk_mq_free_request()
530 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) in blk_mq_free_request()
533 rq_qos_done(q, rq); in blk_mq_free_request()
535 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in blk_mq_free_request()
536 if (refcount_dec_and_test(&rq->ref)) in blk_mq_free_request()
537 __blk_mq_free_request(rq); in blk_mq_free_request()
541 inline void __blk_mq_end_request(struct request *rq, blk_status_t error) in __blk_mq_end_request() argument
545 if (blk_mq_need_time_stamp(rq)) in __blk_mq_end_request()
548 if (rq->rq_flags & RQF_STATS) { in __blk_mq_end_request()
549 blk_mq_poll_stats_start(rq->q); in __blk_mq_end_request()
550 blk_stat_add(rq, now); in __blk_mq_end_request()
553 blk_mq_sched_completed_request(rq, now); in __blk_mq_end_request()
555 blk_account_io_done(rq, now); in __blk_mq_end_request()
557 if (rq->end_io) { in __blk_mq_end_request()
558 rq_qos_done(rq->q, rq); in __blk_mq_end_request()
559 rq->end_io(rq, error); in __blk_mq_end_request()
561 blk_mq_free_request(rq); in __blk_mq_end_request()
566 void blk_mq_end_request(struct request *rq, blk_status_t error) in blk_mq_end_request() argument
568 if (blk_update_request(rq, error, blk_rq_bytes(rq))) in blk_mq_end_request()
570 __blk_mq_end_request(rq, error); in blk_mq_end_request()
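
__blk_mq_end_request()/blk_mq_end_request() above are the completion sinks: they account the I/O, call rq->end_io if one is set, and otherwise free the request. A minimal hedged driver-side sketch (my_complete_one is hypothetical):

#include <linux/blk-mq.h>

static void my_complete_one(struct request *rq, bool success)
{
        /* updates stats, runs rq->end_io if set, else frees the request */
        blk_mq_end_request(rq, success ? BLK_STS_OK : BLK_STS_IOERR);
}
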
588 struct request *rq; in blk_done_softirq() local
590 rq = list_entry(local_list.next, struct request, ipi_list); in blk_done_softirq()
591 list_del_init(&rq->ipi_list); in blk_done_softirq()
592 rq->q->mq_ops->complete(rq); in blk_done_softirq()
596 static void blk_mq_trigger_softirq(struct request *rq) in blk_mq_trigger_softirq() argument
603 list_add_tail(&rq->ipi_list, list); in blk_mq_trigger_softirq()
610 if (list->next == &rq->ipi_list) in blk_mq_trigger_softirq()
633 struct request *rq = data; in __blk_mq_complete_request_remote() local
644 if (rq->q->nr_hw_queues == 1) in __blk_mq_complete_request_remote()
645 blk_mq_trigger_softirq(rq); in __blk_mq_complete_request_remote()
647 rq->q->mq_ops->complete(rq); in __blk_mq_complete_request_remote()
650 static inline bool blk_mq_complete_need_ipi(struct request *rq) in blk_mq_complete_need_ipi() argument
655 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) in blk_mq_complete_need_ipi()
659 if (cpu == rq->mq_ctx->cpu || in blk_mq_complete_need_ipi()
660 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) && in blk_mq_complete_need_ipi()
661 cpus_share_cache(cpu, rq->mq_ctx->cpu))) in blk_mq_complete_need_ipi()
665 return cpu_online(rq->mq_ctx->cpu); in blk_mq_complete_need_ipi()
668 bool blk_mq_complete_request_remote(struct request *rq) in blk_mq_complete_request_remote() argument
670 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); in blk_mq_complete_request_remote()
676 if (rq->cmd_flags & REQ_HIPRI) in blk_mq_complete_request_remote()
679 if (blk_mq_complete_need_ipi(rq)) { in blk_mq_complete_request_remote()
680 rq->csd.func = __blk_mq_complete_request_remote; in blk_mq_complete_request_remote()
681 rq->csd.info = rq; in blk_mq_complete_request_remote()
682 rq->csd.flags = 0; in blk_mq_complete_request_remote()
683 smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd); in blk_mq_complete_request_remote()
685 if (rq->q->nr_hw_queues > 1) in blk_mq_complete_request_remote()
687 blk_mq_trigger_softirq(rq); in blk_mq_complete_request_remote()
701 void blk_mq_complete_request(struct request *rq) in blk_mq_complete_request() argument
703 if (!blk_mq_complete_request_remote(rq)) in blk_mq_complete_request()
704 rq->q->mq_ops->complete(rq); in blk_mq_complete_request()
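
blk_mq_complete_request() and blk_mq_complete_request_remote() above decide whether a completion runs inline, over an IPI, or via softirq before the driver's ->complete() callback is invoked. A hedged sketch of the usual interrupt-handler/->complete() split (all my_* names and the tag lookup wiring are assumptions):

#include <linux/blk-mq.h>
#include <linux/interrupt.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct blk_mq_hw_ctx *hctx = data;      /* assumed driver wiring */
        unsigned int tag = 0;                   /* would come from the device */
        struct request *rq = blk_mq_tag_to_rq(hctx->tags, tag);

        if (rq)
                blk_mq_complete_request(rq);    /* core picks the completion CPU */
        return IRQ_HANDLED;
}

/* ->complete() from struct blk_mq_ops, run on the chosen CPU */
static void my_complete_rq(struct request *rq)
{
        blk_mq_end_request(rq, BLK_STS_OK);
}
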
736 void blk_mq_start_request(struct request *rq) in blk_mq_start_request() argument
738 struct request_queue *q = rq->q; in blk_mq_start_request()
740 trace_block_rq_issue(q, rq); in blk_mq_start_request()
743 rq->io_start_time_ns = ktime_get_ns(); in blk_mq_start_request()
744 rq->stats_sectors = blk_rq_sectors(rq); in blk_mq_start_request()
745 rq->rq_flags |= RQF_STATS; in blk_mq_start_request()
746 rq_qos_issue(q, rq); in blk_mq_start_request()
749 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE); in blk_mq_start_request()
751 blk_add_timer(rq); in blk_mq_start_request()
752 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); in blk_mq_start_request()
755 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) in blk_mq_start_request()
756 q->integrity.profile->prepare_fn(rq); in blk_mq_start_request()
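
blk_mq_start_request() above stamps the request for stats, arms the timeout and moves it to MQ_RQ_IN_FLIGHT; a driver must call it from ->queue_rq() before touching the hardware. A minimal hedged ->queue_rq() sketch (my_queue_rq is hypothetical):

#include <linux/blk-mq.h>

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);       /* arms the timeout, marks in-flight */

        /* ... program the hardware using blk_rq_pos(rq)/blk_rq_bytes(rq) ... */

        return BLK_STS_OK;              /* completion arrives asynchronously */
}
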
761 static void __blk_mq_requeue_request(struct request *rq) in __blk_mq_requeue_request() argument
763 struct request_queue *q = rq->q; in __blk_mq_requeue_request()
765 blk_mq_put_driver_tag(rq); in __blk_mq_requeue_request()
767 trace_block_rq_requeue(q, rq); in __blk_mq_requeue_request()
768 rq_qos_requeue(q, rq); in __blk_mq_requeue_request()
770 if (blk_mq_request_started(rq)) { in __blk_mq_requeue_request()
771 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in __blk_mq_requeue_request()
772 rq->rq_flags &= ~RQF_TIMED_OUT; in __blk_mq_requeue_request()
776 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) in blk_mq_requeue_request() argument
778 __blk_mq_requeue_request(rq); in blk_mq_requeue_request()
781 blk_mq_sched_requeue_request(rq); in blk_mq_requeue_request()
783 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); in blk_mq_requeue_request()
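
blk_mq_requeue_request() above un-starts the request and, with kick_requeue_list set, schedules the requeue work handled next. A hedged sketch of the usual driver decision between retrying and failing (my_handle_error is hypothetical):

#include <linux/blk-mq.h>

static void my_handle_error(struct request *rq, bool transient)
{
        if (transient)
                blk_mq_requeue_request(rq, true);       /* retried via requeue work */
        else
                blk_mq_end_request(rq, BLK_STS_IOERR);  /* fail it now */
}
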
792 struct request *rq, *next; in blk_mq_requeue_work() local
798 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { in blk_mq_requeue_work()
799 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) in blk_mq_requeue_work()
802 rq->rq_flags &= ~RQF_SOFTBARRIER; in blk_mq_requeue_work()
803 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
809 if (rq->rq_flags & RQF_DONTPREP) in blk_mq_requeue_work()
810 blk_mq_request_bypass_insert(rq, false, false); in blk_mq_requeue_work()
812 blk_mq_sched_insert_request(rq, true, false, false); in blk_mq_requeue_work()
816 rq = list_entry(rq_list.next, struct request, queuelist); in blk_mq_requeue_work()
817 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
818 blk_mq_sched_insert_request(rq, false, false, false); in blk_mq_requeue_work()
824 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, in blk_mq_add_to_requeue_list() argument
827 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list()
834 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); in blk_mq_add_to_requeue_list()
838 rq->rq_flags |= RQF_SOFTBARRIER; in blk_mq_add_to_requeue_list()
839 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
841 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
874 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, in blk_mq_rq_inflight() argument
881 if (blk_mq_request_started(rq) && rq->q == hctx->queue) { in blk_mq_rq_inflight()
915 static bool blk_mq_req_expired(struct request *rq, unsigned long *next) in blk_mq_req_expired() argument
919 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) in blk_mq_req_expired()
921 if (rq->rq_flags & RQF_TIMED_OUT) in blk_mq_req_expired()
924 deadline = READ_ONCE(rq->deadline); in blk_mq_req_expired()
935 void blk_mq_put_rq_ref(struct request *rq) in blk_mq_put_rq_ref() argument
937 if (is_flush_rq(rq)) in blk_mq_put_rq_ref()
938 rq->end_io(rq, 0); in blk_mq_put_rq_ref()
939 else if (refcount_dec_and_test(&rq->ref)) in blk_mq_put_rq_ref()
940 __blk_mq_free_request(rq); in blk_mq_put_rq_ref()
944 struct request *rq, void *priv, bool reserved) in blk_mq_check_expired() argument
955 if (blk_mq_req_expired(rq, next)) in blk_mq_check_expired()
956 blk_mq_rq_timed_out(rq, reserved); in blk_mq_check_expired()
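
blk_mq_check_expired()/blk_mq_req_expired() above feed the timeout machinery, which ends in the driver's ->timeout() callback. One possible handler, loosely modelled on the common abort-then-complete pattern (my_timeout_rq is hypothetical and the hardware abort is elided):

#include <linux/blk-mq.h>

static enum blk_eh_timer_return my_timeout_rq(struct request *rq, bool reserved)
{
        /* ... abort the command in hardware here ... */
        blk_mq_complete_request(rq);    /* complete it ourselves ... */
        return BLK_EH_DONE;             /* ... and tell the core it is handled */
}
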
1040 struct request *rq; member
1053 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); in dispatch_rq_from_ctx()
1054 list_del_init(&dispatch_data->rq->queuelist); in dispatch_rq_from_ctx()
1060 return !dispatch_data->rq; in dispatch_rq_from_ctx()
1069 .rq = NULL, in blk_mq_dequeue_from_ctx()
1075 return data.rq; in blk_mq_dequeue_from_ctx()
1086 static bool __blk_mq_get_driver_tag(struct request *rq) in __blk_mq_get_driver_tag() argument
1088 struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags; in __blk_mq_get_driver_tag()
1089 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; in __blk_mq_get_driver_tag()
1092 blk_mq_tag_busy(rq->mq_hctx); in __blk_mq_get_driver_tag()
1094 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { in __blk_mq_get_driver_tag()
1095 bt = rq->mq_hctx->tags->breserved_tags; in __blk_mq_get_driver_tag()
1098 if (!hctx_may_queue(rq->mq_hctx, bt)) in __blk_mq_get_driver_tag()
1106 rq->tag = tag + tag_offset; in __blk_mq_get_driver_tag()
1110 static bool blk_mq_get_driver_tag(struct request *rq) in blk_mq_get_driver_tag() argument
1112 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_get_driver_tag()
1114 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq)) in blk_mq_get_driver_tag()
1118 !(rq->rq_flags & RQF_MQ_INFLIGHT)) { in blk_mq_get_driver_tag()
1119 rq->rq_flags |= RQF_MQ_INFLIGHT; in blk_mq_get_driver_tag()
1122 hctx->tags->rqs[rq->tag] = rq; in blk_mq_get_driver_tag()
1154 struct request *rq) in blk_mq_mark_tag_wait() argument
1172 return blk_mq_get_driver_tag(rq); in blk_mq_mark_tag_wait()
1198 ret = blk_mq_get_driver_tag(rq); in blk_mq_mark_tag_wait()
1245 static void blk_mq_handle_dev_resource(struct request *rq, in blk_mq_handle_dev_resource() argument
1258 list_add(&rq->queuelist, list); in blk_mq_handle_dev_resource()
1259 __blk_mq_requeue_request(rq); in blk_mq_handle_dev_resource()
1262 static void blk_mq_handle_zone_resource(struct request *rq, in blk_mq_handle_zone_resource() argument
1271 list_add(&rq->queuelist, zone_list); in blk_mq_handle_zone_resource()
1272 __blk_mq_requeue_request(rq); in blk_mq_handle_zone_resource()
1281 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq, in blk_mq_prep_dispatch_rq() argument
1284 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_prep_dispatch_rq()
1286 if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) { in blk_mq_prep_dispatch_rq()
1287 blk_mq_put_driver_tag(rq); in blk_mq_prep_dispatch_rq()
1291 if (!blk_mq_get_driver_tag(rq)) { in blk_mq_prep_dispatch_rq()
1299 if (!blk_mq_mark_tag_wait(hctx, rq)) { in blk_mq_prep_dispatch_rq()
1305 blk_mq_put_dispatch_budget(rq->q); in blk_mq_prep_dispatch_rq()
1331 struct request *rq, *nxt; in blk_mq_dispatch_rq_list() local
1347 rq = list_first_entry(list, struct request, queuelist); in blk_mq_dispatch_rq_list()
1349 WARN_ON_ONCE(hctx != rq->mq_hctx); in blk_mq_dispatch_rq_list()
1350 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); in blk_mq_dispatch_rq_list()
1354 list_del_init(&rq->queuelist); in blk_mq_dispatch_rq_list()
1356 bd.rq = rq; in blk_mq_dispatch_rq_list()
1384 blk_mq_handle_dev_resource(rq, list); in blk_mq_dispatch_rq_list()
1392 blk_mq_handle_zone_resource(rq, &zone_list); in blk_mq_dispatch_rq_list()
1397 blk_mq_end_request(rq, BLK_STS_IOERR); in blk_mq_dispatch_rq_list()
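
In blk_mq_dispatch_rq_list() above, a BLK_STS_RESOURCE/BLK_STS_DEV_RESOURCE return from ->queue_rq() routes the request through blk_mq_handle_dev_resource() back onto the dispatch list rather than completing it. A hedged sketch of the driver side of that contract (my_device_has_slot is an assumed placeholder):

#include <linux/blk-mq.h>

/* assumed driver helper: does the hardware have a free command slot? */
static bool my_device_has_slot(void *driver_data)
{
        return true;    /* placeholder */
}

static blk_status_t my_queue_rq_busy(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
{
        if (!my_device_has_slot(hctx->driver_data))
                return BLK_STS_DEV_RESOURCE;    /* core parks rq on hctx->dispatch */

        blk_mq_start_request(bd->rq);
        /* ... issue to hardware ... */
        return BLK_STS_OK;
}
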
1859 struct request *rq, in __blk_mq_insert_req_list() argument
1862 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_req_list()
1867 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_req_list()
1870 list_add(&rq->queuelist, &ctx->rq_lists[type]); in __blk_mq_insert_req_list()
1872 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); in __blk_mq_insert_req_list()
1875 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in __blk_mq_insert_request() argument
1878 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_request()
1882 __blk_mq_insert_req_list(hctx, rq, at_head); in __blk_mq_insert_request()
1895 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, in blk_mq_request_bypass_insert() argument
1898 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_bypass_insert()
1902 list_add(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1904 list_add_tail(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1915 struct request *rq; in blk_mq_insert_requests() local
1922 list_for_each_entry(rq, list, queuelist) { in blk_mq_insert_requests()
1923 BUG_ON(rq->mq_ctx != ctx); in blk_mq_insert_requests()
1924 trace_block_rq_insert(hctx->queue, rq); in blk_mq_insert_requests()
1961 struct request *rq, *head_rq = list_entry_rq(list.next); in blk_mq_flush_plug_list() local
1968 rq = list_entry_rq(pos); in blk_mq_flush_plug_list()
1969 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
1970 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) in blk_mq_flush_plug_list()
1982 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, in blk_mq_bio_to_request() argument
1988 rq->cmd_flags |= REQ_FAILFAST_MASK; in blk_mq_bio_to_request()
1990 rq->__sector = bio->bi_iter.bi_sector; in blk_mq_bio_to_request()
1991 rq->write_hint = bio->bi_write_hint; in blk_mq_bio_to_request()
1992 blk_rq_bio_prep(rq, bio, nr_segs); in blk_mq_bio_to_request()
1995 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); in blk_mq_bio_to_request()
1998 blk_account_io_start(rq); in blk_mq_bio_to_request()
2002 struct request *rq, in __blk_mq_issue_directly() argument
2005 struct request_queue *q = rq->q; in __blk_mq_issue_directly()
2007 .rq = rq, in __blk_mq_issue_directly()
2013 new_cookie = request_to_qc_t(hctx, rq); in __blk_mq_issue_directly()
2029 __blk_mq_requeue_request(rq); in __blk_mq_issue_directly()
2041 struct request *rq, in __blk_mq_try_issue_directly() argument
2045 struct request_queue *q = rq->q; in __blk_mq_try_issue_directly()
2067 if (!blk_mq_get_driver_tag(rq)) { in __blk_mq_try_issue_directly()
2072 return __blk_mq_issue_directly(hctx, rq, cookie, last); in __blk_mq_try_issue_directly()
2077 blk_mq_sched_insert_request(rq, false, run_queue, false); in __blk_mq_try_issue_directly()
2094 struct request *rq, blk_qc_t *cookie) in blk_mq_try_issue_directly() argument
2103 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); in blk_mq_try_issue_directly()
2105 blk_mq_request_bypass_insert(rq, false, true); in blk_mq_try_issue_directly()
2107 blk_mq_end_request(rq, ret); in blk_mq_try_issue_directly()
2112 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) in blk_mq_request_issue_directly() argument
2117 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_issue_directly()
2120 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); in blk_mq_request_issue_directly()
2134 struct request *rq = list_first_entry(list, struct request, in blk_mq_try_issue_list_directly() local
2137 list_del_init(&rq->queuelist); in blk_mq_try_issue_list_directly()
2138 ret = blk_mq_request_issue_directly(rq, list_empty(list)); in blk_mq_try_issue_list_directly()
2143 blk_mq_request_bypass_insert(rq, false, in blk_mq_try_issue_list_directly()
2147 blk_mq_end_request(rq, ret); in blk_mq_try_issue_list_directly()
2162 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) in blk_add_rq_to_plug() argument
2164 list_add_tail(&rq->queuelist, &plug->mq_list); in blk_add_rq_to_plug()
2171 if (tmp->q != rq->q) in blk_add_rq_to_plug()
2211 struct request *rq; in blk_mq_submit_bio() local
2234 rq = __blk_mq_alloc_request(&data); in blk_mq_submit_bio()
2235 if (unlikely(!rq)) { in blk_mq_submit_bio()
2244 rq_qos_track(q, rq, bio); in blk_mq_submit_bio()
2246 cookie = request_to_qc_t(data.hctx, rq); in blk_mq_submit_bio()
2248 blk_mq_bio_to_request(rq, bio, nr_segs); in blk_mq_submit_bio()
2250 ret = blk_crypto_init_request(rq); in blk_mq_submit_bio()
2254 blk_mq_free_request(rq); in blk_mq_submit_bio()
2261 blk_insert_flush(rq); in blk_mq_submit_bio()
2264 blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) || in blk_mq_submit_bio()
2287 blk_add_rq_to_plug(plug, rq); in blk_mq_submit_bio()
2290 blk_mq_sched_insert_request(rq, false, true, true); in blk_mq_submit_bio()
2305 blk_add_rq_to_plug(plug, rq); in blk_mq_submit_bio()
2320 blk_mq_try_issue_directly(data.hctx, rq, &cookie); in blk_mq_submit_bio()
2323 blk_mq_sched_insert_request(rq, false, true, true); in blk_mq_submit_bio()
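
blk_mq_submit_bio() and blk_add_rq_to_plug() above implement the plugged fast path: requests created while a plug is held collect on plug->mq_list and are flushed together by blk_mq_flush_plug_list(). A hedged caller-side sketch (my_submit_batch and the bios array are assumptions):

#include <linux/blkdev.h>
#include <linux/bio.h>

static void my_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* submissions below gather on plug->mq_list */
        for (i = 0; i < nr; i++)
                submit_bio(bios[i]);
        blk_finish_plug(&plug);         /* flushed via blk_mq_flush_plug_list() */
}
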
2351 struct request *rq = drv_tags->rqs[i]; in blk_mq_clear_rq_mapping() local
2352 unsigned long rq_addr = (unsigned long)rq; in blk_mq_clear_rq_mapping()
2355 WARN_ON_ONCE(refcount_read(&rq->ref) != 0); in blk_mq_clear_rq_mapping()
2356 cmpxchg(&drv_tags->rqs[i], rq, NULL); in blk_mq_clear_rq_mapping()
2380 struct request *rq = tags->static_rqs[i]; in blk_mq_free_rqs() local
2382 if (!rq) in blk_mq_free_rqs()
2384 set->ops->exit_request(set, rq, hctx_idx); in blk_mq_free_rqs()
2450 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, in blk_mq_init_request() argument
2456 ret = set->ops->init_request(set, rq, hctx_idx, node); in blk_mq_init_request()
2461 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in blk_mq_init_request()
2524 struct request *rq = p; in blk_mq_alloc_rqs() local
2526 tags->static_rqs[i] = rq; in blk_mq_alloc_rqs()
2527 if (blk_mq_init_request(set, rq, hctx_idx, node)) { in blk_mq_alloc_rqs()
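
blk_mq_init_request() above runs the driver's ->init_request() for each statically allocated request, then marks it MQ_RQ_IDLE. A hedged sketch of such a callback pair; struct my_cmd and its field are assumptions (the per-request PDU size comes from the tag set's cmd_size):

#include <linux/blk-mq.h>

struct my_cmd {                         /* hypothetical per-request driver data */
        dma_addr_t dma;
};

static int my_init_request(struct blk_mq_tag_set *set, struct request *rq,
                           unsigned int hctx_idx, unsigned int numa_node)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);      /* PDU sits after the rq */

        cmd->dma = 0;
        return 0;
}

static void my_exit_request(struct blk_mq_tag_set *set, struct request *rq,
                            unsigned int hctx_idx)
{
        /* undo whatever my_init_request() set up */
}
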
2548 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved) in blk_mq_has_request() argument
2552 if (rq->mq_hctx != iter_data->hctx) in blk_mq_has_request()
3855 struct request *rq) in blk_mq_poll_nsecs() argument
3876 bucket = blk_mq_poll_stats_bkt(rq); in blk_mq_poll_nsecs()
3887 struct request *rq) in blk_mq_poll_hybrid_sleep() argument
3894 if (rq->rq_flags & RQF_MQ_POLL_SLEPT) in blk_mq_poll_hybrid_sleep()
3906 nsecs = blk_mq_poll_nsecs(q, rq); in blk_mq_poll_hybrid_sleep()
3911 rq->rq_flags |= RQF_MQ_POLL_SLEPT; in blk_mq_poll_hybrid_sleep()
3924 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) in blk_mq_poll_hybrid_sleep()
3942 struct request *rq; in blk_mq_poll_hybrid() local
3948 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll_hybrid()
3950 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll_hybrid()
3957 if (!rq) in blk_mq_poll_hybrid()
3961 return blk_mq_poll_hybrid_sleep(q, rq); in blk_mq_poll_hybrid()
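
blk_mq_poll_hybrid() and blk_mq_poll_hybrid_sleep() above implement hybrid polling: the poller first sleeps for roughly half the observed mean completion time from the poll stats, then spins. A hedged caller-side sketch of polled I/O (my_* names are assumptions; real callers live in the direct-I/O paths):

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/completion.h>

static void my_polled_endio(struct bio *bio)
{
        complete(bio->bi_private);
}

static void my_polled_submit(struct request_queue *q, struct bio *bio)
{
        DECLARE_COMPLETION_ONSTACK(done);
        blk_qc_t cookie;

        bio->bi_opf |= REQ_HIPRI;               /* mark the bio pollable */
        bio->bi_private = &done;
        bio->bi_end_io = my_polled_endio;

        cookie = submit_bio(bio);
        while (!completion_done(&done))
                blk_poll(q, cookie, true);      /* may hybrid-sleep, then spins */
}
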
4030 unsigned int blk_mq_rq_cpu(struct request *rq) in blk_mq_rq_cpu() argument
4032 return rq->mq_ctx->cpu; in blk_mq_rq_cpu()