| /OK3568_Linux_fs/kernel/block/ |
| blk-mq.h |
|   26  struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];
|   45  bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
|   49  void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
|   50  struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
|   71  void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
|   75  void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
|   80  void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
|   94  static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, in blk_mq_map_queue_type()
|   107 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, in blk_mq_map_queue()
|   132 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
|   [all …]
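The block/blk-mq.h helpers listed above (blk_mq_map_queue_type(), blk_mq_map_queue(), the hctxs[HCTX_MAX_TYPES] array) resolve a submission to one hardware context per queue type. On the driver side that mapping comes from the tag set's queue maps; the fragment below is a minimal, hedged sketch of a .map_queues callback that only populates the default map. The foo_ name is illustrative and not from this tree.

```c
#include <linux/blk-mq.h>

/* Sketch: spread all hardware queues over the default map only. Drivers
 * with their own IRQ affinity, or with separate read/poll maps, would
 * fill in additional set->map[] entries here. */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];

	qmap->nr_queues = set->nr_hw_queues;
	return blk_mq_map_queues(qmap);
}
```

The resulting map is what blk_mq_map_queue() consults when it picks an hctx for a request's operation flags.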
|
| blk-mq-sysfs.c |
|   36  struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release()
|   56  ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
|   57  ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
|   106 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show()
|   111 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_show()
|   128 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_store()
|   133 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_store()
|   145 static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, in blk_mq_hw_sysfs_nr_tags_show()
|   151 static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, in blk_mq_hw_sysfs_nr_reserved_tags_show()
|   157 static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) in blk_mq_hw_sysfs_cpus_show()
|   [all …]
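The blk-mq-sysfs.c hits show the usual kobject-attribute pattern: the generic show/store entry points recover the struct blk_mq_hw_ctx from its embedded kobject with container_of() and then call per-attribute hooks with the hctx-based signatures at lines 56–57. Below is a stripped-down illustration of just that recovery step, with the attribute plumbing omitted; foo_hw_sysfs_show is a hypothetical name, not the in-tree function.

```c
#include <linux/blk-mq.h>
#include <linux/kobject.h>

static ssize_t foo_hw_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	/* The hctx embeds its kobject, so the callback can walk back from
	 * the kobject to the containing blk_mq_hw_ctx. */
	struct blk_mq_hw_ctx *hctx =
		container_of(kobj, struct blk_mq_hw_ctx, kobj);

	return sprintf(page, "%u\n", hctx->queue_num);
}
```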
|
| blk-mq-debugfs.h |
|   24  struct blk_mq_hw_ctx *hctx);
|   25  void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
|   32  struct blk_mq_hw_ctx *hctx);
|   33  void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
|   48  struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx()
|   52  static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx()
|   73  struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx()
|   77  static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx()
|
| blk-mq-sched.c |
|   51  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx()
|   60  void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart()
|   88  struct blk_mq_hw_ctx *hctx = in blk_mq_dispatch_hctx_list()
|   117 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in __blk_mq_do_dispatch_sched()
|   194 static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_sched()
|   212 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_next_ctx()
|   231 static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_ctx()
|   281 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in __blk_mq_sched_dispatch_requests()
|   334 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_dispatch_requests()
|   359 struct blk_mq_hw_ctx *hctx; in __blk_mq_sched_bio_merge()
|   [all …]
|
| blk-mq-debugfs.c |
|   228 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show()
|   256 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show()
|   367 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_start()
|   375 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_next()
|   383 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_stop()
|   397 struct blk_mq_hw_ctx *hctx;
|   417 struct blk_mq_hw_ctx *hctx = data; in hctx_busy_show()
|   434 struct blk_mq_hw_ctx *hctx = data; in hctx_type_show()
|   443 struct blk_mq_hw_ctx *hctx = data; in hctx_ctx_map_show()
|   468 struct blk_mq_hw_ctx *hctx = data; in hctx_tags_show()
|   [all …]
|
| blk-mq.c |
|   72  static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending()
|   82  static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending()
|   91  static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending()
|   104 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight()
|   226 struct blk_mq_hw_ctx *hctx; in blk_mq_quiesce_queue()
|   261 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters()
|   496 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request()
|   515 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request()
|   708 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) in hctx_unlock()
|   717 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) in hctx_lock()
|   [all …]
|
| blk-mq-tag.h |
|   45  extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
|   58  struct blk_mq_hw_ctx *hctx) in bt_wait_ptr()
|   71  extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
|   72  extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
|   74  static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy()
|   82  static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_idle()
|
| blk-mq-sched.h |
|   16  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
|   17  void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
|   21  void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
|   25  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
|   70  static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work()
|   80  static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart()
|
| kyber-iosched.c |
|   461 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx()
|   517 static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_exit_hctx()
|   569 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); in kyber_bio_merge()
|   588 static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx, in kyber_insert_requests()
|   695 struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private); in kyber_domain_wake()
|   705 struct blk_mq_hw_ctx *hctx) in kyber_get_domain_token()
|   753 struct blk_mq_hw_ctx *hctx) in kyber_dispatch_cur_domain()
|   800 static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx) in kyber_dispatch_request()
|   846 static bool kyber_has_work(struct blk_mq_hw_ctx *hctx) in kyber_has_work()
|   910 struct blk_mq_hw_ctx *hctx = m->private; \
|   [all …]
|
| blk-mq-tag.c |
|   24  bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy()
|   56  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle()
|   196 struct blk_mq_hw_ctx *hctx;
|   219 struct blk_mq_hw_ctx *hctx = iter_data->hctx; in bt_iter()
|   255 static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt, in bt_for_each()
|   439 struct blk_mq_hw_ctx *hctx; in blk_mq_queue_tag_busy_iter()
|   566 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, in blk_mq_tag_update_depth()
|
| mq-deadline-main.c |
|   493 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) in dd_dispatch_request()
|   547 static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) in dd_depth_updated()
|   559 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in dd_init_hctx()
|   711 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in dd_insert_request()
|   773 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, in dd_insert_requests()
|   842 static bool dd_has_work(struct blk_mq_hw_ctx *hctx) in dd_has_work()
|
| blk-flush.c |
|   355 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io()
|
| bsg-lib.c |
|   264 static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx, in bsg_queue_rq()
|
| bfq-iosched.c |
|   4667 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) in bfq_has_work()
|   4679 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in __bfq_dispatch_request()
|   4819 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in bfq_dispatch_request()
|   5512 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in bfq_insert_request()
|   5568 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, in bfq_insert_requests()
|   6377 static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) in bfq_depth_updated()
|   6387 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) in bfq_init_hctx()
|
| /OK3568_Linux_fs/kernel/include/linux/ |
| blk-mq.h |
|   16  struct blk_mq_hw_ctx { struct
|   278 typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
|   290 blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
|   300 void (*commit_rqs)(struct blk_mq_hw_ctx *);
|   323 int (*poll)(struct blk_mq_hw_ctx *);
|   335 int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
|   339 void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
|   508 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
|   509 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
|   512 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
|   [all …]
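The include/linux/blk-mq.h hits are the driver-facing contract: a driver fills in a struct blk_mq_ops whose callbacks (queue_rq, commit_rqs, poll, init_hctx, exit_hctx, …) all receive the struct blk_mq_hw_ctx being serviced. Below is a minimal, hedged sketch of that wiring; the foo_ names and the per-queue structure are illustrative, not code from this tree, and the request is completed immediately instead of touching real hardware.

```c
#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Hypothetical per-hardware-queue driver state. */
struct foo_hw_queue {
	int index;
};

/* .queue_rq: called once per request, on the hctx the request mapped to. */
static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* A real driver would program the hardware here; this sketch just
	 * completes the request immediately. */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

/* .init_hctx / .exit_hctx: attach and release per-hctx driver state. */
static int foo_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			 unsigned int hctx_idx)
{
	struct foo_hw_queue *hwq = kzalloc(sizeof(*hwq), GFP_KERNEL);

	if (!hwq)
		return -ENOMEM;
	hwq->index = hctx_idx;
	hctx->driver_data = hwq;
	return 0;
}

static void foo_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	kfree(hctx->driver_data);
}

static const struct blk_mq_ops foo_mq_ops = {
	.queue_rq  = foo_queue_rq,
	.init_hctx = foo_init_hctx,
	.exit_hctx = foo_exit_hctx,
};
```

In a real driver this ops table is referenced from a struct blk_mq_tag_set (which also fixes nr_hw_queues, queue_depth and cmd_size) and registered with blk_mq_alloc_tag_set() followed by blk_mq_init_queue().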
|
| elevator.h |
|   28  struct blk_mq_hw_ctx;
|   33  int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
|   34  void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
|   35  void (*depth_updated)(struct blk_mq_hw_ctx *);
|   45  void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
|   46  struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
|   47  bool (*has_work)(struct blk_mq_hw_ctx *);
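elevator.h declares the I/O-scheduler side of the same hctx plumbing: mq-deadline, kyber and bfq above each implement these elevator_mq_ops hooks. The fragment below is a hedged sketch of the hctx-facing half only, a trivial per-hctx FIFO kept in hctx->sched_data; the foo_ names are hypothetical and this is not one of the in-tree schedulers.

```c
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical per-hctx scheduler state: a plain FIFO of requests. */
struct foo_hctx_data {
	spinlock_t lock;
	struct list_head rq_list;
};

static int foo_sched_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct foo_hctx_data *fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd)
		return -ENOMEM;
	spin_lock_init(&fd->lock);
	INIT_LIST_HEAD(&fd->rq_list);
	hctx->sched_data = fd;
	return 0;
}

static void foo_sched_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	kfree(hctx->sched_data);
}

/* Take ownership of the requests blk-mq hands us for this hctx. */
static void foo_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				      struct list_head *list, bool at_head)
{
	struct foo_hctx_data *fd = hctx->sched_data;

	spin_lock(&fd->lock);
	if (at_head)
		list_splice_init(list, &fd->rq_list);
	else
		list_splice_tail_init(list, &fd->rq_list);
	spin_unlock(&fd->lock);
}

/* Hand back one request per call; NULL tells blk-mq we are empty. */
static struct request *foo_sched_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct foo_hctx_data *fd = hctx->sched_data;
	struct request *rq = NULL;

	spin_lock(&fd->lock);
	if (!list_empty(&fd->rq_list)) {
		rq = list_first_entry(&fd->rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
	}
	spin_unlock(&fd->lock);
	return rq;
}

static bool foo_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct foo_hctx_data *fd = hctx->sched_data;

	return !list_empty_careful(&fd->rq_list);
}
```

mq-deadline, kyber and bfq wire callbacks like these into struct elevator_type's .ops table (together with .init_sched/.exit_sched and the merge hooks, which this sketch omits) and register the scheduler with elv_register().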
|
| /OK3568_Linux_fs/kernel/drivers/s390/block/ |
| scm_blk.c |
|   283 static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx, in scm_blk_request()
|   332 static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scm_blk_init_hctx()
|   346 static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) in scm_blk_exit_hctx()
|
| /OK3568_Linux_fs/kernel/drivers/block/rnbd/ |
| rnbd-clt.h |
|   99  struct blk_mq_hw_ctx *hctx;
|
| rnbd-clt.c |
|   1106 struct blk_mq_hw_ctx *hctx, in rnbd_clt_dev_kick_mq_queue()
|   1121 static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx, in rnbd_queue_rq()
|   1252 struct blk_mq_hw_ctx *hctx) in rnbd_init_hw_queue()
|   1262 struct blk_mq_hw_ctx *hctx; in rnbd_init_mq_hw_queues()
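The rnbd_clt_dev_kick_mq_queue/rnbd_queue_rq pair above illustrates a common .queue_rq pattern: when the transport is out of resources, the driver returns BLK_STS_RESOURCE and arranges for the hardware queue to be re-run a little later. Below is a hedged sketch of that pattern with hypothetical foo_ helpers rather than rnbd's real ones.

```c
#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct foo_dev;						/* hypothetical device state */
bool foo_reserve_slot(struct foo_dev *dev);		/* hypothetical: grab a transport slot */
void foo_submit(struct foo_dev *dev, struct request *rq); /* hypothetical: hand rq to the transport */

static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct foo_dev *dev = hctx->queue->queuedata;

	if (!foo_reserve_slot(dev)) {
		/* Out of transport resources: have blk-mq re-run this
		 * hardware queue in ~10 ms instead of busy-retrying. */
		blk_mq_delay_run_hw_queue(hctx, 10);
		return BLK_STS_RESOURCE;
	}

	blk_mq_start_request(bd->rq);
	foo_submit(dev, bd->rq);
	return BLK_STS_OK;
}
```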
|
| /OK3568_Linux_fs/kernel/drivers/nvme/target/ |
| loop.c |
|   131 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_loop_queue_rq()
|   214 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_hctx()
|   226 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_admin_hctx()
|
| /OK3568_Linux_fs/kernel/drivers/block/ |
| z2ram.c |
|   69  static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx, in z2_queue_rq()
|
| virtio_blk.c |
|   203 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) in virtio_commit_rqs()
|   217 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, in virtio_queue_rq()
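virtio_blk pairs .queue_rq with .commit_rqs, which is the batching contract seen at lines 203/217: requests queued with bd->last == false may be held back, and commit_rqs is the fallback kick when a batch ends without a final bd->last request (for example because a later queue_rq failed). A rough sketch of that contract, assuming a hypothetical per-hctx ring with foo_ring_add()/foo_ring_kick() helpers:

```c
#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct foo_ring;					/* hypothetical per-hctx ring */
void foo_ring_add(struct foo_ring *ring, struct request *rq);
void foo_ring_kick(struct foo_ring *ring);		/* notify the device */

static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct foo_ring *ring = hctx->driver_data;

	blk_mq_start_request(bd->rq);
	foo_ring_add(ring, bd->rq);

	/* Only notify the device at the end of the batch. */
	if (bd->last)
		foo_ring_kick(ring);
	return BLK_STS_OK;
}

static void foo_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	/* blk-mq calls this when requests were queued with bd->last == false
	 * but no final request followed, so the batch still needs a kick. */
	foo_ring_kick(hctx->driver_data);
}
```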
|
| /OK3568_Linux_fs/kernel/drivers/block/paride/ |
| pcd.c |
|   189 static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
|   816 static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx, in pcd_queue_rq()
|
| pf.c |
|   209 static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
|   872 static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx, in pf_queue_rq()
|
| /OK3568_Linux_fs/kernel/drivers/nvme/host/ |
| tcp.c |
|   478 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_tcp_init_hctx()
|   488 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_tcp_init_admin_hctx()
|   2377 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx) in nvme_tcp_commit_rqs()
|   2385 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_tcp_queue_rq()
|   2454 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx) in nvme_tcp_poll()
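nvme_tcp_poll above implements the .poll hook from blk_mq_ops: for queues mapped as HCTX_TYPE_POLL, the block layer's polling path calls it to reap completions without waiting for interrupts, and the hook returns how many requests it completed. A hedged sketch with a hypothetical foo_reap_completions() helper:

```c
#include <linux/blk-mq.h>

struct foo_queue;				/* hypothetical per-hctx queue state */
int foo_reap_completions(struct foo_queue *q);	/* hypothetical: returns # completed */

static int foo_poll(struct blk_mq_hw_ctx *hctx)
{
	struct foo_queue *q = hctx->driver_data;

	/* Return the number of completions found so the polling loop
	 * knows whether spinning made progress. */
	return foo_reap_completions(q);
}
```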
|