Lines matching refs: khd (struct kyber_hctx_data in block/kyber-iosched.c)

464 struct kyber_hctx_data *khd; in kyber_init_hctx() local
467 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
468 if (!khd) in kyber_init_hctx()
471 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
474 if (!khd->kcqs) in kyber_init_hctx()
478 kyber_ctx_queue_init(&khd->kcqs[i]); in kyber_init_hctx()
481 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx, in kyber_init_hctx()
484 sbitmap_free(&khd->kcq_map[i]); in kyber_init_hctx()
489 spin_lock_init(&khd->lock); in kyber_init_hctx()
492 INIT_LIST_HEAD(&khd->rqs[i]); in kyber_init_hctx()
493 khd->domain_wait[i].sbq = NULL; in kyber_init_hctx()
494 init_waitqueue_func_entry(&khd->domain_wait[i].wait, in kyber_init_hctx()
496 khd->domain_wait[i].wait.private = hctx; in kyber_init_hctx()
497 INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry); in kyber_init_hctx()
498 atomic_set(&khd->wait_index[i], 0); in kyber_init_hctx()
501 khd->cur_domain = 0; in kyber_init_hctx()
502 khd->batching = 0; in kyber_init_hctx()
504 hctx->sched_data = khd; in kyber_init_hctx()
511 kfree(khd->kcqs); in kyber_init_hctx()
513 kfree(khd); in kyber_init_hctx()
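
The refs at lines 464-513 trace kyber_init_hctx()'s allocate-and-unwind sequence: the per-hctx struct, then the per-ctx queue array, then per-domain state, with the error path freeing everything in reverse order. Below is a minimal userspace sketch of the same pattern; malloc()/calloc() stand in for kmalloc_node()/kmalloc_array_node(), so NUMA placement is not modeled, and the names (hctx_data, ctx_queue, NR_CTX) are illustrative rather than the kernel's. The teardown mirrors the kyber_exit_hctx() refs that follow.

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CTX 4

    struct ctx_queue { int nr_queued; };

    struct hctx_data {
        struct ctx_queue *kcqs;    /* one queue per software context */
    };

    static struct hctx_data *hctx_data_init(void)
    {
        struct hctx_data *khd = malloc(sizeof(*khd));

        if (!khd)
            return NULL;
        khd->kcqs = calloc(NR_CTX, sizeof(*khd->kcqs));
        if (!khd->kcqs)
            goto err_khd;    /* unwind in reverse allocation order */
        return khd;

    err_khd:
        free(khd);
        return NULL;
    }

    static void hctx_data_exit(struct hctx_data *khd)
    {
        free(khd->kcqs);    /* mirrors kyber_exit_hctx() */
        free(khd);
    }

    int main(void)
    {
        struct hctx_data *khd = hctx_data_init();

        if (!khd)
            return 1;
        printf("allocated %d per-ctx queues\n", NR_CTX);
        hctx_data_exit(khd);
        return 0;
    }
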
519 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_exit_hctx() local
523 sbitmap_free(&khd->kcq_map[i]); in kyber_exit_hctx()
524 kfree(khd->kcqs); in kyber_exit_hctx()
570 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_bio_merge() local
571 struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]]; in kyber_bio_merge()
591 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_insert_requests() local
596 struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]]; in kyber_insert_requests()
604 sbitmap_set_bit(&khd->kcq_map[sched_domain], in kyber_insert_requests()
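
The insert path at lines 591-604 queues a request on the kcq for its software context and sets the matching bit in kcq_map, so dispatch can later find non-empty queues without scanning every ctx. A sketch of that idea, with a plain unsigned long standing in for the kernel's sbitmap and counters for the request lists (all names illustrative):

    #include <stdio.h>

    #define NR_CTX 4

    static unsigned long kcq_map;    /* bit n set => ctx n has requests */
    static int nr_queued[NR_CTX];

    static void insert_request(int ctx)
    {
        nr_queued[ctx]++;
        kcq_map |= 1UL << ctx;    /* sbitmap_set_bit() in the kernel */
    }

    int main(void)
    {
        insert_request(1);
        insert_request(3);
        for (int ctx = 0; ctx < NR_CTX; ctx++)
            if (kcq_map & (1UL << ctx))
                printf("ctx %d busy, %d queued\n", ctx, nr_queued[ctx]);
        return 0;
    }
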
659 struct kyber_hctx_data *khd; member
667 struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr]; in flush_busy_kcq()
678 static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd, in kyber_flush_busy_kcqs() argument
683 .khd = khd, in kyber_flush_busy_kcqs()
688 sbitmap_for_each_set(&khd->kcq_map[sched_domain], in kyber_flush_busy_kcqs()
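
kyber_flush_busy_kcqs() (lines 678-688) is the consumer side: it walks only the set bits of kcq_map, splices each busy kcq's requests onto the domain's list, and clears the bit once a queue is drained. A simplified stand-in, with a counter per ctx in place of the spliced lists and __builtin_ctzl() (GCC/Clang) doing the set-bit walk:

    #include <stdio.h>

    #define NR_CTX 4

    static unsigned long kcq_map = 0xa;    /* ctxs 1 and 3 are busy */
    static int nr_queued[NR_CTX] = { 0, 2, 0, 5 };

    static int flush_busy_kcqs(void)
    {
        int flushed = 0;
        unsigned long map = kcq_map;

        while (map) {
            int ctx = __builtin_ctzl(map);    /* lowest set bit */

            flushed += nr_queued[ctx];    /* list_splice_tail() in the kernel */
            nr_queued[ctx] = 0;
            kcq_map &= ~(1UL << ctx);    /* queue drained: clear its bit */
            map &= map - 1;
        }
        return flushed;
    }

    int main(void)
    {
        printf("flushed %d requests\n", flush_busy_kcqs());
        return 0;
    }
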
704 struct kyber_hctx_data *khd, in kyber_get_domain_token() argument
707 unsigned int sched_domain = khd->cur_domain; in kyber_get_domain_token()
709 struct sbq_wait *wait = &khd->domain_wait[sched_domain]; in kyber_get_domain_token()
722 &khd->wait_index[sched_domain]); in kyber_get_domain_token()
723 khd->domain_ws[sched_domain] = ws; in kyber_get_domain_token()
741 ws = khd->domain_ws[sched_domain]; in kyber_get_domain_token()
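
kyber_get_domain_token() (lines 704-741) has to close a race: a token freed between a failed allocation attempt and the hctx adding itself to the wait queue would produce no wakeup, leaving the hctx idle with work pending. Hence the second allocation attempt after becoming a waiter. A control-flow sketch of that retry, with a plain counter for the token pool (the kernel's is a sbitmap_queue) and the racing free simulated inline:

    #include <stdio.h>
    #include <stdbool.h>

    static int free_tokens;    /* stand-in for the domain's sbitmap_queue */
    static bool on_waitqueue;

    static int try_get_token(void)
    {
        if (free_tokens > 0)
            return --free_tokens;    /* token number on success */
        return -1;
    }

    static int get_domain_token(void)
    {
        int nr = try_get_token();

        if (nr >= 0)
            return nr;
        if (!on_waitqueue) {
            on_waitqueue = true;    /* sbitmap_add_wait() in the kernel */
            /*
             * A token freed just before we joined the wait queue sent
             * no wakeup our way, so retry once before going to sleep.
             */
            free_tokens = 1;    /* simulate that racing free */
            nr = try_get_token();
        }
        return nr;
    }

    int main(void)
    {
        printf("got token? %s\n", get_domain_token() >= 0 ? "yes" : "no");
        return 0;
    }
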
752 struct kyber_hctx_data *khd, in kyber_dispatch_cur_domain() argument
759 rqs = &khd->rqs[khd->cur_domain]; in kyber_dispatch_cur_domain()
771 nr = kyber_get_domain_token(kqd, khd, hctx); in kyber_dispatch_cur_domain()
773 khd->batching++; in kyber_dispatch_cur_domain()
779 kyber_domain_names[khd->cur_domain]); in kyber_dispatch_cur_domain()
781 } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) { in kyber_dispatch_cur_domain()
782 nr = kyber_get_domain_token(kqd, khd, hctx); in kyber_dispatch_cur_domain()
784 kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs); in kyber_dispatch_cur_domain()
786 khd->batching++; in kyber_dispatch_cur_domain()
792 kyber_domain_names[khd->cur_domain]); in kyber_dispatch_cur_domain()
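
kyber_dispatch_cur_domain() (lines 752-792) does a two-level lookup: serve from the already-flushed rqs list when it has entries, otherwise check kcq_map and flush the busy per-ctx queues into it; either way a domain token must be held before a request leaves, and the request stays queued if none is available. A sketch with counters for the list and the token pool (names illustrative; the real code takes the token before flushing):

    #include <stdio.h>

    static int rqs;                        /* staged requests (khd->rqs) */
    static unsigned long kcq_map = 0x4;    /* per-ctx queues with work */
    static int tokens = 2;                 /* domain token pool */

    static int dispatch_cur_domain(void)
    {
        if (!rqs && kcq_map) {
            rqs += 3;    /* kyber_flush_busy_kcqs() in the kernel */
            kcq_map = 0;
        }
        if (!rqs)
            return -1;    /* no work in this domain */
        if (tokens <= 0)
            return -1;    /* out of tokens: leave it queued, retry later */
        tokens--;
        rqs--;
        return 0;    /* one request dispatched */
    }

    int main(void)
    {
        while (dispatch_cur_domain() == 0)
            printf("dispatched\n");
        printf("%d staged, %d tokens left\n", rqs, tokens);
        return 0;
    }
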
803 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_dispatch_request() local
807 spin_lock(&khd->lock); in kyber_dispatch_request()
813 if (khd->batching < kyber_batch_size[khd->cur_domain]) { in kyber_dispatch_request()
814 rq = kyber_dispatch_cur_domain(kqd, khd, hctx); in kyber_dispatch_request()
828 khd->batching = 0; in kyber_dispatch_request()
830 if (khd->cur_domain == KYBER_NUM_DOMAINS - 1) in kyber_dispatch_request()
831 khd->cur_domain = 0; in kyber_dispatch_request()
833 khd->cur_domain++; in kyber_dispatch_request()
835 rq = kyber_dispatch_cur_domain(kqd, khd, hctx); in kyber_dispatch_request()
842 spin_unlock(&khd->lock); in kyber_dispatch_request()
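
kyber_dispatch_request() (lines 803-842) serves up to a per-domain batch from cur_domain, then resets the batch counter and rotates through the other domains round-robin so no domain is starved behind a busy one. A sketch of that loop; the domain split and batch sizes mirror Kyber's read/write/discard/other defaults, but the queues are plain counters:

    #include <stdio.h>

    #define NUM_DOMAINS 4

    static const char *domain_names[NUM_DOMAINS] = {
        "READ", "WRITE", "DISCARD", "OTHER"
    };
    static const unsigned batch_size[NUM_DOMAINS] = { 16, 8, 1, 1 };

    static unsigned queued[NUM_DOMAINS] = { 40, 3, 0, 1 };
    static unsigned cur_domain, batching;

    static int dispatch_one(void)
    {
        /* Finish the current batch first. */
        if (batching < batch_size[cur_domain] && queued[cur_domain]) {
            batching++;
            queued[cur_domain]--;
            return cur_domain;
        }

        /* Batch done (or domain empty): rotate through the rest. */
        for (int i = 0; i < NUM_DOMAINS; i++) {
            batching = 0;
            cur_domain = (cur_domain == NUM_DOMAINS - 1) ? 0 : cur_domain + 1;
            if (queued[cur_domain]) {
                batching++;
                queued[cur_domain]--;
                return cur_domain;
            }
        }
        return -1;    /* nothing left anywhere */
    }

    int main(void)
    {
        int d;

        while ((d = dispatch_one()) >= 0)
            printf("dispatched from %s\n", domain_names[d]);
        return 0;
    }
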
848 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_has_work() local
852 if (!list_empty_careful(&khd->rqs[i]) || in kyber_has_work()
853 sbitmap_any_bit_set(&khd->kcq_map[i])) in kyber_has_work()
908 __acquires(&khd->lock) \
911 struct kyber_hctx_data *khd = hctx->sched_data; \
913 spin_lock(&khd->lock); \
914 return seq_list_start(&khd->rqs[domain], *pos); \
921 struct kyber_hctx_data *khd = hctx->sched_data; \
923 return seq_list_next(v, &khd->rqs[domain], pos); \
927 __releases(&khd->lock) \
930 struct kyber_hctx_data *khd = hctx->sched_data; \
932 spin_unlock(&khd->lock); \
945 struct kyber_hctx_data *khd = hctx->sched_data; \
946 wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
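
The debugfs refs at lines 908-946 come from macros that stamp out identical seq_file start/next/stop handlers and a wait-queue printer once per scheduling domain, with the domain index baked into each generated function. The same token-pasting technique in a self-contained form, generating one trivial show function per domain (names illustrative, seq_file replaced by printf):

    #include <stdio.h>

    static unsigned queued[4] = { 7, 2, 0, 1 };

    #define DEFINE_DOMAIN_SHOW(name, domain)               \
    static void name##_show(void)                          \
    {                                                      \
        printf(#name ": %u queued\n", queued[domain]);     \
    }

    DEFINE_DOMAIN_SHOW(read, 0)
    DEFINE_DOMAIN_SHOW(write, 1)
    DEFINE_DOMAIN_SHOW(discard, 2)
    DEFINE_DOMAIN_SHOW(other, 3)

    int main(void)
    {
        read_show();
        write_show();
        discard_show();
        other_show();
        return 0;
    }
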
969 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_cur_domain_show() local
971 seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]); in kyber_cur_domain_show()
978 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_batching_show() local
980 seq_printf(m, "%u\n", khd->batching); in kyber_batching_show()