Lines matching references to `phba` in the lpfc NVMe target code (drivers/scsi/lpfc/lpfc_nvmet.c). The leading number on each line is the source line in that file; trailing "argument" and "local" tags are the cross-referencer's annotations.
217 lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri) in lpfc_nvmet_get_ctx_for_xri() argument
223 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); in lpfc_nvmet_get_ctx_for_xri()
224 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) { in lpfc_nvmet_get_ctx_for_xri()
231 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); in lpfc_nvmet_get_ctx_for_xri()
239 lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid) in lpfc_nvmet_get_ctx_for_oxid() argument
245 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); in lpfc_nvmet_get_ctx_for_oxid()
246 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) { in lpfc_nvmet_get_ctx_for_oxid()
253 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); in lpfc_nvmet_get_ctx_for_oxid()
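The two lookup helpers above, lpfc_nvmet_get_ctx_for_xri() and lpfc_nvmet_get_ctx_for_oxid(), share one pattern: walk t_active_ctx_list under the IRQ-saving t_active_list_lock and stop at the first match. A minimal sketch of the OXID/SID variant, using the lock and list names from the listing; the context type name (lpfc_async_xchg_ctx) and its oxid/sid/list members are assumptions drawn from the lpfc driver, since the match lines themselves do not appear here:

```c
/* Sketch, assuming <linux/list.h> and <linux/spinlock.h>; the ctx type
 * name is an assumption, the lock/list fields are from the listing. */
static struct lpfc_async_xchg_ctx *
find_ctx_by_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_async_xchg_ctx *ctxp, *found = NULL;
	unsigned long iflag;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid == oxid && ctxp->sid == sid) {
			found = ctxp;	/* first exchange matching the pair */
			break;
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	return found;
}
```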
262 lpfc_nvmet_defer_release(struct lpfc_hba *phba, in lpfc_nvmet_defer_release() argument
267 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_defer_release()
275 spin_lock(&phba->sli4_hba.t_active_list_lock); in lpfc_nvmet_defer_release()
277 spin_unlock(&phba->sli4_hba.t_active_list_lock); in lpfc_nvmet_defer_release()
278 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_defer_release()
279 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); in lpfc_nvmet_defer_release()
280 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_defer_release()
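lpfc_nvmet_defer_release() (lines 262-280 above) migrates a context from the active list to the aborted-buffer list, taking the two locks in sequence rather than nested. A sketch of that move, with lock and list names from the listing; the plain spin_lock form assumes, as in the driver, that the caller already runs with interrupts disabled:

```c
/* Sketch of the deferred-release move: off the active list, onto the
 * aborted-context list. Flag updates and logging are elided. */
static void defer_release_sketch(struct lpfc_hba *phba,
				 struct lpfc_async_xchg_ctx *ctxp)
{
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del_init(&ctxp->list);		/* leave the active list */
	spin_unlock(&phba->sli4_hba.t_active_list_lock);

	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}
```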
295 __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, in __lpfc_nvme_xmt_ls_rsp_cmp() argument
306 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in __lpfc_nvme_xmt_ls_rsp_cmp()
312 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n", in __lpfc_nvme_xmt_ls_rsp_cmp()
315 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, in __lpfc_nvme_xmt_ls_rsp_cmp()
322 lpfc_sli_release_iocbq(phba, cmdwqe); in __lpfc_nvme_xmt_ls_rsp_cmp()
324 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, in __lpfc_nvme_xmt_ls_rsp_cmp()
342 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, in lpfc_nvmet_xmt_ls_rsp_cmp() argument
348 if (!phba->targetport) in lpfc_nvmet_xmt_ls_rsp_cmp()
354 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_xmt_ls_rsp_cmp()
368 __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe); in lpfc_nvmet_xmt_ls_rsp_cmp()
385 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) in lpfc_nvmet_ctxbuf_post() argument
398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_ctxbuf_post()
412 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, in lpfc_nvmet_ctxbuf_post()
417 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); in lpfc_nvmet_ctxbuf_post()
425 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); in lpfc_nvmet_ctxbuf_post()
426 if (phba->sli4_hba.nvmet_io_wait_cnt) { in lpfc_nvmet_ctxbuf_post()
427 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list, in lpfc_nvmet_ctxbuf_post()
430 phba->sli4_hba.nvmet_io_wait_cnt--; in lpfc_nvmet_ctxbuf_post()
431 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, in lpfc_nvmet_ctxbuf_post()
436 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_ctxbuf_post()
443 ctxp->phba = phba; in lpfc_nvmet_ctxbuf_post()
475 if (!queue_work(phba->wq, &ctx_buf->defer_work)) { in lpfc_nvmet_ctxbuf_post()
477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_ctxbuf_post()
487 lpfc_nvmet_defer_release(phba, ctxp); in lpfc_nvmet_ctxbuf_post()
489 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); in lpfc_nvmet_ctxbuf_post()
493 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); in lpfc_nvmet_ctxbuf_post()
499 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); in lpfc_nvmet_ctxbuf_post()
501 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); in lpfc_nvmet_ctxbuf_post()
503 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx); in lpfc_nvmet_ctxbuf_post()
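lpfc_nvmet_ctxbuf_post() (lines 385-503) decides whether a freed context buffer returns to a per-CPU free list or is recycled on the spot to service a command parked on the io-wait list. A sketch of that decision, assuming the lpfc list_remove_head() helper visible at line 427 and the rqb_dmabuf type used elsewhere in the driver; the elided legs are marked:

```c
/* Sketch of the repost decision: a waiting command wins over the pool. */
static void repost_ctxbuf_sketch(struct lpfc_hba *phba,
				 struct lpfc_nvmet_ctxbuf *ctx_buf)
{
	struct rqb_dmabuf *nvmebuf = NULL;
	unsigned long iflag;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		/* A received command is waiting for a context: take it. */
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf, hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);
		/* ... rebind ctx_buf to nvmebuf and queue_work(phba->wq,
		 *     &ctx_buf->defer_work), as in the listing ... */
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	/* Otherwise hand the buffer back to this CPU's free list via
	 * lpfc_get_ctx_list(phba, cpu, ctxp->idx). */
}
```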
513 lpfc_nvmet_ktime(struct lpfc_hba *phba, in lpfc_nvmet_ktime() argument
640 phba->ktime_seg1_total += seg1; in lpfc_nvmet_ktime()
641 if (seg1 < phba->ktime_seg1_min) in lpfc_nvmet_ktime()
642 phba->ktime_seg1_min = seg1; in lpfc_nvmet_ktime()
643 else if (seg1 > phba->ktime_seg1_max) in lpfc_nvmet_ktime()
644 phba->ktime_seg1_max = seg1; in lpfc_nvmet_ktime()
646 phba->ktime_seg2_total += seg2; in lpfc_nvmet_ktime()
647 if (seg2 < phba->ktime_seg2_min) in lpfc_nvmet_ktime()
648 phba->ktime_seg2_min = seg2; in lpfc_nvmet_ktime()
649 else if (seg2 > phba->ktime_seg2_max) in lpfc_nvmet_ktime()
650 phba->ktime_seg2_max = seg2; in lpfc_nvmet_ktime()
652 phba->ktime_seg3_total += seg3; in lpfc_nvmet_ktime()
653 if (seg3 < phba->ktime_seg3_min) in lpfc_nvmet_ktime()
654 phba->ktime_seg3_min = seg3; in lpfc_nvmet_ktime()
655 else if (seg3 > phba->ktime_seg3_max) in lpfc_nvmet_ktime()
656 phba->ktime_seg3_max = seg3; in lpfc_nvmet_ktime()
658 phba->ktime_seg4_total += seg4; in lpfc_nvmet_ktime()
659 if (seg4 < phba->ktime_seg4_min) in lpfc_nvmet_ktime()
660 phba->ktime_seg4_min = seg4; in lpfc_nvmet_ktime()
661 else if (seg4 > phba->ktime_seg4_max) in lpfc_nvmet_ktime()
662 phba->ktime_seg4_max = seg4; in lpfc_nvmet_ktime()
664 phba->ktime_seg5_total += seg5; in lpfc_nvmet_ktime()
665 if (seg5 < phba->ktime_seg5_min) in lpfc_nvmet_ktime()
666 phba->ktime_seg5_min = seg5; in lpfc_nvmet_ktime()
667 else if (seg5 > phba->ktime_seg5_max) in lpfc_nvmet_ktime()
668 phba->ktime_seg5_max = seg5; in lpfc_nvmet_ktime()
670 phba->ktime_data_samples++; in lpfc_nvmet_ktime()
674 phba->ktime_seg6_total += seg6; in lpfc_nvmet_ktime()
675 if (seg6 < phba->ktime_seg6_min) in lpfc_nvmet_ktime()
676 phba->ktime_seg6_min = seg6; in lpfc_nvmet_ktime()
677 else if (seg6 > phba->ktime_seg6_max) in lpfc_nvmet_ktime()
678 phba->ktime_seg6_max = seg6; in lpfc_nvmet_ktime()
680 phba->ktime_seg7_total += seg7; in lpfc_nvmet_ktime()
681 if (seg7 < phba->ktime_seg7_min) in lpfc_nvmet_ktime()
682 phba->ktime_seg7_min = seg7; in lpfc_nvmet_ktime()
683 else if (seg7 > phba->ktime_seg7_max) in lpfc_nvmet_ktime()
684 phba->ktime_seg7_max = seg7; in lpfc_nvmet_ktime()
686 phba->ktime_seg8_total += seg8; in lpfc_nvmet_ktime()
687 if (seg8 < phba->ktime_seg8_min) in lpfc_nvmet_ktime()
688 phba->ktime_seg8_min = seg8; in lpfc_nvmet_ktime()
689 else if (seg8 > phba->ktime_seg8_max) in lpfc_nvmet_ktime()
690 phba->ktime_seg8_max = seg8; in lpfc_nvmet_ktime()
692 phba->ktime_seg9_total += seg9; in lpfc_nvmet_ktime()
693 if (seg9 < phba->ktime_seg9_min) in lpfc_nvmet_ktime()
694 phba->ktime_seg9_min = seg9; in lpfc_nvmet_ktime()
695 else if (seg9 > phba->ktime_seg9_max) in lpfc_nvmet_ktime()
696 phba->ktime_seg9_max = seg9; in lpfc_nvmet_ktime()
698 phba->ktime_seg10_total += seg10; in lpfc_nvmet_ktime()
699 if (seg10 < phba->ktime_seg10_min) in lpfc_nvmet_ktime()
700 phba->ktime_seg10_min = seg10; in lpfc_nvmet_ktime()
701 else if (seg10 > phba->ktime_seg10_max) in lpfc_nvmet_ktime()
702 phba->ktime_seg10_max = seg10; in lpfc_nvmet_ktime()
703 phba->ktime_status_samples++; in lpfc_nvmet_ktime()
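Lines 640-702 repeat one accumulation step, running total, running min, running max, for each of ten latency segments, written out longhand per segment. A hypothetical helper capturing the identical logic once (not in the driver, shown only to make the pattern explicit):

```c
/* One accumulation step per latency sample; mirrors the longhand blocks
 * above, including the else-if that skips the max check after a new min. */
static inline void ktime_seg_update(u64 *total, u64 *min, u64 *max, u64 seg)
{
	*total += seg;
	if (seg < *min)
		*min = seg;
	else if (seg > *max)
		*max = seg;
}

/* Usage, e.g. for segment 1:
 *   ktime_seg_update(&phba->ktime_seg1_total,
 *                    &phba->ktime_seg1_min,
 *                    &phba->ktime_seg1_max, seg1);
 */
```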
718 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, in lpfc_nvmet_xmt_fcp_op_cmp() argument
738 if (phba->targetport) in lpfc_nvmet_xmt_fcp_op_cmp()
739 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_xmt_fcp_op_cmp()
743 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", in lpfc_nvmet_xmt_fcp_op_cmp()
768 lpfc_printf_log(phba, KERN_INFO, logerr, in lpfc_nvmet_xmt_fcp_op_cmp()
816 lpfc_nvmet_ktime(phba, ctxp); in lpfc_nvmet_xmt_fcp_op_cmp()
833 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { in lpfc_nvmet_xmt_fcp_op_cmp()
835 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); in lpfc_nvmet_xmt_fcp_op_cmp()
837 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, in lpfc_nvmet_xmt_fcp_op_cmp()
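The completion path above also bumps a per-CPU counter when LPFC_CHECK_NVMET_IO accounting is enabled (lines 833-835); this_cpu_inc() is the standard kernel per-CPU increment, so no lock is needed:

```c
/* Lock-free per-CPU I/O completion accounting, as at line 835. */
if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO)
	this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
```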
864 void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba, in __lpfc_nvme_xmt_ls_rsp() argument
868 struct lpfc_hba *phba = axchg->phba; in __lpfc_nvme_xmt_ls_rsp() local
875 if (phba->pport->load_flag & FC_UNLOADING) in __lpfc_nvme_xmt_ls_rsp()
878 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, in __lpfc_nvme_xmt_ls_rsp()
882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in __lpfc_nvme_xmt_ls_rsp()
891 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma, in __lpfc_nvme_xmt_ls_rsp()
894 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in __lpfc_nvme_xmt_ls_rsp()
921 lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n", in __lpfc_nvme_xmt_ls_rsp()
924 rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq); in __lpfc_nvme_xmt_ls_rsp()
934 lpfc_in_buf_free(phba, &nvmebuf->dbuf); in __lpfc_nvme_xmt_ls_rsp()
938 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in __lpfc_nvme_xmt_ls_rsp()
948 lpfc_in_buf_free(phba, &nvmebuf->dbuf); in __lpfc_nvme_xmt_ls_rsp()
957 lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid); in __lpfc_nvme_xmt_ls_rsp()
988 if (axchg->phba->pport->load_flag & FC_UNLOADING) in lpfc_nvmet_xmt_ls_rsp()
1016 struct lpfc_hba *phba = ctxp->phba; in lpfc_nvmet_xmt_fcp_op() local
1026 if (phba->pport->load_flag & FC_UNLOADING) { in lpfc_nvmet_xmt_fcp_op()
1041 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid]; in lpfc_nvmet_xmt_fcp_op()
1043 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { in lpfc_nvmet_xmt_fcp_op()
1045 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); in lpfc_nvmet_xmt_fcp_op()
1047 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, in lpfc_nvmet_xmt_fcp_op()
1059 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_xmt_fcp_op()
1066 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); in lpfc_nvmet_xmt_fcp_op()
1069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_xmt_fcp_op()
1082 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", in lpfc_nvmet_xmt_fcp_op()
1086 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); in lpfc_nvmet_xmt_fcp_op()
1117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_xmt_fcp_op()
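lpfc_nvmet_xmt_fcp_op() (lines 1016-1117) selects the hardware queue from the transport's hwqid, builds a WQE, and posts it. A condensed sketch of the happy path, with the error and wq-full legs elided; note that lpfc_sli4_issue_wqe() returns the driver's WQE status rather than an errno:

```c
/* Sketch of the FCP op transmit path; types follow the nvmet-fc API. */
static int xmt_fcp_op_sketch(struct lpfc_async_xchg_ctx *ctxp,
			     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_iocbq *nvmewqeq;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;			/* port is being torn down */

	/* The transport names the hardware queue; remember it on the ctx. */
	ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (!nvmewqeq)
		return -ENOMEM;

	/* Post the WQE; WQE_BUSY would route to the wq-full path. */
	return lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
}
```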
1135 if (tport->phba->targetport) in lpfc_nvmet_targetport_delete()
1146 struct lpfc_hba *phba = ctxp->phba; in lpfc_nvmet_xmt_fcp_abort() local
1150 if (phba->pport->load_flag & FC_UNLOADING) in lpfc_nvmet_xmt_fcp_abort()
1154 ctxp->hdwq = &phba->sli4_hba.hdwq[0]; in lpfc_nvmet_xmt_fcp_abort()
1156 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_xmt_fcp_abort()
1160 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n", in lpfc_nvmet_xmt_fcp_abort()
1178 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, in lpfc_nvmet_xmt_fcp_abort()
1181 lpfc_nvmet_wqfull_flush(phba, wq, ctxp); in lpfc_nvmet_xmt_fcp_abort()
1191 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, in lpfc_nvmet_xmt_fcp_abort()
1194 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, in lpfc_nvmet_xmt_fcp_abort()
1205 struct lpfc_hba *phba = ctxp->phba; in lpfc_nvmet_xmt_fcp_release() local
1211 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, in lpfc_nvmet_xmt_fcp_release()
1217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_xmt_fcp_release()
1225 lpfc_nvmet_defer_release(phba, ctxp); in lpfc_nvmet_xmt_fcp_release()
1229 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid, in lpfc_nvmet_xmt_fcp_release()
1238 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); in lpfc_nvmet_xmt_fcp_release()
1249 struct lpfc_hba *phba = ctxp->phba; in lpfc_nvmet_defer_rcv() local
1253 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", in lpfc_nvmet_defer_rcv()
1257 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, in lpfc_nvmet_defer_rcv()
1264 tgtp = phba->targetport->private; in lpfc_nvmet_defer_rcv()
1269 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); in lpfc_nvmet_defer_rcv()
1286 lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, in lpfc_nvmet_ls_req_cmp() argument
1289 __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe); in lpfc_nvmet_ls_req_cmp()
1312 struct lpfc_hba *phba; in lpfc_nvmet_ls_req() local
1320 phba = lpfc_nvmet->phba; in lpfc_nvmet_ls_req()
1321 if (phba->pport->load_flag & FC_UNLOADING) in lpfc_nvmet_ls_req()
1330 ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq, in lpfc_nvmet_ls_req()
1352 struct lpfc_hba *phba; in lpfc_nvmet_ls_abort() local
1356 phba = lpfc_nvmet->phba; in lpfc_nvmet_ls_abort()
1357 if (phba->pport->load_flag & FC_UNLOADING) in lpfc_nvmet_ls_abort()
1362 ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq); in lpfc_nvmet_ls_abort()
1371 struct lpfc_hba *phba = NULL; in lpfc_nvmet_host_release() local
1374 phba = ndlp->phba; in lpfc_nvmet_host_release()
1375 if (!phba->targetport || !phba->targetport->private) in lpfc_nvmet_host_release()
1378 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, in lpfc_nvmet_host_release()
1381 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_host_release()
1389 struct lpfc_hba *phba; in lpfc_nvmet_discovery_event() local
1393 phba = tgtp->phba; in lpfc_nvmet_discovery_event()
1395 rc = lpfc_issue_els_rscn(phba->pport, 0); in lpfc_nvmet_discovery_event()
1396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_discovery_event()
1426 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba, in __lpfc_nvmet_clean_io_for_cpu() argument
1435 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in __lpfc_nvmet_clean_io_for_cpu()
1437 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in __lpfc_nvmet_clean_io_for_cpu()
1439 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); in __lpfc_nvmet_clean_io_for_cpu()
1443 spin_lock(&phba->sli4_hba.sgl_list_lock); in __lpfc_nvmet_clean_io_for_cpu()
1445 &phba->sli4_hba.lpfc_nvmet_sgl_list); in __lpfc_nvmet_clean_io_for_cpu()
1446 spin_unlock(&phba->sli4_hba.sgl_list_lock); in __lpfc_nvmet_clean_io_for_cpu()
1448 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); in __lpfc_nvmet_clean_io_for_cpu()
1455 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) in lpfc_nvmet_cleanup_io_context() argument
1461 infop = phba->sli4_hba.nvmet_ctx_info; in lpfc_nvmet_cleanup_io_context()
1466 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { in lpfc_nvmet_cleanup_io_context()
1468 infop = lpfc_get_ctx_list(phba, j, i); in lpfc_nvmet_cleanup_io_context()
1469 __lpfc_nvmet_clean_io_for_cpu(phba, infop); in lpfc_nvmet_cleanup_io_context()
1472 kfree(phba->sli4_hba.nvmet_ctx_info); in lpfc_nvmet_cleanup_io_context()
1473 phba->sli4_hba.nvmet_ctx_info = NULL; in lpfc_nvmet_cleanup_io_context()
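lpfc_nvmet_cleanup_io_context() (lines 1455-1473) tears down one context free-list per (CPU, MRQ) pair, so the walk covers the full cross product before the backing array is freed. A sketch, assuming the for_each_present_cpu iterator for the inner loop:

```c
/* Sketch of the teardown walk: every MRQ, every present CPU. */
static void cleanup_ctx_pools_sketch(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}
```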
1477 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) in lpfc_nvmet_setup_io_context() argument
1486 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, in lpfc_nvmet_setup_io_context()
1488 phba->sli4_hba.nvmet_xri_cnt); in lpfc_nvmet_setup_io_context()
1490 phba->sli4_hba.nvmet_ctx_info = kcalloc( in lpfc_nvmet_setup_io_context()
1491 phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, in lpfc_nvmet_setup_io_context()
1493 if (!phba->sli4_hba.nvmet_ctx_info) { in lpfc_nvmet_setup_io_context()
1494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_setup_io_context()
1520 for (j = 0; j < phba->cfg_nvmet_mrq; j++) { in lpfc_nvmet_setup_io_context()
1521 infop = lpfc_get_ctx_list(phba, i, j); in lpfc_nvmet_setup_io_context()
1533 for (j = 0; j < phba->cfg_nvmet_mrq; j++) { in lpfc_nvmet_setup_io_context()
1534 last_infop = lpfc_get_ctx_list(phba, in lpfc_nvmet_setup_io_context()
1537 for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) { in lpfc_nvmet_setup_io_context()
1538 infop = lpfc_get_ctx_list(phba, i, j); in lpfc_nvmet_setup_io_context()
1549 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { in lpfc_nvmet_setup_io_context()
1552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_setup_io_context()
1561 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_setup_io_context()
1569 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); in lpfc_nvmet_setup_io_context()
1573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_setup_io_context()
1585 spin_lock(&phba->sli4_hba.sgl_list_lock); in lpfc_nvmet_setup_io_context()
1586 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); in lpfc_nvmet_setup_io_context()
1587 spin_unlock(&phba->sli4_hba.sgl_list_lock); in lpfc_nvmet_setup_io_context()
1589 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); in lpfc_nvmet_setup_io_context()
1592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_setup_io_context()
1603 infop = lpfc_get_ctx_list(phba, cpu, idx); in lpfc_nvmet_setup_io_context()
1611 if (idx >= phba->cfg_nvmet_mrq) { in lpfc_nvmet_setup_io_context()
1623 for (j = 0; j < phba->cfg_nvmet_mrq; j++) { in lpfc_nvmet_setup_io_context()
1624 infop = lpfc_get_ctx_list(phba, i, j); in lpfc_nvmet_setup_io_context()
1625 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, in lpfc_nvmet_setup_io_context()
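Setup mirrors that teardown: line 1490 sizes the pool array at one slot per possible CPU per MRQ. A sketch of the allocation; the struct name and the row-major indexing assumed to sit behind lpfc_get_ctx_list() are taken from the lpfc headers, not from this listing:

```c
/* One pool descriptor per (possible CPU, MRQ) pair; see line 1490. */
phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
if (!phba->sli4_hba.nvmet_ctx_info)
	return -ENOMEM;

/* Assumed indexing behind lpfc_get_ctx_list(phba, cpu, mrq):
 *   phba->sli4_hba.nvmet_ctx_info + (cpu * phba->cfg_nvmet_mrq) + mrq
 */
```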
1636 lpfc_nvmet_create_targetport(struct lpfc_hba *phba) in lpfc_nvmet_create_targetport() argument
1638 struct lpfc_vport *vport = phba->pport; in lpfc_nvmet_create_targetport()
1643 if (phba->targetport) in lpfc_nvmet_create_targetport()
1646 error = lpfc_nvmet_setup_io_context(phba); in lpfc_nvmet_create_targetport()
1659 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; in lpfc_nvmet_create_targetport()
1660 lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue; in lpfc_nvmet_create_targetport()
1665 &phba->pcidev->dev, in lpfc_nvmet_create_targetport()
1666 &phba->targetport); in lpfc_nvmet_create_targetport()
1671 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_create_targetport()
1678 phba->targetport = NULL; in lpfc_nvmet_create_targetport()
1679 phba->nvmet_support = 0; in lpfc_nvmet_create_targetport()
1681 lpfc_nvmet_cleanup_io_context(phba); in lpfc_nvmet_create_targetport()
1685 phba->targetport->private; in lpfc_nvmet_create_targetport()
1686 tgtp->phba = phba; in lpfc_nvmet_create_targetport()
1688 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, in lpfc_nvmet_create_targetport()
1692 phba->targetport, tgtp, in lpfc_nvmet_create_targetport()
1737 lpfc_nvmet_update_targetport(struct lpfc_hba *phba) in lpfc_nvmet_update_targetport() argument
1739 struct lpfc_vport *vport = phba->pport; in lpfc_nvmet_update_targetport()
1741 if (!phba->targetport) in lpfc_nvmet_update_targetport()
1746 phba->targetport, vport->fc_myDID); in lpfc_nvmet_update_targetport()
1748 phba->targetport->port_id = vport->fc_myDID; in lpfc_nvmet_update_targetport()
1761 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, in lpfc_sli4_nvmet_xri_aborted() argument
1775 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_sli4_nvmet_xri_aborted()
1778 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) in lpfc_sli4_nvmet_xri_aborted()
1781 if (phba->targetport) { in lpfc_sli4_nvmet_xri_aborted()
1782 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_sli4_nvmet_xri_aborted()
1786 spin_lock_irqsave(&phba->hbalock, iflag); in lpfc_sli4_nvmet_xri_aborted()
1787 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_sli4_nvmet_xri_aborted()
1789 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, in lpfc_sli4_nvmet_xri_aborted()
1805 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_sli4_nvmet_xri_aborted()
1807 rrq_empty = list_empty(&phba->active_rrq_list); in lpfc_sli4_nvmet_xri_aborted()
1808 spin_unlock_irqrestore(&phba->hbalock, iflag); in lpfc_sli4_nvmet_xri_aborted()
1809 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); in lpfc_sli4_nvmet_xri_aborted()
1813 lpfc_set_rrq_active(phba, ndlp, in lpfc_sli4_nvmet_xri_aborted()
1816 lpfc_sli4_abts_err_handler(phba, ndlp, axri); in lpfc_sli4_nvmet_xri_aborted()
1819 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_sli4_nvmet_xri_aborted()
1823 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); in lpfc_sli4_nvmet_xri_aborted()
1826 lpfc_worker_wake_up(phba); in lpfc_sli4_nvmet_xri_aborted()
1829 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_sli4_nvmet_xri_aborted()
1830 spin_unlock_irqrestore(&phba->hbalock, iflag); in lpfc_sli4_nvmet_xri_aborted()
1832 ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); in lpfc_sli4_nvmet_xri_aborted()
1838 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_sli4_nvmet_xri_aborted()
1849 lpfc_nvmeio_data(phba, in lpfc_sli4_nvmet_xri_aborted()
1855 nvmet_fc_rcv_fcp_abort(phba->targetport, req); in lpfc_sli4_nvmet_xri_aborted()
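lpfc_sli4_nvmet_xri_aborted() (lines 1761-1855) nests the ABTS buffer-list lock inside the IRQ-saving hbalock and drops both before calling back into nvmet-fc, presumably to avoid re-entering the transport with driver locks held. The lock choreography, distilled:

```c
/* Sketch of the nested locking in the XRI-aborted handler. */
static void xri_aborted_sketch(struct lpfc_hba *phba, u16 xri)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	/* ... walk lpfc_abts_nvmet_ctx_list for the context owning xri ... */
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Only after both locks drop does the driver notify the transport:
	 * nvmet_fc_rcv_fcp_abort(phba->targetport, req), as at line 1855. */
}
```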
1865 struct lpfc_hba *phba = vport->phba; in lpfc_nvmet_rcv_unsol_abort() local
1875 spin_lock_irqsave(&phba->hbalock, iflag); in lpfc_nvmet_rcv_unsol_abort()
1876 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_rcv_unsol_abort()
1878 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, in lpfc_nvmet_rcv_unsol_abort()
1885 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_rcv_unsol_abort()
1886 spin_unlock_irqrestore(&phba->hbalock, iflag); in lpfc_nvmet_rcv_unsol_abort()
1892 lpfc_nvmeio_data(phba, in lpfc_nvmet_rcv_unsol_abort()
1896 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_rcv_unsol_abort()
1900 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); in lpfc_nvmet_rcv_unsol_abort()
1906 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_rcv_unsol_abort()
1907 spin_unlock_irqrestore(&phba->hbalock, iflag); in lpfc_nvmet_rcv_unsol_abort()
1910 if (phba->sli4_hba.nvmet_io_wait_cnt) { in lpfc_nvmet_rcv_unsol_abort()
1917 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); in lpfc_nvmet_rcv_unsol_abort()
1921 &phba->sli4_hba.lpfc_nvmet_io_wait_list, in lpfc_nvmet_rcv_unsol_abort()
1930 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_rcv_unsol_abort()
1936 phba->sli4_hba.nvmet_io_wait_cnt--; in lpfc_nvmet_rcv_unsol_abort()
1940 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, in lpfc_nvmet_rcv_unsol_abort()
1945 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); in lpfc_nvmet_rcv_unsol_abort()
1953 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid); in lpfc_nvmet_rcv_unsol_abort()
1961 lpfc_nvmeio_data(phba, in lpfc_nvmet_rcv_unsol_abort()
1965 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_rcv_unsol_abort()
1972 nvmet_fc_rcv_fcp_abort(phba->targetport, in lpfc_nvmet_rcv_unsol_abort()
1977 lpfc_nvmet_defer_release(phba, ctxp); in lpfc_nvmet_rcv_unsol_abort()
1980 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, in lpfc_nvmet_rcv_unsol_abort()
1987 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n", in lpfc_nvmet_rcv_unsol_abort()
1990 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_rcv_unsol_abort()
2000 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, in lpfc_nvmet_wqfull_flush() argument
2027 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, in lpfc_nvmet_wqfull_flush()
2036 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep); in lpfc_nvmet_wqfull_flush()
2046 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, in lpfc_nvmet_wqfull_process() argument
2067 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); in lpfc_nvmet_wqfull_process()
2095 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) in lpfc_nvmet_destroy_targetport() argument
2103 if (phba->nvmet_support == 0) in lpfc_nvmet_destroy_targetport()
2105 if (phba->targetport) { in lpfc_nvmet_destroy_targetport()
2106 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_destroy_targetport()
2107 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { in lpfc_nvmet_destroy_targetport()
2108 wq = phba->sli4_hba.hdwq[qidx].io_wq; in lpfc_nvmet_destroy_targetport()
2109 lpfc_nvmet_wqfull_flush(phba, wq, NULL); in lpfc_nvmet_destroy_targetport()
2112 nvmet_fc_unregister_targetport(phba->targetport); in lpfc_nvmet_destroy_targetport()
2115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_destroy_targetport()
2117 "reached.\n", phba->targetport); in lpfc_nvmet_destroy_targetport()
2118 lpfc_nvmet_cleanup_io_context(phba); in lpfc_nvmet_destroy_targetport()
2120 phba->targetport = NULL; in lpfc_nvmet_destroy_targetport()
2141 lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba, in lpfc_nvmet_handle_lsreq() argument
2145 struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private; in lpfc_nvmet_handle_lsreq()
2156 rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp, in lpfc_nvmet_handle_lsreq()
2159 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, in lpfc_nvmet_handle_lsreq()
2180 struct lpfc_hba *phba = ctxp->phba; in lpfc_nvmet_process_rcv_fcp_req() local
2188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_process_rcv_fcp_req()
2193 lpfc_nvmet_defer_release(phba, ctxp); in lpfc_nvmet_process_rcv_fcp_req()
2195 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, in lpfc_nvmet_process_rcv_fcp_req()
2201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_process_rcv_fcp_req()
2208 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_process_rcv_fcp_req()
2223 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req, in lpfc_nvmet_process_rcv_fcp_req()
2236 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ in lpfc_nvmet_process_rcv_fcp_req()
2242 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d " in lpfc_nvmet_process_rcv_fcp_req()
2259 phba, phba->sli4_hba.nvmet_mrq_hdr[qno], in lpfc_nvmet_process_rcv_fcp_req()
2260 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); in lpfc_nvmet_process_rcv_fcp_req()
2265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_process_rcv_fcp_req()
2271 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", in lpfc_nvmet_process_rcv_fcp_req()
2274 lpfc_nvmet_defer_release(phba, ctxp); in lpfc_nvmet_process_rcv_fcp_req()
2276 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); in lpfc_nvmet_process_rcv_fcp_req()
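lpfc_nvmet_process_rcv_fcp_req() (lines 2180-2276) hands the received command IU to nvmet-fc, and the return code picks one of three legs, exactly as the error paths in the listing show. A sketch; `payload` and `size` are hypothetical names standing in for the command IU buffer:

```c
/* Sketch of the receive dispatch and its three outcomes. */
static void rcv_fcp_req_sketch(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       struct rqb_dmabuf *nvmebuf,
			       void *payload, u32 size)
{
	int rc;

	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
				  payload, size);
	if (rc == 0) {
		/* Accepted: repost the RQ buffer to the hardware. */
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
	} else if (rc == -EBUSY) {
		/* Transport busy: keep the buffer and defer the receive. */
	} else {
		/* Hard failure: park the context and abort the exchange. */
		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	}
}
```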
2292 lpfc_nvmet_replenish_context(struct lpfc_hba *phba, in lpfc_nvmet_replenish_context() argument
2315 for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) { in lpfc_nvmet_replenish_context()
2364 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, in lpfc_nvmet_unsol_fcp_buffer() argument
2383 if (!nvmebuf || !phba->targetport) { in lpfc_nvmet_unsol_fcp_buffer()
2384 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_unsol_fcp_buffer()
2387 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); in lpfc_nvmet_unsol_fcp_buffer()
2399 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx); in lpfc_nvmet_unsol_fcp_buffer()
2406 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop); in lpfc_nvmet_unsol_fcp_buffer()
2415 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { in lpfc_nvmet_unsol_fcp_buffer()
2416 this_cpu_inc(phba->sli4_hba.c_stat->rcv_io); in lpfc_nvmet_unsol_fcp_buffer()
2418 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, in lpfc_nvmet_unsol_fcp_buffer()
2425 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", in lpfc_nvmet_unsol_fcp_buffer()
2428 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_unsol_fcp_buffer()
2432 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); in lpfc_nvmet_unsol_fcp_buffer()
2434 &phba->sli4_hba.lpfc_nvmet_io_wait_list); in lpfc_nvmet_unsol_fcp_buffer()
2435 phba->sli4_hba.nvmet_io_wait_cnt++; in lpfc_nvmet_unsol_fcp_buffer()
2436 phba->sli4_hba.nvmet_io_wait_total++; in lpfc_nvmet_unsol_fcp_buffer()
2437 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, in lpfc_nvmet_unsol_fcp_buffer()
2443 phba, phba->sli4_hba.nvmet_mrq_hdr[qno], in lpfc_nvmet_unsol_fcp_buffer()
2444 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); in lpfc_nvmet_unsol_fcp_buffer()
2453 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); in lpfc_nvmet_unsol_fcp_buffer()
2454 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list); in lpfc_nvmet_unsol_fcp_buffer()
2455 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); in lpfc_nvmet_unsol_fcp_buffer()
2457 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_unsol_fcp_buffer()
2463 ctxp->phba = phba; in lpfc_nvmet_unsol_fcp_buffer()
2497 if (!queue_work(phba->wq, &ctx_buf->defer_work)) { in lpfc_nvmet_unsol_fcp_buffer()
2499 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_unsol_fcp_buffer()
2508 lpfc_nvmet_defer_release(phba, ctxp); in lpfc_nvmet_unsol_fcp_buffer()
2510 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); in lpfc_nvmet_unsol_fcp_buffer()
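This is the producer side of the io-wait list that lpfc_nvmet_ctxbuf_post() drains (sketched earlier): when the per-CPU free list, even after lpfc_nvmet_replenish_context(), yields no context, the received buffer is parked and counted (lines 2432-2437). Distilled, with `nvmebuf` and `iflag` coming from the enclosing function in the listing:

```c
/* No context available: queue the buffer until ctxbuf_post frees one. */
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
list_add_tail(&nvmebuf->hbuf.list,
	      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
phba->sli4_hba.nvmet_io_wait_cnt++;
phba->sli4_hba.nvmet_io_wait_total++;
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
/* ... then repost an RQ header/data pair so receive can continue ... */
```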
2529 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, in lpfc_nvmet_unsol_fcp_event() argument
2536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_unsol_fcp_event()
2540 if (phba->nvmet_support == 0) { in lpfc_nvmet_unsol_fcp_event()
2541 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); in lpfc_nvmet_unsol_fcp_event()
2544 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag); in lpfc_nvmet_unsol_fcp_event()
2573 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, in lpfc_nvmet_prep_ls_wqe() argument
2581 if (!lpfc_is_link_up(phba)) { in lpfc_nvmet_prep_ls_wqe()
2582 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_prep_ls_wqe()
2590 nvmewqe = lpfc_sli_get_iocbq(phba); in lpfc_nvmet_prep_ls_wqe()
2592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_prep_ls_wqe()
2599 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); in lpfc_nvmet_prep_ls_wqe()
2603 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_prep_ls_wqe()
2639 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); in lpfc_nvmet_prep_ls_wqe()
2674 nvmewqe->vport = phba->pport; in lpfc_nvmet_prep_ls_wqe()
2675 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; in lpfc_nvmet_prep_ls_wqe()
2679 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, in lpfc_nvmet_prep_ls_wqe()
2689 lpfc_sli_release_iocbq(phba, nvmewqe); in lpfc_nvmet_prep_ls_wqe()
2695 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, in lpfc_nvmet_prep_fcp_wqe() argument
2711 if (!lpfc_is_link_up(phba)) { in lpfc_nvmet_prep_fcp_wqe()
2712 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_prep_fcp_wqe()
2719 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); in lpfc_nvmet_prep_fcp_wqe()
2723 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_prep_fcp_wqe()
2731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_prep_fcp_wqe()
2735 phba->cfg_nvme_seg_cnt); in lpfc_nvmet_prep_fcp_wqe()
2740 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_prep_fcp_wqe()
2746 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_prep_fcp_wqe()
2764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_prep_fcp_wqe()
2799 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); in lpfc_nvmet_prep_fcp_wqe()
2879 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); in lpfc_nvmet_prep_fcp_wqe()
2897 if (phba->cfg_enable_pbde) { in lpfc_nvmet_prep_fcp_wqe()
2945 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); in lpfc_nvmet_prep_fcp_wqe()
2984 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, in lpfc_nvmet_prep_fcp_wqe()
2991 nvmewqe->vport = phba->pport; in lpfc_nvmet_prep_fcp_wqe()
2992 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; in lpfc_nvmet_prep_fcp_wqe()
3040 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, in lpfc_nvmet_sol_fcp_abort_cmp() argument
3052 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_sol_fcp_abort_cmp()
3064 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_sol_fcp_abort_cmp()
3066 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_sol_fcp_abort_cmp()
3073 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_sol_fcp_abort_cmp()
3087 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); in lpfc_nvmet_sol_fcp_abort_cmp()
3090 lpfc_sli_release_iocbq(phba, cmdwqe); in lpfc_nvmet_sol_fcp_abort_cmp()
3109 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, in lpfc_nvmet_unsol_fcp_abort_cmp() argument
3123 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_unsol_fcp_abort_cmp()
3130 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_unsol_fcp_abort_cmp()
3137 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_unsol_fcp_abort_cmp()
3148 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_unsol_fcp_abort_cmp()
3150 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_unsol_fcp_abort_cmp()
3157 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_unsol_fcp_abort_cmp()
3171 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); in lpfc_nvmet_unsol_fcp_abort_cmp()
3190 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, in lpfc_nvmet_xmt_ls_abort_cmp() argument
3200 if (phba->nvmet_support) { in lpfc_nvmet_xmt_ls_abort_cmp()
3201 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_xmt_ls_abort_cmp()
3205 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_xmt_ls_abort_cmp()
3211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_xmt_ls_abort_cmp()
3217 lpfc_sli_release_iocbq(phba, cmdwqe); in lpfc_nvmet_xmt_ls_abort_cmp()
3222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_xmt_ls_abort_cmp()
3230 lpfc_sli_release_iocbq(phba, cmdwqe); in lpfc_nvmet_xmt_ls_abort_cmp()
3235 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, in lpfc_nvmet_unsol_issue_abort() argument
3244 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_unsol_issue_abort()
3248 if (phba->nvmet_support && phba->targetport) in lpfc_nvmet_unsol_issue_abort()
3249 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_unsol_issue_abort()
3251 ndlp = lpfc_findnode_did(phba->pport, sid); in lpfc_nvmet_unsol_issue_abort()
3257 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_unsol_issue_abort()
3283 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); in lpfc_nvmet_unsol_issue_abort()
3315 abts_wqeq->vport = phba->pport; in lpfc_nvmet_unsol_issue_abort()
3324 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_unsol_issue_abort()
3331 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, in lpfc_nvmet_sol_fcp_issue_abort() argument
3342 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_sol_fcp_issue_abort()
3348 ndlp = lpfc_findnode_did(phba->pport, sid); in lpfc_nvmet_sol_fcp_issue_abort()
3353 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_sol_fcp_issue_abort()
3365 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); in lpfc_nvmet_sol_fcp_issue_abort()
3369 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_sol_fcp_issue_abort()
3383 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, in lpfc_nvmet_sol_fcp_issue_abort()
3391 spin_lock_irqsave(&phba->hbalock, flags); in lpfc_nvmet_sol_fcp_issue_abort()
3393 if (phba->hba_flag & HBA_IOQ_FLUSH) { in lpfc_nvmet_sol_fcp_issue_abort()
3394 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_nvmet_sol_fcp_issue_abort()
3396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_sol_fcp_issue_abort()
3399 phba->hba_flag, ctxp->oxid); in lpfc_nvmet_sol_fcp_issue_abort()
3400 lpfc_sli_release_iocbq(phba, abts_wqeq); in lpfc_nvmet_sol_fcp_issue_abort()
3409 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_nvmet_sol_fcp_issue_abort()
3411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_sol_fcp_issue_abort()
3415 lpfc_sli_release_iocbq(phba, abts_wqeq); in lpfc_nvmet_sol_fcp_issue_abort()
3433 abts_wqeq->vport = phba->pport; in lpfc_nvmet_sol_fcp_issue_abort()
3435 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; in lpfc_nvmet_sol_fcp_issue_abort()
3437 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); in lpfc_nvmet_sol_fcp_issue_abort()
3438 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_nvmet_sol_fcp_issue_abort()
3448 lpfc_sli_release_iocbq(phba, abts_wqeq); in lpfc_nvmet_sol_fcp_issue_abort()
3449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_sol_fcp_issue_abort()
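lpfc_nvmet_sol_fcp_issue_abort() (lines 3331-3449) refuses to post an abort once HBA_IOQ_FLUSH is set, and otherwise issues the abort WQE while still holding hbalock. The guard, distilled; `abts_wqeq`, `ctxp`, `flags`, and `rc` come from the enclosing function in the listing:

```c
/* Sketch of the guarded abort post under hbalock. */
spin_lock_irqsave(&phba->hbalock, flags);
if (phba->hba_flag & HBA_IOQ_FLUSH) {
	/* IO queues are being flushed: do not issue, drop the WQE. */
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	return 0;
}
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
```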
3457 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, in lpfc_nvmet_unsol_fcp_issue_abort() argument
3467 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_unsol_fcp_issue_abort()
3474 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_unsol_fcp_issue_abort()
3482 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); in lpfc_nvmet_unsol_fcp_issue_abort()
3486 spin_lock_irqsave(&phba->hbalock, flags); in lpfc_nvmet_unsol_fcp_issue_abort()
3492 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; in lpfc_nvmet_unsol_fcp_issue_abort()
3494 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); in lpfc_nvmet_unsol_fcp_issue_abort()
3495 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_nvmet_unsol_fcp_issue_abort()
3503 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_unsol_fcp_issue_abort()
3505 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); in lpfc_nvmet_unsol_fcp_issue_abort()
3512 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvmet_unsol_fcp_issue_abort()
3517 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); in lpfc_nvmet_unsol_fcp_issue_abort()
3530 lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, in lpfc_nvme_unsol_ls_issue_abort() argument
3544 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvme_unsol_ls_issue_abort()
3551 if (phba->nvmet_support && phba->targetport) in lpfc_nvme_unsol_ls_issue_abort()
3552 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvme_unsol_ls_issue_abort()
3556 ctxp->wqeq = lpfc_sli_get_iocbq(phba); in lpfc_nvme_unsol_ls_issue_abort()
3558 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvme_unsol_ls_issue_abort()
3568 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) { in lpfc_nvme_unsol_ls_issue_abort()
3573 spin_lock_irqsave(&phba->hbalock, flags); in lpfc_nvme_unsol_ls_issue_abort()
3577 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); in lpfc_nvme_unsol_ls_issue_abort()
3578 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_nvme_unsol_ls_issue_abort()
3589 lpfc_sli_release_iocbq(phba, abts_wqeq); in lpfc_nvme_unsol_ls_issue_abort()
3590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_nvme_unsol_ls_issue_abort()
3605 lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) in lpfc_nvmet_invalidate_host() argument
3609 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS, in lpfc_nvmet_invalidate_host()
3613 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; in lpfc_nvmet_invalidate_host()
3618 nvmet_fc_invalidate_host(phba->targetport, ndlp); in lpfc_nvmet_invalidate_host()