Lines matching refs:tgtport (drivers/nvme/target/fc.c, the NVMe-over-FC target transport)
34 struct nvmet_fc_tgtport *tgtport; member
51 struct nvmet_fc_tgtport *tgtport; member
90 struct nvmet_fc_tgtport *tgtport; member
117 struct nvmet_fc_tgtport *tgtport; member
152 struct nvmet_fc_tgtport *tgtport; member
163 struct nvmet_fc_tgtport *tgtport; member
176 return (iodptr - iodptr->tgtport->iod); in nvmet_fc_iodnum()
248 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
249 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
250 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
253 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
352 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; in __nvmet_fc_finish_ls_req() local
356 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
359 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
367 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
369 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_finish_ls_req()
373 nvmet_fc_tgtport_put(tgtport); in __nvmet_fc_finish_ls_req()
377 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, in __nvmet_fc_send_ls_req() argument
385 if (!tgtport->ops->ls_req) in __nvmet_fc_send_ls_req()
388 if (!nvmet_fc_tgtport_get(tgtport)) in __nvmet_fc_send_ls_req()
395 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, in __nvmet_fc_send_ls_req()
398 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { in __nvmet_fc_send_ls_req()
404 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
406 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); in __nvmet_fc_send_ls_req()
410 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
412 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, in __nvmet_fc_send_ls_req()
421 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
424 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
425 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_send_ls_req()
429 nvmet_fc_tgtport_put(tgtport); in __nvmet_fc_send_ls_req()
435 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_send_ls_req_async() argument
441 return __nvmet_fc_send_ls_req(tgtport, lsop, done); in nvmet_fc_send_ls_req_async()
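
The send path above (lines 377-441) follows a common LLDD handoff pattern: take a target-port reference, DMA-map the request buffer, link the op onto ls_req_list under the port lock, then call the driver's ls_req callback, unwinding each step in reverse on failure. A sketch of that ordering reconstructed from the matched lines; details the search does not show (exact error codes, the sketch function name) are assumptions:

    /* Sketch of the get-ref / dma-map / enqueue / submit / unwind ordering
     * used by __nvmet_fc_send_ls_req(); send_ls_sketch is a hypothetical name. */
    static int send_ls_sketch(struct nvmet_fc_tgtport *tgtport,
                              struct nvmet_fc_ls_req_op *lsop)
    {
            struct nvmefc_ls_req *lsreq = &lsop->ls_req;
            unsigned long flags;
            int ret;

            if (!tgtport->ops->ls_req)              /* LLDD may not originate LS's */
                    return -EOPNOTSUPP;
            if (!nvmet_fc_tgtport_get(tgtport))     /* port may already be dying */
                    return -ESHUTDOWN;

            /* one mapping covers the request and response buffers */
            lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
                            lsreq->rqstlen + lsreq->rsplen, DMA_BIDIRECTIONAL);
            if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
                    ret = -EFAULT;
                    goto out_puttgtport;
            }

            spin_lock_irqsave(&tgtport->lock, flags);
            list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
            spin_unlock_irqrestore(&tgtport->lock, flags);

            ret = tgtport->ops->ls_req(&tgtport->fc_target_port,
                                       lsop->hosthandle, lsreq);
            if (!ret)
                    return 0;

            /* failure: unwind in reverse order */
            spin_lock_irqsave(&tgtport->lock, flags);
            list_del(&lsop->lsreq_list);
            spin_unlock_irqrestore(&tgtport->lock, flags);
            fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
                            lsreq->rqstlen + lsreq->rsplen, DMA_BIDIRECTIONAL);
    out_puttgtport:
            nvmet_fc_tgtport_put(tgtport);
            return ret;
    }
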
477 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_xmt_disconnect_assoc() local
489 if (!tgtport->ops->ls_req || !assoc->hostport || in nvmet_fc_xmt_disconnect_assoc()
495 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvmet_fc_xmt_disconnect_assoc()
497 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
499 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_xmt_disconnect_assoc()
506 if (tgtport->ops->lsrqst_priv_sz) in nvmet_fc_xmt_disconnect_assoc()
511 lsop->tgtport = tgtport; in nvmet_fc_xmt_disconnect_assoc()
517 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, in nvmet_fc_xmt_disconnect_assoc()
520 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
522 tgtport->fc_target_port.port_num, assoc->a_id, ret); in nvmet_fc_xmt_disconnect_assoc()
532 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_alloc_ls_iodlist() argument
542 tgtport->iod = iod; in nvmet_fc_alloc_ls_iodlist()
546 iod->tgtport = tgtport; in nvmet_fc_alloc_ls_iodlist()
547 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
557 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, in nvmet_fc_alloc_ls_iodlist()
560 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) in nvmet_fc_alloc_ls_iodlist()
570 fc_dma_unmap_single(tgtport->dev, iod->rspdma, in nvmet_fc_alloc_ls_iodlist()
582 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_free_ls_iodlist() argument
584 struct nvmet_fc_ls_iod *iod = tgtport->iod; in nvmet_fc_free_ls_iodlist()
588 fc_dma_unmap_single(tgtport->dev, in nvmet_fc_free_ls_iodlist()
594 kfree(tgtport->iod); in nvmet_fc_free_ls_iodlist()
598 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_alloc_ls_iod() argument
603 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
604 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, in nvmet_fc_alloc_ls_iod()
607 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); in nvmet_fc_alloc_ls_iod()
608 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
614 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_free_ls_iod() argument
619 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
620 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_free_ls_iod()
621 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
625 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_prep_fcp_iodlist() argument
633 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
642 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
644 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
647 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
660 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_destroy_fcp_iodlist() argument
668 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
696 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_queue_fcp_req() argument
707 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; in nvmet_fc_queue_fcp_req()
709 nvmet_fc_handle_fcp_rqst(tgtport, fod); in nvmet_fc_queue_fcp_req()
719 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
728 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod() local
732 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
743 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
778 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
807 assoc->tgtport->fc_target_port.port_num, in nvmet_fc_alloc_target_queue()
825 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
832 spin_lock_irqsave(&assoc->tgtport->lock, flags); in nvmet_fc_alloc_target_queue()
834 spin_unlock_irqrestore(&assoc->tgtport->lock, flags); in nvmet_fc_alloc_target_queue()
839 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
856 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); in nvmet_fc_tgt_queue_free()
858 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); in nvmet_fc_tgt_queue_free()
860 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); in nvmet_fc_tgt_queue_free()
885 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; in nvmet_fc_delete_target_queue() local
912 tgtport->ops->fcp_abort( in nvmet_fc_delete_target_queue()
913 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
935 tgtport->ops->defer_rcv(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
938 tgtport->ops->fcp_abort(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
941 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
961 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_find_target_queue() argument
973 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_find_target_queue()
974 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_queue()
981 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_find_target_queue()
985 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_find_target_queue()
994 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; in nvmet_fc_hostport_free() local
997 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_hostport_free()
999 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_hostport_free()
1000 if (tgtport->ops->host_release && hostport->invalid) in nvmet_fc_hostport_free()
1001 tgtport->ops->host_release(hostport->hosthandle); in nvmet_fc_hostport_free()
1003 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_hostport_free()
1029 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) in nvmet_fc_alloc_hostport() argument
1039 if (!nvmet_fc_tgtport_get(tgtport)) in nvmet_fc_alloc_hostport()
1044 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1045 list_for_each_entry(host, &tgtport->host_list, host_list) { in nvmet_fc_alloc_hostport()
1053 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1055 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_alloc_hostport()
1059 newhost->tgtport = tgtport; in nvmet_fc_alloc_hostport()
1064 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1065 list_for_each_entry(host, &tgtport->host_list, host_list) { in nvmet_fc_alloc_hostport()
1077 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_alloc_hostport()
1079 list_add_tail(&newhost->host_list, &tgtport->host_list); in nvmet_fc_alloc_hostport()
1080 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1096 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) in nvmet_fc_alloc_target_assoc() argument
1108 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL); in nvmet_fc_alloc_target_assoc()
1112 if (!nvmet_fc_tgtport_get(tgtport)) in nvmet_fc_alloc_target_assoc()
1115 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); in nvmet_fc_alloc_target_assoc()
1119 assoc->tgtport = tgtport; in nvmet_fc_alloc_target_assoc()
1130 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1132 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { in nvmet_fc_alloc_target_assoc()
1140 list_add_tail(&assoc->a_list, &tgtport->assoc_list); in nvmet_fc_alloc_target_assoc()
1142 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1148 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_alloc_target_assoc()
1150 ida_simple_remove(&tgtport->assoc_cnt, idx); in nvmet_fc_alloc_target_assoc()
1161 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_target_assoc_free() local
1169 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1172 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1175 nvmet_fc_xmt_ls_rsp(tgtport, oldls); in nvmet_fc_target_assoc_free()
1176 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); in nvmet_fc_target_assoc_free()
1177 dev_info(tgtport->dev, in nvmet_fc_target_assoc_free()
1179 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_target_assoc_free()
1181 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_target_assoc_free()
1199 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_target_assoc() local
1210 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1216 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1219 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1222 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1224 dev_info(tgtport->dev, in nvmet_fc_delete_target_assoc()
1226 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_delete_target_assoc()
1232 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_find_target_assoc() argument
1239 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_find_target_assoc()
1240 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_assoc()
1248 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_find_target_assoc()
1254 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_portentry_bind() argument
1260 pe->tgtport = tgtport; in nvmet_fc_portentry_bind()
1261 tgtport->pe = pe; in nvmet_fc_portentry_bind()
1266 pe->node_name = tgtport->fc_target_port.node_name; in nvmet_fc_portentry_bind()
1267 pe->port_name = tgtport->fc_target_port.port_name; in nvmet_fc_portentry_bind()
1279 if (pe->tgtport) in nvmet_fc_portentry_unbind()
1280 pe->tgtport->pe = NULL; in nvmet_fc_portentry_unbind()
1291 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_portentry_unbind_tgt() argument
1297 pe = tgtport->pe; in nvmet_fc_portentry_unbind_tgt()
1299 pe->tgtport = NULL; in nvmet_fc_portentry_unbind_tgt()
1300 tgtport->pe = NULL; in nvmet_fc_portentry_unbind_tgt()
1313 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_portentry_rebind_tgt() argument
1320 if (tgtport->fc_target_port.node_name == pe->node_name && in nvmet_fc_portentry_rebind_tgt()
1321 tgtport->fc_target_port.port_name == pe->port_name) { in nvmet_fc_portentry_rebind_tgt()
1322 WARN_ON(pe->tgtport); in nvmet_fc_portentry_rebind_tgt()
1323 tgtport->pe = pe; in nvmet_fc_portentry_rebind_tgt()
1324 pe->tgtport = tgtport; in nvmet_fc_portentry_rebind_tgt()
1437 struct nvmet_fc_tgtport *tgtport = in nvmet_fc_free_tgtport() local
1439 struct device *dev = tgtport->dev; in nvmet_fc_free_tgtport()
1443 list_del(&tgtport->tgt_list); in nvmet_fc_free_tgtport()
1446 nvmet_fc_free_ls_iodlist(tgtport); in nvmet_fc_free_tgtport()
1449 tgtport->ops->targetport_delete(&tgtport->fc_target_port); in nvmet_fc_free_tgtport()
1452 tgtport->fc_target_port.port_num); in nvmet_fc_free_tgtport()
1454 ida_destroy(&tgtport->assoc_cnt); in nvmet_fc_free_tgtport()
1456 kfree(tgtport); in nvmet_fc_free_tgtport()
1462 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_tgtport_put() argument
1464 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); in nvmet_fc_tgtport_put()
1468 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_tgtport_get() argument
1470 return kref_get_unless_zero(&tgtport->ref); in nvmet_fc_tgtport_get()
1474 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) in __nvmet_fc_free_assocs() argument
1479 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_free_assocs()
1481 &tgtport->assoc_list, a_list) { in __nvmet_fc_free_assocs()
1488 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_free_assocs()
1524 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); in nvmet_fc_invalidate_host() local
1529 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1531 &tgtport->assoc_list, a_list) { in nvmet_fc_invalidate_host()
1543 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1546 if (noassoc && tgtport->ops->host_release) in nvmet_fc_invalidate_host()
1547 tgtport->ops->host_release(hosthandle); in nvmet_fc_invalidate_host()
1557 struct nvmet_fc_tgtport *tgtport, *next; in nvmet_fc_delete_ctrl() local
1565 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, in nvmet_fc_delete_ctrl()
1567 if (!nvmet_fc_tgtport_get(tgtport)) in nvmet_fc_delete_ctrl()
1571 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_ctrl()
1572 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_delete_ctrl()
1580 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_ctrl()
1582 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_delete_ctrl()
1610 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); in nvmet_fc_unregister_targetport() local
1612 nvmet_fc_portentry_unbind_tgt(tgtport); in nvmet_fc_unregister_targetport()
1615 __nvmet_fc_free_assocs(tgtport); in nvmet_fc_unregister_targetport()
1624 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_unregister_targetport()
1635 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_ls_create_association() argument
1672 tgtport, iod->hosthandle); in nvmet_fc_ls_create_association()
1684 dev_err(tgtport->dev, in nvmet_fc_ls_create_association()
1698 dev_info(tgtport->dev, in nvmet_fc_ls_create_association()
1700 tgtport->fc_target_port.port_num, iod->assoc->a_id); in nvmet_fc_ls_create_association()
1724 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_ls_create_connection() argument
1760 iod->assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_create_connection()
1777 dev_err(tgtport->dev, in nvmet_fc_ls_create_connection()
1814 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_ls_disconnect() argument
1831 assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_disconnect()
1839 dev_err(tgtport->dev, in nvmet_fc_ls_disconnect()
1872 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1875 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1880 dev_info(tgtport->dev, in nvmet_fc_ls_disconnect()
1883 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_ls_disconnect()
1891 nvmet_fc_xmt_ls_rsp(tgtport, oldls); in nvmet_fc_ls_disconnect()
1909 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_xmt_ls_rsp_done() local
1911 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp_done()
1913 nvmet_fc_free_ls_iod(tgtport, iod); in nvmet_fc_xmt_ls_rsp_done()
1914 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_xmt_ls_rsp_done()
1918 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_xmt_ls_rsp() argument
1923 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp()
1926 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1935 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_handle_ls_rqst() argument
1958 nvmet_fc_ls_create_association(tgtport, iod); in nvmet_fc_handle_ls_rqst()
1962 nvmet_fc_ls_create_connection(tgtport, iod); in nvmet_fc_handle_ls_rqst()
1966 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); in nvmet_fc_handle_ls_rqst()
1975 nvmet_fc_xmt_ls_rsp(tgtport, iod); in nvmet_fc_handle_ls_rqst()
1986 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_handle_ls_rqst_work() local
1988 nvmet_fc_handle_ls_rqst(tgtport, iod); in nvmet_fc_handle_ls_rqst_work()
2015 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); in nvmet_fc_rcv_ls_req() local
2020 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2028 if (!nvmet_fc_tgtport_get(tgtport)) { in nvmet_fc_rcv_ls_req()
2029 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2036 iod = nvmet_fc_alloc_ls_iod(tgtport); in nvmet_fc_rcv_ls_req()
2038 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2042 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_rcv_ls_req()
2077 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2095 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2121 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_prep_fcp_rsp() argument
2179 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2186 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_abort_op() argument
2200 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); in nvmet_fc_abort_op()
2206 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_xmt_fcp_rsp() argument
2214 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2216 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2218 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2222 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_transfer_fcp_data() argument
2248 fcpreq->sg_cnt < tgtport->max_sg_cnt && in nvmet_fc_transfer_fcp_data()
2275 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { in nvmet_fc_transfer_fcp_data()
2277 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_transfer_fcp_data()
2280 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2306 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort() local
2315 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fod_op_abort()
2329 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done() local
2360 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2375 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_fod_op_done()
2391 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2401 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in nvmet_fc_fod_op_done()
2428 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, in __nvmet_fc_fcp_nvme_cmd_done() argument
2445 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2466 nvmet_fc_transfer_fcp_data(tgtport, fod, in __nvmet_fc_fcp_nvme_cmd_done()
2477 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2485 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done() local
2487 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); in nvmet_fc_fcp_nvme_cmd_done()
2495 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_handle_fcp_rqst() argument
2529 if (tgtport->pe) in nvmet_fc_handle_fcp_rqst()
2530 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2566 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); in nvmet_fc_handle_fcp_rqst()
2580 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_handle_fcp_rqst()
2635 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); in nvmet_fc_rcv_fcp_req() local
2649 queue = nvmet_fc_find_target_queue(tgtport, in nvmet_fc_rcv_fcp_req()
2672 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); in nvmet_fc_rcv_fcp_req()
2677 if (!tgtport->ops->defer_rcv) { in nvmet_fc_rcv_fcp_req()
2840 struct nvmet_fc_tgtport *tgtport; in nvmet_fc_add_port() local
2864 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { in nvmet_fc_add_port()
2865 if ((tgtport->fc_target_port.node_name == traddr.nn) && in nvmet_fc_add_port()
2866 (tgtport->fc_target_port.port_name == traddr.pn)) { in nvmet_fc_add_port()
2868 if (!tgtport->pe) { in nvmet_fc_add_port()
2869 nvmet_fc_portentry_bind(tgtport, pe, port); in nvmet_fc_add_port()
2898 struct nvmet_fc_tgtport *tgtport = pe->tgtport; in nvmet_fc_discovery_chg() local
2900 if (tgtport && tgtport->ops->discovery_event) in nvmet_fc_discovery_chg()
2901 tgtport->ops->discovery_event(&tgtport->fc_target_port); in nvmet_fc_discovery_chg()