Lines matching references to 'fod' in the NVMe Fibre Channel target driver (drivers/nvme/target/fc.c)

148 	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */  member
182 return (fodptr - fodptr->queue->fod); in nvmet_fc_fodnum()
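
The two entries above show the allocation pattern used throughout the file: each target queue carries a trailing flexible array of fcp_iods, and nvmet_fc_fodnum() recovers an iod's slot index by pointer arithmetic against the base of that array. A minimal sketch of the same idiom, with hypothetical demo_* names standing in for the driver's structures:

	/* Illustrative only; the demo_* names are not the driver's. */
	struct demo_iod {
		struct demo_queue	*queue;		/* back-pointer to owning queue */
		/* ... per-request state ... */
	};

	struct demo_queue {
		u16			sqsize;
		struct demo_iod		iod[];		/* one slot per SQ entry */
	};

	static int demo_iodnum(struct demo_iod *iodptr)
	{
		/* pointer difference against the array base gives the slot number */
		return iodptr - iodptr->queue->iod;
	}
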
251 struct nvmet_fc_fcp_iod *fod);
628 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_prep_fcp_iodlist() local
631 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_prep_fcp_iodlist()
632 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); in nvmet_fc_prep_fcp_iodlist()
633 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
634 fod->queue = queue; in nvmet_fc_prep_fcp_iodlist()
635 fod->active = false; in nvmet_fc_prep_fcp_iodlist()
636 fod->abort = false; in nvmet_fc_prep_fcp_iodlist()
637 fod->aborted = false; in nvmet_fc_prep_fcp_iodlist()
638 fod->fcpreq = NULL; in nvmet_fc_prep_fcp_iodlist()
639 list_add_tail(&fod->fcp_list, &queue->fod_list); in nvmet_fc_prep_fcp_iodlist()
640 spin_lock_init(&fod->flock); in nvmet_fc_prep_fcp_iodlist()
642 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
643 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_iodlist()
644 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
645 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
646 for (fod--, i--; i >= 0; fod--, i--) { in nvmet_fc_prep_fcp_iodlist()
647 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
648 sizeof(fod->rspiubuf), in nvmet_fc_prep_fcp_iodlist()
650 fod->rspdma = 0L; in nvmet_fc_prep_fcp_iodlist()
651 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
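
nvmet_fc_prep_fcp_iodlist() walks the iod array, initializes each entry, puts it on the queue's free list, and DMA-maps its response buffer; if a mapping fails it walks back down the array, unmapping what was already mapped. A condensed sketch of that map-or-unwind loop, assuming the same fc_dma_* wrappers and an int return value the caller checks (list handling elided):

	/* Sketch only: map every iod's response buffer, or undo the earlier maps on failure. */
	for (i = 0; i < n; i++) {
		iod[i].rspdma = fc_dma_map_single(dev, &iod[i].rspiubuf,
						  sizeof(iod[i].rspiubuf),
						  DMA_TO_DEVICE);
		if (fc_dma_mapping_error(dev, iod[i].rspdma)) {
			while (--i >= 0) {	/* unwind mappings done so far */
				fc_dma_unmap_single(dev, iod[i].rspdma,
						    sizeof(iod[i].rspiubuf),
						    DMA_TO_DEVICE);
				iod[i].rspdma = 0L;
			}
			return -EFAULT;		/* assumed error code for the sketch */
		}
	}
	return 0;
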
663 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_destroy_fcp_iodlist() local
666 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_destroy_fcp_iodlist()
667 if (fod->rspdma) in nvmet_fc_destroy_fcp_iodlist()
668 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
669 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_destroy_fcp_iodlist()
676 struct nvmet_fc_fcp_iod *fod; in nvmet_fc_alloc_fcp_iod() local
680 fod = list_first_entry_or_null(&queue->fod_list, in nvmet_fc_alloc_fcp_iod()
682 if (fod) { in nvmet_fc_alloc_fcp_iod()
683 list_del(&fod->fcp_list); in nvmet_fc_alloc_fcp_iod()
684 fod->active = true; in nvmet_fc_alloc_fcp_iod()
691 return fod; in nvmet_fc_alloc_fcp_iod()
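
nvmet_fc_alloc_fcp_iod() is a simple free-list pop: take the first entry off the queue's fod_list (if any) and mark it active; nvmet_fc_free_fcp_iod() below returns entries to the tail. In sketch form, assuming the caller already holds the lock protecting fod_list:

	/* Sketch: pop one free descriptor from the per-queue list (lock held by caller). */
	fod = list_first_entry_or_null(&queue->fod_list,
				       struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);	/* no longer on the free list */
		fod->active = true;
	}
	return fod;
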
700 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_queue_fcp_req() local
709 nvmet_fc_handle_fcp_rqst(tgtport, fod); in nvmet_fc_queue_fcp_req()
715 struct nvmet_fc_fcp_iod *fod = in nvmet_fc_fcp_rqst_op_defer_work() local
719 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
725 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_free_fcp_iod() argument
727 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_free_fcp_iod()
728 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod()
732 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
733 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_free_fcp_iod()
737 fod->active = false; in nvmet_fc_free_fcp_iod()
738 fod->abort = false; in nvmet_fc_free_fcp_iod()
739 fod->aborted = false; in nvmet_fc_free_fcp_iod()
740 fod->writedataactive = false; in nvmet_fc_free_fcp_iod()
741 fod->fcpreq = NULL; in nvmet_fc_free_fcp_iod()
752 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); in nvmet_fc_free_fcp_iod()
768 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); in nvmet_fc_free_fcp_iod()
773 fcpreq->nvmet_fc_private = fod; in nvmet_fc_free_fcp_iod()
774 fod->fcpreq = fcpreq; in nvmet_fc_free_fcp_iod()
775 fod->active = true; in nvmet_fc_free_fcp_iod()
785 queue_work(queue->work_q, &fod->defer_work); in nvmet_fc_free_fcp_iod()
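
The nvmet_fc_free_fcp_iod() entries show two outcomes: ordinarily the iod's response DMA buffer is synced, its flags are cleared, and it goes back onto fod_list; but if an FCP command was deferred earlier for lack of a free iod, the same iod is immediately re-armed with that deferred command (whose command IU the deferral path stashed behind fcpreq->rspaddr/rsplen) and handed to defer_work. A rough sketch of the re-arm branch, where deferfcp stands for the driver's deferred-request bookkeeping entry and locking is elided:

	/* Sketch of reusing a just-freed iod for a deferred command (deferfcp assumed, details elided). */
	fcpreq = deferfcp->fcpreq;			/* the LLDD request that was deferred */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;
	/* run the command handling from workqueue context */
	queue_work(queue->work_q, &fod->defer_work);
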
799 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); in nvmet_fc_alloc_target_queue()
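
The allocation above sizes the queue so the trailing fod[] array gets one slot per submission-queue entry; struct_size(queue, fod, sqsize) is the overflow-checked form of "header plus N trailing elements". Equivalent in spirit to:

	/* What struct_size(queue, fod, sqsize) computes, minus the overflow checking. */
	size_t bytes = sizeof(*queue) + sqsize * sizeof(queue->fod[0]);
	queue = kzalloc(bytes, GFP_KERNEL);
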
886 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_delete_target_queue() local
900 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_delete_target_queue()
901 if (fod->active) { in nvmet_fc_delete_target_queue()
902 spin_lock(&fod->flock); in nvmet_fc_delete_target_queue()
903 fod->abort = true; in nvmet_fc_delete_target_queue()
909 if (fod->writedataactive) { in nvmet_fc_delete_target_queue()
910 fod->aborted = true; in nvmet_fc_delete_target_queue()
911 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
913 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
915 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
2066 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_alloc_tgt_pgs() argument
2071 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); in nvmet_fc_alloc_tgt_pgs()
2075 fod->data_sg = sg; in nvmet_fc_alloc_tgt_pgs()
2076 fod->data_sg_cnt = nent; in nvmet_fc_alloc_tgt_pgs()
2077 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2078 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_alloc_tgt_pgs()
2081 fod->next_sg = fod->data_sg; in nvmet_fc_alloc_tgt_pgs()
2090 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_free_tgt_pgs() argument
2092 if (!fod->data_sg || !fod->data_sg_cnt) in nvmet_fc_free_tgt_pgs()
2095 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2096 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_free_tgt_pgs()
2098 sgl_free(fod->data_sg); in nvmet_fc_free_tgt_pgs()
2099 fod->data_sg = NULL; in nvmet_fc_free_tgt_pgs()
2100 fod->data_sg_cnt = 0; in nvmet_fc_free_tgt_pgs()
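
nvmet_fc_alloc_tgt_pgs() and nvmet_fc_free_tgt_pgs() bracket the data phase: sgl_alloc() builds a scatterlist covering transfer_len bytes, which is then DMA-mapped with the direction chosen from the I/O direction (DMA_FROM_DEVICE when the host writes data to the target, DMA_TO_DEVICE when it reads), and the free path unmaps and releases it. A compressed sketch of the allocate-and-map half, reusing the names shown above:

	/* Sketch: allocate the transfer's SG list and map it for DMA. */
	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
	if (!sg)
		return NVME_SC_INTERNAL;		/* assumed error return for the sketch */
	fod->data_sg = sg;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
					 (fod->io_dir == NVMET_FCP_WRITE) ?
						DMA_FROM_DEVICE : DMA_TO_DEVICE);
	fod->next_sg = fod->data_sg;	/* data transfers walk the list from here */
	return 0;
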
2122 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_prep_fcp_rsp() argument
2124 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; in nvmet_fc_prep_fcp_rsp()
2125 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in nvmet_fc_prep_fcp_rsp()
2131 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) in nvmet_fc_prep_fcp_rsp()
2132 xfr_length = fod->req.transfer_len; in nvmet_fc_prep_fcp_rsp()
2134 xfr_length = fod->offset; in nvmet_fc_prep_fcp_rsp()
2155 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); in nvmet_fc_prep_fcp_rsp()
2156 if (!(rspcnt % fod->queue->ersp_ratio) || in nvmet_fc_prep_fcp_rsp()
2158 xfr_length != fod->req.transfer_len || in nvmet_fc_prep_fcp_rsp()
2161 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) in nvmet_fc_prep_fcp_rsp()
2165 fod->fcpreq->rspaddr = ersp; in nvmet_fc_prep_fcp_rsp()
2166 fod->fcpreq->rspdma = fod->rspdma; in nvmet_fc_prep_fcp_rsp()
2170 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; in nvmet_fc_prep_fcp_rsp()
2173 rsn = atomic_inc_return(&fod->queue->rsn); in nvmet_fc_prep_fcp_rsp()
2176 fod->fcpreq->rsplen = sizeof(*ersp); in nvmet_fc_prep_fcp_rsp()
2179 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2180 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_rsp()
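
nvmet_fc_prep_fcp_rsp() chooses between FC-NVMe's short zero-filled response and a full extended response (ERSP): a per-queue counter is bumped for every response, and a full ERSP goes out on every ersp_ratio-th response or whenever something forces it (transferred length differing from the expected length, an interesting completion status, the SQ running near full, plus further conditions not visible in the match list above). Roughly:

	/* Condensed sketch of the zero-filled-response vs. full-ERSP decision. */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	send_ersp = !(rspcnt % fod->queue->ersp_ratio) ||	/* periodic full ERSP */
		    xfr_length != fod->req.transfer_len ||	/* short/odd transfer */
		    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head));
	fod->fcpreq->rsplen = send_ersp ? sizeof(*ersp) : NVME_FC_SIZEOF_ZEROS_RSP;
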
2187 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_abort_op() argument
2189 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_abort_op()
2192 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_abort_op()
2199 if (!fod->aborted) in nvmet_fc_abort_op()
2202 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_abort_op()
2207 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_xmt_fcp_rsp() argument
2211 fod->fcpreq->op = NVMET_FCOP_RSP; in nvmet_fc_xmt_fcp_rsp()
2212 fod->fcpreq->timeout = 0; in nvmet_fc_xmt_fcp_rsp()
2214 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2216 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2218 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2223 struct nvmet_fc_fcp_iod *fod, u8 op) in nvmet_fc_transfer_fcp_data() argument
2225 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_transfer_fcp_data()
2226 struct scatterlist *sg = fod->next_sg; in nvmet_fc_transfer_fcp_data()
2228 u32 remaininglen = fod->req.transfer_len - fod->offset; in nvmet_fc_transfer_fcp_data()
2233 fcpreq->offset = fod->offset; in nvmet_fc_transfer_fcp_data()
2260 fod->next_sg = sg; in nvmet_fc_transfer_fcp_data()
2262 fod->next_sg = NULL; in nvmet_fc_transfer_fcp_data()
2274 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && in nvmet_fc_transfer_fcp_data()
2277 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_transfer_fcp_data()
2280 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2287 fod->abort = true; in nvmet_fc_transfer_fcp_data()
2290 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2291 fod->writedataactive = false; in nvmet_fc_transfer_fcp_data()
2292 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2293 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_transfer_fcp_data()
2297 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2303 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) in __nvmet_fc_fod_op_abort() argument
2305 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in __nvmet_fc_fod_op_abort()
2306 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort()
2311 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in __nvmet_fc_fod_op_abort()
2315 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fod_op_abort()
2326 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_fod_op_done() argument
2328 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_fod_op_done()
2329 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done()
2333 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2334 abort = fod->abort; in nvmet_fc_fod_op_done()
2335 fod->writedataactive = false; in nvmet_fc_fod_op_done()
2336 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2341 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2345 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2346 fod->abort = true; in nvmet_fc_fod_op_done()
2347 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2349 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_fod_op_done()
2353 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2354 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2355 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2356 fod->writedataactive = true; in nvmet_fc_fod_op_done()
2357 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2360 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2366 fod->req.execute(&fod->req); in nvmet_fc_fod_op_done()
2371 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2375 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_fod_op_done()
2383 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_fod_op_done()
2384 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2388 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2389 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2391 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2399 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_fod_op_done()
2401 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in nvmet_fc_fod_op_done()
2406 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2408 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2419 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_xmt_fcp_op_done() local
2421 nvmet_fc_fod_op_done(fod); in nvmet_fc_xmt_fcp_op_done()
2429 struct nvmet_fc_fcp_iod *fod, int status) in __nvmet_fc_fcp_nvme_cmd_done() argument
2431 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in __nvmet_fc_fcp_nvme_cmd_done()
2432 struct nvme_completion *cqe = &fod->rspiubuf.cqe; in __nvmet_fc_fcp_nvme_cmd_done()
2436 spin_lock_irqsave(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2437 abort = fod->abort; in __nvmet_fc_fcp_nvme_cmd_done()
2438 spin_unlock_irqrestore(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2442 fod->queue->sqhd = cqe->sq_head; in __nvmet_fc_fcp_nvme_cmd_done()
2445 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2453 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ in __nvmet_fc_fcp_nvme_cmd_done()
2454 cqe->sq_id = cpu_to_le16(fod->queue->qid); in __nvmet_fc_fcp_nvme_cmd_done()
2464 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { in __nvmet_fc_fcp_nvme_cmd_done()
2466 nvmet_fc_transfer_fcp_data(tgtport, fod, in __nvmet_fc_fcp_nvme_cmd_done()
2475 nvmet_fc_free_tgt_pgs(fod); in __nvmet_fc_fcp_nvme_cmd_done()
2477 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2484 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); in nvmet_fc_fcp_nvme_cmd_done() local
2485 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done()
2487 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); in nvmet_fc_fcp_nvme_cmd_done()
2496 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_handle_fcp_rqst() argument
2498 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; in nvmet_fc_handle_fcp_rqst()
2511 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; in nvmet_fc_handle_fcp_rqst()
2514 fod->io_dir = NVMET_FCP_WRITE; in nvmet_fc_handle_fcp_rqst()
2518 fod->io_dir = NVMET_FCP_READ; in nvmet_fc_handle_fcp_rqst()
2522 fod->io_dir = NVMET_FCP_NODATA; in nvmet_fc_handle_fcp_rqst()
2527 fod->req.cmd = &fod->cmdiubuf.sqe; in nvmet_fc_handle_fcp_rqst()
2528 fod->req.cqe = &fod->rspiubuf.cqe; in nvmet_fc_handle_fcp_rqst()
2530 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2533 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); in nvmet_fc_handle_fcp_rqst()
2535 fod->data_sg = NULL; in nvmet_fc_handle_fcp_rqst()
2536 fod->data_sg_cnt = 0; in nvmet_fc_handle_fcp_rqst()
2538 ret = nvmet_req_init(&fod->req, in nvmet_fc_handle_fcp_rqst()
2539 &fod->queue->nvme_cq, in nvmet_fc_handle_fcp_rqst()
2540 &fod->queue->nvme_sq, in nvmet_fc_handle_fcp_rqst()
2548 fod->req.transfer_len = xfrlen; in nvmet_fc_handle_fcp_rqst()
2551 atomic_inc(&fod->queue->sqtail); in nvmet_fc_handle_fcp_rqst()
2553 if (fod->req.transfer_len) { in nvmet_fc_handle_fcp_rqst()
2554 ret = nvmet_fc_alloc_tgt_pgs(fod); in nvmet_fc_handle_fcp_rqst()
2556 nvmet_req_complete(&fod->req, ret); in nvmet_fc_handle_fcp_rqst()
2560 fod->req.sg = fod->data_sg; in nvmet_fc_handle_fcp_rqst()
2561 fod->req.sg_cnt = fod->data_sg_cnt; in nvmet_fc_handle_fcp_rqst()
2562 fod->offset = 0; in nvmet_fc_handle_fcp_rqst()
2564 if (fod->io_dir == NVMET_FCP_WRITE) { in nvmet_fc_handle_fcp_rqst()
2566 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); in nvmet_fc_handle_fcp_rqst()
2576 fod->req.execute(&fod->req); in nvmet_fc_handle_fcp_rqst()
2580 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_handle_fcp_rqst()
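
nvmet_fc_handle_fcp_rqst() is the command dispatch: it points the generic nvmet request at the command IU and response buffers, initializes it against the queue's CQ/SQ, allocates and maps the data SG list when there is a transfer, and then either starts pulling write data from the host or executes the command directly. In outline, following the entries above (error paths condensed; the fabrics-ops name is assumed here):

	/* Outline of FCP command dispatch; nvmet_fc_tgt_fcp_ops naming assumed, error paths condensed. */
	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.cqe = &fod->rspiubuf.cqe;
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	if (!nvmet_req_init(&fod->req, &fod->queue->nvme_cq,
			    &fod->queue->nvme_sq, &nvmet_fc_tgt_fcp_ops))
		return;			/* nvmet_req_init already completed the request */

	fod->req.transfer_len = xfrlen;
	if (fod->req.transfer_len) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
		fod->req.sg = fod->data_sg;
		fod->req.sg_cnt = fod->data_sg_cnt;
		fod->offset = 0;
	}

	if (fod->io_dir == NVMET_FCP_WRITE)	/* host data must arrive before execution */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
	else
		fod->req.execute(&fod->req);	/* READ / no-data commands run now */
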
2638 struct nvmet_fc_fcp_iod *fod; in nvmet_fc_rcv_fcp_req() local
2663 fod = nvmet_fc_alloc_fcp_iod(queue); in nvmet_fc_rcv_fcp_req()
2664 if (fod) { in nvmet_fc_rcv_fcp_req()
2667 fcpreq->nvmet_fc_private = fod; in nvmet_fc_rcv_fcp_req()
2668 fod->fcpreq = fcpreq; in nvmet_fc_rcv_fcp_req()
2670 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); in nvmet_fc_rcv_fcp_req()
2745 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_rcv_fcp_abort() local
2749 if (!fod || fod->fcpreq != fcpreq) in nvmet_fc_rcv_fcp_abort()
2753 queue = fod->queue; in nvmet_fc_rcv_fcp_abort()
2756 if (fod->active) { in nvmet_fc_rcv_fcp_abort()
2762 spin_lock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2763 fod->abort = true; in nvmet_fc_rcv_fcp_abort()
2764 fod->aborted = true; in nvmet_fc_rcv_fcp_abort()
2765 spin_unlock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
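
The teardown loop in nvmet_fc_delete_target_queue() (earlier) flags a still-active iod under fod->flock and, when an LLDD data operation is still outstanding, also asks the LLDD to abort it; nvmet_fc_rcv_fcp_abort() above performs the same flagging when the LLDD itself reports the exchange aborted. The completion paths then check fod->abort/fod->aborted to decide whether to finish or discard the request. The flag-and-abort step is essentially:

	/* Sketch: mark an in-flight iod aborted, then have the LLDD abort the FCP op. */
	spin_lock(&fod->flock);
	fod->abort = true;
	fod->aborted = true;
	spin_unlock(&fod->flock);
	tgtport->ops->fcp_abort(&tgtport->fc_target_port, fod->fcpreq);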