Lines matching refs:iod (references to the per-request struct nvme_iod in the NVMe PCI driver)

425 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_init_request() local
430 iod->nvmeq = nvmeq; in nvme_init_request()
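
The two matches above are from nvme_init_request(): the driver-private per-request data (the PDU that blk_mq_rq_to_pdu() returns) is bound to its owning queue when the request is first set up. As orientation for the fields referenced throughout this listing, here is a hedged, stand-alone sketch of that descriptor; the field names are taken from the references below, the types are simplified stand-ins, and it is not a verbatim copy of the driver's definition.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;                      /* stand-in for the kernel type */
struct scatterlist;                               /* opaque here */
struct nvme_queue;                                /* opaque here */
struct nvme_command { unsigned char raw[64]; };   /* 64-byte submission queue entry */

/* Sketch of the per-request I/O descriptor, assembled from the fields this
 * listing references; not the kernel's actual definition. */
struct nvme_iod_sketch {
	struct nvme_command cmd;    /* command built in nvme_queue_rq() */
	struct nvme_queue *nvmeq;   /* owning queue, set in nvme_init_request() */
	bool use_sgl;               /* SGL vs PRP data pointer, chosen in nvme_map_data() */
	int aborted;                /* set once by nvme_timeout() */
	int npages;                 /* PRP/SGL list pages in use (-1, 0, 1, ...) */
	int nents;                  /* mapped scatterlist entries */
	dma_addr_t first_dma;       /* first list page, or the single mapping */
	unsigned int dma_len;       /* length of a single-segment mapping */
	dma_addr_t meta_dma;        /* mapped metadata buffer */
	struct scatterlist *sg;     /* scatterlist from dev->iod_mempool */
};
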
526 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_iod_list() local
527 return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); in nvme_pci_iod_list()
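
Line 527 shows where the per-request array of PRP/SGL list-page pointers lives: immediately after the scatterlist entries, in the same allocation, so one mempool buffer serves both. A minimal user-space model of that address computation (the stub scatterlist type is illustrative; nr_phys_segments stands in for blk_rq_nr_phys_segments()):

#include <stddef.h>

struct scatterlist_stub { void *page; unsigned int offset, length; };

/* The pointer list starts right after the last scatterlist entry. */
static void **iod_list_model(struct scatterlist_stub *sg, unsigned int nr_phys_segments)
{
	return (void **)(sg + nr_phys_segments);
}
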
532 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_use_sgls() local
540 if (!iod->nvmeq->qid) in nvme_pci_use_sgls()
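
Line 540 is the admin-queue check in nvme_pci_use_sgls(): qid 0 never gets SGL-mapped commands. A hedged model of that kind of decision follows; the sgl_threshold comparison against the average segment size is an assumption about surrounding lines this listing does not show.

#include <stdbool.h>

/* Model only: admin queue (qid 0) always uses PRPs; I/O queues prefer SGLs
 * when the controller supports them and segments are large enough on average. */
static bool use_sgls_model(unsigned int qid, bool ctrl_supports_sgls,
			   unsigned int payload_bytes, unsigned int nr_segments,
			   unsigned int sgl_threshold)
{
	if (!ctrl_supports_sgls || !sgl_threshold)
		return false;
	if (!qid)                                   /* the check on line 540 */
		return false;
	return payload_bytes / nr_segments >= sgl_threshold;   /* assumes nr_segments > 0 */
}
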
550 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_free_prps() local
551 dma_addr_t dma_addr = iod->first_dma; in nvme_free_prps()
554 for (i = 0; i < iod->npages; i++) { in nvme_free_prps()
567 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_free_sgls() local
568 dma_addr_t dma_addr = iod->first_dma; in nvme_free_sgls()
571 for (i = 0; i < iod->npages; i++) { in nvme_free_sgls()
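
nvme_free_prps() and nvme_free_sgls() both walk iod->npages list pages starting at iod->first_dma and hand each one back to its DMA pool; for PRPs, the DMA address of the next page sits in the last slot of the current one. A rough user-space model of the PRP walk, with free() standing in for dma_pool_free() and iod_list playing the role of nvme_pci_iod_list(req):

#include <stdint.h>
#include <stdlib.h>

#define PRPS_PER_LIST_PAGE 512   /* 4096-byte list page / 8-byte entry */

/* Read the next page's DMA address from the current page's last slot before
 * freeing it, because the pool free needs both the virtual and DMA address. */
static void free_prp_chain_model(uint64_t **iod_list, int npages, uint64_t first_dma)
{
	uint64_t dma_addr = first_dma;

	for (int i = 0; i < npages; i++) {
		uint64_t *prp_list = iod_list[i];
		uint64_t next_dma = prp_list[PRPS_PER_LIST_PAGE - 1];

		free(prp_list);   /* dma_pool_free(pool, prp_list, dma_addr) in the driver */
		dma_addr = next_dma;
	}
	(void)dma_addr;
}
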
583 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_unmap_sg() local
585 if (is_pci_p2pdma_page(sg_page(iod->sg))) in nvme_unmap_sg()
586 pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, in nvme_unmap_sg()
589 dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); in nvme_unmap_sg()
594 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_unmap_data() local
596 if (iod->dma_len) { in nvme_unmap_data()
597 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, in nvme_unmap_data()
602 WARN_ON_ONCE(!iod->nents); in nvme_unmap_data()
605 if (iod->npages == 0) in nvme_unmap_data()
607 iod->first_dma); in nvme_unmap_data()
608 else if (iod->use_sgl) in nvme_unmap_data()
612 mempool_free(iod->sg, dev->iod_mempool); in nvme_unmap_data()
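
The nvme_unmap_data() references above give the whole teardown order: a simple single-segment mapping (dma_len != 0) is just unmapped and done; otherwise the scatterlist is unmapped, the PRP/SGL list pages are freed (npages == 0 meaning a single small-pool page), and finally iod->sg goes back to the iod mempool. A hedged control-flow model, with the real calls named only in comments:

#include <stdbool.h>

static void unmap_data_model(unsigned int dma_len, int npages, bool use_sgl)
{
	if (dma_len) {
		/* dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, dir); */
		return;
	}
	/* WARN_ON_ONCE(!iod->nents); then nvme_unmap_sg(dev, req); */
	if (npages == 0) {
		/* one list page from the small pool: dma_pool_free(..., iod->first_dma); */
	} else if (use_sgl) {
		/* nvme_free_sgls(dev, req); */
	} else {
		/* nvme_free_prps(dev, req); */
	}
	/* mempool_free(iod->sg, dev->iod_mempool); */
}
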
632 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_setup_prps() local
635 struct scatterlist *sg = iod->sg; in nvme_pci_setup_prps()
646 iod->first_dma = 0; in nvme_pci_setup_prps()
660 iod->first_dma = dma_addr; in nvme_pci_setup_prps()
667 iod->npages = 0; in nvme_pci_setup_prps()
670 iod->npages = 1; in nvme_pci_setup_prps()
675 iod->first_dma = dma_addr; in nvme_pci_setup_prps()
676 iod->npages = -1; in nvme_pci_setup_prps()
680 iod->first_dma = prp_dma; in nvme_pci_setup_prps()
688 list[iod->npages++] = prp_list; in nvme_pci_setup_prps()
708 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); in nvme_pci_setup_prps()
709 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); in nvme_pci_setup_prps()
715 WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), in nvme_pci_setup_prps()
717 blk_rq_payload_bytes(req), iod->nents); in nvme_pci_setup_prps()
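
nvme_pci_setup_prps() is where most of the iod state above gets filled in: prp1 always covers the start of the buffer, anything left either fits in prp2 directly (first_dma set, no list pages) or spills into chained list pages (first_dma = first list page, npages counts them), and a full list page gives up its last slot as a link to the next one. A small user-space model of that sizing decision; the 4 KiB controller page size and the chaining rule are assumptions consistent with the references above, not shown verbatim in this listing:

#include <stdint.h>

#define CTRL_PAGE_SIZE     4096u
#define PRPS_PER_LIST_PAGE (CTRL_PAGE_SIZE / sizeof(uint64_t))   /* 512 */

/* How many PRP list pages a transfer of 'length' bytes starting at 'dma_addr'
 * needs.  0 means prp1 (plus possibly prp2) is enough. */
static unsigned int prp_list_pages_needed(uint64_t dma_addr, unsigned int length)
{
	unsigned int offset = dma_addr & (CTRL_PAGE_SIZE - 1);
	unsigned int remaining, nprps;

	if (length <= CTRL_PAGE_SIZE - offset)
		return 0;                       /* prp1 alone; prp2 stays 0 */
	remaining = length - (CTRL_PAGE_SIZE - offset);
	if (remaining <= CTRL_PAGE_SIZE)
		return 0;                       /* prp2 points at the data directly */

	nprps = (remaining + CTRL_PAGE_SIZE - 1) / CTRL_PAGE_SIZE;
	if (nprps <= PRPS_PER_LIST_PAGE)
		return 1;                       /* one list page, no chain link needed */
	/* Every additional page costs one slot of the previous page for the link. */
	return 1 + (nprps - 2) / (PRPS_PER_LIST_PAGE - 1);
}

The model does not distinguish the two allocation pools the driver chooses between; the listing's npages = 0 (line 667) versus npages = 1 (line 670) reflects that small transfers take a single small-pool page rather than a full one.
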
745 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_setup_sgls() local
748 struct scatterlist *sg = iod->sg; in nvme_pci_setup_sgls()
762 iod->npages = 0; in nvme_pci_setup_sgls()
765 iod->npages = 1; in nvme_pci_setup_sgls()
770 iod->npages = -1; in nvme_pci_setup_sgls()
775 iod->first_dma = sgl_dma; in nvme_pci_setup_sgls()
789 nvme_pci_iod_list(req)[iod->npages++] = sg_list; in nvme_pci_setup_sgls()
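
nvme_pci_setup_sgls() mirrors the PRP path but emits one 16-byte SGL descriptor per mapped scatterlist entry, allocating descriptor pages from the same pools and recording them in the iod_list (line 789). A hedged model of the descriptor layout it fills; the field widths follow the NVMe SGL descriptor format, and the struct name is illustrative:

#include <stdint.h>

/* 16-byte NVMe SGL descriptor: address, length, and a type byte
 * (data block vs. segment descriptor) in the last position. */
struct sgl_desc_model {
	uint64_t addr;       /* DMA address, little-endian on the wire */
	uint32_t length;
	uint8_t  rsvd[3];
	uint8_t  type;       /* upper nibble selects the descriptor type */
};
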
808 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_setup_prp_simple() local
812 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_prp_simple()
813 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_prp_simple()
815 iod->dma_len = bv->bv_len; in nvme_setup_prp_simple()
817 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); in nvme_setup_prp_simple()
819 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); in nvme_setup_prp_simple()
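
nvme_setup_prp_simple() is the no-scatterlist fast path for a single small bio_vec: the bvec is mapped directly, prp1 takes the mapped address, and prp2 is only needed when the buffer crosses one controller-page boundary (line 819). A compact model of that split:

#include <stdint.h>

#define CTRL_PAGE_SIZE 4096u

/* prp2 covers whatever spills past the first controller page; 0 otherwise. */
static void prp_simple_model(uint64_t dma_addr, unsigned int len,
			     uint64_t *prp1, uint64_t *prp2)
{
	unsigned int first_prp_len = CTRL_PAGE_SIZE - (dma_addr & (CTRL_PAGE_SIZE - 1));

	*prp1 = dma_addr;
	*prp2 = (len > first_prp_len) ? dma_addr + first_prp_len : 0;
}
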
829 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_setup_sgl_simple() local
831 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_sgl_simple()
832 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_sgl_simple()
834 iod->dma_len = bv->bv_len; in nvme_setup_sgl_simple()
837 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); in nvme_setup_sgl_simple()
838 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); in nvme_setup_sgl_simple()
846 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_map_data() local
858 if (iod->nvmeq->qid && sgl_threshold && in nvme_map_data()
865 iod->dma_len = 0; in nvme_map_data()
866 iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); in nvme_map_data()
867 if (!iod->sg) in nvme_map_data()
869 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); in nvme_map_data()
870 iod->nents = blk_rq_map_sg(req->q, req, iod->sg); in nvme_map_data()
871 if (!iod->nents) in nvme_map_data()
874 if (is_pci_p2pdma_page(sg_page(iod->sg))) in nvme_map_data()
875 nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg, in nvme_map_data()
876 iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); in nvme_map_data()
878 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, in nvme_map_data()
883 iod->use_sgl = nvme_pci_use_sgls(dev, req); in nvme_map_data()
884 if (iod->use_sgl) in nvme_map_data()
895 mempool_free(iod->sg, dev->iod_mempool); in nvme_map_data()
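
The nvme_map_data() references above tie the pieces together: a single-segment request that is small enough skips the scatterlist entirely (simple PRP, or simple SGL on an I/O queue when SGLs are enabled), and everything else allocates iod->sg from the mempool, builds and DMA-maps the scatterlist, then picks PRP or SGL list setup via nvme_pci_use_sgls() (line 883). A hedged sketch of just that path selection; the two-page limit for the simple PRP case and the SGL-support check are assumptions about lines not shown here:

enum map_path_model { MAP_PRP_SIMPLE, MAP_SGL_SIMPLE, MAP_FULL_SG };

static enum map_path_model pick_map_path(unsigned int nr_segments,
					 unsigned int bv_offset, unsigned int bv_len,
					 unsigned int qid, unsigned int sgl_threshold,
					 unsigned int ctrl_page_size)
{
	if (nr_segments == 1) {
		if (bv_offset + bv_len <= 2 * ctrl_page_size)
			return MAP_PRP_SIMPLE;              /* nvme_setup_prp_simple() */
		if (qid && sgl_threshold)
			return MAP_SGL_SIMPLE;              /* nvme_setup_sgl_simple() */
	}
	return MAP_FULL_SG;                                 /* scatterlist + PRP/SGL lists */
}
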
902 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_map_metadata() local
904 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), in nvme_map_metadata()
906 if (dma_mapping_error(dev->dev, iod->meta_dma)) in nvme_map_metadata()
908 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); in nvme_map_metadata()
922 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_queue_rq() local
923 struct nvme_command *cmnd = &iod->cmd; in nvme_queue_rq()
926 iod->aborted = 0; in nvme_queue_rq()
927 iod->npages = -1; in nvme_queue_rq()
928 iod->nents = 0; in nvme_queue_rq()
965 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_complete_rq() local
966 struct nvme_dev *dev = iod->nvmeq->dev; in nvme_pci_complete_rq()
969 dma_unmap_page(dev->dev, iod->meta_dma, in nvme_pci_complete_rq()
1208 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in abort_endio() local
1209 struct nvme_queue *nvmeq = iod->nvmeq; in abort_endio()
1262 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_timeout() local
1263 struct nvme_queue *nvmeq = iod->nvmeq; in nvme_timeout()
1329 if (!nvmeq->qid || iod->aborted) { in nvme_timeout()
1344 iod->aborted = 1; in nvme_timeout()