Lines matching refs: vq
180 struct vhost_virtqueue vq; member
261 struct vhost_virtqueue *vq; in vhost_scsi_init_inflight() local
265 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
267 mutex_lock(&vq->mutex); in vhost_scsi_init_inflight()
280 mutex_unlock(&vq->mutex); in vhost_scsi_init_inflight()
285 vhost_scsi_get_inflight(struct vhost_virtqueue *vq) in vhost_scsi_get_inflight() argument
290 svq = container_of(vq, struct vhost_scsi_virtqueue, vq); in vhost_scsi_get_inflight()
346 struct vhost_scsi_virtqueue, vq); in vhost_scsi_release_cmd_res()
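
The vhost_scsi_init_inflight(), vhost_scsi_get_inflight(), and vhost_scsi_release_cmd_res() entries above all rely on the same idiom: per-queue driver state is a wrapper struct that embeds struct vhost_virtqueue as its vq member (line 180), and container_of() walks back from the embedded member to the wrapper. A minimal sketch of that idiom, assuming only the declarations in drivers/vhost/vhost.h; the extra fields and the to_scsi_vq() helper are illustrative, not the driver's actual layout:

    #include "vhost.h"                      /* struct vhost_virtqueue */

    /* Per-queue driver state; the vhost core only ever sees &svq->vq. */
    struct vhost_scsi_virtqueue {
            struct vhost_virtqueue vq;
            /* ... per-queue state (e.g. inflight tracking) follows ... */
    };

    /* Hypothetical helper: recover the wrapper from the embedded member. */
    static struct vhost_scsi_virtqueue *to_scsi_vq(struct vhost_virtqueue *vq)
    {
            return container_of(vq, struct vhost_scsi_virtqueue, vq);
    }
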
449 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt() local
459 vq_err(vq, "Failed to allocate vhost_scsi_evt\n"); in vhost_scsi_allocate_evt()
464 evt->event.event = cpu_to_vhost32(vq, event); in vhost_scsi_allocate_evt()
465 evt->event.reason = cpu_to_vhost32(vq, reason); in vhost_scsi_allocate_evt()
479 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work() local
485 if (!vhost_vq_get_backend(vq)) { in vhost_scsi_do_evt_work()
491 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_do_evt_work()
492 head = vhost_get_vq_desc(vq, vq->iov, in vhost_scsi_do_evt_work()
493 ARRAY_SIZE(vq->iov), &out, &in, in vhost_scsi_do_evt_work()
499 if (head == vq->num) { in vhost_scsi_do_evt_work()
500 if (vhost_enable_notify(&vs->dev, vq)) in vhost_scsi_do_evt_work()
506 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) { in vhost_scsi_do_evt_work()
507 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n", in vhost_scsi_do_evt_work()
508 vq->iov[out].iov_len); in vhost_scsi_do_evt_work()
514 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED); in vhost_scsi_do_evt_work()
518 eventp = vq->iov[out].iov_base; in vhost_scsi_do_evt_work()
521 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_do_evt_work()
523 vq_err(vq, "Faulted on vhost_scsi_send_event\n"); in vhost_scsi_do_evt_work()
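
The vhost_scsi_do_evt_work() references show the standard vhost sequence for pushing one buffer to the guest: disable notifications, fetch a descriptor, treat head == vq->num as an empty ring, validate the guest buffer's size, copy the payload, and complete with vhost_add_used_and_signal(). A condensed sketch of that sequence, assuming the helpers declared in drivers/vhost/vhost.h; push_one_buffer() and its payload/len parameters are illustrative, and the caller is assumed to hold vq->mutex with a backend attached:

    static void push_one_buffer(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
                                const void *payload, size_t len)
    {
            unsigned int out, in;
            int head;

            vhost_disable_notify(&vs->dev, vq);
            head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                     &out, &in, NULL, NULL);
            if (head < 0)
                    return;                 /* descriptor translation error */
            if (head == vq->num) {          /* ring empty */
                    if (vhost_enable_notify(&vs->dev, vq))
                            /* raced: buffers appeared meanwhile, caller may retry */
                            vhost_disable_notify(&vs->dev, vq);
                    return;
            }
            if (vq->iov[out].iov_len != len) {
                    vq_err(vq, "Expecting %zu byte buffer, got %zu\n",
                           len, vq->iov[out].iov_len);
                    return;
            }
            if (copy_to_user(vq->iov[out].iov_base, payload, len))
                    vq_err(vq, "Faulted writing completion payload\n");
            else
                    vhost_add_used_and_signal(&vs->dev, vq, head, 0);
    }
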
530 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work() local
534 mutex_lock(&vq->mutex); in vhost_scsi_evt_work()
540 mutex_unlock(&vq->mutex); in vhost_scsi_evt_work()
558 int ret, vq; in vhost_scsi_complete_cmd_work() local
583 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); in vhost_scsi_complete_cmd_work()
584 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
585 __set_bit(vq, signal); in vhost_scsi_complete_cmd_work()
592 vq = -1; in vhost_scsi_complete_cmd_work()
593 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1)) in vhost_scsi_complete_cmd_work()
595 vhost_signal(&vs->dev, &vs->vqs[vq].vq); in vhost_scsi_complete_cmd_work()
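
vhost_scsi_complete_cmd_work() batches guest interrupts: every completed command sets a bit for the queue it arrived on, and a single pass over the bitmap then signals each marked queue once. A sketch of that bookkeeping, reconstructed from the lines above (cmd, vs, and the VHOST_SCSI_MAX_VQ bound come from the listing; the surrounding completion loop is elided):

    DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
    struct vhost_scsi_virtqueue *q;
    int vq;

    bitmap_zero(signal, VHOST_SCSI_MAX_VQ);

    /* Per completed command: note which virtqueue needs a signal. */
    q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
    vq = q - vs->vqs;                       /* index into the vqs[] array */
    __set_bit(vq, signal);

    /* After draining the completion list: kick each marked queue once. */
    vq = -1;
    while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
            < VHOST_SCSI_MAX_VQ)
            vhost_signal(&vs->dev, &vs->vqs[vq].vq);
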
599 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, in vhost_scsi_get_cmd() argument
603 struct vhost_scsi_virtqueue *svq = container_of(vq, in vhost_scsi_get_cmd()
604 struct vhost_scsi_virtqueue, vq); in vhost_scsi_get_cmd()
638 cmd->inflight = vhost_scsi_get_inflight(vq); in vhost_scsi_get_cmd()
824 struct vhost_virtqueue *vq, in vhost_scsi_send_bad_target() argument
833 resp = vq->iov[out].iov_base; in vhost_scsi_send_bad_target()
836 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_send_bad_target()
842 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq, in vhost_scsi_get_desc() argument
847 vc->head = vhost_get_vq_desc(vq, vq->iov, in vhost_scsi_get_desc()
848 ARRAY_SIZE(vq->iov), &vc->out, &vc->in, in vhost_scsi_get_desc()
859 if (vc->head == vq->num) { in vhost_scsi_get_desc()
860 if (unlikely(vhost_enable_notify(&vs->dev, vq))) { in vhost_scsi_get_desc()
861 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_get_desc()
871 vc->out_size = iov_length(vq->iov, vc->out); in vhost_scsi_get_desc()
872 vc->in_size = iov_length(&vq->iov[vc->out], vc->in); in vhost_scsi_get_desc()
884 iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size); in vhost_scsi_get_desc()
892 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc) in vhost_scsi_chk_size() argument
895 vq_err(vq, in vhost_scsi_chk_size()
900 vq_err(vq, in vhost_scsi_chk_size()
910 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, in vhost_scsi_get_req() argument
917 vq_err(vq, "Faulted on copy_from_iter_full\n"); in vhost_scsi_get_req()
920 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp); in vhost_scsi_get_req()
924 vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */ in vhost_scsi_get_req()
928 vq_err(vq, "Target 0x%x does not exist\n", *vc->target); in vhost_scsi_get_req()
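
vhost_scsi_get_desc(), vhost_scsi_chk_size(), and vhost_scsi_get_req() split request parsing into fetching a descriptor chain, measuring its driver-readable (out) and device-writable (in) halves with iov_length(), checking both against the expected request and response sizes, and only then copying the fixed-size request header out of the out iovecs. A hedged sketch of that bookkeeping; check_and_copy_req() and the req/req_size/rsp_size parameters are illustrative, while the vc fields match the lines above:

    static int check_and_copy_req(struct vhost_virtqueue *vq,
                                  struct vhost_scsi_ctx *vc,
                                  void *req, size_t req_size, size_t rsp_size)
    {
            /* out iovecs: guest-written data (request header, WRITE payload).
             * in  iovecs: buffers the host must fill (response, READ payload). */
            vc->out_size = iov_length(vq->iov, vc->out);
            vc->in_size  = iov_length(&vq->iov[vc->out], vc->in);

            if (vc->in_size < rsp_size || vc->out_size < req_size) {
                    vq_err(vq, "descriptor too small: out %zu/%zu in %zu/%zu\n",
                           vc->out_size, req_size, vc->in_size, rsp_size);
                    return -EINVAL;
            }

            /* Read the fixed-size request header out of the out iovecs. */
            iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
            if (!copy_from_iter_full(req, req_size, &vc->out_iter)) {
                    vq_err(vq, "Faulted on copy_from_iter_full\n");
                    return -EFAULT;
            }
            return 0;
    }
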
945 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_handle_vq() argument
958 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); in vhost_scsi_handle_vq()
961 mutex_lock(&vq->mutex); in vhost_scsi_handle_vq()
966 vs_tpg = vhost_vq_get_backend(vq); in vhost_scsi_handle_vq()
973 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
976 ret = vhost_scsi_get_desc(vs, vq, &vc); in vhost_scsi_handle_vq()
1001 ret = vhost_scsi_chk_size(vq, &vc); in vhost_scsi_handle_vq()
1005 ret = vhost_scsi_get_req(vq, &vc, &tpg); in vhost_scsi_handle_vq()
1036 iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in, in vhost_scsi_handle_vq()
1052 vq_err(vq, "Received non zero pi_bytesout," in vhost_scsi_handle_vq()
1056 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); in vhost_scsi_handle_vq()
1059 vq_err(vq, "Received non zero pi_bytesin," in vhost_scsi_handle_vq()
1063 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); in vhost_scsi_handle_vq()
1079 tag = vhost64_to_cpu(vq, v_req_pi.tag); in vhost_scsi_handle_vq()
1084 tag = vhost64_to_cpu(vq, v_req.tag); in vhost_scsi_handle_vq()
1097 vq_err(vq, "Received SCSI CDB with command_size: %d that" in vhost_scsi_handle_vq()
1102 cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr, in vhost_scsi_handle_vq()
1106 vq_err(vq, "vhost_scsi_get_cmd failed %ld\n", in vhost_scsi_handle_vq()
1111 cmd->tvc_vq = vq; in vhost_scsi_handle_vq()
1112 cmd->tvc_resp_iov = vq->iov[vc.out]; in vhost_scsi_handle_vq()
1124 vq_err(vq, "Failed to map iov to sgl\n"); in vhost_scsi_handle_vq()
1154 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); in vhost_scsi_handle_vq()
1155 } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); in vhost_scsi_handle_vq()
1157 mutex_unlock(&vq->mutex); in vhost_scsi_handle_vq()
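
vhost_scsi_handle_vq() wraps the per-request work in the usual vhost handler skeleton: take the queue mutex, bail out if no backend has been attached yet, disable guest notifications, then loop on descriptors until the ring drains or vhost_exceeds_weight() asks the handler to yield and be requeued. A condensed sketch of that skeleton, with the per-request parsing and submission elided (handle_vq_skeleton() is an illustrative name, and error handling is simplified to a single break):

    static void handle_vq_skeleton(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
    {
            struct vhost_scsi_ctx vc;
            int ret, c = 0;

            mutex_lock(&vq->mutex);
            /* Bail out until VHOST_SCSI_SET_ENDPOINT attaches a backend. */
            if (!vhost_vq_get_backend(vq))
                    goto out;

            memset(&vc, 0, sizeof(vc));
            vhost_disable_notify(&vs->dev, vq);

            do {
                    ret = vhost_scsi_get_desc(vs, vq, &vc);
                    if (ret)
                            break;          /* ring empty, or an error to report */

                    /* ... validate sizes, read the request, build and queue
                     *     the command; see the lines referenced above ... */

            } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
    out:
            mutex_unlock(&vq->mutex);
    }
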
1161 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq, in vhost_scsi_send_tmf_resp() argument
1177 vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0); in vhost_scsi_send_tmf_resp()
1193 vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs, in vhost_scsi_tmf_resp_work()
1200 struct vhost_virtqueue *vq, in vhost_scsi_handle_tmf() argument
1204 struct vhost_scsi_virtqueue *svq = container_of(vq, in vhost_scsi_handle_tmf()
1205 struct vhost_scsi_virtqueue, vq); in vhost_scsi_handle_tmf()
1208 if (vhost32_to_cpu(vq, vtmf->subtype) != in vhost_scsi_handle_tmf()
1232 tmf->resp_iov = vq->iov[vc->out]; in vhost_scsi_handle_tmf()
1235 tmf->inflight = vhost_scsi_get_inflight(vq); in vhost_scsi_handle_tmf()
1248 vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out], in vhost_scsi_handle_tmf()
1254 struct vhost_virtqueue *vq, in vhost_scsi_send_an_resp() argument
1265 iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); in vhost_scsi_send_an_resp()
1269 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); in vhost_scsi_send_an_resp()
1275 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_ctl_handle_vq() argument
1287 mutex_lock(&vq->mutex); in vhost_scsi_ctl_handle_vq()
1292 if (!vhost_vq_get_backend(vq)) in vhost_scsi_ctl_handle_vq()
1297 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_ctl_handle_vq()
1300 ret = vhost_scsi_get_desc(vs, vq, &vc); in vhost_scsi_ctl_handle_vq()
1313 vq_err(vq, "Faulted on copy_from_iter tmf type\n"); in vhost_scsi_ctl_handle_vq()
1323 switch (vhost32_to_cpu(vq, v_req.type)) { in vhost_scsi_ctl_handle_vq()
1340 vq_err(vq, "Unknown control request %d", v_req.type); in vhost_scsi_ctl_handle_vq()
1349 ret = vhost_scsi_chk_size(vq, &vc); in vhost_scsi_ctl_handle_vq()
1359 ret = vhost_scsi_get_req(vq, &vc, &tpg); in vhost_scsi_ctl_handle_vq()
1364 vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc); in vhost_scsi_ctl_handle_vq()
1366 vhost_scsi_send_an_resp(vs, vq, &vc); in vhost_scsi_ctl_handle_vq()
1377 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); in vhost_scsi_ctl_handle_vq()
1378 } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); in vhost_scsi_ctl_handle_vq()
1380 mutex_unlock(&vq->mutex); in vhost_scsi_ctl_handle_vq()
1385 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in vhost_scsi_ctl_handle_kick() local
1387 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_ctl_handle_kick()
1390 vhost_scsi_ctl_handle_vq(vs, vq); in vhost_scsi_ctl_handle_kick()
1425 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in vhost_scsi_evt_handle_kick() local
1427 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_evt_handle_kick()
1429 mutex_lock(&vq->mutex); in vhost_scsi_evt_handle_kick()
1430 if (!vhost_vq_get_backend(vq)) in vhost_scsi_evt_handle_kick()
1436 mutex_unlock(&vq->mutex); in vhost_scsi_evt_handle_kick()
1441 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in vhost_scsi_handle_kick() local
1443 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_handle_kick()
1445 vhost_scsi_handle_vq(vs, vq); in vhost_scsi_handle_kick()
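
The three kick handlers all start identically: the vhost worker passes a struct vhost_work that is embedded in the virtqueue's poll structure, so one container_of() recovers the virtqueue and a second recovers the owning vhost_scsi device through vq->dev. A sketch of that unwrapping (scsi_handle_kick_sketch() is an illustrative name; the two container_of() calls are exactly those shown above):

    static void scsi_handle_kick_sketch(struct vhost_work *work)
    {
            /* work lives inside vq->poll, so step back to the virtqueue... */
            struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                      poll.work);
            /* ...and from the virtqueue's device pointer to the driver state. */
            struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

            vhost_scsi_handle_vq(vs, vq);
    }
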
1450 vhost_poll_flush(&vs->vqs[index].vq.poll); in vhost_scsi_flush_vq()
1481 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq) in vhost_scsi_destroy_vq_cmds() argument
1483 struct vhost_scsi_virtqueue *svq = container_of(vq, in vhost_scsi_destroy_vq_cmds()
1484 struct vhost_scsi_virtqueue, vq); in vhost_scsi_destroy_vq_cmds()
1504 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds) in vhost_scsi_setup_vq_cmds() argument
1506 struct vhost_scsi_virtqueue *svq = container_of(vq, in vhost_scsi_setup_vq_cmds()
1507 struct vhost_scsi_virtqueue, vq); in vhost_scsi_setup_vq_cmds()
1554 vhost_scsi_destroy_vq_cmds(vq); in vhost_scsi_setup_vq_cmds()
1573 struct vhost_virtqueue *vq; in vhost_scsi_set_endpoint() local
1583 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_set_endpoint()
1642 vq = &vs->vqs[i].vq; in vhost_scsi_set_endpoint()
1643 if (!vhost_vq_is_setup(vq)) in vhost_scsi_set_endpoint()
1646 ret = vhost_scsi_setup_vq_cmds(vq, vq->num); in vhost_scsi_set_endpoint()
1652 vq = &vs->vqs[i].vq; in vhost_scsi_set_endpoint()
1653 mutex_lock(&vq->mutex); in vhost_scsi_set_endpoint()
1654 vhost_vq_set_backend(vq, vs_tpg); in vhost_scsi_set_endpoint()
1655 vhost_vq_init_access(vq); in vhost_scsi_set_endpoint()
1656 mutex_unlock(&vq->mutex); in vhost_scsi_set_endpoint()
1674 if (!vhost_vq_get_backend(&vs->vqs[i].vq)) in vhost_scsi_set_endpoint()
1675 vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq); in vhost_scsi_set_endpoint()
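
vhost_scsi_set_endpoint() only makes the queues serviceable after their command pools exist: it first allocates per-queue command state, then, for every virtqueue, takes the mutex, publishes the target-group table as the backend, and calls vhost_vq_init_access(); the error path tears down command pools only for queues whose backend was never set. A sketch of those two passes, assuming the vs_tpg table from the listing (loop bounds and the unwind label are simplified for the sketch):

    /* Pass 1: allocate per-queue command state before any queue goes live. */
    for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
            vq = &vs->vqs[i].vq;
            if (!vhost_vq_is_setup(vq))
                    continue;
            if (vhost_scsi_setup_vq_cmds(vq, vq->num))
                    goto destroy_vq_cmds;
    }

    /* Pass 2: publish the target table and open each queue for access. */
    for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
            vq = &vs->vqs[i].vq;
            mutex_lock(&vq->mutex);
            vhost_vq_set_backend(vq, vs_tpg);
            vhost_vq_init_access(vq);
            mutex_unlock(&vq->mutex);
    }
    /* ... success path continues ... */

    destroy_vq_cmds:
            /* Unwind: only queues that never got a backend still own a pool. */
            for (i--; i >= 0; i--)
                    if (!vhost_vq_get_backend(&vs->vqs[i].vq))
                            vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
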
1699 struct vhost_virtqueue *vq; in vhost_scsi_clear_endpoint() local
1708 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_clear_endpoint()
1754 vq = &vs->vqs[i].vq; in vhost_scsi_clear_endpoint()
1755 mutex_lock(&vq->mutex); in vhost_scsi_clear_endpoint()
1756 vhost_vq_set_backend(vq, NULL); in vhost_scsi_clear_endpoint()
1757 mutex_unlock(&vq->mutex); in vhost_scsi_clear_endpoint()
1763 vhost_scsi_destroy_vq_cmds(vq); in vhost_scsi_clear_endpoint()
1788 struct vhost_virtqueue *vq; in vhost_scsi_set_features() local
1802 vq = &vs->vqs[i].vq; in vhost_scsi_set_features()
1803 mutex_lock(&vq->mutex); in vhost_scsi_set_features()
1804 vq->acked_features = features; in vhost_scsi_set_features()
1805 mutex_unlock(&vq->mutex); in vhost_scsi_set_features()
1834 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; in vhost_scsi_open()
1835 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_open()
1836 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; in vhost_scsi_open()
1837 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; in vhost_scsi_open()
1839 vqs[i] = &vs->vqs[i].vq; in vhost_scsi_open()
1840 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; in vhost_scsi_open()
1887 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_ioctl() local
1911 mutex_lock(&vq->mutex); in vhost_scsi_ioctl()
1913 mutex_unlock(&vq->mutex); in vhost_scsi_ioctl()
1916 mutex_lock(&vq->mutex); in vhost_scsi_ioctl()
1918 mutex_unlock(&vq->mutex); in vhost_scsi_ioctl()
1989 struct vhost_virtqueue *vq; in vhost_scsi_do_plug() local
2002 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_plug()
2003 mutex_lock(&vq->mutex); in vhost_scsi_do_plug()
2004 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) in vhost_scsi_do_plug()
2007 mutex_unlock(&vq->mutex); in vhost_scsi_do_plug()
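
vhost_scsi_do_plug() queues a hotplug or hot-remove event only if the guest negotiated VIRTIO_SCSI_F_HOTPLUG, and it makes that check under the event queue's mutex, the same lock vhost_scsi_set_features() holds while updating acked_features. A hedged sketch of that gate; queue_hotplug_event() is a stand-in for the driver's actual event helper, and tpg/lun/plug are assumed to come from the caller:

    u32 reason = plug ? VIRTIO_SCSI_EVT_RESET_RESCAN
                      : VIRTIO_SCSI_EVT_RESET_REMOVED;

    vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
    mutex_lock(&vq->mutex);
    /* acked_features only changes under this mutex, so the feature
     * check cannot race with a VHOST_SET_FEATURES call. */
    if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
            queue_hotplug_event(vs, tpg, lun,
                                VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
    mutex_unlock(&vq->mutex);
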