Lines matching refs:vq — cross-reference listing from drivers/virtio/virtio_ring.c. Each entry is the source line number, the matching line, and an annotation ("argument", "member", "local", or the enclosing "in function()").

20 		dev_err(&(_vq)->vq.vdev->dev,			\
21 "%s:"fmt, (_vq)->vq.name, ##args); \
29 (_vq)->vq.name, (_vq)->in_use); \
57 dev_err(&_vq->vq.vdev->dev, \
58 "%s:"fmt, (_vq)->vq.name, ##args); \
61 #define START_USE(vq) argument
62 #define END_USE(vq) argument
63 #define LAST_ADD_TIME_UPDATE(vq) argument
64 #define LAST_ADD_TIME_CHECK(vq) argument
65 #define LAST_ADD_TIME_INVALID(vq) argument
88 struct virtqueue vq; member
181 bool (*notify)(struct virtqueue *vq);
201 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
206 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_use_indirect() local
212 return (vq->indirect && total_sg > 1 && vq->vq.num_free); in virtqueue_use_indirect()
319 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq) in vring_dma_dev() argument
321 return vq->vq.vdev->dev.parent; in vring_dma_dev()
325 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq, in vring_map_one_sg() argument
329 if (!vq->use_dma_api) in vring_map_one_sg()
337 return dma_map_page(vring_dma_dev(vq), in vring_map_one_sg()
342 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, in vring_map_single() argument
346 if (!vq->use_dma_api) in vring_map_single()
349 return dma_map_single(vring_dma_dev(vq), in vring_map_single()
353 static int vring_mapping_error(const struct vring_virtqueue *vq, in vring_mapping_error() argument
356 if (!vq->use_dma_api) in vring_mapping_error()
359 return dma_mapping_error(vring_dma_dev(vq), addr); in vring_mapping_error()
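
The three helpers above gate every DMA operation on vq->use_dma_api: when the transport can address guest memory directly, "mapping" degenerates to a physical address and the unmap/error checks become no-ops. A hedged sketch of that pattern follows; the !use_dma_api branch is reconstructed from context, so treat the body as an assumption rather than a quote of the elided source:

static dma_addr_t vring_map_one_sg_sketch(const struct vring_virtqueue *vq,
					  struct scatterlist *sg,
					  enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		/* device sees guest-physical addresses directly */
		return (dma_addr_t)sg_phys(sg);

	/* otherwise go through the DMA API of the transport's parent device */
	return dma_map_page(vring_dma_dev(vq), sg_page(sg),
			    sg->offset, sg->length, direction);
}
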
367 static void vring_unmap_one_split(const struct vring_virtqueue *vq, in vring_unmap_one_split() argument
372 if (!vq->use_dma_api) in vring_unmap_one_split()
375 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags); in vring_unmap_one_split()
378 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_one_split()
379 virtio64_to_cpu(vq->vq.vdev, desc->addr), in vring_unmap_one_split()
380 virtio32_to_cpu(vq->vq.vdev, desc->len), in vring_unmap_one_split()
384 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_one_split()
385 virtio64_to_cpu(vq->vq.vdev, desc->addr), in vring_unmap_one_split()
386 virtio32_to_cpu(vq->vq.vdev, desc->len), in vring_unmap_one_split()
424 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_split() local
431 START_USE(vq); in virtqueue_add_split()
434 BUG_ON(ctx && vq->indirect); in virtqueue_add_split()
436 if (unlikely(vq->broken)) { in virtqueue_add_split()
437 END_USE(vq); in virtqueue_add_split()
441 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_split()
445 head = vq->free_head; in virtqueue_add_split()
451 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); in virtqueue_add_split()
462 desc = vq->split.vring.desc; in virtqueue_add_split()
467 if (vq->vq.num_free < descs_used) { in virtqueue_add_split()
469 descs_used, vq->vq.num_free); in virtqueue_add_split()
474 vq->notify(&vq->vq); in virtqueue_add_split()
477 END_USE(vq); in virtqueue_add_split()
483 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); in virtqueue_add_split()
484 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
496 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE); in virtqueue_add_split()
497 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
513 vq, desc, total_sg * sizeof(struct vring_desc), in virtqueue_add_split()
515 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
518 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
520 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, in virtqueue_add_split()
523 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev, in virtqueue_add_split()
528 vq->vq.num_free -= descs_used; in virtqueue_add_split()
532 vq->free_head = virtio16_to_cpu(_vq->vdev, in virtqueue_add_split()
533 vq->split.vring.desc[head].next); in virtqueue_add_split()
535 vq->free_head = i; in virtqueue_add_split()
538 vq->split.desc_state[head].data = data; in virtqueue_add_split()
540 vq->split.desc_state[head].indir_desc = desc; in virtqueue_add_split()
542 vq->split.desc_state[head].indir_desc = ctx; in virtqueue_add_split()
546 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); in virtqueue_add_split()
547 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
551 virtio_wmb(vq->weak_barriers); in virtqueue_add_split()
552 vq->split.avail_idx_shadow++; in virtqueue_add_split()
553 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
554 vq->split.avail_idx_shadow); in virtqueue_add_split()
555 vq->num_added++; in virtqueue_add_split()
557 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_split()
558 END_USE(vq); in virtqueue_add_split()
562 if (unlikely(vq->num_added == (1 << 16) - 1)) in virtqueue_add_split()
578 vring_unmap_one_split(vq, &desc[i]); in virtqueue_add_split()
585 END_USE(vq); in virtqueue_add_split()
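
virtqueue_add_split() above consumes descriptors from a free list threaded through desc[].next, then publishes the chain head in the avail ring before bumping the avail index. A minimal, runnable userspace model of that bookkeeping (no DMA, no barriers; NUM and the 3-element chain are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define NUM 8				/* ring size, must be a power of two */

struct demo_desc { uint16_t next; };

int main(void)
{
	struct demo_desc desc[NUM];
	uint16_t avail_ring[NUM], avail_idx = 0, free_head = 0, num_free = NUM;
	uint16_t head, cur, used = 3;	/* as for total_sg == 3, no indirect */
	int i;

	for (i = 0; i < NUM; i++)	/* initial free list: 0 -> 1 -> ... */
		desc[i].next = i + 1;

	head = free_head;		/* walk the chain we are consuming */
	cur = head;
	for (i = 1; i < used; i++)
		cur = desc[cur].next;
	free_head = desc[cur].next;	/* free list resumes past the chain */
	num_free -= used;

	avail_ring[avail_idx & (NUM - 1)] = head;	/* publish head... */
	avail_idx++;					/* ...then bump idx */

	printf("head=%u free_head=%u num_free=%u avail_idx=%u\n",
	       head, free_head, num_free, avail_idx);
	return 0;
}
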
591 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_split() local
595 START_USE(vq); in virtqueue_kick_prepare_split()
598 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_split()
600 old = vq->split.avail_idx_shadow - vq->num_added; in virtqueue_kick_prepare_split()
601 new = vq->split.avail_idx_shadow; in virtqueue_kick_prepare_split()
602 vq->num_added = 0; in virtqueue_kick_prepare_split()
604 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_split()
605 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_split()
607 if (vq->event) { in virtqueue_kick_prepare_split()
609 vring_avail_event(&vq->split.vring)), in virtqueue_kick_prepare_split()
612 needs_kick = !(vq->split.vring.used->flags & in virtqueue_kick_prepare_split()
616 END_USE(vq); in virtqueue_kick_prepare_split()
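
When VIRTIO_RING_F_EVENT_IDX is negotiated (the vq->event branch above), virtqueue_kick_prepare_split() kicks only if the device's avail_event fell inside the window of indices just published. The test is the vring_need_event() helper from the virtio_ring UAPI header, reproduced here with a runnable check of the wraparound-safe comparison:

#include <stdint.h>
#include <stdio.h>

/* as defined in include/uapi/linux/virtio_ring.h */
static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	/* true iff event_idx lies in [old, new_idx), modulo 2^16 */
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* device asked for a kick at idx 5; avail moved from 4 to 7 */
	printf("%d\n", vring_need_event(5, 7, 4));	/* 1: kick */
	/* device already set its event idx past the window: suppress */
	printf("%d\n", vring_need_event(9, 7, 4));	/* 0: no kick */
	return 0;
}
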
620 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, in detach_buf_split() argument
624 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); in detach_buf_split()
627 vq->split.desc_state[head].data = NULL; in detach_buf_split()
632 while (vq->split.vring.desc[i].flags & nextflag) { in detach_buf_split()
633 vring_unmap_one_split(vq, &vq->split.vring.desc[i]); in detach_buf_split()
634 i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next); in detach_buf_split()
635 vq->vq.num_free++; in detach_buf_split()
638 vring_unmap_one_split(vq, &vq->split.vring.desc[i]); in detach_buf_split()
639 vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, in detach_buf_split()
640 vq->free_head); in detach_buf_split()
641 vq->free_head = head; in detach_buf_split()
644 vq->vq.num_free++; in detach_buf_split()
646 if (vq->indirect) { in detach_buf_split()
648 vq->split.desc_state[head].indir_desc; in detach_buf_split()
655 len = virtio32_to_cpu(vq->vq.vdev, in detach_buf_split()
656 vq->split.vring.desc[head].len); in detach_buf_split()
658 BUG_ON(!(vq->split.vring.desc[head].flags & in detach_buf_split()
659 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))); in detach_buf_split()
663 vring_unmap_one_split(vq, &indir_desc[j]); in detach_buf_split()
666 vq->split.desc_state[head].indir_desc = NULL; in detach_buf_split()
668 *ctx = vq->split.desc_state[head].indir_desc; in detach_buf_split()
672 static inline bool more_used_split(const struct vring_virtqueue *vq) in more_used_split() argument
674 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, in more_used_split()
675 vq->split.vring.used->idx); in more_used_split()
682 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_split() local
687 START_USE(vq); in virtqueue_get_buf_ctx_split()
689 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_split()
690 END_USE(vq); in virtqueue_get_buf_ctx_split()
694 if (!more_used_split(vq)) { in virtqueue_get_buf_ctx_split()
696 END_USE(vq); in virtqueue_get_buf_ctx_split()
701 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_split()
703 last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); in virtqueue_get_buf_ctx_split()
705 vq->split.vring.used->ring[last_used].id); in virtqueue_get_buf_ctx_split()
707 vq->split.vring.used->ring[last_used].len); in virtqueue_get_buf_ctx_split()
709 if (unlikely(i >= vq->split.vring.num)) { in virtqueue_get_buf_ctx_split()
710 BAD_RING(vq, "id %u out of range\n", i); in virtqueue_get_buf_ctx_split()
713 if (unlikely(!vq->split.desc_state[i].data)) { in virtqueue_get_buf_ctx_split()
714 BAD_RING(vq, "id %u is not a head!\n", i); in virtqueue_get_buf_ctx_split()
719 ret = vq->split.desc_state[i].data; in virtqueue_get_buf_ctx_split()
720 detach_buf_split(vq, i, ctx); in virtqueue_get_buf_ctx_split()
721 vq->last_used_idx++; in virtqueue_get_buf_ctx_split()
725 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) in virtqueue_get_buf_ctx_split()
726 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_split()
727 &vring_used_event(&vq->split.vring), in virtqueue_get_buf_ctx_split()
728 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
730 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_split()
732 END_USE(vq); in virtqueue_get_buf_ctx_split()
738 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_split() local
740 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_disable_cb_split()
741 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_cb_split()
742 if (!vq->event) in virtqueue_disable_cb_split()
743 vq->split.vring.avail->flags = in virtqueue_disable_cb_split()
745 vq->split.avail_flags_shadow); in virtqueue_disable_cb_split()
751 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_split() local
754 START_USE(vq); in virtqueue_enable_cb_prepare_split()
761 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_prepare_split()
762 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_prepare_split()
763 if (!vq->event) in virtqueue_enable_cb_prepare_split()
764 vq->split.vring.avail->flags = in virtqueue_enable_cb_prepare_split()
766 vq->split.avail_flags_shadow); in virtqueue_enable_cb_prepare_split()
768 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
769 last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare_split()
770 END_USE(vq); in virtqueue_enable_cb_prepare_split()
776 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_split() local
779 vq->split.vring.used->idx); in virtqueue_poll_split()
784 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_split() local
787 START_USE(vq); in virtqueue_enable_cb_delayed_split()
794 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_delayed_split()
795 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_delayed_split()
796 if (!vq->event) in virtqueue_enable_cb_delayed_split()
797 vq->split.vring.avail->flags = in virtqueue_enable_cb_delayed_split()
799 vq->split.avail_flags_shadow); in virtqueue_enable_cb_delayed_split()
802 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed_split()
804 virtio_store_mb(vq->weak_barriers, in virtqueue_enable_cb_delayed_split()
805 &vring_used_event(&vq->split.vring), in virtqueue_enable_cb_delayed_split()
806 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
808 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
809 - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed_split()
810 END_USE(vq); in virtqueue_enable_cb_delayed_split()
814 END_USE(vq); in virtqueue_enable_cb_delayed_split()
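
The bufs computation in virtqueue_enable_cb_delayed_split() asks the device to interrupt only after roughly 3/4 of the in-flight buffers have been used. All of it is modulo-2^16 index math, so the count stays correct across rollover; a small runnable check (the example indices are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t avail_idx_shadow = 10, last_used_idx = 65530;
	/* cast back to u16 so the subtraction wraps the same way */
	uint16_t in_flight = (uint16_t)(avail_idx_shadow - last_used_idx);
	uint16_t bufs = in_flight * 3 / 4;

	printf("in_flight=%u, wake after %u more used entries\n",
	       in_flight, bufs);	/* in_flight=16, bufs=12 */
	return 0;
}
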
820 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_split() local
824 START_USE(vq); in virtqueue_detach_unused_buf_split()
826 for (i = 0; i < vq->split.vring.num; i++) { in virtqueue_detach_unused_buf_split()
827 if (!vq->split.desc_state[i].data) in virtqueue_detach_unused_buf_split()
830 buf = vq->split.desc_state[i].data; in virtqueue_detach_unused_buf_split()
831 detach_buf_split(vq, i, NULL); in virtqueue_detach_unused_buf_split()
832 vq->split.avail_idx_shadow--; in virtqueue_detach_unused_buf_split()
833 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
834 vq->split.avail_idx_shadow); in virtqueue_detach_unused_buf_split()
835 END_USE(vq); in virtqueue_detach_unused_buf_split()
839 BUG_ON(vq->vq.num_free != vq->split.vring.num); in virtqueue_detach_unused_buf_split()
841 END_USE(vq); in virtqueue_detach_unused_buf_split()
857 struct virtqueue *vq; in vring_create_virtqueue_split() local
894 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, in vring_create_virtqueue_split()
896 if (!vq) { in vring_create_virtqueue_split()
902 to_vvq(vq)->split.queue_dma_addr = dma_addr; in vring_create_virtqueue_split()
903 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes; in vring_create_virtqueue_split()
904 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_split()
906 return vq; in vring_create_virtqueue_split()
914 static void vring_unmap_state_packed(const struct vring_virtqueue *vq, in vring_unmap_state_packed() argument
919 if (!vq->use_dma_api) in vring_unmap_state_packed()
925 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_state_packed()
930 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_state_packed()
937 static void vring_unmap_desc_packed(const struct vring_virtqueue *vq, in vring_unmap_desc_packed() argument
942 if (!vq->use_dma_api) in vring_unmap_desc_packed()
948 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_desc_packed()
954 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_desc_packed()
979 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, in virtqueue_add_indirect_packed() argument
993 head = vq->packed.next_avail_idx; in virtqueue_add_indirect_packed()
998 if (unlikely(vq->vq.num_free < 1)) { in virtqueue_add_indirect_packed()
1001 END_USE(vq); in virtqueue_add_indirect_packed()
1006 id = vq->free_head; in virtqueue_add_indirect_packed()
1007 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_indirect_packed()
1011 addr = vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_indirect_packed()
1013 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1025 addr = vring_map_single(vq, desc, in virtqueue_add_indirect_packed()
1028 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1031 vq->packed.vring.desc[head].addr = cpu_to_le64(addr); in virtqueue_add_indirect_packed()
1032 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * in virtqueue_add_indirect_packed()
1034 vq->packed.vring.desc[head].id = cpu_to_le16(id); in virtqueue_add_indirect_packed()
1036 if (vq->use_dma_api) { in virtqueue_add_indirect_packed()
1037 vq->packed.desc_extra[id].addr = addr; in virtqueue_add_indirect_packed()
1038 vq->packed.desc_extra[id].len = total_sg * in virtqueue_add_indirect_packed()
1040 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1041 vq->packed.avail_used_flags; in virtqueue_add_indirect_packed()
1049 virtio_wmb(vq->weak_barriers); in virtqueue_add_indirect_packed()
1050 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1051 vq->packed.avail_used_flags); in virtqueue_add_indirect_packed()
1054 vq->vq.num_free -= 1; in virtqueue_add_indirect_packed()
1058 if (n >= vq->packed.vring.num) { in virtqueue_add_indirect_packed()
1060 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_indirect_packed()
1061 vq->packed.avail_used_flags ^= in virtqueue_add_indirect_packed()
1065 vq->packed.next_avail_idx = n; in virtqueue_add_indirect_packed()
1066 vq->free_head = vq->packed.desc_state[id].next; in virtqueue_add_indirect_packed()
1069 vq->packed.desc_state[id].num = 1; in virtqueue_add_indirect_packed()
1070 vq->packed.desc_state[id].data = data; in virtqueue_add_indirect_packed()
1071 vq->packed.desc_state[id].indir_desc = desc; in virtqueue_add_indirect_packed()
1072 vq->packed.desc_state[id].last = id; in virtqueue_add_indirect_packed()
1074 vq->num_added += 1; in virtqueue_add_indirect_packed()
1076 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_indirect_packed()
1077 END_USE(vq); in virtqueue_add_indirect_packed()
1085 vring_unmap_desc_packed(vq, &desc[i]); in virtqueue_add_indirect_packed()
1089 END_USE(vq); in virtqueue_add_indirect_packed()
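
virtqueue_add_indirect_packed() above spends a single main-ring slot on a side table that holds the whole sg chain, which is why num_free only drops by 1. A userspace model of that layout (flag values per the virtio 1.1 spec; buffer addresses are fake):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define VRING_DESC_F_WRITE	2	/* flag values per the virtio spec */
#define VRING_DESC_F_INDIRECT	4

struct pdesc { uint64_t addr; uint32_t len; uint16_t id, flags; };

int main(void)
{
	unsigned int total_sg = 3, i;
	struct pdesc *table = calloc(total_sg, sizeof(*table));
	struct pdesc head = { 0 };

	if (!table)
		return 1;

	for (i = 0; i < total_sg; i++) {
		table[i].addr = 0x1000 * (i + 1);	/* fake buffer addresses */
		table[i].len = 512;
		/* first element device-readable, the rest device-writable */
		table[i].flags = i ? VRING_DESC_F_WRITE : 0;
	}

	head.addr = (uint64_t)(uintptr_t)table;	/* in-kernel: a DMA address */
	head.len = total_sg * sizeof(*table);
	head.flags = VRING_DESC_F_INDIRECT;

	printf("one ring slot, %u-byte side table, %u elements\n",
	       (unsigned)head.len, total_sg);
	free(table);
	return 0;
}
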
1102 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_packed() local
1110 START_USE(vq); in virtqueue_add_packed()
1113 BUG_ON(ctx && vq->indirect); in virtqueue_add_packed()
1115 if (unlikely(vq->broken)) { in virtqueue_add_packed()
1116 END_USE(vq); in virtqueue_add_packed()
1120 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_packed()
1125 err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in virtqueue_add_packed()
1128 END_USE(vq); in virtqueue_add_packed()
1135 head = vq->packed.next_avail_idx; in virtqueue_add_packed()
1136 avail_used_flags = vq->packed.avail_used_flags; in virtqueue_add_packed()
1138 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); in virtqueue_add_packed()
1140 desc = vq->packed.vring.desc; in virtqueue_add_packed()
1144 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_packed()
1146 descs_used, vq->vq.num_free); in virtqueue_add_packed()
1147 END_USE(vq); in virtqueue_add_packed()
1151 id = vq->free_head; in virtqueue_add_packed()
1152 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_packed()
1158 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_packed()
1160 if (vring_mapping_error(vq, addr)) in virtqueue_add_packed()
1163 flags = cpu_to_le16(vq->packed.avail_used_flags | in virtqueue_add_packed()
1175 if (unlikely(vq->use_dma_api)) { in virtqueue_add_packed()
1176 vq->packed.desc_extra[curr].addr = addr; in virtqueue_add_packed()
1177 vq->packed.desc_extra[curr].len = sg->length; in virtqueue_add_packed()
1178 vq->packed.desc_extra[curr].flags = in virtqueue_add_packed()
1182 curr = vq->packed.desc_state[curr].next; in virtqueue_add_packed()
1184 if ((unlikely(++i >= vq->packed.vring.num))) { in virtqueue_add_packed()
1186 vq->packed.avail_used_flags ^= in virtqueue_add_packed()
1194 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_packed()
1197 vq->vq.num_free -= descs_used; in virtqueue_add_packed()
1200 vq->packed.next_avail_idx = i; in virtqueue_add_packed()
1201 vq->free_head = curr; in virtqueue_add_packed()
1204 vq->packed.desc_state[id].num = descs_used; in virtqueue_add_packed()
1205 vq->packed.desc_state[id].data = data; in virtqueue_add_packed()
1206 vq->packed.desc_state[id].indir_desc = ctx; in virtqueue_add_packed()
1207 vq->packed.desc_state[id].last = prev; in virtqueue_add_packed()
1214 virtio_wmb(vq->weak_barriers); in virtqueue_add_packed()
1215 vq->packed.vring.desc[head].flags = head_flags; in virtqueue_add_packed()
1216 vq->num_added += descs_used; in virtqueue_add_packed()
1218 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_packed()
1219 END_USE(vq); in virtqueue_add_packed()
1227 vq->packed.avail_used_flags = avail_used_flags; in virtqueue_add_packed()
1232 vring_unmap_desc_packed(vq, &desc[i]); in virtqueue_add_packed()
1234 if (i >= vq->packed.vring.num) in virtqueue_add_packed()
1238 END_USE(vq); in virtqueue_add_packed()
1244 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_packed() local
1255 START_USE(vq); in virtqueue_kick_prepare_packed()
1261 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_packed()
1263 old = vq->packed.next_avail_idx - vq->num_added; in virtqueue_kick_prepare_packed()
1264 new = vq->packed.next_avail_idx; in virtqueue_kick_prepare_packed()
1265 vq->num_added = 0; in virtqueue_kick_prepare_packed()
1267 snapshot.u32 = *(u32 *)vq->packed.vring.device; in virtqueue_kick_prepare_packed()
1270 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_packed()
1271 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_packed()
1282 if (wrap_counter != vq->packed.avail_wrap_counter) in virtqueue_kick_prepare_packed()
1283 event_idx -= vq->packed.vring.num; in virtqueue_kick_prepare_packed()
1287 END_USE(vq); in virtqueue_kick_prepare_packed()
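
For packed rings the device's event suppression word packs a wrap bit into bit 15 of off_wrap; virtqueue_kick_prepare_packed() shifts the event index down by the ring size when the wrap counters differ, so the same vring_need_event() window test applies. A runnable decode of that convention (ring size and off_wrap value are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_EVENT_F_WRAP_CTR	15	/* per the virtio 1.1 spec */

int main(void)
{
	uint16_t ring_num = 256;
	uint16_t avail_wrap_counter = 0;	/* driver side */
	uint16_t off_wrap = (1u << 15) | 3;	/* device: wrap=1, idx=3 */

	uint16_t wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	int16_t event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

	if (wrap_counter != avail_wrap_counter)
		event_idx -= ring_num;	/* compare in the driver's wrap */

	printf("event wrap=%u, normalized event_idx=%d\n",
	       wrap_counter, event_idx);	/* wrap=1, event_idx=-253 */
	return 0;
}
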
1291 static void detach_buf_packed(struct vring_virtqueue *vq, in detach_buf_packed() argument
1298 state = &vq->packed.desc_state[id]; in detach_buf_packed()
1303 vq->packed.desc_state[state->last].next = vq->free_head; in detach_buf_packed()
1304 vq->free_head = id; in detach_buf_packed()
1305 vq->vq.num_free += state->num; in detach_buf_packed()
1307 if (unlikely(vq->use_dma_api)) { in detach_buf_packed()
1310 vring_unmap_state_packed(vq, in detach_buf_packed()
1311 &vq->packed.desc_extra[curr]); in detach_buf_packed()
1312 curr = vq->packed.desc_state[curr].next; in detach_buf_packed()
1316 if (vq->indirect) { in detach_buf_packed()
1324 if (vq->use_dma_api) { in detach_buf_packed()
1325 len = vq->packed.desc_extra[id].len; in detach_buf_packed()
1328 vring_unmap_desc_packed(vq, &desc[i]); in detach_buf_packed()
1337 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, in is_used_desc_packed() argument
1343 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); in is_used_desc_packed()
1350 static inline bool more_used_packed(const struct vring_virtqueue *vq) in more_used_packed() argument
1352 return is_used_desc_packed(vq, vq->last_used_idx, in more_used_packed()
1353 vq->packed.used_wrap_counter); in more_used_packed()
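
is_used_desc_packed() declares a descriptor consumed when its AVAIL and USED flag bits agree with each other and with the driver's used_wrap_counter. The same predicate as a standalone userspace model (bit positions per the virtio 1.1 spec):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_DESC_F_AVAIL	7
#define VRING_PACKED_DESC_F_USED	15

static bool is_used(uint16_t flags, bool used_wrap_counter)
{
	bool avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	bool used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == used && used == used_wrap_counter;
}

int main(void)
{
	/* device wrote both bits set: used during the first wrap */
	printf("%d\n", is_used((1 << 7) | (1 << 15), true));	/* 1 */
	/* only AVAIL set: still owned by the device */
	printf("%d\n", is_used(1 << 7, true));			/* 0 */
	return 0;
}
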
1360 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_packed() local
1364 START_USE(vq); in virtqueue_get_buf_ctx_packed()
1366 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_packed()
1367 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1371 if (!more_used_packed(vq)) { in virtqueue_get_buf_ctx_packed()
1373 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1378 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_packed()
1380 last_used = vq->last_used_idx; in virtqueue_get_buf_ctx_packed()
1381 id = le16_to_cpu(vq->packed.vring.desc[last_used].id); in virtqueue_get_buf_ctx_packed()
1382 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); in virtqueue_get_buf_ctx_packed()
1384 if (unlikely(id >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1385 BAD_RING(vq, "id %u out of range\n", id); in virtqueue_get_buf_ctx_packed()
1388 if (unlikely(!vq->packed.desc_state[id].data)) { in virtqueue_get_buf_ctx_packed()
1389 BAD_RING(vq, "id %u is not a head!\n", id); in virtqueue_get_buf_ctx_packed()
1394 ret = vq->packed.desc_state[id].data; in virtqueue_get_buf_ctx_packed()
1395 detach_buf_packed(vq, id, ctx); in virtqueue_get_buf_ctx_packed()
1397 vq->last_used_idx += vq->packed.desc_state[id].num; in virtqueue_get_buf_ctx_packed()
1398 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1399 vq->last_used_idx -= vq->packed.vring.num; in virtqueue_get_buf_ctx_packed()
1400 vq->packed.used_wrap_counter ^= 1; in virtqueue_get_buf_ctx_packed()
1408 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) in virtqueue_get_buf_ctx_packed()
1409 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_packed()
1410 &vq->packed.vring.driver->off_wrap, in virtqueue_get_buf_ctx_packed()
1411 cpu_to_le16(vq->last_used_idx | in virtqueue_get_buf_ctx_packed()
1412 (vq->packed.used_wrap_counter << in virtqueue_get_buf_ctx_packed()
1415 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_packed()
1417 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1423 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_packed() local
1425 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_disable_cb_packed()
1426 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_disable_cb_packed()
1427 vq->packed.vring.driver->flags = in virtqueue_disable_cb_packed()
1428 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_disable_cb_packed()
1434 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_packed() local
1436 START_USE(vq); in virtqueue_enable_cb_prepare_packed()
1443 if (vq->event) { in virtqueue_enable_cb_prepare_packed()
1444 vq->packed.vring.driver->off_wrap = in virtqueue_enable_cb_prepare_packed()
1445 cpu_to_le16(vq->last_used_idx | in virtqueue_enable_cb_prepare_packed()
1446 (vq->packed.used_wrap_counter << in virtqueue_enable_cb_prepare_packed()
1452 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_prepare_packed()
1455 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_prepare_packed()
1456 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_prepare_packed()
1459 vq->packed.vring.driver->flags = in virtqueue_enable_cb_prepare_packed()
1460 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_prepare_packed()
1463 END_USE(vq); in virtqueue_enable_cb_prepare_packed()
1464 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter << in virtqueue_enable_cb_prepare_packed()
1470 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_packed() local
1477 return is_used_desc_packed(vq, used_idx, wrap_counter); in virtqueue_poll_packed()
1482 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_packed() local
1486 START_USE(vq); in virtqueue_enable_cb_delayed_packed()
1493 if (vq->event) { in virtqueue_enable_cb_delayed_packed()
1495 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; in virtqueue_enable_cb_delayed_packed()
1496 wrap_counter = vq->packed.used_wrap_counter; in virtqueue_enable_cb_delayed_packed()
1498 used_idx = vq->last_used_idx + bufs; in virtqueue_enable_cb_delayed_packed()
1499 if (used_idx >= vq->packed.vring.num) { in virtqueue_enable_cb_delayed_packed()
1500 used_idx -= vq->packed.vring.num; in virtqueue_enable_cb_delayed_packed()
1504 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | in virtqueue_enable_cb_delayed_packed()
1511 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1514 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_delayed_packed()
1515 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_delayed_packed()
1518 vq->packed.vring.driver->flags = in virtqueue_enable_cb_delayed_packed()
1519 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_delayed_packed()
1526 virtio_mb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1528 if (is_used_desc_packed(vq, in virtqueue_enable_cb_delayed_packed()
1529 vq->last_used_idx, in virtqueue_enable_cb_delayed_packed()
1530 vq->packed.used_wrap_counter)) { in virtqueue_enable_cb_delayed_packed()
1531 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1535 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1541 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_packed() local
1545 START_USE(vq); in virtqueue_detach_unused_buf_packed()
1547 for (i = 0; i < vq->packed.vring.num; i++) { in virtqueue_detach_unused_buf_packed()
1548 if (!vq->packed.desc_state[i].data) in virtqueue_detach_unused_buf_packed()
1551 buf = vq->packed.desc_state[i].data; in virtqueue_detach_unused_buf_packed()
1552 detach_buf_packed(vq, i, NULL); in virtqueue_detach_unused_buf_packed()
1553 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1557 BUG_ON(vq->vq.num_free != vq->packed.vring.num); in virtqueue_detach_unused_buf_packed()
1559 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1575 struct vring_virtqueue *vq; in vring_create_virtqueue_packed() local
1604 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in vring_create_virtqueue_packed()
1605 if (!vq) in vring_create_virtqueue_packed()
1608 vq->vq.callback = callback; in vring_create_virtqueue_packed()
1609 vq->vq.vdev = vdev; in vring_create_virtqueue_packed()
1610 vq->vq.name = name; in vring_create_virtqueue_packed()
1611 vq->vq.num_free = num; in vring_create_virtqueue_packed()
1612 vq->vq.index = index; in vring_create_virtqueue_packed()
1613 vq->we_own_ring = true; in vring_create_virtqueue_packed()
1614 vq->notify = notify; in vring_create_virtqueue_packed()
1615 vq->weak_barriers = weak_barriers; in vring_create_virtqueue_packed()
1616 vq->broken = false; in vring_create_virtqueue_packed()
1617 vq->last_used_idx = 0; in vring_create_virtqueue_packed()
1618 vq->num_added = 0; in vring_create_virtqueue_packed()
1619 vq->packed_ring = true; in vring_create_virtqueue_packed()
1620 vq->use_dma_api = vring_use_dma_api(vdev); in vring_create_virtqueue_packed()
1622 vq->in_use = false; in vring_create_virtqueue_packed()
1623 vq->last_add_time_valid = false; in vring_create_virtqueue_packed()
1626 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in vring_create_virtqueue_packed()
1628 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in vring_create_virtqueue_packed()
1631 vq->weak_barriers = false; in vring_create_virtqueue_packed()
1633 vq->packed.ring_dma_addr = ring_dma_addr; in vring_create_virtqueue_packed()
1634 vq->packed.driver_event_dma_addr = driver_event_dma_addr; in vring_create_virtqueue_packed()
1635 vq->packed.device_event_dma_addr = device_event_dma_addr; in vring_create_virtqueue_packed()
1637 vq->packed.ring_size_in_bytes = ring_size_in_bytes; in vring_create_virtqueue_packed()
1638 vq->packed.event_size_in_bytes = event_size_in_bytes; in vring_create_virtqueue_packed()
1640 vq->packed.vring.num = num; in vring_create_virtqueue_packed()
1641 vq->packed.vring.desc = ring; in vring_create_virtqueue_packed()
1642 vq->packed.vring.driver = driver; in vring_create_virtqueue_packed()
1643 vq->packed.vring.device = device; in vring_create_virtqueue_packed()
1645 vq->packed.next_avail_idx = 0; in vring_create_virtqueue_packed()
1646 vq->packed.avail_wrap_counter = 1; in vring_create_virtqueue_packed()
1647 vq->packed.used_wrap_counter = 1; in vring_create_virtqueue_packed()
1648 vq->packed.event_flags_shadow = 0; in vring_create_virtqueue_packed()
1649 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; in vring_create_virtqueue_packed()
1651 vq->packed.desc_state = kmalloc_array(num, in vring_create_virtqueue_packed()
1654 if (!vq->packed.desc_state) in vring_create_virtqueue_packed()
1657 memset(vq->packed.desc_state, 0, in vring_create_virtqueue_packed()
1661 vq->free_head = 0; in vring_create_virtqueue_packed()
1663 vq->packed.desc_state[i].next = i + 1; in vring_create_virtqueue_packed()
1665 vq->packed.desc_extra = kmalloc_array(num, in vring_create_virtqueue_packed()
1668 if (!vq->packed.desc_extra) in vring_create_virtqueue_packed()
1671 memset(vq->packed.desc_extra, 0, in vring_create_virtqueue_packed()
1676 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in vring_create_virtqueue_packed()
1677 vq->packed.vring.driver->flags = in vring_create_virtqueue_packed()
1678 cpu_to_le16(vq->packed.event_flags_shadow); in vring_create_virtqueue_packed()
1681 list_add_tail(&vq->vq.list, &vdev->vqs); in vring_create_virtqueue_packed()
1682 return &vq->vq; in vring_create_virtqueue_packed()
1685 kfree(vq->packed.desc_state); in vring_create_virtqueue_packed()
1687 kfree(vq); in vring_create_virtqueue_packed()
1712 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add() local
1714 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
1768 int virtqueue_add_outbuf(struct virtqueue *vq, in virtqueue_add_outbuf() argument
1773 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); in virtqueue_add_outbuf()
1790 int virtqueue_add_inbuf(struct virtqueue *vq, in virtqueue_add_inbuf() argument
1795 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); in virtqueue_add_inbuf()
1813 int virtqueue_add_inbuf_ctx(struct virtqueue *vq, in virtqueue_add_inbuf_ctx() argument
1819 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); in virtqueue_add_inbuf_ctx()
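
The three wrappers above are the whole submission API most drivers need. A minimal driver-side sketch, assuming a valid struct virtqueue and a preallocated buffer (my_queue_rx_buf is a hypothetical name, not kernel code):

static int my_queue_rx_buf(struct virtqueue *vq, void *buf, size_t len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* hand the buffer to the device as device-writable ("in") data;
	 * buf doubles as the token virtqueue_get_buf() will return */
	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		return err;	/* e.g. -ENOSPC when the ring is full */

	virtqueue_kick(vq);
	return 0;
}
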
1836 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare() local
1838 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
1853 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_notify() local
1855 if (unlikely(vq->broken)) in virtqueue_notify()
1859 if (!vq->notify(_vq)) { in virtqueue_notify()
1860 vq->broken = true; in virtqueue_notify()
1879 bool virtqueue_kick(struct virtqueue *vq) in virtqueue_kick() argument
1881 if (virtqueue_kick_prepare(vq)) in virtqueue_kick()
1882 return virtqueue_notify(vq); in virtqueue_kick()
1907 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx() local
1909 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
1930 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb() local
1932 if (vq->packed_ring) in virtqueue_disable_cb()
1953 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare() local
1955 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
1971 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll() local
1973 if (unlikely(vq->broken)) in virtqueue_poll()
1976 virtio_mb(vq->weak_barriers); in virtqueue_poll()
1977 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
2016 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed() local
2018 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2033 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf() local
2035 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2040 static inline bool more_used(const struct vring_virtqueue *vq) in more_used() argument
2042 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); in more_used()
2047 struct vring_virtqueue *vq = to_vvq(_vq); in vring_interrupt() local
2049 if (!more_used(vq)) { in vring_interrupt()
2050 pr_debug("virtqueue interrupt with no work for %p\n", vq); in vring_interrupt()
2054 if (unlikely(vq->broken)) in vring_interrupt()
2057 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); in vring_interrupt()
2058 if (vq->vq.callback) in vring_interrupt()
2059 vq->vq.callback(&vq->vq); in vring_interrupt()
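
vring_interrupt() ends by invoking the driver's callback. The canonical pattern inside such a callback is to disable further callbacks, drain completed buffers, and re-enable, looping because virtqueue_enable_cb() returns false when buffers slipped in meanwhile. A hedged sketch (my_vq_callback and my_process are hypothetical):

extern void my_process(void *buf, unsigned int len);	/* hypothetical */

static void my_vq_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)))
			my_process(buf, len);
		/* enable_cb returns false if buffers arrived meanwhile */
	} while (!virtqueue_enable_cb(vq));
}
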
2076 struct vring_virtqueue *vq; in __vring_new_virtqueue() local
2081 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in __vring_new_virtqueue()
2082 if (!vq) in __vring_new_virtqueue()
2085 vq->packed_ring = false; in __vring_new_virtqueue()
2086 vq->vq.callback = callback; in __vring_new_virtqueue()
2087 vq->vq.vdev = vdev; in __vring_new_virtqueue()
2088 vq->vq.name = name; in __vring_new_virtqueue()
2089 vq->vq.num_free = vring.num; in __vring_new_virtqueue()
2090 vq->vq.index = index; in __vring_new_virtqueue()
2091 vq->we_own_ring = false; in __vring_new_virtqueue()
2092 vq->notify = notify; in __vring_new_virtqueue()
2093 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue()
2094 vq->broken = false; in __vring_new_virtqueue()
2095 vq->last_used_idx = 0; in __vring_new_virtqueue()
2096 vq->num_added = 0; in __vring_new_virtqueue()
2097 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue()
2099 vq->in_use = false; in __vring_new_virtqueue()
2100 vq->last_add_time_valid = false; in __vring_new_virtqueue()
2103 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue()
2105 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue()
2108 vq->weak_barriers = false; in __vring_new_virtqueue()
2110 vq->split.queue_dma_addr = 0; in __vring_new_virtqueue()
2111 vq->split.queue_size_in_bytes = 0; in __vring_new_virtqueue()
2113 vq->split.vring = vring; in __vring_new_virtqueue()
2114 vq->split.avail_flags_shadow = 0; in __vring_new_virtqueue()
2115 vq->split.avail_idx_shadow = 0; in __vring_new_virtqueue()
2119 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in __vring_new_virtqueue()
2120 if (!vq->event) in __vring_new_virtqueue()
2121 vq->split.vring.avail->flags = cpu_to_virtio16(vdev, in __vring_new_virtqueue()
2122 vq->split.avail_flags_shadow); in __vring_new_virtqueue()
2125 vq->split.desc_state = kmalloc_array(vring.num, in __vring_new_virtqueue()
2127 if (!vq->split.desc_state) { in __vring_new_virtqueue()
2128 kfree(vq); in __vring_new_virtqueue()
2133 vq->free_head = 0; in __vring_new_virtqueue()
2135 vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1); in __vring_new_virtqueue()
2136 memset(vq->split.desc_state, 0, vring.num * in __vring_new_virtqueue()
2139 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue()
2140 return &vq->vq; in __vring_new_virtqueue()
2176 bool (*notify)(struct virtqueue *vq), in vring_new_virtqueue() argument
2177 void (*callback)(struct virtqueue *vq), in vring_new_virtqueue() argument
2193 struct vring_virtqueue *vq = to_vvq(_vq); in vring_del_virtqueue() local
2195 if (vq->we_own_ring) { in vring_del_virtqueue()
2196 if (vq->packed_ring) { in vring_del_virtqueue()
2197 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2198 vq->packed.ring_size_in_bytes, in vring_del_virtqueue()
2199 vq->packed.vring.desc, in vring_del_virtqueue()
2200 vq->packed.ring_dma_addr); in vring_del_virtqueue()
2202 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2203 vq->packed.event_size_in_bytes, in vring_del_virtqueue()
2204 vq->packed.vring.driver, in vring_del_virtqueue()
2205 vq->packed.driver_event_dma_addr); in vring_del_virtqueue()
2207 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2208 vq->packed.event_size_in_bytes, in vring_del_virtqueue()
2209 vq->packed.vring.device, in vring_del_virtqueue()
2210 vq->packed.device_event_dma_addr); in vring_del_virtqueue()
2212 kfree(vq->packed.desc_state); in vring_del_virtqueue()
2213 kfree(vq->packed.desc_extra); in vring_del_virtqueue()
2215 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2216 vq->split.queue_size_in_bytes, in vring_del_virtqueue()
2217 vq->split.vring.desc, in vring_del_virtqueue()
2218 vq->split.queue_dma_addr); in vring_del_virtqueue()
2221 if (!vq->packed_ring) in vring_del_virtqueue()
2222 kfree(vq->split.desc_state); in vring_del_virtqueue()
2224 kfree(vq); in vring_del_virtqueue()
2265 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_vring_size() local
2267 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; in virtqueue_get_vring_size()
2273 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_is_broken() local
2275 return READ_ONCE(vq->broken); in virtqueue_is_broken()
2288 struct vring_virtqueue *vq = to_vvq(_vq); in virtio_break_device() local
2291 WRITE_ONCE(vq->broken, true); in virtio_break_device()
2298 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_desc_addr() local
2300 BUG_ON(!vq->we_own_ring); in virtqueue_get_desc_addr()
2302 if (vq->packed_ring) in virtqueue_get_desc_addr()
2303 return vq->packed.ring_dma_addr; in virtqueue_get_desc_addr()
2305 return vq->split.queue_dma_addr; in virtqueue_get_desc_addr()
2311 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_avail_addr() local
2313 BUG_ON(!vq->we_own_ring); in virtqueue_get_avail_addr()
2315 if (vq->packed_ring) in virtqueue_get_avail_addr()
2316 return vq->packed.driver_event_dma_addr; in virtqueue_get_avail_addr()
2318 return vq->split.queue_dma_addr + in virtqueue_get_avail_addr()
2319 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); in virtqueue_get_avail_addr()
2325 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_used_addr() local
2327 BUG_ON(!vq->we_own_ring); in virtqueue_get_used_addr()
2329 if (vq->packed_ring) in virtqueue_get_used_addr()
2330 return vq->packed.device_event_dma_addr; in virtqueue_get_used_addr()
2332 return vq->split.queue_dma_addr + in virtqueue_get_used_addr()
2333 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); in virtqueue_get_used_addr()
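
For split rings the avail and used addresses above are plain offsets from the descriptor table, following the layout that vring_init() in the UAPI header establishes. A runnable userspace reconstruction of that arithmetic (num and align are example values):

#include <stdio.h>

int main(void)
{
	unsigned long num = 256, align = 4096;
	unsigned long desc_off  = 0;
	/* struct vring_desc is 16 bytes: addr(8) + len(4) + flags(2) + next(2) */
	unsigned long avail_off = desc_off + num * 16;
	/* avail: flags + idx + ring[num] + used_event, each 2 bytes,
	 * then the used ring starts at the next align boundary */
	unsigned long used_off  = (avail_off + 4 + 2 * num + 2 + align - 1)
				  & ~(align - 1);

	printf("avail at +%lu, used at +%lu\n", avail_off, used_off);
	return 0;	/* for num=256, align=4096: +4096 and +8192 */
}
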
2338 const struct vring *virtqueue_get_vring(struct virtqueue *vq) in virtqueue_get_vring() argument
2340 return &to_vvq(vq)->split.vring; in virtqueue_get_vring()