Lines Matching refs: _vq

18 #define BAD_RING(_vq, fmt, args...)				\  argument
20 dev_err(&(_vq)->vq.vdev->dev, \
21 "%s:"fmt, (_vq)->vq.name, ##args); \
25 #define START_USE(_vq) \ argument
27 if ((_vq)->in_use) \
29 (_vq)->vq.name, (_vq)->in_use); \
30 (_vq)->in_use = __LINE__; \
32 #define END_USE(_vq) \ argument
33 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
34 #define LAST_ADD_TIME_UPDATE(_vq) \ argument
39 if ((_vq)->last_add_time_valid) \
41 (_vq)->last_add_time)) > 100); \
42 (_vq)->last_add_time = now; \
43 (_vq)->last_add_time_valid = true; \
45 #define LAST_ADD_TIME_CHECK(_vq) \ argument
47 if ((_vq)->last_add_time_valid) { \
49 (_vq)->last_add_time)) > 100); \
52 #define LAST_ADD_TIME_INVALID(_vq) \ argument
53 ((_vq)->last_add_time_valid = false)
55 #define BAD_RING(_vq, fmt, args...) \ argument
57 dev_err(&_vq->vq.vdev->dev, \
58 "%s:"fmt, (_vq)->vq.name, ##args); \
59 (_vq)->broken = true; \
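
Only the lines that mention _vq appear in this listing, so the statements wrapping each macro body are elided. A minimal sketch of how the second BAD_RING definition (lines 55-59) plausibly fits together, assuming the usual do { ... } while (0) wrapper on the elided lines:

	/* sketch: the do/while(0) wrapper is an assumption, not shown in the listing */
	#define BAD_RING(_vq, fmt, args...)				\
		do {							\
			dev_err(&_vq->vq.vdev->dev,			\
				"%s:"fmt, (_vq)->vq.name, ##args);	\
			(_vq)->broken = true;				\
		} while (0)

On that reading, a bad-ring report both logs through the device's struct device and latches the queue as broken so later operations can bail out.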
201 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) argument
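
The to_vvq() helper at line 201 is the pattern every occurrence below leans on: the struct virtqueue handed out to drivers is embedded as the vq member of the private struct vring_virtqueue, and container_of() recovers the containing object from that embedded pointer. A self-contained sketch of the same idiom, using stand-in types rather than the kernel's real layouts:

	#include <stddef.h>
	#include <stdio.h>

	/* same idea as the kernel's container_of(): member pointer -> containing struct */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct virtqueue { const char *name; };	/* stand-in for the public handle */
	struct vring_virtqueue {		/* stand-in for the private wrapper */
		int in_use;
		struct virtqueue vq;
	};

	#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

	int main(void)
	{
		struct vring_virtqueue real = { .in_use = 0, .vq = { .name = "rx0" } };
		struct virtqueue *handle = &real.vq;		/* what a driver holds */
		struct vring_virtqueue *vq = to_vvq(handle);	/* what virtio_ring.c recovers */

		printf("%s in_use=%d\n", vq->vq.name, vq->in_use);
		return 0;
	}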
203 static inline bool virtqueue_use_indirect(struct virtqueue *_vq, in virtqueue_use_indirect() argument
206 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_use_indirect()
392 static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, in alloc_indirect_split() argument
411 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); in alloc_indirect_split()
415 static inline int virtqueue_add_split(struct virtqueue *_vq, in virtqueue_add_split() argument
424 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_split()
447 if (virtqueue_use_indirect(_vq, total_sg)) in virtqueue_add_split()
448 desc = alloc_indirect_split(_vq, total_sg, gfp); in virtqueue_add_split()
487 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT); in virtqueue_add_split()
488 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr); in virtqueue_add_split()
489 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); in virtqueue_add_split()
491 i = virtio16_to_cpu(_vq->vdev, desc[i].next); in virtqueue_add_split()
500 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE); in virtqueue_add_split()
501 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr); in virtqueue_add_split()
502 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); in virtqueue_add_split()
504 i = virtio16_to_cpu(_vq->vdev, desc[i].next); in virtqueue_add_split()
508 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); in virtqueue_add_split()
518 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
520 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, in virtqueue_add_split()
523 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev, in virtqueue_add_split()
532 vq->free_head = virtio16_to_cpu(_vq->vdev, in virtqueue_add_split()
547 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
553 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
563 virtqueue_kick(_vq); in virtqueue_add_split()
579 i = virtio16_to_cpu(_vq->vdev, desc[i].next); in virtqueue_add_split()
589 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) in virtqueue_kick_prepare_split() argument
591 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_split()
608 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, in virtqueue_kick_prepare_split()
613 cpu_to_virtio16(_vq->vdev, in virtqueue_kick_prepare_split()
678 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, in virtqueue_get_buf_ctx_split() argument
682 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_split()
704 i = virtio32_to_cpu(_vq->vdev, in virtqueue_get_buf_ctx_split()
706 *len = virtio32_to_cpu(_vq->vdev, in virtqueue_get_buf_ctx_split()
728 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
736 static void virtqueue_disable_cb_split(struct virtqueue *_vq) in virtqueue_disable_cb_split() argument
738 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_split()
744 cpu_to_virtio16(_vq->vdev, in virtqueue_disable_cb_split()
749 static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq) in virtqueue_enable_cb_prepare_split() argument
751 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_split()
765 cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
768 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
774 static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx) in virtqueue_poll_split() argument
776 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_split()
778 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, in virtqueue_poll_split()
782 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq) in virtqueue_enable_cb_delayed_split() argument
784 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_split()
798 cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_delayed_split()
806 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
808 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
818 static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq) in virtqueue_detach_unused_buf_split() argument
820 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_split()
833 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
1093 static inline int virtqueue_add_packed(struct virtqueue *_vq, in virtqueue_add_packed() argument
1102 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_packed()
1124 if (virtqueue_use_indirect(_vq, total_sg)) { in virtqueue_add_packed()
1242 static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq) in virtqueue_kick_prepare_packed() argument
1244 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_packed()
1356 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, in virtqueue_get_buf_ctx_packed() argument
1360 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_packed()
1421 static void virtqueue_disable_cb_packed(struct virtqueue *_vq) in virtqueue_disable_cb_packed() argument
1423 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_packed()
1432 static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) in virtqueue_enable_cb_prepare_packed() argument
1434 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_packed()
1468 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap) in virtqueue_poll_packed() argument
1470 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_packed()
1480 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) in virtqueue_enable_cb_delayed_packed() argument
1482 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_packed()
1539 static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) in virtqueue_detach_unused_buf_packed() argument
1541 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_packed()
1703 static inline int virtqueue_add(struct virtqueue *_vq, in virtqueue_add() argument
1712 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add()
1714 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
1716 virtqueue_add_split(_vq, sgs, total_sg, in virtqueue_add()
1734 int virtqueue_add_sgs(struct virtqueue *_vq, in virtqueue_add_sgs() argument
1750 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, in virtqueue_add_sgs()
1834 bool virtqueue_kick_prepare(struct virtqueue *_vq) in virtqueue_kick_prepare() argument
1836 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare()
1838 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
1839 virtqueue_kick_prepare_split(_vq); in virtqueue_kick_prepare()
1851 bool virtqueue_notify(struct virtqueue *_vq) in virtqueue_notify() argument
1853 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_notify()
1859 if (!vq->notify(_vq)) { in virtqueue_notify()
1904 void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, in virtqueue_get_buf_ctx() argument
1907 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx()
1909 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
1910 virtqueue_get_buf_ctx_split(_vq, len, ctx); in virtqueue_get_buf_ctx()
1914 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) in virtqueue_get_buf() argument
1916 return virtqueue_get_buf_ctx(_vq, len, NULL); in virtqueue_get_buf()
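
The exported wrappers listed around lines 1734-1916 (virtqueue_add_sgs(), virtqueue_kick_prepare()/virtqueue_notify(), virtqueue_get_buf()) are the driver-facing API; the _split/_packed variants above are selected internally through vq->packed_ring. A hedged driver-side sketch of the usual submit/complete cycle, with a hypothetical one-out/one-in request layout:

	#include <linux/virtio.h>
	#include <linux/scatterlist.h>

	/* Hypothetical request: one driver-to-device sg, one device-to-driver sg. */
	static int example_submit(struct virtqueue *vq, void *hdr, size_t hdr_len,
				  void *resp, size_t resp_len)
	{
		struct scatterlist hdr_sg, resp_sg, *sgs[2];
		int err;

		sg_init_one(&hdr_sg, hdr, hdr_len);
		sg_init_one(&resp_sg, resp, resp_len);
		sgs[0] = &hdr_sg;	/* out_sgs come first ... */
		sgs[1] = &resp_sg;	/* ... then in_sgs        */

		err = virtqueue_add_sgs(vq, sgs, 1, 1, hdr, GFP_ATOMIC);
		if (err)
			return err;	/* e.g. -ENOSPC when the ring is full */

		virtqueue_kick(vq);	/* kick_prepare + notify under the hood */
		return 0;
	}

	/* Reap completions, e.g. from the virtqueue's callback. */
	static void example_complete(struct virtqueue *vq)
	{
		unsigned int len;
		void *token;

		/* 'token' is the data pointer that was passed to virtqueue_add_sgs() */
		while ((token = virtqueue_get_buf(vq, &len)))
			;
	}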
1928 void virtqueue_disable_cb(struct virtqueue *_vq) in virtqueue_disable_cb() argument
1930 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb()
1933 virtqueue_disable_cb_packed(_vq); in virtqueue_disable_cb()
1935 virtqueue_disable_cb_split(_vq); in virtqueue_disable_cb()
1951 unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) in virtqueue_enable_cb_prepare() argument
1953 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare()
1955 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
1956 virtqueue_enable_cb_prepare_split(_vq); in virtqueue_enable_cb_prepare()
1969 bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) in virtqueue_poll() argument
1971 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll()
1977 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
1978 virtqueue_poll_split(_vq, last_used_idx); in virtqueue_poll()
1993 bool virtqueue_enable_cb(struct virtqueue *_vq) in virtqueue_enable_cb() argument
1995 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); in virtqueue_enable_cb()
1997 return !virtqueue_poll(_vq, last_used_idx); in virtqueue_enable_cb()
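
Lines 1993-1997 show virtqueue_enable_cb() as virtqueue_enable_cb_prepare() followed by a negated virtqueue_poll(), which is what lets a driver close the race between draining the ring and re-arming callbacks. A sketch of the conventional drain loop built on that return value; consume() is a hypothetical per-buffer handler:

	static void consume(void *buf, unsigned int len);	/* hypothetical handler */

	static void example_drain(struct virtqueue *vq)
	{
		unsigned int len;
		void *buf;

		do {
			virtqueue_disable_cb(vq);	/* quiet callbacks while draining */
			while ((buf = virtqueue_get_buf(vq, &len)))
				consume(buf, len);
			/* enable_cb() returns false if buffers raced in: drain again */
		} while (!virtqueue_enable_cb(vq));
	}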
2014 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) in virtqueue_enable_cb_delayed() argument
2016 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed()
2018 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2019 virtqueue_enable_cb_delayed_split(_vq); in virtqueue_enable_cb_delayed()
2031 void *virtqueue_detach_unused_buf(struct virtqueue *_vq) in virtqueue_detach_unused_buf() argument
2033 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf()
2035 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2036 virtqueue_detach_unused_buf_split(_vq); in virtqueue_detach_unused_buf()
2045 irqreturn_t vring_interrupt(int irq, void *_vq) in vring_interrupt() argument
2047 struct vring_virtqueue *vq = to_vvq(_vq); in vring_interrupt()
2191 void vring_del_virtqueue(struct virtqueue *_vq) in vring_del_virtqueue() argument
2193 struct vring_virtqueue *vq = to_vvq(_vq); in vring_del_virtqueue()
2223 list_del(&_vq->list); in vring_del_virtqueue()
2262 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) in virtqueue_get_vring_size() argument
2265 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_vring_size()
2271 bool virtqueue_is_broken(struct virtqueue *_vq) in virtqueue_is_broken() argument
2273 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_is_broken()
2285 struct virtqueue *_vq; in virtio_break_device() local
2287 list_for_each_entry(_vq, &dev->vqs, list) { in virtio_break_device()
2288 struct vring_virtqueue *vq = to_vvq(_vq); in virtio_break_device()
2296 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq) in virtqueue_get_desc_addr() argument
2298 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_desc_addr()
2309 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq) in virtqueue_get_avail_addr() argument
2311 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_avail_addr()
2323 dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq) in virtqueue_get_used_addr() argument
2325 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_used_addr()