/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <trace/events/dma_fence.h>

#include "virtgpu_drv.h"

#define to_virtio_fence(x) \
	container_of(x, struct virtio_gpu_fence, f)

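/*
 * All fences emitted by this driver currently live on a single timeline
 * backed by the control virtqueue (they share the dma_fence context held
 * in the fence driver), hence the fixed driver and timeline names below.
 */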
static const char *virtio_get_driver_name(struct dma_fence *f)
{
	return "virtio_gpu";
}

static const char *virtio_get_timeline_name(struct dma_fence *f)
{
	return "controlq";
}

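/*
 * A fence has signaled once the driver-wide last_seq, which tracks the
 * highest fence id the device has completed, reaches the fence's seqno.
 */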
static bool virtio_fence_signaled(struct dma_fence *f)
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);

	if (WARN_ON_ONCE(fence->f.seqno == 0))
		/* leaked fence outside driver before completing
		 * initialization with virtio_gpu_fence_emit
		 */
		return false;
	if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
		return true;
	return false;
}

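/*
 * These two hooks provide the human-readable seqno strings shown when
 * fence state is dumped for debugging (e.g. via the sync_file machinery).
 */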
static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
	snprintf(str, size, "%llu", f->seqno);
}

static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);

	snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
}

static const struct dma_fence_ops virtio_fence_ops = {
	.get_driver_name = virtio_get_driver_name,
	.get_timeline_name = virtio_get_timeline_name,
	.signaled = virtio_fence_signaled,
	.fence_value_str = virtio_fence_value_str,
	.timeline_value_str = virtio_timeline_value_str,
};

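/**
 * virtio_gpu_fence_alloc - allocate a fence for a controlq command
 * @vgdev: virtio-gpu device
 *
 * Returns a partially initialized fence (its seqno is still 0), or NULL
 * on allocation failure.  The fence must not leave the driver until
 * virtio_gpu_fence_emit() has assigned it a real seqno.
 */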
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
						 GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->drv = drv;

	/* This only partially initializes the fence because the seqno is
	 * unknown yet. The fence must not be used outside of the driver
	 * until virtio_gpu_fence_emit is called.
	 */
	dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);

	return fence;
}

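/*
 * Typical alloc/emit pairing, sketched (the actual command-building
 * helpers live elsewhere in the driver):
 *
 *	fence = virtio_gpu_fence_alloc(vgdev);
 *	if (!fence)
 *		return -ENOMEM;
 *	...fill in the command...
 *	virtio_gpu_fence_emit(vgdev, cmd_hdr, fence);
 *	...queue the command on the control virtqueue...
 */

/**
 * virtio_gpu_fence_emit - assign a seqno and attach the fence to a command
 * @vgdev: virtio-gpu device
 * @cmd_hdr: header of the command this fence will complete with
 * @fence: fence from virtio_gpu_fence_alloc()
 *
 * Picks the next seqno under the fence driver lock, adds the fence to the
 * pending list, and flags @cmd_hdr so the device reports the fence id back
 * once the command has been processed.
 */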
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	fence->f.seqno = ++drv->sync_seq;
	dma_fence_get(&fence->f);
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, irq_flags);

	trace_dma_fence_emit(&fence->f);

	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
	cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
}

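/**
 * virtio_gpu_fence_event_process - signal fences the device has completed
 * @vgdev: virtio-gpu device
 * @last_seq: highest fence id the device reports as finished
 *
 * Called when the device acknowledges fenced commands.  Every pending
 * fence with a seqno at or below @last_seq is signaled, unlinked and
 * released; since seqnos increase monotonically, fences signal in order.
 */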
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
				    u64 last_seq)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence, *tmp;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
		if (last_seq < fence->f.seqno)
			continue;
		dma_fence_signal_locked(&fence->f);
		list_del(&fence->node);
		dma_fence_put(&fence->f);
	}
	spin_unlock_irqrestore(&drv->lock, irq_flags);
}