// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co., Ltd.
 *
 * Author: Huang Lee <Putin.li@rock-chips.com>
 */

#define pr_fmt(fmt) "rve_fence: " fmt

#include <linux/dma-fence.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "rve_fence.h"

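/*
 * Both the driver name and the timeline name are reported as DRIVER_NAME:
 * all RVE fences come from the single fence context allocated below.
 */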
static const char *rve_fence_get_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

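/*
 * Only the name callbacks are needed; the dma-fence core falls back to
 * its default behaviour for enable_signaling, wait and release.
 */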
static const struct dma_fence_ops rve_fence_ops = {
	.get_driver_name = rve_fence_get_name,
	.get_timeline_name = rve_fence_get_name,
};

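/*
 * Allocate the driver-wide fence context. Every out-fence created by
 * rve_out_fence_alloc() is numbered on this one context/timeline.
 */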
struct rve_fence_context *rve_fence_context_alloc(void)
{
	struct rve_fence_context *fence_ctx = NULL;

	fence_ctx = kzalloc(sizeof(*fence_ctx), GFP_KERNEL);
	if (!fence_ctx)
		return ERR_PTR(-ENOMEM);

	fence_ctx->context = dma_fence_context_alloc(1);
	spin_lock_init(&fence_ctx->spinlock);

	return fence_ctx;
}

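/* Free the driver-wide fence context allocated above. */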
void rve_fence_context_free(struct rve_fence_context *fence_ctx)
{
	kfree(fence_ctx);
}

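/*
 * Create the job's out-fence on the driver-wide timeline. The seqno is
 * bumped under fence_ctx->spinlock so that concurrent submissions get
 * unique, monotonically increasing sequence numbers.
 */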
int rve_out_fence_alloc(struct rve_job *job)
{
	struct rve_fence_context *fence_ctx = rve_drvdata->fence_ctx;
	struct dma_fence *fence = NULL;
	u64 seqno;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	/* Serialize the seqno bump against concurrent job submissions. */
	spin_lock(&fence_ctx->spinlock);
	seqno = ++fence_ctx->seqno;
	spin_unlock(&fence_ctx->spinlock);

	dma_fence_init(fence, &rve_fence_ops, &job->fence_lock,
		       fence_ctx->context, seqno);

	job->out_fence = fence;

	return 0;
}

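/*
 * Wrap the job's out-fence in a sync_file and return an installed fd
 * that userspace can poll or pass on to other drivers.
 */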
int rve_out_fence_get_fd(struct rve_job *job)
{
	struct sync_file *sync_file = NULL;
	int fence_fd = -1;

	if (!job->out_fence)
		return -EINVAL;

	fence_fd = get_unused_fd_flags(O_CLOEXEC);
	if (fence_fd < 0)
		return fence_fd;

	sync_file = sync_file_create(job->out_fence);
	if (!sync_file) {
		/* Do not leak the reserved fd on failure. */
		put_unused_fd(fence_fd);
		return -ENOMEM;
	}

	fd_install(fence_fd, sync_file->file);

	return fence_fd;
}

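/*
 * Look up the in-fence behind a sync_file fd; returns NULL (with an
 * error message) if the fd does not carry a fence. The caller owns the
 * returned reference.
 */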
struct dma_fence *rve_get_input_fence(int in_fence_fd)
{
	struct dma_fence *in_fence;

	in_fence = sync_file_get_fence(in_fence_fd);
	if (!in_fence)
		pr_err("cannot get in-fence from fd %d\n", in_fence_fd);

	return in_fence;
}

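/*
 * Interruptibly block until the in-fence signals, then drop the
 * reference taken in rve_get_input_fence().
 */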
int rve_wait_input_fence(struct dma_fence *in_fence)
{
	int ret = 0;

	ret = dma_fence_wait(in_fence, true);

	dma_fence_put(in_fence);

	return ret;
}

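/*
 * Arm a completion callback on the in-fence instead of blocking. On
 * success the waiter is owned by the callback path, which is expected
 * to free it; -ENOENT means the fence has already signaled, so the
 * caller can proceed without waiting.
 */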
int rve_add_dma_fence_callback(struct rve_job *job, struct dma_fence *in_fence,
			       dma_fence_func_t func)
{
	struct rve_fence_waiter *waiter;
	int ret;

	waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter) {
		pr_err("%s: Failed to allocate waiter\n", __func__);
		return -ENOMEM;
	}

	waiter->job = job;

	ret = dma_fence_add_callback(in_fence, &waiter->waiter, func);
	if (ret == -ENOENT) {
		pr_err("input fence has already been signaled\n");
		goto err_free_waiter;
	} else if (ret == -EINVAL) {
		pr_err("%s: failed to add callback to dma_fence, err: %d\n",
		       __func__, ret);
		goto err_free_waiter;
	}

	return ret;

err_free_waiter:
	kfree(waiter);
	return ret;
}
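
/*
 * Typical submission flow (illustrative sketch only; rve_job_run(), the
 * req structure and rve_input_fence_signaled() are assumptions about the
 * caller, not part of this file):
 *
 *	in_fence = rve_get_input_fence(req->in_fence_fd);
 *	if (in_fence) {
 *		ret = rve_add_dma_fence_callback(job, in_fence,
 *						 rve_input_fence_signaled);
 *		if (ret == -ENOENT)
 *			rve_job_run(job);	/+ already signaled +/
 *	}
 *	ret = rve_out_fence_alloc(job);
 *	req->out_fence_fd = rve_out_fence_get_fd(job);
 *
 * (`/+ +/` stands in for a nested comment in the sketch above.)
 */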