/* SPDX-License-Identifier: Apache-2.0 OR MIT */
/*
 * Copyright (c) 2023 Rockchip Electronics Co., Ltd.
 */

#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>

#include "mpp_env.h"
#include "mpp_log.h"
#include "mpp_common.h"
#include "mpp_dmabuf.h"
#include "mpp_singleton.h"
#include "linux/dma-buf.h"

#define MPP_NO_PARTIAL_SUPPORT 25 /* ENOTTY */
#define CACHE_LINE_SIZE 64

static RK_U32 has_partial_ops = 0;

static void mpp_dmabuf_init(void)
{
    /*
     * Update has_partial_ops from the environment.
     * NOTE: When dmaheap is enabled, partial sync on a dmaheap fd works fine,
     * but partial sync on a drm fd may fail on kernels above 4.19.
     * So the mpp_dmabuf_has_partial_ops env option is provided to disable
     * partial ops.
     */
    mpp_env_get_u32("mpp_dmabuf_has_partial_ops", &has_partial_ops, has_partial_ops);
}

MPP_SINGLETON(MPP_SGLN_DMABUF, mpp_dmabuf, mpp_dmabuf_init, NULL);

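/*
 * Begin a CPU access window on a dma-buf.
 * DMA_BUF_IOCTL_SYNC with DMA_BUF_SYNC_START asks the kernel to make the
 * buffer coherent for the CPU before it is touched through a mapping.
 * A read-only caller (ro != 0) requests DMA_BUF_SYNC_READ, otherwise full
 * DMA_BUF_SYNC_RW access is requested.
 */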
MPP_RET mpp_dmabuf_sync_begin(RK_S32 fd, RK_S32 ro, const char *caller)
{
    struct dma_buf_sync sync;
    RK_S32 ret;

    sync.flags = DMA_BUF_SYNC_START | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);

    ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
    if (ret) {
        mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
        return MPP_NOK;
    }

    return MPP_OK;
}

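/*
 * End a CPU access window on a dma-buf.
 * DMA_BUF_IOCTL_SYNC with DMA_BUF_SYNC_END lets the kernel write back any
 * CPU-side changes so the device sees a coherent buffer again.
 */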
MPP_RET mpp_dmabuf_sync_end(RK_S32 fd, RK_S32 ro, const char *caller)
{
    struct dma_buf_sync sync;
    RK_S32 ret;

    sync.flags = DMA_BUF_SYNC_END | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);

    ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
    if (ret) {
        mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
        return MPP_NOK;
    }

    return MPP_OK;
}

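/*
 * Begin CPU access to a sub-range of a dma-buf.
 * When partial ops are enabled this uses the DMA_BUF_IOCTL_SYNC_PARTIAL
 * extension so only the touched range is synchronized; offset and length are
 * widened to CACHE_LINE_SIZE alignment first. If the kernel rejects the ioctl
 * with ENOTTY, partial ops are disabled and the call falls back to a
 * full-buffer mpp_dmabuf_sync_begin().
 */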
MPP_RET mpp_dmabuf_sync_partial_begin(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller)
{
    if (has_partial_ops) {
        struct dma_buf_sync_partial sync;
        RK_S32 ret;

        if (!length)
            return MPP_OK;

        sync.flags = DMA_BUF_SYNC_START | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
        sync.offset = MPP_ALIGN_DOWN(offset, CACHE_LINE_SIZE);
        sync.len = MPP_ALIGN(length + offset - sync.offset, CACHE_LINE_SIZE);

        ret = ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
        if (ret) {
            if (errno == MPP_NO_PARTIAL_SUPPORT) {
                has_partial_ops = 0;
                goto NOT_SUPPORT;
            }

            mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
            return MPP_NOK;
        }

        return MPP_OK;
    }

NOT_SUPPORT:
    return mpp_dmabuf_sync_begin(fd, ro, caller);
}

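/*
 * End CPU access to a sub-range of a dma-buf.
 * Mirrors mpp_dmabuf_sync_partial_begin(): the cache-line aligned range is
 * flushed back via DMA_BUF_IOCTL_SYNC_PARTIAL, with the same ENOTTY fallback
 * to a full-buffer mpp_dmabuf_sync_end().
 */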
MPP_RET mpp_dmabuf_sync_partial_end(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller)
{
    if (has_partial_ops) {
        struct dma_buf_sync_partial sync;
        RK_S32 ret;

        if (!length)
            return MPP_OK;

        sync.flags = DMA_BUF_SYNC_END | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
        sync.offset = MPP_ALIGN_DOWN(offset, CACHE_LINE_SIZE);
        sync.len = MPP_ALIGN(length + offset - sync.offset, CACHE_LINE_SIZE);

        ret = ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
        if (ret) {
            if (errno == MPP_NO_PARTIAL_SUPPORT) {
                has_partial_ops = 0;
                goto NOT_SUPPORT;
            }

            mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
            return MPP_NOK;
        }

        return MPP_OK;
    }

NOT_SUPPORT:
    return mpp_dmabuf_sync_end(fd, ro, caller);
}

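/*
 * Attach a debug name to a dma-buf via DMA_BUF_SET_NAME. The name is only
 * informational; it typically shows up in the kernel's dma-buf accounting
 * (e.g. debugfs/fdinfo) and helps identify who owns a buffer.
 */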
MPP_RET mpp_dmabuf_set_name(RK_S32 fd, const char *name, const char *caller)
{
    RK_S32 ret = ioctl(fd, DMA_BUF_SET_NAME, name);
    if (ret) {
        mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
        return MPP_NOK;
    }

    return MPP_OK;
}

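/* Report whether partial sync ops are currently enabled (see mpp_dmabuf_init). */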
RK_U32 mpp_dmabuf_sync_partial_support(void)
{
    return has_partial_ops;
}

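/*
 * Illustrative usage sketch (not part of this file): a caller that maps a
 * dma-buf for CPU writes would typically bracket the access like
 *
 *     mpp_dmabuf_sync_begin(fd, 0, __FUNCTION__);
 *     memset(ptr, 0, size);                    // CPU touches the mapping
 *     mpp_dmabuf_sync_end(fd, 0, __FUNCTION__);
 *
 * where fd, ptr and size are hypothetical names supplied by the surrounding
 * buffer management code.
 */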