/* SPDX-License-Identifier: Apache-2.0 OR MIT */
/*
 * Copyright (c) 2023 Rockchip Electronics Co., Ltd.
 */

#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>

#include "mpp_env.h"
#include "mpp_log.h"
#include "mpp_common.h"
#include "mpp_dmabuf.h"
#include "mpp_singleton.h"
#include "linux/dma-buf.h"

#define MPP_NO_PARTIAL_SUPPORT 25 /* ENOTTY */
#define CACHE_LINE_SIZE 64

static RK_U32 has_partial_ops = 0;

static void mpp_dmabuf_init(void)
{
    /*
     * Update has_partial_ops from the environment.
     * NOTE: When dmaheap is enabled, partial ops on a dmaheap fd work fine,
     * but partial ops on a drm fd may fail on kernel versions above 4.19.
     * So the mpp_dmabuf_has_partial_ops env is provided to control whether
     * partial ops are used.
     */
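    /*
     * Illustrative usage (assuming mpp_env_get_u32() reads process
     * environment variables on this platform): start the process with
     *   mpp_dmabuf_has_partial_ops=1
     * to enable the partial sync path below.
     */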
    mpp_env_get_u32("mpp_dmabuf_has_partial_ops", &has_partial_ops, has_partial_ops);
}

MPP_SINGLETON(MPP_SGLN_DMABUF, mpp_dmabuf, mpp_dmabuf_init, NULL);

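/*
 * Full-buffer CPU access begin: issues DMA_BUF_IOCTL_SYNC with
 * DMA_BUF_SYNC_START before the CPU touches the buffer. Read-only access
 * (ro != 0) requests DMA_BUF_SYNC_READ, otherwise read/write coherency.
 */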
MPP_RET mpp_dmabuf_sync_begin(RK_S32 fd, RK_S32 ro, const char *caller)
{
    struct dma_buf_sync sync;
    RK_S32 ret;

    sync.flags = DMA_BUF_SYNC_START | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);

    ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
    if (ret) {
        mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
        return MPP_NOK;
    }

    return MPP_OK;
}

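/*
 * Full-buffer CPU access end: issues DMA_BUF_IOCTL_SYNC with
 * DMA_BUF_SYNC_END once CPU access is finished.
 */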
MPP_RET mpp_dmabuf_sync_end(RK_S32 fd, RK_S32 ro, const char *caller)
{
    struct dma_buf_sync sync;
    RK_S32 ret;

    sync.flags = DMA_BUF_SYNC_END | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);

    ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
    if (ret) {
        mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
        return MPP_NOK;
    }

    return MPP_OK;
}

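/*
 * Partial CPU access begin: only the [offset, offset + length) window is
 * synchronized. The window is expanded to CACHE_LINE_SIZE boundaries before
 * issuing DMA_BUF_IOCTL_SYNC_PARTIAL. If the kernel rejects the ioctl with
 * ENOTTY, partial ops are disabled globally and the call falls back to a
 * full-buffer sync.
 */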
MPP_RET mpp_dmabuf_sync_partial_begin(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller)
{
    if (has_partial_ops) {
        struct dma_buf_sync_partial sync;
        RK_S32 ret;

        if (!length)
            return MPP_OK;

        sync.flags = DMA_BUF_SYNC_START | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
        sync.offset = MPP_ALIGN_DOWN(offset, CACHE_LINE_SIZE);
        sync.len = MPP_ALIGN(length + offset - sync.offset, CACHE_LINE_SIZE);

        ret = ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
        if (ret) {
            if (errno == MPP_NO_PARTIAL_SUPPORT) {
                has_partial_ops = 0;
                goto NOT_SUPPORT;
            }

            mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
            return MPP_NOK;
        }

        return MPP_OK;
    }

NOT_SUPPORT:
    return mpp_dmabuf_sync_begin(fd, ro, caller);
}

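/*
 * Partial CPU access end: counterpart of mpp_dmabuf_sync_partial_begin(),
 * flushing the cache-line aligned window and falling back to a full-buffer
 * sync when DMA_BUF_IOCTL_SYNC_PARTIAL is not supported.
 */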
MPP_RET mpp_dmabuf_sync_partial_end(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller)
{
    if (has_partial_ops) {
        struct dma_buf_sync_partial sync;
        RK_S32 ret;

        if (!length)
            return MPP_OK;

        sync.flags = DMA_BUF_SYNC_END | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
        sync.offset = MPP_ALIGN_DOWN(offset, CACHE_LINE_SIZE);
        sync.len = MPP_ALIGN(length + offset - sync.offset, CACHE_LINE_SIZE);

        ret = ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
        if (ret) {
            if (errno == MPP_NO_PARTIAL_SUPPORT) {
                has_partial_ops = 0;
                goto NOT_SUPPORT;
            }

            mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
            return MPP_NOK;
        }

        return MPP_OK;
    }

NOT_SUPPORT:
    return mpp_dmabuf_sync_end(fd, ro, caller);
}

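/*
 * Attaches a debug name to the dma-buf via DMA_BUF_SET_NAME so the buffer
 * can be identified in kernel-side accounting (e.g. dma_buf debugfs or
 * fdinfo output, depending on the kernel).
 */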
MPP_RET mpp_dmabuf_set_name(RK_S32 fd, const char *name, const char *caller)
{
    RK_S32 ret = ioctl(fd, DMA_BUF_SET_NAME, name);
    if (ret) {
        mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
        return MPP_NOK;
    }

    return MPP_OK;
}

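/* Reports whether partial sync ops are currently believed to be usable. */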
RK_U32 mpp_dmabuf_sync_partial_support(void)
{
    return has_partial_ops;
}