xref: /OK3568_Linux_fs/kernel/drivers/soc/rockchip/rockchip_decompress_user.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2022 Rockchip Electronics Co., Ltd
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Due to hardware limitations, this module only supports
6*4882a593Smuzhiyun  * up to 32bit continuous CMA memory.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * author:
9*4882a593Smuzhiyun  *	Xiao Yapeng, yp.xiao@rock-chips.com
10*4882a593Smuzhiyun  * mender:
11*4882a593Smuzhiyun  *	Lin Jinhan, troy.lin@rock-chips.com
12*4882a593Smuzhiyun  */
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include <linux/dma-buf.h>
15*4882a593Smuzhiyun #include <linux/dma-direct.h>
16*4882a593Smuzhiyun #include <linux/dma-mapping.h>
17*4882a593Smuzhiyun #include <linux/fs.h>
18*4882a593Smuzhiyun #include <linux/ioctl.h>
19*4882a593Smuzhiyun #include <linux/miscdevice.h>
20*4882a593Smuzhiyun #include <linux/module.h>
21*4882a593Smuzhiyun #include <linux/mutex.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <linux/soc/rockchip/rockchip_decompress.h>
24*4882a593Smuzhiyun #include <uapi/linux/rk-decom.h>
25*4882a593Smuzhiyun 
#define RK_DECOME_TIMEOUT	3 /* 3 seconds */

/* Driver state; a single global instance (g_rk_decom) backs the misc device. */
struct rk_decom_dev {
	struct miscdevice miscdev;	/* /dev/<RK_DECOM_NAME> misc device */
	struct device *dev;		/* miscdev.this_device, set at init time */
	struct mutex mutex;		/* serializes ioctl handling */
};
33*4882a593Smuzhiyun 
/* Defined below; declared here so rk_decom_fops can reference it. */
static long rk_decom_misc_ioctl(struct file *fptr, unsigned int cmd, unsigned long arg);

static const struct file_operations rk_decom_fops = {
	.owner           = THIS_MODULE,
	.unlocked_ioctl  = rk_decom_misc_ioctl,
};

/* Single global device instance, registered in rk_decom_misc_init(). */
static struct rk_decom_dev g_rk_decom = {
	.miscdev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name  = RK_DECOM_NAME,
		.fops  = &rk_decom_fops,
	},
};
48*4882a593Smuzhiyun 
check_scatter_list(unsigned int max_size,struct sg_table * sg_tbl)49*4882a593Smuzhiyun static bool check_scatter_list(unsigned int max_size, struct sg_table *sg_tbl)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun 	int i;
52*4882a593Smuzhiyun 	unsigned int total_len = 0;
53*4882a593Smuzhiyun 	dma_addr_t next_addr = 0;
54*4882a593Smuzhiyun 	struct scatterlist *sgl = NULL;
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	if (!sg_tbl || !(sg_tbl->sgl))
57*4882a593Smuzhiyun 		return false;
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	for_each_sgtable_sg(sg_tbl, sgl, i) {
60*4882a593Smuzhiyun 		if  (sg_phys(sgl) > SZ_4G || sg_phys(sgl) + sg_dma_len(sgl) > SZ_4G)
61*4882a593Smuzhiyun 			return false;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 		if (i && next_addr != sg_dma_address(sgl))
64*4882a593Smuzhiyun 			return false;
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 		total_len += sg_dma_len(sgl);
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 		next_addr = sg_dma_address(sgl) + sg_dma_len(sgl);
69*4882a593Smuzhiyun 	}
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	return max_size <= total_len;
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun 
get_dmafd_sgtbl(struct device * dev,int dma_fd,enum dma_data_direction dir,struct sg_table ** sg_tbl,struct dma_buf_attachment ** dma_attach,struct dma_buf ** dmabuf)74*4882a593Smuzhiyun static int get_dmafd_sgtbl(struct device *dev, int dma_fd, enum dma_data_direction dir,
75*4882a593Smuzhiyun 			   struct sg_table **sg_tbl, struct dma_buf_attachment **dma_attach,
76*4882a593Smuzhiyun 			   struct dma_buf **dmabuf)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	int ret = -EINVAL;
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	if (!dev)
81*4882a593Smuzhiyun 		return -EINVAL;
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	*sg_tbl     = NULL;
84*4882a593Smuzhiyun 	*dmabuf     = NULL;
85*4882a593Smuzhiyun 	*dma_attach = NULL;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	*dmabuf = dma_buf_get(dma_fd);
88*4882a593Smuzhiyun 	if (IS_ERR(*dmabuf)) {
89*4882a593Smuzhiyun 		ret = PTR_ERR(*dmabuf);
90*4882a593Smuzhiyun 		goto error;
91*4882a593Smuzhiyun 	}
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	*dma_attach = dma_buf_attach(*dmabuf, dev);
94*4882a593Smuzhiyun 	if (IS_ERR(*dma_attach)) {
95*4882a593Smuzhiyun 		ret = PTR_ERR(*dma_attach);
96*4882a593Smuzhiyun 		goto error;
97*4882a593Smuzhiyun 	}
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	*sg_tbl = dma_buf_map_attachment(*dma_attach, dir);
100*4882a593Smuzhiyun 	if (IS_ERR(*sg_tbl)) {
101*4882a593Smuzhiyun 		ret = PTR_ERR(*sg_tbl);
102*4882a593Smuzhiyun 		goto error;
103*4882a593Smuzhiyun 	}
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	return 0;
106*4882a593Smuzhiyun error:
107*4882a593Smuzhiyun 	if (*sg_tbl)
108*4882a593Smuzhiyun 		dma_buf_unmap_attachment(*dma_attach, *sg_tbl, dir);
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	if (*dma_attach)
111*4882a593Smuzhiyun 		dma_buf_detach(*dmabuf, *dma_attach);
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	if (*dmabuf)
114*4882a593Smuzhiyun 		dma_buf_put(*dmabuf);
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	*sg_tbl     = NULL;
117*4882a593Smuzhiyun 	*dmabuf     = NULL;
118*4882a593Smuzhiyun 	*dma_attach = NULL;
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	return ret;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun 
put_dmafd_sgtbl(struct device * dev,int dma_fd,enum dma_data_direction dir,struct sg_table * sg_tbl,struct dma_buf_attachment * dma_attach,struct dma_buf * dmabuf)123*4882a593Smuzhiyun static int put_dmafd_sgtbl(struct device *dev, int dma_fd, enum dma_data_direction dir,
124*4882a593Smuzhiyun 			   struct sg_table *sg_tbl, struct dma_buf_attachment *dma_attach,
125*4882a593Smuzhiyun 			   struct dma_buf *dmabuf)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun 	if (!dev)
128*4882a593Smuzhiyun 		return -EINVAL;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	if (!sg_tbl || !dma_attach || !dmabuf)
131*4882a593Smuzhiyun 		return -EINVAL;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	dma_buf_unmap_attachment(dma_attach, sg_tbl, dir);
134*4882a593Smuzhiyun 	dma_buf_detach(dmabuf, dma_attach);
135*4882a593Smuzhiyun 	dma_buf_put(dmabuf);
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	return 0;
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun 
rk_decom_for_user(struct device * dev,struct rk_decom_param * param)140*4882a593Smuzhiyun static int rk_decom_for_user(struct device *dev, struct rk_decom_param *param)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	int ret;
143*4882a593Smuzhiyun 	struct sg_table *sg_tbl_in = NULL, *sg_tbl_out = NULL;
144*4882a593Smuzhiyun 	struct dma_buf *dma_buf_in = NULL, *dma_buf_out = NULL;
145*4882a593Smuzhiyun 	struct dma_buf_attachment *dma_attach_in = NULL, *dma_attach_out = NULL;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	if (param->mode != RK_GZIP_MOD && param->mode != RK_ZLIB_MOD) {
148*4882a593Smuzhiyun 		dev_err(dev, "unsupported mode %u for decompress.\n", param->mode);
149*4882a593Smuzhiyun 		return -EINVAL;
150*4882a593Smuzhiyun 	}
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	ret = get_dmafd_sgtbl(dev, param->src_fd, DMA_TO_DEVICE,
153*4882a593Smuzhiyun 			      &sg_tbl_in, &dma_attach_in, &dma_buf_in);
154*4882a593Smuzhiyun 	if (unlikely(ret)) {
155*4882a593Smuzhiyun 		dev_err(dev, "src_fd[%d] get_dmafd_sgtbl error.", (int)param->src_fd);
156*4882a593Smuzhiyun 		goto exit;
157*4882a593Smuzhiyun 	}
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 	ret = get_dmafd_sgtbl(dev, param->dst_fd, DMA_FROM_DEVICE,
160*4882a593Smuzhiyun 			      &sg_tbl_out, &dma_attach_out, &dma_buf_out);
161*4882a593Smuzhiyun 	if (unlikely(ret)) {
162*4882a593Smuzhiyun 		dev_err(dev, "dst_fd[%d] get_dmafd_sgtbl error.", (int)param->dst_fd);
163*4882a593Smuzhiyun 		goto exit;
164*4882a593Smuzhiyun 	}
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	if (!check_scatter_list(0, sg_tbl_in)) {
167*4882a593Smuzhiyun 		dev_err(dev, "Input dma_fd not a continuous buffer.\n");
168*4882a593Smuzhiyun 		ret = -EINVAL;
169*4882a593Smuzhiyun 		goto exit;
170*4882a593Smuzhiyun 	}
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	if (!check_scatter_list(param->dst_max_size, sg_tbl_out)) {
173*4882a593Smuzhiyun 		dev_err(dev, "Output dma_fd not a continuous buffer or dst_max_size too big.\n");
174*4882a593Smuzhiyun 		ret = -EINVAL;
175*4882a593Smuzhiyun 		goto exit;
176*4882a593Smuzhiyun 	}
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	ret = rk_decom_start(param->mode | DECOM_NOBLOCKING, sg_dma_address(sg_tbl_in->sgl),
179*4882a593Smuzhiyun 			     sg_dma_address(sg_tbl_out->sgl), param->dst_max_size);
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 	if (ret) {
182*4882a593Smuzhiyun 		dev_err(dev, "rk_decom_start failed[%d].", ret);
183*4882a593Smuzhiyun 		goto exit;
184*4882a593Smuzhiyun 	}
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	ret = rk_decom_wait_done(RK_DECOME_TIMEOUT, &param->decom_data_len);
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun exit:
189*4882a593Smuzhiyun 	if (sg_tbl_in && dma_buf_in && dma_attach_in)
190*4882a593Smuzhiyun 		put_dmafd_sgtbl(dev, param->src_fd, DMA_TO_DEVICE,
191*4882a593Smuzhiyun 				sg_tbl_in, dma_attach_in, dma_buf_in);
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	if (sg_tbl_out && dma_buf_out && dma_attach_out)
194*4882a593Smuzhiyun 		put_dmafd_sgtbl(dev, param->dst_fd, DMA_FROM_DEVICE,
195*4882a593Smuzhiyun 				sg_tbl_out, dma_attach_out, dma_buf_out);
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	return ret;
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun 
rk_decom_misc_ioctl(struct file * fptr,unsigned int cmd,unsigned long arg)200*4882a593Smuzhiyun static long rk_decom_misc_ioctl(struct file *fptr, unsigned int cmd, unsigned long arg)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun 	struct rk_decom_param param;
203*4882a593Smuzhiyun 	struct rk_decom_dev *rk_decom = NULL;
204*4882a593Smuzhiyun 	int ret = -EINVAL;
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	rk_decom = container_of(fptr->private_data, struct rk_decom_dev, miscdev);
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	mutex_lock(&rk_decom->mutex);
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 	switch (cmd) {
211*4882a593Smuzhiyun 	case RK_DECOM_USER: {
212*4882a593Smuzhiyun 		ret = copy_from_user((char *)&param, (char *)arg, sizeof(param));
213*4882a593Smuzhiyun 		if (unlikely(ret)) {
214*4882a593Smuzhiyun 			ret = -EFAULT;
215*4882a593Smuzhiyun 			dev_err(rk_decom->dev, "copy from user fail.\n");
216*4882a593Smuzhiyun 			goto exit;
217*4882a593Smuzhiyun 		}
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 		ret = rk_decom_for_user(rk_decom->dev, &param);
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 		if (copy_to_user((char *)arg, &param, sizeof(param))) {
222*4882a593Smuzhiyun 			dev_err(rk_decom->dev, " copy to user fail.\n");
223*4882a593Smuzhiyun 			ret = -EFAULT;
224*4882a593Smuzhiyun 			goto exit;
225*4882a593Smuzhiyun 		}
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 		break;
228*4882a593Smuzhiyun 	}
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 	default:
231*4882a593Smuzhiyun 		ret = -EINVAL;
232*4882a593Smuzhiyun 		break;
233*4882a593Smuzhiyun 	}
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun exit:
236*4882a593Smuzhiyun 	mutex_unlock(&rk_decom->mutex);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	return ret;
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun 
rk_decom_misc_init(void)241*4882a593Smuzhiyun static int __init rk_decom_misc_init(void)
242*4882a593Smuzhiyun {
243*4882a593Smuzhiyun 	int ret;
244*4882a593Smuzhiyun 	struct rk_decom_dev *rk_decom = &g_rk_decom;
245*4882a593Smuzhiyun 	struct miscdevice *misc = &g_rk_decom.miscdev;
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	ret = misc_register(misc);
248*4882a593Smuzhiyun 	if (ret < 0) {
249*4882a593Smuzhiyun 		pr_err("rk_decom: misc device %s register failed[%d].\n", RK_DECOM_NAME, ret);
250*4882a593Smuzhiyun 		goto error;
251*4882a593Smuzhiyun 	}
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	rk_decom->dev = misc->this_device;
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	/* Save driver private data */
256*4882a593Smuzhiyun 	dev_set_drvdata(rk_decom->dev, rk_decom);
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	ret = dma_coerce_mask_and_coherent(misc->this_device, DMA_BIT_MASK(32));
259*4882a593Smuzhiyun 	if (ret) {
260*4882a593Smuzhiyun 		dev_err(rk_decom->dev, "No suitable DMA available.\n");
261*4882a593Smuzhiyun 		goto error;
262*4882a593Smuzhiyun 	}
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	mutex_init(&rk_decom->mutex);
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	dev_info(rk_decom->dev, "misc device %s register success.\n", RK_DECOM_NAME);
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	return 0;
269*4882a593Smuzhiyun error:
270*4882a593Smuzhiyun 	if (rk_decom->dev)
271*4882a593Smuzhiyun 		misc_deregister(&rk_decom->miscdev);
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	return ret;
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun 
/* Module unload: remove the misc device node registered at init. */
static void __exit rk_decom_misc_exit(void)
{
	misc_deregister(&g_rk_decom.miscdev);
}
280*4882a593Smuzhiyun 
/* Standard module entry/exit hookup and metadata. */
module_init(rk_decom_misc_init)
module_exit(rk_decom_misc_exit)

MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION("1.0.0");
MODULE_AUTHOR("Xiao Yapeng yp.xiao@rock-chips.com");
MODULE_DESCRIPTION("Rockchip decom driver");
288