1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3 * Copyright (c) 2022 Rockchip Electronics Co., Ltd
4 *
5 * Due to hardware limitations, this module only supports
6 * up to 32bit continuous CMA memory.
7 *
8 * author:
9 * Xiao Yapeng, yp.xiao@rock-chips.com
 * maintainer:
11 * Lin Jinhan, troy.lin@rock-chips.com
12 */
13
14 #include <linux/dma-buf.h>
15 #include <linux/dma-direct.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/fs.h>
18 #include <linux/ioctl.h>
19 #include <linux/miscdevice.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/slab.h>
23 #include <linux/soc/rockchip/rockchip_decompress.h>
24 #include <uapi/linux/rk-decom.h>
25
#define RK_DECOME_TIMEOUT 3 /* 3 seconds */

/* Per-driver state; a single static instance (g_rk_decom) exists. */
struct rk_decom_dev {
	struct miscdevice miscdev; /* char device exposed to userspace */
	struct device *dev;        /* miscdev->this_device, set at init */
	struct mutex mutex;        /* serializes ioctl callers; HW handles one job at a time */
};
33
static long rk_decom_misc_ioctl(struct file *fptr, unsigned int cmd, unsigned long arg);

/* Only ioctl is supported; open/read/write fall back to misc defaults. */
static const struct file_operations rk_decom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = rk_decom_misc_ioctl,
};
40
/* Singleton device instance; .dev and .mutex are filled in at module init. */
static struct rk_decom_dev g_rk_decom = {
	.miscdev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name = RK_DECOM_NAME,
		.fops = &rk_decom_fops,
	},
};
48
check_scatter_list(unsigned int max_size,struct sg_table * sg_tbl)49 static bool check_scatter_list(unsigned int max_size, struct sg_table *sg_tbl)
50 {
51 int i;
52 unsigned int total_len = 0;
53 dma_addr_t next_addr = 0;
54 struct scatterlist *sgl = NULL;
55
56 if (!sg_tbl || !(sg_tbl->sgl))
57 return false;
58
59 for_each_sgtable_sg(sg_tbl, sgl, i) {
60 if (sg_phys(sgl) > SZ_4G || sg_phys(sgl) + sg_dma_len(sgl) > SZ_4G)
61 return false;
62
63 if (i && next_addr != sg_dma_address(sgl))
64 return false;
65
66 total_len += sg_dma_len(sgl);
67
68 next_addr = sg_dma_address(sgl) + sg_dma_len(sgl);
69 }
70
71 return max_size <= total_len;
72 }
73
get_dmafd_sgtbl(struct device * dev,int dma_fd,enum dma_data_direction dir,struct sg_table ** sg_tbl,struct dma_buf_attachment ** dma_attach,struct dma_buf ** dmabuf)74 static int get_dmafd_sgtbl(struct device *dev, int dma_fd, enum dma_data_direction dir,
75 struct sg_table **sg_tbl, struct dma_buf_attachment **dma_attach,
76 struct dma_buf **dmabuf)
77 {
78 int ret = -EINVAL;
79
80 if (!dev)
81 return -EINVAL;
82
83 *sg_tbl = NULL;
84 *dmabuf = NULL;
85 *dma_attach = NULL;
86
87 *dmabuf = dma_buf_get(dma_fd);
88 if (IS_ERR(*dmabuf)) {
89 ret = PTR_ERR(*dmabuf);
90 goto error;
91 }
92
93 *dma_attach = dma_buf_attach(*dmabuf, dev);
94 if (IS_ERR(*dma_attach)) {
95 ret = PTR_ERR(*dma_attach);
96 goto error;
97 }
98
99 *sg_tbl = dma_buf_map_attachment(*dma_attach, dir);
100 if (IS_ERR(*sg_tbl)) {
101 ret = PTR_ERR(*sg_tbl);
102 goto error;
103 }
104
105 return 0;
106 error:
107 if (*sg_tbl)
108 dma_buf_unmap_attachment(*dma_attach, *sg_tbl, dir);
109
110 if (*dma_attach)
111 dma_buf_detach(*dmabuf, *dma_attach);
112
113 if (*dmabuf)
114 dma_buf_put(*dmabuf);
115
116 *sg_tbl = NULL;
117 *dmabuf = NULL;
118 *dma_attach = NULL;
119
120 return ret;
121 }
122
put_dmafd_sgtbl(struct device * dev,int dma_fd,enum dma_data_direction dir,struct sg_table * sg_tbl,struct dma_buf_attachment * dma_attach,struct dma_buf * dmabuf)123 static int put_dmafd_sgtbl(struct device *dev, int dma_fd, enum dma_data_direction dir,
124 struct sg_table *sg_tbl, struct dma_buf_attachment *dma_attach,
125 struct dma_buf *dmabuf)
126 {
127 if (!dev)
128 return -EINVAL;
129
130 if (!sg_tbl || !dma_attach || !dmabuf)
131 return -EINVAL;
132
133 dma_buf_unmap_attachment(dma_attach, sg_tbl, dir);
134 dma_buf_detach(dmabuf, dma_attach);
135 dma_buf_put(dmabuf);
136
137 return 0;
138 }
139
rk_decom_for_user(struct device * dev,struct rk_decom_param * param)140 static int rk_decom_for_user(struct device *dev, struct rk_decom_param *param)
141 {
142 int ret;
143 struct sg_table *sg_tbl_in = NULL, *sg_tbl_out = NULL;
144 struct dma_buf *dma_buf_in = NULL, *dma_buf_out = NULL;
145 struct dma_buf_attachment *dma_attach_in = NULL, *dma_attach_out = NULL;
146
147 if (param->mode != RK_GZIP_MOD && param->mode != RK_ZLIB_MOD) {
148 dev_err(dev, "unsupported mode %u for decompress.\n", param->mode);
149 return -EINVAL;
150 }
151
152 ret = get_dmafd_sgtbl(dev, param->src_fd, DMA_TO_DEVICE,
153 &sg_tbl_in, &dma_attach_in, &dma_buf_in);
154 if (unlikely(ret)) {
155 dev_err(dev, "src_fd[%d] get_dmafd_sgtbl error.", (int)param->src_fd);
156 goto exit;
157 }
158
159 ret = get_dmafd_sgtbl(dev, param->dst_fd, DMA_FROM_DEVICE,
160 &sg_tbl_out, &dma_attach_out, &dma_buf_out);
161 if (unlikely(ret)) {
162 dev_err(dev, "dst_fd[%d] get_dmafd_sgtbl error.", (int)param->dst_fd);
163 goto exit;
164 }
165
166 if (!check_scatter_list(0, sg_tbl_in)) {
167 dev_err(dev, "Input dma_fd not a continuous buffer.\n");
168 ret = -EINVAL;
169 goto exit;
170 }
171
172 if (!check_scatter_list(param->dst_max_size, sg_tbl_out)) {
173 dev_err(dev, "Output dma_fd not a continuous buffer or dst_max_size too big.\n");
174 ret = -EINVAL;
175 goto exit;
176 }
177
178 ret = rk_decom_start(param->mode | DECOM_NOBLOCKING, sg_dma_address(sg_tbl_in->sgl),
179 sg_dma_address(sg_tbl_out->sgl), param->dst_max_size);
180
181 if (ret) {
182 dev_err(dev, "rk_decom_start failed[%d].", ret);
183 goto exit;
184 }
185
186 ret = rk_decom_wait_done(RK_DECOME_TIMEOUT, ¶m->decom_data_len);
187
188 exit:
189 if (sg_tbl_in && dma_buf_in && dma_attach_in)
190 put_dmafd_sgtbl(dev, param->src_fd, DMA_TO_DEVICE,
191 sg_tbl_in, dma_attach_in, dma_buf_in);
192
193 if (sg_tbl_out && dma_buf_out && dma_attach_out)
194 put_dmafd_sgtbl(dev, param->dst_fd, DMA_FROM_DEVICE,
195 sg_tbl_out, dma_attach_out, dma_buf_out);
196
197 return ret;
198 }
199
rk_decom_misc_ioctl(struct file * fptr,unsigned int cmd,unsigned long arg)200 static long rk_decom_misc_ioctl(struct file *fptr, unsigned int cmd, unsigned long arg)
201 {
202 struct rk_decom_param param;
203 struct rk_decom_dev *rk_decom = NULL;
204 int ret = -EINVAL;
205
206 rk_decom = container_of(fptr->private_data, struct rk_decom_dev, miscdev);
207
208 mutex_lock(&rk_decom->mutex);
209
210 switch (cmd) {
211 case RK_DECOM_USER: {
212 ret = copy_from_user((char *)¶m, (char *)arg, sizeof(param));
213 if (unlikely(ret)) {
214 ret = -EFAULT;
215 dev_err(rk_decom->dev, "copy from user fail.\n");
216 goto exit;
217 }
218
219 ret = rk_decom_for_user(rk_decom->dev, ¶m);
220
221 if (copy_to_user((char *)arg, ¶m, sizeof(param))) {
222 dev_err(rk_decom->dev, " copy to user fail.\n");
223 ret = -EFAULT;
224 goto exit;
225 }
226
227 break;
228 }
229
230 default:
231 ret = -EINVAL;
232 break;
233 }
234
235 exit:
236 mutex_unlock(&rk_decom->mutex);
237
238 return ret;
239 }
240
rk_decom_misc_init(void)241 static int __init rk_decom_misc_init(void)
242 {
243 int ret;
244 struct rk_decom_dev *rk_decom = &g_rk_decom;
245 struct miscdevice *misc = &g_rk_decom.miscdev;
246
247 ret = misc_register(misc);
248 if (ret < 0) {
249 pr_err("rk_decom: misc device %s register failed[%d].\n", RK_DECOM_NAME, ret);
250 goto error;
251 }
252
253 rk_decom->dev = misc->this_device;
254
255 /* Save driver private data */
256 dev_set_drvdata(rk_decom->dev, rk_decom);
257
258 ret = dma_coerce_mask_and_coherent(misc->this_device, DMA_BIT_MASK(32));
259 if (ret) {
260 dev_err(rk_decom->dev, "No suitable DMA available.\n");
261 goto error;
262 }
263
264 mutex_init(&rk_decom->mutex);
265
266 dev_info(rk_decom->dev, "misc device %s register success.\n", RK_DECOM_NAME);
267
268 return 0;
269 error:
270 if (rk_decom->dev)
271 misc_deregister(&rk_decom->miscdev);
272
273 return ret;
274 }
275
/* Module unload: remove the misc device registered at init time. */
static void __exit rk_decom_misc_exit(void)
{
	misc_deregister(&g_rk_decom.miscdev);
}
280
module_init(rk_decom_misc_init)
module_exit(rk_decom_misc_exit)

/* Dual license matches the SPDX tag at the top of the file. */
MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION("1.0.0");
MODULE_AUTHOR("Xiao Yapeng yp.xiao@rock-chips.com");
MODULE_DESCRIPTION("Rockchip decom driver");
288