/*
 * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
 * author: Jung Zhao jung.zhao@rock-chips.com
 *         Randy Li, randy.li@rock-chips.com
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <drm/drm_device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "iep_iommu_ops.h"

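/*
 * iep_drm_buffer - bookkeeping for one imported dma-buf.
 *
 * Each buffer a session imports through iep_drm_import() gets one of
 * these nodes on session_info->buffer_list. @ref counts outstanding
 * map_iommu() users; when it drops to zero, iep_drm_clear_map()
 * releases the attachment and the device address in the anonymous
 * union (@iova with an IOMMU, @phys otherwise) becomes invalid.
 */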
struct iep_drm_buffer {
	struct list_head list;
	struct dma_buf *dma_buf;
	union {
		unsigned long iova;
		unsigned long phys;
	};
	unsigned long size;
	int index;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct page **pages;
	struct kref ref;
	struct iep_iommu_session_info *session_info;
};

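/*
 * iep_iommu_drm_info - per-device IOMMU state shared by all sessions.
 * @domain is the domain the core IOMMU code already set up for the
 * device; @attached tracks whether the device is currently attached
 * to it, guarded by iommu_info->iommu_mutex.
 */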
struct iep_iommu_drm_info {
	struct iommu_domain *domain;
	bool attached;
};

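/* Look up a buffer by session-local index; caller holds list_mutex. */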
static struct iep_drm_buffer *
iep_drm_get_buffer_no_lock(struct iep_iommu_session_info *session_info,
			   int idx)
{
	struct iep_drm_buffer *drm_buffer = NULL, *n;

	list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
				 list) {
		if (drm_buffer->index == idx)
			return drm_buffer;
	}

	return NULL;
}

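/*
 * Look up a buffer by the dma-buf behind a file descriptor; caller
 * holds list_mutex. The temporary reference taken by dma_buf_get() is
 * dropped again before returning, whether or not a match was found.
 */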
static struct iep_drm_buffer *
iep_drm_get_buffer_fd_no_lock(struct iep_iommu_session_info *session_info,
			      int fd)
{
	struct iep_drm_buffer *drm_buffer = NULL, *n;
	struct dma_buf *dma_buf = NULL;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return NULL;

	list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
				 list) {
		if (drm_buffer->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			return drm_buffer;
		}
	}

	dma_buf_put(dma_buf);

	return NULL;
}

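/* Detach the device from its IOMMU domain if currently attached. */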
static void iep_drm_detach(struct iep_iommu_info *iommu_info)
{
	struct iep_iommu_drm_info *drm_info = iommu_info->private;
	struct device *dev = iommu_info->dev;
	struct iommu_domain *domain = drm_info->domain;

	mutex_lock(&iommu_info->iommu_mutex);

	if (!drm_info->attached) {
		mutex_unlock(&iommu_info->iommu_mutex);
		return;
	}

	iommu_detach_device(domain, dev);
	drm_info->attached = false;

	mutex_unlock(&iommu_info->iommu_mutex);
}

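/*
 * Attach the device to its IOMMU domain; the caller must already hold
 * iommu_mutex. The coherent DMA mask and max segment size are capped
 * at 32 bits before attaching. Note this helper does not update
 * drm_info->attached; that is left to the callers.
 */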
static int iep_drm_attach_unlock(struct iep_iommu_info *iommu_info)
{
	struct iep_iommu_drm_info *drm_info = iommu_info->private;
	struct device *dev = iommu_info->dev;
	struct iommu_domain *domain = drm_info->domain;
	int ret = 0;

	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "Failed to attach iommu device\n");
		return ret;
	}

	return ret;
}

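/*
 * Locked wrapper around iep_drm_attach_unlock(); a no-op when the
 * device is already attached.
 */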
static int iep_drm_attach(struct iep_iommu_info *iommu_info)
{
	struct iep_iommu_drm_info *drm_info = iommu_info->private;
	int ret;

	mutex_lock(&iommu_info->iommu_mutex);

	if (drm_info->attached) {
		mutex_unlock(&iommu_info->iommu_mutex);
		return 0;
	}

	ret = iep_drm_attach_unlock(iommu_info);
	if (ret) {
		mutex_unlock(&iommu_info->iommu_mutex);
		return ret;
	}

	drm_info->attached = true;

	mutex_unlock(&iommu_info->iommu_mutex);

	return ret;
}

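/*
 * kref release callback: tear down the dma-buf mapping once the last
 * map_iommu() reference is gone. The device is attached temporarily
 * if needed so the unmap can go through the IOMMU, then detached
 * again afterwards.
 */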
static void iep_drm_clear_map(struct kref *ref)
{
	struct iep_drm_buffer *drm_buffer =
		container_of(ref, struct iep_drm_buffer, ref);
	struct iep_iommu_session_info *session_info =
		drm_buffer->session_info;
	struct iep_iommu_info *iommu_info = session_info->iommu_info;
	struct iep_iommu_drm_info *drm_info = iommu_info->private;
	struct device *dev = session_info->dev;
	struct iommu_domain *domain = drm_info->domain;

	mutex_lock(&iommu_info->iommu_mutex);
	drm_info = session_info->iommu_info->private;
	if (!drm_info->attached) {
		if (iep_drm_attach_unlock(session_info->iommu_info))
			dev_err(dev, "can't clear map, attach iommu failed.\n");
	}

	if (drm_buffer->attach) {
		dma_buf_unmap_attachment(drm_buffer->attach, drm_buffer->sgt,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(drm_buffer->dma_buf, drm_buffer->attach);
		dma_buf_put(drm_buffer->dma_buf);
		drm_buffer->attach = NULL;
	}

	if (!drm_info->attached)
		iommu_detach_device(domain, dev);

	mutex_unlock(&iommu_info->iommu_mutex);
}

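/* Dump the buffers still tracked by a session, for debugging. */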
static void iep_drm_dump_info(struct iep_iommu_session_info *session_info)
{
	struct iep_drm_buffer *drm_buffer = NULL, *n;

	vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
			"buffers still stored in the list:\n");
	list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
				 list) {
		vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
				"index %d drm_buffer dma_buf %p\n",
				drm_buffer->index,
				drm_buffer->dma_buf);
	}
}

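/*
 * Drop a session's bookkeeping for buffer @idx. The node is only
 * removed and freed once its kref has already fallen to zero, i.e.
 * after every map_iommu() user has called unmap_iommu().
 */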
static int iep_drm_free(struct iep_iommu_session_info *session_info,
			int idx)
{
	struct device *dev = session_info->dev;
	/* please double-check all maps have been released */
	struct iep_drm_buffer *drm_buffer;

	mutex_lock(&session_info->list_mutex);
	drm_buffer = iep_drm_get_buffer_no_lock(session_info, idx);

	if (!drm_buffer) {
		dev_err(dev, "can not find buffer %d in list\n", idx);
		mutex_unlock(&session_info->list_mutex);

		return -EINVAL;
	}

	if (kref_read(&drm_buffer->ref) == 0) {
		dma_buf_put(drm_buffer->dma_buf);
		list_del_init(&drm_buffer->list);
		kfree(drm_buffer);
		session_info->buffer_nums--;
		vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
				"buffer nums %d\n", session_info->buffer_nums);
	}
	mutex_unlock(&session_info->list_mutex);

	return 0;
}

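/* Drop one map_iommu() reference; the last put tears down the mapping. */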
static int
iep_drm_unmap_iommu(struct iep_iommu_session_info *session_info,
		    int idx)
{
	struct device *dev = session_info->dev;
	struct iep_drm_buffer *drm_buffer;

	mutex_lock(&session_info->list_mutex);
	drm_buffer = iep_drm_get_buffer_no_lock(session_info, idx);
	mutex_unlock(&session_info->list_mutex);

	if (!drm_buffer) {
		dev_err(dev, "can not find buffer %d in list\n", idx);
		return -EINVAL;
	}

	kref_put(&drm_buffer->ref, iep_drm_clear_map);

	return 0;
}

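/*
 * Hand out the device address and size of an already-imported buffer,
 * taking a reference that must be balanced by iep_drm_unmap_iommu().
 */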
static int iep_drm_map_iommu(struct iep_iommu_session_info *session_info,
			     int idx,
			     unsigned long *iova,
			     unsigned long *size)
{
	struct device *dev = session_info->dev;
	struct iep_drm_buffer *drm_buffer;

	mutex_lock(&session_info->list_mutex);
	drm_buffer = iep_drm_get_buffer_no_lock(session_info, idx);
	mutex_unlock(&session_info->list_mutex);

	if (!drm_buffer) {
		dev_err(dev, "can not find buffer %d in list\n", idx);
		return -EINVAL;
	}

	kref_get(&drm_buffer->ref);
	if (iova)
		*iova = drm_buffer->iova;
	if (size)
		*size = drm_buffer->size;
	return 0;
}

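/*
 * Free a buffer by file descriptor: look it up, drop one mapping
 * reference, then release the bookkeeping if the kref reached zero.
 */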
static int
iep_drm_free_fd(struct iep_iommu_session_info *session_info, int fd)
{
	/* please double-check all maps have been released */
	struct iep_drm_buffer *drm_buffer = NULL;

	mutex_lock(&session_info->list_mutex);
	drm_buffer = iep_drm_get_buffer_fd_no_lock(session_info, fd);

	if (!drm_buffer) {
		vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
				"can not find buffer fd %d in list\n", fd);
		mutex_unlock(&session_info->list_mutex);

		return -EINVAL;
	}
	mutex_unlock(&session_info->list_mutex);

	iep_drm_unmap_iommu(session_info, drm_buffer->index);

	mutex_lock(&session_info->list_mutex);
	if (kref_read(&drm_buffer->ref) == 0) {
		dma_buf_put(drm_buffer->dma_buf);
		list_del_init(&drm_buffer->list);
		kfree(drm_buffer);
		session_info->buffer_nums--;
		vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
				"buffer nums %d\n", session_info->buffer_nums);
	}
	mutex_unlock(&session_info->list_mutex);

	return 0;
}

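/* Release every buffer a session still holds, e.g. on session close. */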
static void
iep_drm_clear_session(struct iep_iommu_session_info *session_info)
{
	struct iep_drm_buffer *drm_buffer = NULL, *n;

	list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
				 list) {
		kref_put(&drm_buffer->ref, iep_drm_clear_map);
		iep_drm_free(session_info, drm_buffer->index);
	}
}

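/*
 * Import a dma-buf into the session and map it for the device.
 * Returns the session-local buffer index on success (an fd that was
 * already imported returns its existing index) or a negative errno.
 */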
static int iep_drm_import(struct iep_iommu_session_info *session_info,
			  int fd)
{
	struct iep_drm_buffer *drm_buffer = NULL, *n;
	struct iep_iommu_info *iommu_info = session_info->iommu_info;
	struct iep_iommu_drm_info *drm_info = iommu_info->private;
	struct iommu_domain *domain = drm_info->domain;
	struct device *dev = session_info->dev;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct dma_buf *dma_buf;
	int ret = 0;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	list_for_each_entry_safe(drm_buffer, n,
				 &session_info->buffer_list, list) {
		if (drm_buffer->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			return drm_buffer->index;
		}
	}

	drm_buffer = kzalloc(sizeof(*drm_buffer), GFP_KERNEL);
	if (!drm_buffer) {
		dma_buf_put(dma_buf);
		return -ENOMEM;
	}

	drm_buffer->dma_buf = dma_buf;
	drm_buffer->session_info = session_info;

	kref_init(&drm_buffer->ref);

	mutex_lock(&iommu_info->iommu_mutex);
	drm_info = session_info->iommu_info->private;
	if (!drm_info->attached) {
		ret = iep_drm_attach_unlock(session_info->iommu_info);
		if (ret)
			goto fail_out;
	}

	attach = dma_buf_attach(drm_buffer->dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto fail_out;
	}

	get_dma_buf(drm_buffer->dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	drm_buffer->iova = sg_dma_address(sgt->sgl);
	drm_buffer->size = drm_buffer->dma_buf->size;

	drm_buffer->attach = attach;
	drm_buffer->sgt = sgt;

	if (!drm_info->attached)
		iommu_detach_device(domain, dev);

	mutex_unlock(&iommu_info->iommu_mutex);

	INIT_LIST_HEAD(&drm_buffer->list);
	mutex_lock(&session_info->list_mutex);
	session_info->buffer_nums++;
	vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
			"buffer nums %d\n", session_info->buffer_nums);
	drm_buffer->index = session_info->max_idx;
	list_add_tail(&drm_buffer->list, &session_info->buffer_list);
	session_info->max_idx++;
	if ((session_info->max_idx & 0xfffffff) == 0)
		session_info->max_idx = 0;
	mutex_unlock(&session_info->list_mutex);

	return drm_buffer->index;

fail_detach:
	dev_err(dev, "dmabuf map attach failed\n");
	dma_buf_detach(drm_buffer->dma_buf, attach);
	dma_buf_put(drm_buffer->dma_buf);
fail_out:
	/* drop the reference taken by dma_buf_get() above */
	dma_buf_put(dma_buf);
	kfree(drm_buffer);
	mutex_unlock(&iommu_info->iommu_mutex);

	return ret;
}

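/*
 * Allocate the per-device private data and borrow the IOMMU domain
 * the core already set up for the device. No attach happens here.
 */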
static int iep_drm_create(struct iep_iommu_info *iommu_info)
{
	struct iep_iommu_drm_info *drm_info;

	iommu_info->private = kzalloc(sizeof(*drm_info),
				      GFP_KERNEL);
	drm_info = iommu_info->private;
	if (!drm_info)
		return -ENOMEM;

	drm_info->domain = iommu_get_domain_for_dev(iommu_info->dev);
	drm_info->attached = false;
	if (!drm_info->domain) {
		kfree(iommu_info->private);
		iommu_info->private = NULL;
		return -ENODEV;
	}

	return 0;
}

static int iep_drm_destroy(struct iep_iommu_info *iommu_info)
{
	struct iep_iommu_drm_info *drm_info = iommu_info->private;

	iep_drm_detach(iommu_info);

	kfree(drm_info);
	iommu_info->private = NULL;

	return 0;
}

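/* Ops table plugged into the generic iep_iommu layer for the DRM path. */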
static struct iep_iommu_ops drm_ops = {
	.create = iep_drm_create,
	.import = iep_drm_import,
	.free = iep_drm_free,
	.free_fd = iep_drm_free_fd,
	.map_iommu = iep_drm_map_iommu,
	.unmap_iommu = iep_drm_unmap_iommu,
	.destroy = iep_drm_destroy,
	.dump = iep_drm_dump_info,
	.attach = iep_drm_attach,
	.detach = iep_drm_detach,
	.clear = iep_drm_clear_session,
};

void iep_iommu_drm_set_ops(struct iep_iommu_info *iommu_info)
{
	if (!iommu_info)
		return;
	iommu_info->ops = &drm_ops;
}