xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_iommu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Alpha Lin, alpha.lin@rock-chips.com
7  *	Randy Li, randy.li@rock-chips.com
8  *	Ding Wei, leo.ding@rock-chips.com
9  *
10  */
11 #include <linux/delay.h>
12 #include <linux/dma-buf-cache.h>
13 #include <linux/dma-iommu.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/iommu.h>
16 #include <linux/of.h>
17 #include <linux/of_platform.h>
18 #include <linux/kref.h>
19 #include <linux/slab.h>
20 #include <linux/pm_runtime.h>
21 
22 #ifdef CONFIG_ARM_DMA_USE_IOMMU
23 #include <asm/dma-iommu.h>
24 #endif
25 #include <soc/rockchip/rockchip_iommu.h>
26 
27 #include "mpp_debug.h"
28 #include "mpp_iommu.h"
29 #include "mpp_common.h"
30 
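/* Look up a buffer already imported into the session by its dma-buf fd */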
31 struct mpp_dma_buffer *
32 mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd)
33 {
34 	struct dma_buf *dmabuf;
35 	struct mpp_dma_buffer *out = NULL;
36 	struct mpp_dma_buffer *buffer = NULL, *n;
37 
38 	dmabuf = dma_buf_get(fd);
39 	if (IS_ERR(dmabuf))
40 		return NULL;
41 
42 	mutex_lock(&dma->list_mutex);
43 	list_for_each_entry_safe(buffer, n,
44 				 &dma->used_list, link) {
45 		/*
46 		 * Several fds may be dup'ed and point to the same dmabuf,
47 		 * so match buffers by the underlying dmabuf pointer here.
48 		 */
49 		if (buffer->dmabuf == dmabuf) {
50 			out = buffer;
51 			break;
52 		}
53 	}
54 	mutex_unlock(&dma->list_mutex);
55 	dma_buf_put(dmabuf);
56 
57 	return out;
58 }
59 
60 /* Release the buffer from the current list */
61 static void mpp_dma_release_buffer(struct kref *ref)
62 {
63 	struct mpp_dma_buffer *buffer =
64 		container_of(ref, struct mpp_dma_buffer, ref);
65 
66 	buffer->dma->buffer_count--;
67 	list_move_tail(&buffer->link, &buffer->dma->unused_list);
68 
69 	dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
70 	dma_buf_detach(buffer->dmabuf, buffer->attach);
71 	dma_buf_put(buffer->dmabuf);
72 	buffer->dma = NULL;
73 	buffer->dmabuf = NULL;
74 	buffer->attach = NULL;
75 	buffer->sgt = NULL;
76 	buffer->copy_sgt = NULL;
77 	buffer->iova = 0;
78 	buffer->size = 0;
79 	buffer->vaddr = NULL;
80 	buffer->last_used = 0;
81 }
82 
83 /* Remove the oldest buffer when the buffer count exceeds the configured limit */
84 static int
85 mpp_dma_remove_extra_buffer(struct mpp_dma_session *dma)
86 {
87 	struct mpp_dma_buffer *n;
88 	struct mpp_dma_buffer *oldest = NULL, *buffer = NULL;
89 	ktime_t oldest_time = ktime_set(0, 0);
90 
91 	if (dma->buffer_count > dma->max_buffers) {
92 		mutex_lock(&dma->list_mutex);
93 		list_for_each_entry_safe(buffer, n,
94 					 &dma->used_list,
95 					 link) {
96 			if (ktime_to_ns(oldest_time) == 0 ||
97 			    ktime_after(oldest_time, buffer->last_used)) {
98 				oldest_time = buffer->last_used;
99 				oldest = buffer;
100 			}
101 		}
102 		if (oldest && kref_read(&oldest->ref) == 1)
103 			kref_put(&oldest->ref, mpp_dma_release_buffer);
104 		mutex_unlock(&dma->list_mutex);
105 	}
106 
107 	return 0;
108 }
109 
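/* Drop one reference on an imported buffer; the final put releases it */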
110 int mpp_dma_release(struct mpp_dma_session *dma,
111 		    struct mpp_dma_buffer *buffer)
112 {
113 	mutex_lock(&dma->list_mutex);
114 	kref_put(&buffer->ref, mpp_dma_release_buffer);
115 	mutex_unlock(&dma->list_mutex);
116 
117 	return 0;
118 }
119 
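/* Find the buffer imported for the given fd and drop one reference on it */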
120 int mpp_dma_release_fd(struct mpp_dma_session *dma, int fd)
121 {
122 	struct device *dev = dma->dev;
123 	struct mpp_dma_buffer *buffer = NULL;
124 
125 	buffer = mpp_dma_find_buffer_fd(dma, fd);
126 	if (IS_ERR_OR_NULL(buffer)) {
127 		dev_err(dev, "cannot find buffer for fd %d in list\n", fd);
128 
129 		return -EINVAL;
130 	}
131 
132 	mutex_lock(&dma->list_mutex);
133 	kref_put(&buffer->ref, mpp_dma_release_buffer);
134 	mutex_unlock(&dma->list_mutex);
135 
136 	return 0;
137 }
138 
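/* Allocate a page-aligned coherent DMA buffer for the given device */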
139 struct mpp_dma_buffer *
140 mpp_dma_alloc(struct device *dev, size_t size)
141 {
142 	size_t align_size;
143 	dma_addr_t iova;
144 	struct mpp_dma_buffer *buffer;
145 
146 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
147 	if (!buffer)
148 		return NULL;
149 
150 	align_size = PAGE_ALIGN(size);
151 	buffer->vaddr = dma_alloc_coherent(dev, align_size, &iova, GFP_KERNEL);
152 	if (!buffer->vaddr)
153 		goto fail_dma_alloc;
154 
155 	buffer->size = align_size;
156 	buffer->iova = iova;
157 	buffer->dev = dev;
158 
159 	return buffer;
160 fail_dma_alloc:
161 	kfree(buffer);
162 	return NULL;
163 }
164 
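/* Free a buffer previously obtained from mpp_dma_alloc */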
165 int mpp_dma_free(struct mpp_dma_buffer *buffer)
166 {
167 	dma_free_coherent(buffer->dev, buffer->size,
168 			buffer->vaddr, buffer->iova);
169 	buffer->vaddr = NULL;
170 	buffer->iova = 0;
171 	buffer->size = 0;
172 	buffer->dev = NULL;
173 	kfree(buffer);
174 
175 	return 0;
176 }
177 
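/*
 * Import a dma-buf fd into the session: reuse the cached entry when the fd
 * is already imported, otherwise attach and map the dma-buf and track it.
 */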
178 struct mpp_dma_buffer *mpp_dma_import_fd(struct mpp_iommu_info *iommu_info,
179 					 struct mpp_dma_session *dma,
180 					 int fd)
181 {
182 	int ret = 0;
183 	struct sg_table *sgt;
184 	struct dma_buf *dmabuf;
185 	struct mpp_dma_buffer *buffer;
186 	struct dma_buf_attachment *attach;
187 
188 	if (!dma) {
189 		mpp_err("dma session is null\n");
190 		return ERR_PTR(-EINVAL);
191 	}
192 
193 	/* remove the oldest buffer before adding a new one */
194 	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
195 		mpp_dma_remove_extra_buffer(dma);
196 
197 	/* Check whether the fd is already imported into this dma session */
198 	buffer = mpp_dma_find_buffer_fd(dma, fd);
199 	if (!IS_ERR_OR_NULL(buffer)) {
200 		if (kref_get_unless_zero(&buffer->ref)) {
201 			buffer->last_used = ktime_get();
202 			return buffer;
203 		}
204 		dev_dbg(dma->dev, "missing the fd %d\n", fd);
205 	}
206 
207 	dmabuf = dma_buf_get(fd);
208 	if (IS_ERR(dmabuf)) {
209 		ret = PTR_ERR(dmabuf);
210 		mpp_err("dma_buf_get fd %d failed(%d)\n", fd, ret);
211 		return ERR_PTR(ret);
212 	}
213 	/* A new DMA buffer */
214 	mutex_lock(&dma->list_mutex);
215 	buffer = list_first_entry_or_null(&dma->unused_list,
216 					   struct mpp_dma_buffer,
217 					   link);
218 	if (!buffer) {
219 		ret = -ENOMEM;
220 		mutex_unlock(&dma->list_mutex);
221 		goto fail;
222 	}
223 	list_del_init(&buffer->link);
224 	mutex_unlock(&dma->list_mutex);
225 
226 	buffer->dmabuf = dmabuf;
227 	buffer->dir = DMA_BIDIRECTIONAL;
228 	buffer->last_used = ktime_get();
229 
230 	attach = dma_buf_attach(buffer->dmabuf, dma->dev);
231 	if (IS_ERR(attach)) {
232 		ret = PTR_ERR(attach);
233 		mpp_err("dma_buf_attach fd %d failed(%d)\n", fd, ret);
234 		goto fail_attach;
235 	}
236 
237 	sgt = dma_buf_map_attachment(attach, buffer->dir);
238 	if (IS_ERR(sgt)) {
239 		ret = PTR_ERR(sgt);
240 		mpp_err("dma_buf_map_attachment fd %d failed(%d)\n", fd, ret);
241 		goto fail_map;
242 	}
243 	buffer->iova = sg_dma_address(sgt->sgl);
244 	buffer->size = sg_dma_len(sgt->sgl);
245 	buffer->attach = attach;
246 	buffer->sgt = sgt;
247 	buffer->dma = dma;
248 
249 	kref_init(&buffer->ref);
250 
251 	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
252 		/* Take an extra reference for use outside the buffer pool */
253 		kref_get(&buffer->ref);
254 
255 	mutex_lock(&dma->list_mutex);
256 	dma->buffer_count++;
257 	list_add_tail(&buffer->link, &dma->used_list);
258 	mutex_unlock(&dma->list_mutex);
259 
260 	return buffer;
261 
262 fail_map:
263 	dma_buf_detach(buffer->dmabuf, attach);
264 fail_attach:
265 	mutex_lock(&dma->list_mutex);
266 	list_add_tail(&buffer->link, &dma->unused_list);
267 	mutex_unlock(&dma->list_mutex);
268 fail:
269 	dma_buf_put(dmabuf);
270 	return ERR_PTR(ret);
271 }
272 
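/* Undo mpp_dma_map_kernel: vunmap the dma-buf and end CPU access */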
273 int mpp_dma_unmap_kernel(struct mpp_dma_session *dma,
274 			 struct mpp_dma_buffer *buffer)
275 {
276 	void *vaddr = buffer->vaddr;
277 	struct dma_buf *dmabuf = buffer->dmabuf;
278 
279 	if (IS_ERR_OR_NULL(vaddr) ||
280 	    IS_ERR_OR_NULL(dmabuf))
281 		return -EINVAL;
282 
283 	dma_buf_vunmap(dmabuf, vaddr);
284 	buffer->vaddr = NULL;
285 
286 	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
287 
288 	return 0;
289 }
290 
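/* Map the imported dma-buf into kernel virtual address space for CPU access */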
291 int mpp_dma_map_kernel(struct mpp_dma_session *dma,
292 		       struct mpp_dma_buffer *buffer)
293 {
294 	int ret;
295 	void *vaddr;
296 	struct dma_buf *dmabuf = buffer->dmabuf;
297 
298 	if (IS_ERR_OR_NULL(dmabuf))
299 		return -EINVAL;
300 
301 	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
302 	if (ret) {
303 		dev_dbg(dma->dev, "can't access the dma buffer\n");
304 		goto failed_access;
305 	}
306 
307 	vaddr = dma_buf_vmap(dmabuf);
308 	if (!vaddr) {
309 		dev_dbg(dma->dev, "can't vmap the dma buffer\n");
310 		ret = -EIO;
311 		goto failed_vmap;
312 	}
313 
314 	buffer->vaddr = vaddr;
315 
316 	return 0;
317 
318 failed_vmap:
319 	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
320 failed_access:
321 
322 	return ret;
323 }
324 
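/* Release every buffer still tracked by the session, then free the session */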
325 int mpp_dma_session_destroy(struct mpp_dma_session *dma)
326 {
327 	struct mpp_dma_buffer *n, *buffer = NULL;
328 
329 	if (!dma)
330 		return -EINVAL;
331 
332 	mutex_lock(&dma->list_mutex);
333 	list_for_each_entry_safe(buffer, n,
334 				 &dma->used_list,
335 				 link) {
336 		kref_put(&buffer->ref, mpp_dma_release_buffer);
337 	}
338 	mutex_unlock(&dma->list_mutex);
339 
340 	kfree(dma);
341 
342 	return 0;
343 }
344 
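/* Create a buffer-tracking session; max_buffers is capped at MPP_SESSION_MAX_BUFFERS */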
345 struct mpp_dma_session *
346 mpp_dma_session_create(struct device *dev, u32 max_buffers)
347 {
348 	int i;
349 	struct mpp_dma_session *dma = NULL;
350 	struct mpp_dma_buffer *buffer = NULL;
351 
352 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
353 	if (!dma)
354 		return NULL;
355 
356 	mutex_init(&dma->list_mutex);
357 	INIT_LIST_HEAD(&dma->unused_list);
358 	INIT_LIST_HEAD(&dma->used_list);
359 
360 	if (max_buffers > MPP_SESSION_MAX_BUFFERS) {
361 		mpp_debug(DEBUG_IOCTL, "session_max_buffer %d must not exceed %d\n",
362 			  max_buffers, MPP_SESSION_MAX_BUFFERS);
363 		dma->max_buffers = MPP_SESSION_MAX_BUFFERS;
364 	} else {
365 		dma->max_buffers = max_buffers;
366 	}
367 
368 	for (i = 0; i < ARRAY_SIZE(dma->dma_bufs); i++) {
369 		buffer = &dma->dma_bufs[i];
370 		buffer->dma = dma;
371 		INIT_LIST_HEAD(&buffer->link);
372 		list_add_tail(&buffer->link, &dma->unused_list);
373 	}
374 	dma->dev = dev;
375 
376 	return dma;
377 }
378 
379 /*
380  * begin cpu access => for_cpu = true
381  * end cpu access => for_cpu = false
382  */
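/* Sync only the [offset, offset + length) range, walking the scatterlist */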
383 void mpp_dma_buf_sync(struct mpp_dma_buffer *buffer, u32 offset, u32 length,
384 		      enum dma_data_direction dir, bool for_cpu)
385 {
386 	struct device *dev = buffer->dma->dev;
387 	struct sg_table *sgt = buffer->sgt;
388 	struct scatterlist *sg = sgt->sgl;
389 	dma_addr_t sg_dma_addr = sg_dma_address(sg);
390 	unsigned int len = 0;
391 	int i;
392 
393 	for_each_sgtable_sg(sgt, sg, i) {
394 		unsigned int sg_offset, sg_left, size = 0;
395 
396 		len += sg->length;
397 		if (len <= offset) {
398 			sg_dma_addr += sg->length;
399 			continue;
400 		}
401 
402 		sg_left = len - offset;
403 		sg_offset = sg->length - sg_left;
404 
405 		size = (length < sg_left) ? length : sg_left;
406 
407 		if (for_cpu)
408 			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
409 						      sg_offset, size, dir);
410 		else
411 			dma_sync_single_range_for_device(dev, sg_dma_addr,
412 							 sg_offset, size, dir);
413 
414 		offset += size;
415 		length -= size;
416 		sg_dma_addr += sg->length;
417 
418 		if (length == 0)
419 			break;
420 	}
421 }
422 
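/* Detach the device's iommu group from the stored domain */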
423 int mpp_iommu_detach(struct mpp_iommu_info *info)
424 {
425 	if (!info)
426 		return 0;
427 
428 	iommu_detach_group(info->domain, info->group);
429 	return 0;
430 }
431 
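/* Re-attach the stored domain unless it is already active for the device */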
432 int mpp_iommu_attach(struct mpp_iommu_info *info)
433 {
434 	if (!info)
435 		return 0;
436 
437 	if (info->domain == iommu_get_domain_for_dev(info->dev))
438 		return 0;
439 
440 	return iommu_attach_group(info->domain, info->group);
441 }
442 
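/* Default iommu page-fault handler: log the faulting iova and dump device state */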
443 static int mpp_iommu_handle(struct iommu_domain *iommu,
444 			    struct device *iommu_dev,
445 			    unsigned long iova,
446 			    int status, void *arg)
447 {
448 	struct mpp_dev *mpp = (struct mpp_dev *)arg;
449 
450 	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
451 		iova, status, arg);
452 
453 	if (!mpp) {
454 		dev_err(iommu_dev, "pagefault without device to handle\n");
455 		return 0;
456 	}
457 
458 	if (mpp->dev_ops && mpp->dev_ops->dump_dev)
459 		mpp->dev_ops->dump_dev(mpp);
460 	else
461 		mpp_task_dump_hw_reg(mpp);
462 
463 	return 0;
464 }
465 
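/* Resolve the iommu device, group and domain referenced by the "iommus" property */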
466 struct mpp_iommu_info *
467 mpp_iommu_probe(struct device *dev)
468 {
469 	int ret = 0;
470 	struct device_node *np = NULL;
471 	struct platform_device *pdev = NULL;
472 	struct mpp_iommu_info *info = NULL;
473 	struct iommu_domain *domain = NULL;
474 	struct iommu_group *group = NULL;
475 #ifdef CONFIG_ARM_DMA_USE_IOMMU
476 	struct dma_iommu_mapping *mapping;
477 #endif
478 	np = of_parse_phandle(dev->of_node, "iommus", 0);
479 	if (!np || !of_device_is_available(np)) {
480 		mpp_err("failed to get device node\n");
481 		return ERR_PTR(-ENODEV);
482 	}
483 
484 	pdev = of_find_device_by_node(np);
485 	of_node_put(np);
486 	if (!pdev) {
487 		mpp_err("failed to get platform device\n");
488 		return ERR_PTR(-ENODEV);
489 	}
490 
491 	group = iommu_group_get(dev);
492 	if (!group) {
493 		ret = -EINVAL;
494 		goto err_put_pdev;
495 	}
496 
497 	/*
498 	 * On the arm32 architecture, group->default_domain is NULL and the
499 	 * domain is stored in the dma_iommu_mapping created by the arch code,
500 	 * so take the domain from that mapping here.
501 	 */
502 #ifdef CONFIG_ARM_DMA_USE_IOMMU
503 	if (!iommu_group_default_domain(group)) {
504 		mapping = to_dma_iommu_mapping(dev);
505 		WARN_ON(!mapping);
506 		domain = mapping->domain;
507 	}
508 #endif
509 	if (!domain) {
510 		domain = iommu_get_domain_for_dev(dev);
511 		if (!domain) {
512 			ret = -EINVAL;
513 			goto err_put_group;
514 		}
515 	}
516 
517 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
518 	if (!info) {
519 		ret = -ENOMEM;
520 		goto err_put_group;
521 	}
522 
523 	init_rwsem(&info->rw_sem);
524 	spin_lock_init(&info->dev_lock);
525 	info->dev = dev;
526 	info->pdev = pdev;
527 	info->group = group;
528 	info->domain = domain;
529 	info->dev_active = NULL;
530 	info->irq = platform_get_irq(pdev, 0);
531 	info->got_irq = (info->irq < 0) ? false : true;
532 
533 	return info;
534 
535 err_put_group:
536 	if (group)
537 		iommu_group_put(group);
538 err_put_pdev:
539 	if (pdev)
540 		platform_device_put(pdev);
541 
542 	return ERR_PTR(ret);
543 }
544 
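/* Drop the group and platform-device references taken by mpp_iommu_probe */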
545 int mpp_iommu_remove(struct mpp_iommu_info *info)
546 {
547 	if (!info)
548 		return 0;
549 
550 	iommu_group_put(info->group);
551 	platform_device_put(info->pdev);
552 
553 	return 0;
554 }
555 
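/* Disable and then re-enable the iommu for the device */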
556 int mpp_iommu_refresh(struct mpp_iommu_info *info, struct device *dev)
557 {
558 	int ret;
559 
560 	if (!info)
561 		return 0;
562 	/* call av1 iommu ops */
563 	if (IS_ENABLED(CONFIG_ROCKCHIP_MPP_AV1DEC) && info->av1d_iommu) {
564 		ret = mpp_av1_iommu_disable(dev);
565 		if (ret)
566 			return ret;
567 		return mpp_av1_iommu_enable(dev);
568 	}
569 	/* disable iommu */
570 	ret = rockchip_iommu_disable(dev);
571 	if (ret)
572 		return ret;
573 	/* re-enable iommu */
574 	return rockchip_iommu_enable(dev);
575 }
576 
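/* Flush all IOTLB entries of the domain, if a domain is present */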
577 int mpp_iommu_flush_tlb(struct mpp_iommu_info *info)
578 {
579 	if (!info)
580 		return 0;
581 
582 	if (info->domain && info->domain->ops)
583 		iommu_flush_iotlb_all(info->domain);
584 
585 	return 0;
586 }
587 
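/* Mark the device as the active iommu user and install its fault handler */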
588 int mpp_iommu_dev_activate(struct mpp_iommu_info *info, struct mpp_dev *dev)
589 {
590 	unsigned long flags;
591 	int ret = 0;
592 
593 	if (!info)
594 		return 0;
595 
596 	spin_lock_irqsave(&info->dev_lock, flags);
597 
598 	if (info->dev_active || !dev) {
599 		dev_err(info->dev, "can not activate %s -> %s\n",
600 			info->dev_active ? dev_name(info->dev_active->dev) : NULL,
601 			dev ? dev_name(dev->dev) : NULL);
602 		ret = -EINVAL;
603 	} else {
604 		info->dev_active = dev;
605 		/* switch the domain page-fault handler and argument to the active device */
606 		iommu_set_fault_handler(info->domain, dev->fault_handler ?
607 					dev->fault_handler : mpp_iommu_handle, dev);
608 
609 		dev_dbg(info->dev, "activate -> %p %s\n", dev, dev_name(dev->dev));
610 	}
611 
612 	spin_unlock_irqrestore(&info->dev_lock, flags);
613 
614 	return ret;
615 }
616 
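/* Clear the active-device marker set by mpp_iommu_dev_activate */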
617 int mpp_iommu_dev_deactivate(struct mpp_iommu_info *info, struct mpp_dev *dev)
618 {
619 	unsigned long flags;
620 
621 	if (!info)
622 		return 0;
623 
624 	spin_lock_irqsave(&info->dev_lock, flags);
625 
626 	if (info->dev_active != dev)
627 		dev_err(info->dev, "can not deactivate %s when %s activated\n",
628 			dev_name(dev->dev),
629 			info->dev_active ? dev_name(info->dev_active->dev) : NULL);
630 
631 	dev_dbg(info->dev, "deactivate %p\n", info->dev_active);
632 	info->dev_active = NULL;
633 	spin_unlock_irqrestore(&info->dev_lock, flags);
634 
635 	return 0;
636 }
637