// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <linux/delay.h>
#include <linux/dma-buf-cache.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_ARM_DMA_USE_IOMMU
#include <asm/dma-iommu.h>
#endif
#include <soc/rockchip/rockchip_iommu.h>

#include "mpp_debug.h"
#include "mpp_iommu.h"
#include "mpp_common.h"

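/*
 * Look up an already imported buffer by dma-buf fd. Returns the matching
 * mpp_dma_buffer from the session's used list, or NULL if none is found.
 */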
struct mpp_dma_buffer *
mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd)
{
	struct dma_buf *dmabuf;
	struct mpp_dma_buffer *out = NULL;
	struct mpp_dma_buffer *buffer = NULL, *n;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return NULL;

	mutex_lock(&dma->list_mutex);
	list_for_each_entry_safe(buffer, n,
				 &dma->used_list, link) {
		/*
		 * A file descriptor may be dup()ed, so several fds can refer
		 * to the same dmabuf. Compare the underlying dmabuf pointer
		 * instead of the fd.
		 */
		if (buffer->dmabuf == dmabuf) {
			out = buffer;
			break;
		}
	}
	mutex_unlock(&dma->list_mutex);
	dma_buf_put(dmabuf);

	return out;
}

/*
 * Release the buffer and return it to the session's unused list.
 * Called with dma->list_mutex held.
 */
static void mpp_dma_release_buffer(struct kref *ref)
{
	struct mpp_dma_buffer *buffer =
		container_of(ref, struct mpp_dma_buffer, ref);

	buffer->dma->buffer_count--;
	list_move_tail(&buffer->link, &buffer->dma->unused_list);

	dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
	dma_buf_detach(buffer->dmabuf, buffer->attach);
	dma_buf_put(buffer->dmabuf);
	buffer->dma = NULL;
	buffer->dmabuf = NULL;
	buffer->attach = NULL;
	buffer->sgt = NULL;
	buffer->copy_sgt = NULL;
	buffer->iova = 0;
	buffer->size = 0;
	buffer->vaddr = NULL;
	buffer->last_used = 0;
}

/* Remove the oldest buffer when the count exceeds the configured maximum */
static int
mpp_dma_remove_extra_buffer(struct mpp_dma_session *dma)
{
	struct mpp_dma_buffer *n;
	struct mpp_dma_buffer *oldest = NULL, *buffer = NULL;
	ktime_t oldest_time = ktime_set(0, 0);

	if (dma->buffer_count > dma->max_buffers) {
		mutex_lock(&dma->list_mutex);
		list_for_each_entry_safe(buffer, n,
					 &dma->used_list,
					 link) {
			if (ktime_to_ns(oldest_time) == 0 ||
			    ktime_after(oldest_time, buffer->last_used)) {
				oldest_time = buffer->last_used;
				oldest = buffer;
			}
		}
		if (oldest && kref_read(&oldest->ref) == 1)
			kref_put(&oldest->ref, mpp_dma_release_buffer);
		mutex_unlock(&dma->list_mutex);
	}

	return 0;
}

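/* Drop one reference on an imported buffer; the final put releases it */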
int mpp_dma_release(struct mpp_dma_session *dma,
		    struct mpp_dma_buffer *buffer)
{
	mutex_lock(&dma->list_mutex);
	kref_put(&buffer->ref, mpp_dma_release_buffer);
	mutex_unlock(&dma->list_mutex);

	return 0;
}

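/* Look up the buffer backing @fd in the session and drop one reference */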
int mpp_dma_release_fd(struct mpp_dma_session *dma, int fd)
{
	struct device *dev = dma->dev;
	struct mpp_dma_buffer *buffer = NULL;

	buffer = mpp_dma_find_buffer_fd(dma, fd);
	if (IS_ERR_OR_NULL(buffer)) {
		dev_err(dev, "can not find %d buffer in list\n", fd);

		return -EINVAL;
	}

	mutex_lock(&dma->list_mutex);
	kref_put(&buffer->ref, mpp_dma_release_buffer);
	mutex_unlock(&dma->list_mutex);

	return 0;
}

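/* Allocate a page-aligned coherent DMA buffer and record its iova and size */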
struct mpp_dma_buffer *
mpp_dma_alloc(struct device *dev, size_t size)
{
	size_t align_size;
	dma_addr_t iova;
	struct mpp_dma_buffer *buffer;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return NULL;

	align_size = PAGE_ALIGN(size);
	buffer->vaddr = dma_alloc_coherent(dev, align_size, &iova, GFP_KERNEL);
	if (!buffer->vaddr)
		goto fail_dma_alloc;

	buffer->size = align_size;
	buffer->iova = iova;
	buffer->dev = dev;

	return buffer;
fail_dma_alloc:
	kfree(buffer);
	return NULL;
}

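/* Free a buffer previously allocated with mpp_dma_alloc() */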
int mpp_dma_free(struct mpp_dma_buffer *buffer)
{
	dma_free_coherent(buffer->dev, buffer->size,
			  buffer->vaddr, buffer->iova);
	buffer->vaddr = NULL;
	buffer->iova = 0;
	buffer->size = 0;
	buffer->dev = NULL;
	kfree(buffer);

	return 0;
}

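/*
 * Import a dma-buf fd into the session: reuse the cached mapping if the
 * dmabuf was imported before, otherwise attach and map it for the device.
 */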
struct mpp_dma_buffer *mpp_dma_import_fd(struct mpp_iommu_info *iommu_info,
					 struct mpp_dma_session *dma,
					 int fd)
{
	int ret = 0;
	struct sg_table *sgt;
	struct dma_buf *dmabuf;
	struct mpp_dma_buffer *buffer;
	struct dma_buf_attachment *attach;

	if (!dma) {
		mpp_err("dma session is null\n");
		return ERR_PTR(-EINVAL);
	}

	/* remove the oldest buffer before adding a new one */
	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
		mpp_dma_remove_extra_buffer(dma);

	/* check whether the buffer is already in the dma session */
	buffer = mpp_dma_find_buffer_fd(dma, fd);
	if (!IS_ERR_OR_NULL(buffer)) {
		if (kref_get_unless_zero(&buffer->ref)) {
			buffer->last_used = ktime_get();
			return buffer;
		}
		dev_dbg(dma->dev, "missing the fd %d\n", fd);
	}

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		mpp_err("dma_buf_get fd %d failed(%d)\n", fd, ret);
		return ERR_PTR(ret);
	}
	/* A new DMA buffer */
	mutex_lock(&dma->list_mutex);
	buffer = list_first_entry_or_null(&dma->unused_list,
					  struct mpp_dma_buffer,
					  link);
	if (!buffer) {
		ret = -ENOMEM;
		mutex_unlock(&dma->list_mutex);
		goto fail;
	}
	list_del_init(&buffer->link);
	mutex_unlock(&dma->list_mutex);

	buffer->dmabuf = dmabuf;
	buffer->dir = DMA_BIDIRECTIONAL;
	buffer->last_used = ktime_get();

	attach = dma_buf_attach(buffer->dmabuf, dma->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		mpp_err("dma_buf_attach fd %d failed(%d)\n", fd, ret);
		goto fail_attach;
	}

	sgt = dma_buf_map_attachment(attach, buffer->dir);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		mpp_err("dma_buf_map_attachment fd %d failed(%d)\n", fd, ret);
		goto fail_map;
	}
	buffer->iova = sg_dma_address(sgt->sgl);
	buffer->size = sg_dma_len(sgt->sgl);
	buffer->attach = attach;
	buffer->sgt = sgt;
	buffer->dma = dma;

	kref_init(&buffer->ref);

	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
		/* Take an extra reference for use outside the buffer pool */
		kref_get(&buffer->ref);

	mutex_lock(&dma->list_mutex);
	dma->buffer_count++;
	list_add_tail(&buffer->link, &dma->used_list);
	mutex_unlock(&dma->list_mutex);

	return buffer;

fail_map:
	dma_buf_detach(buffer->dmabuf, attach);
fail_attach:
	mutex_lock(&dma->list_mutex);
	list_add_tail(&buffer->link, &dma->unused_list);
	mutex_unlock(&dma->list_mutex);
fail:
	dma_buf_put(dmabuf);
	return ERR_PTR(ret);
}

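/* Tear down a kernel mapping created by mpp_dma_map_kernel() */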
int mpp_dma_unmap_kernel(struct mpp_dma_session *dma,
			 struct mpp_dma_buffer *buffer)
{
	void *vaddr = buffer->vaddr;
	struct dma_buf *dmabuf = buffer->dmabuf;

	if (IS_ERR_OR_NULL(vaddr) ||
	    IS_ERR_OR_NULL(dmabuf))
		return -EINVAL;

	dma_buf_vunmap(dmabuf, vaddr);
	buffer->vaddr = NULL;

	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);

	return 0;
}

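/* Map an imported dma-buf into the kernel for CPU access */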
int mpp_dma_map_kernel(struct mpp_dma_session *dma,
		       struct mpp_dma_buffer *buffer)
{
	int ret;
	void *vaddr;
	struct dma_buf *dmabuf = buffer->dmabuf;

	if (IS_ERR_OR_NULL(dmabuf))
		return -EINVAL;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret) {
		dev_dbg(dma->dev, "can't access the dma buffer\n");
		goto failed_access;
	}

	vaddr = dma_buf_vmap(dmabuf);
	if (!vaddr) {
		dev_dbg(dma->dev, "can't vmap the dma buffer\n");
		ret = -EIO;
		goto failed_vmap;
	}

	buffer->vaddr = vaddr;

	return 0;

failed_vmap:
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
failed_access:

	return ret;
}

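/* Put every buffer still on the used list, then free the session */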
int mpp_dma_session_destroy(struct mpp_dma_session *dma)
{
	struct mpp_dma_buffer *n, *buffer = NULL;

	if (!dma)
		return -EINVAL;

	mutex_lock(&dma->list_mutex);
	list_for_each_entry_safe(buffer, n,
				 &dma->used_list,
				 link) {
		kref_put(&buffer->ref, mpp_dma_release_buffer);
	}
	mutex_unlock(&dma->list_mutex);

	kfree(dma);

	return 0;
}

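/*
 * Create a dma session, initialise its buffer pool and clamp max_buffers
 * to MPP_SESSION_MAX_BUFFERS.
 */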
struct mpp_dma_session *
mpp_dma_session_create(struct device *dev, u32 max_buffers)
{
	int i;
	struct mpp_dma_session *dma = NULL;
	struct mpp_dma_buffer *buffer = NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	mutex_init(&dma->list_mutex);
	INIT_LIST_HEAD(&dma->unused_list);
	INIT_LIST_HEAD(&dma->used_list);

	if (max_buffers > MPP_SESSION_MAX_BUFFERS) {
		mpp_debug(DEBUG_IOCTL, "session_max_buffer %d must be less than %d\n",
			  max_buffers, MPP_SESSION_MAX_BUFFERS);
		dma->max_buffers = MPP_SESSION_MAX_BUFFERS;
	} else {
		dma->max_buffers = max_buffers;
	}

	for (i = 0; i < ARRAY_SIZE(dma->dma_bufs); i++) {
		buffer = &dma->dma_bufs[i];
		buffer->dma = dma;
		INIT_LIST_HEAD(&buffer->link);
		list_add_tail(&buffer->link, &dma->unused_list);
	}
	dma->dev = dev;

	return dma;
}

/*
 * Sync part of an imported buffer for CPU or device access.
 * begin cpu access => for_cpu = true
 * end cpu access   => for_cpu = false
 */
void mpp_dma_buf_sync(struct mpp_dma_buffer *buffer, u32 offset, u32 length,
		      enum dma_data_direction dir, bool for_cpu)
{
	struct device *dev = buffer->dma->dev;
	struct sg_table *sgt = buffer->sgt;
	struct scatterlist *sg = sgt->sgl;
	dma_addr_t sg_dma_addr = sg_dma_address(sg);
	unsigned int len = 0;
	int i;

	for_each_sgtable_sg(sgt, sg, i) {
		unsigned int sg_offset, sg_left, size = 0;

		len += sg->length;
		if (len <= offset) {
			sg_dma_addr += sg->length;
			continue;
		}

		sg_left = len - offset;
		sg_offset = sg->length - sg_left;

		size = (length < sg_left) ? length : sg_left;

		if (for_cpu)
			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
						      sg_offset, size, dir);
		else
			dma_sync_single_range_for_device(dev, sg_dma_addr,
							 sg_offset, size, dir);

		offset += size;
		length -= size;
		sg_dma_addr += sg->length;

		if (length == 0)
			break;
	}
}

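/* Detach/attach the device's IOMMU group from/to the MPP domain */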
int mpp_iommu_detach(struct mpp_iommu_info *info)
{
	if (!info)
		return 0;

	iommu_detach_group(info->domain, info->group);
	return 0;
}

int mpp_iommu_attach(struct mpp_iommu_info *info)
{
	if (!info)
		return 0;

	if (info->domain == iommu_get_domain_for_dev(info->dev))
		return 0;

	return iommu_attach_group(info->domain, info->group);
}

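/* IOMMU page fault handler: log the fault and dump the active device state */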
static int mpp_iommu_handle(struct iommu_domain *iommu,
			    struct device *iommu_dev,
			    unsigned long iova,
			    int status, void *arg)
{
	struct mpp_dev *mpp = (struct mpp_dev *)arg;

	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
		iova, status, arg);

	if (!mpp) {
		dev_err(iommu_dev, "pagefault without device to handle\n");
		return 0;
	}

	if (mpp->dev_ops && mpp->dev_ops->dump_dev)
		mpp->dev_ops->dump_dev(mpp);
	else
		mpp_task_dump_hw_reg(mpp);

	return 0;
}

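/*
 * Look up the IOMMU referenced by the "iommus" DT phandle of @dev and
 * collect its platform device, group and domain into a new mpp_iommu_info.
 */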
struct mpp_iommu_info *
mpp_iommu_probe(struct device *dev)
{
	int ret = 0;
	struct device_node *np = NULL;
	struct platform_device *pdev = NULL;
	struct mpp_iommu_info *info = NULL;
	struct iommu_domain *domain = NULL;
	struct iommu_group *group = NULL;
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	struct dma_iommu_mapping *mapping;
#endif
	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np || !of_device_is_available(np)) {
		mpp_err("failed to get device node\n");
		return ERR_PTR(-ENODEV);
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		mpp_err("failed to get platform device\n");
		return ERR_PTR(-ENODEV);
	}

	group = iommu_group_get(dev);
	if (!group) {
		ret = -EINVAL;
		goto err_put_pdev;
	}

	/*
	 * On arm32 the group has no default domain; the domain lives in the
	 * dma_iommu_mapping created by the arm32 DMA code, so fetch it from
	 * the mapping instead.
	 */
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	if (!iommu_group_default_domain(group)) {
		mapping = to_dma_iommu_mapping(dev);
		WARN_ON(!mapping);
		domain = mapping->domain;
	}
#endif
	if (!domain) {
		domain = iommu_get_domain_for_dev(dev);
		if (!domain) {
			ret = -EINVAL;
			goto err_put_group;
		}
	}

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto err_put_group;
	}

	init_rwsem(&info->rw_sem);
	spin_lock_init(&info->dev_lock);
	info->dev = dev;
	info->pdev = pdev;
	info->group = group;
	info->domain = domain;
	info->dev_active = NULL;
	info->irq = platform_get_irq(pdev, 0);
	info->got_irq = (info->irq >= 0);

	return info;

err_put_group:
	if (group)
		iommu_group_put(group);
err_put_pdev:
	if (pdev)
		platform_device_put(pdev);

	return ERR_PTR(ret);
}

int mpp_iommu_remove(struct mpp_iommu_info *info)
{
	if (!info)
		return 0;

	iommu_group_put(info->group);
	platform_device_put(info->pdev);

	return 0;
}

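/* Refresh the IOMMU for @dev by disabling and then re-enabling it */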
int mpp_iommu_refresh(struct mpp_iommu_info *info, struct device *dev)
{
	int ret;

	if (!info)
		return 0;
	/* call av1 iommu ops */
	if (IS_ENABLED(CONFIG_ROCKCHIP_MPP_AV1DEC) && info->av1d_iommu) {
		ret = mpp_av1_iommu_disable(dev);
		if (ret)
			return ret;
		return mpp_av1_iommu_enable(dev);
	}
	/* disable iommu */
	ret = rockchip_iommu_disable(dev);
	if (ret)
		return ret;
	/* re-enable iommu */
	return rockchip_iommu_enable(dev);
}

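/* Flush all IOTLB entries for the domain used by the MPP device */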
int mpp_iommu_flush_tlb(struct mpp_iommu_info *info)
{
	if (!info)
		return 0;

	if (info->domain && info->domain->ops)
		iommu_flush_iotlb_all(info->domain);

	return 0;
}

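/*
 * Record @dev as the device currently driving the shared IOMMU and install
 * its page fault handler. Fails if another device is already active.
 */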
int mpp_iommu_dev_activate(struct mpp_iommu_info *info, struct mpp_dev *dev)
{
	unsigned long flags;
	int ret = 0;

	if (!info)
		return 0;

	spin_lock_irqsave(&info->dev_lock, flags);

	if (info->dev_active || !dev) {
		dev_err(info->dev, "can not activate %s -> %s\n",
			info->dev_active ? dev_name(info->dev_active->dev) : NULL,
			dev ? dev_name(dev->dev) : NULL);
		ret = -EINVAL;
	} else {
		info->dev_active = dev;
		/* switch the domain pagefault handler and arg depending on the device */
		iommu_set_fault_handler(info->domain, dev->fault_handler ?
					dev->fault_handler : mpp_iommu_handle, dev);

		dev_dbg(info->dev, "activate -> %p %s\n", dev, dev_name(dev->dev));
	}

	spin_unlock_irqrestore(&info->dev_lock, flags);

	return ret;
}

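/* Clear the active device for the shared IOMMU, warning if @dev is not it */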
int mpp_iommu_dev_deactivate(struct mpp_iommu_info *info, struct mpp_dev *dev)
{
	unsigned long flags;

	if (!info)
		return 0;

	spin_lock_irqsave(&info->dev_lock, flags);

	if (info->dev_active != dev)
		dev_err(info->dev, "can not deactivate %s when %s activated\n",
			dev_name(dev->dev),
			info->dev_active ? dev_name(info->dev_active->dev) : NULL);

	dev_dbg(info->dev, "deactivate %p\n", info->dev_active);
	info->dev_active = NULL;
	spin_unlock_irqrestore(&info->dev_lock, flags);

	return 0;
}