// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Rockchip Electronics Co., Ltd */

#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-dma-sg.h>
#include <linux/of_platform.h>
#include "dev.h"
#include "common.h"

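/*
 * rkcif_alloc_buffer - allocate a DMA buffer through the vb2 memory ops
 * shared by the CIF hardware (dma-contig or dma-sg, selected at probe).
 *
 * On success buf->dma_addr always holds the bus address; buf->vaddr,
 * buf->dbuf and buf->dma_fd are only filled in when the corresponding
 * is_need_* flag was set by the caller beforehand.
 */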
int rkcif_alloc_buffer(struct rkcif_device *dev,
		       struct rkcif_dummy_buffer *buf)
{
	unsigned long attrs = buf->is_need_vaddr ? 0 : DMA_ATTR_NO_KERNEL_MAPPING;
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
	struct sg_table *sg_tbl;
	void *mem_priv;
	int ret = 0;

	if (!buf->size) {
		ret = -EINVAL;
		goto err;
	}

	if (dev->hw_dev->is_dma_contig)
		attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
	buf->size = PAGE_ALIGN(buf->size);
	mem_priv = g_ops->alloc(dev->hw_dev->dev, attrs, buf->size,
				DMA_BIDIRECTIONAL, GFP_KERNEL | GFP_DMA32);
	if (IS_ERR_OR_NULL(mem_priv)) {
		ret = -ENOMEM;
		goto err;
	}

	buf->mem_priv = mem_priv;
	if (dev->hw_dev->is_dma_sg_ops) {
		sg_tbl = (struct sg_table *)g_ops->cookie(mem_priv);
		buf->dma_addr = sg_dma_address(sg_tbl->sgl);
		g_ops->prepare(mem_priv);
	} else {
		buf->dma_addr = *((dma_addr_t *)g_ops->cookie(mem_priv));
	}
	if (buf->is_need_vaddr)
		buf->vaddr = g_ops->vaddr(mem_priv);
	if (buf->is_need_dbuf) {
		buf->dbuf = g_ops->get_dmabuf(mem_priv, O_RDWR);
		if (buf->is_need_dmafd) {
			buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
			if (buf->dma_fd < 0) {
				dma_buf_put(buf->dbuf);
				ret = buf->dma_fd;
				goto err;
			}
			get_dma_buf(buf->dbuf);
		}
	}
	v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
		 "%s buf:0x%x~0x%x size:%d\n", __func__,
		 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size, buf->size);
	return ret;
err:
	dev_err(dev->dev, "%s failed ret:%d\n", __func__, ret);
	return ret;
}

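/*
 * rkcif_free_buffer - release a buffer from rkcif_alloc_buffer() and
 * reset the descriptor so it can safely be reused.
 */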
void rkcif_free_buffer(struct rkcif_device *dev,
		       struct rkcif_dummy_buffer *buf)
{
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;

	if (buf && buf->mem_priv) {
		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
			 "%s buf:0x%x~0x%x\n", __func__,
			 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size);
		if (buf->dbuf)
			dma_buf_put(buf->dbuf);
		g_ops->put(buf->mem_priv);
		buf->size = 0;
		buf->dbuf = NULL;
		buf->vaddr = NULL;
		buf->mem_priv = NULL;
		buf->is_need_dbuf = false;
		buf->is_need_vaddr = false;
		buf->is_need_dmafd = false;
		buf->is_free = true;
	}
}

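/*
 * With an IOMMU enabled the dummy buffer needs no real backing memory
 * for its whole size: a single page is allocated and mapped repeatedly
 * into the scatterlist, so discarded hardware writes all land in one
 * sacrificial page while only one page of RAM is consumed.
 */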
static int rkcif_alloc_page_dummy_buf(struct rkcif_device *dev, struct rkcif_dummy_buffer *buf)
{
	struct rkcif_hw *hw = dev->hw_dev;
	u32 i, n_pages = PAGE_ALIGN(buf->size) >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;
	struct sg_table *sg = NULL;
	int ret = -ENOMEM;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
	if (!page)
		goto err;

	pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto free_page;
	for (i = 0; i < n_pages; i++)
		pages[i] = page;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		goto free_pages;
	ret = sg_alloc_table_from_pages(sg, pages, n_pages, 0,
					n_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto free_sg;

	ret = dma_map_sg(hw->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	if (ret <= 0) {
		/* dma_map_sg() returns 0 when the mapping fails. */
		ret = -EIO;
		goto free_sg_table;
	}
	buf->dma_addr = sg_dma_address(sg->sgl);
	buf->mem_priv = sg;
	buf->pages = pages;
	v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
		 "%s buf:0x%x map cnt:%d size:%d\n", __func__,
		 (u32)buf->dma_addr, ret, buf->size);
	return 0;
free_sg_table:
	sg_free_table(sg);
free_sg:
	kfree(sg);
free_pages:
	kvfree(pages);
free_page:
	__free_pages(page, 0);
err:
	return ret;
}

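/* Undo rkcif_alloc_page_dummy_buf(): unmap, drop the table and the single page. */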
static void rkcif_free_page_dummy_buf(struct rkcif_device *dev, struct rkcif_dummy_buffer *buf)
{
	struct sg_table *sg = buf->mem_priv;

	if (!sg)
		return;
	dma_unmap_sg(dev->hw_dev->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	sg_free_table(sg);
	kfree(sg);
	__free_pages(buf->pages[0], 0);
	kvfree(buf->pages);
	buf->mem_priv = NULL;
	buf->pages = NULL;
}

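/*
 * rkcif_alloc_common_dummy_buf - allocate the dummy buffer shared across
 * streams, serialized by the hardware dev_lock.  The page-remapping
 * variant is used when an IOMMU is present, a real contiguous
 * allocation otherwise.
 *
 * A minimal usage sketch (the size calculation is illustrative only,
 * not taken from this file):
 *
 *	buf->size = stride * height;
 *	ret = rkcif_alloc_common_dummy_buf(dev, buf);
 *	if (ret)
 *		return ret;
 */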
int rkcif_alloc_common_dummy_buf(struct rkcif_device *dev, struct rkcif_dummy_buffer *buf)
{
	struct rkcif_hw *hw = dev->hw_dev;
	int ret = 0;

	mutex_lock(&hw->dev_lock);
	if (buf->mem_priv)
		goto end;

	if (buf->size == 0)
		goto end;

	if (hw->iommu_en) {
		ret = rkcif_alloc_page_dummy_buf(dev, buf);
		goto end;
	}

	ret = rkcif_alloc_buffer(dev, buf);
	if (!ret)
		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
			 "%s buf:0x%x size:%d\n", __func__,
			 (u32)buf->dma_addr, buf->size);
end:
	if (ret < 0)
		v4l2_err(&dev->v4l2_dev, "%s failed:%d\n", __func__, ret);
	mutex_unlock(&hw->dev_lock);
	return ret;
}

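/* Counterpart of rkcif_alloc_common_dummy_buf(), under the same lock. */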
void rkcif_free_common_dummy_buf(struct rkcif_device *dev, struct rkcif_dummy_buffer *buf)
{
	struct rkcif_hw *hw = dev->hw_dev;

	mutex_lock(&hw->dev_lock);

	if (hw->iommu_en)
		rkcif_free_page_dummy_buf(dev, buf);
	else
		rkcif_free_buffer(dev, buf);
	mutex_unlock(&hw->dev_lock);
}

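/*
 * The rest of this file implements a small dma-buf exporter that wraps
 * memory reserved at boot, apparently for the thunderboot fast-capture
 * hand-off to the ISP.  rkcif_shm_data keeps the page array plus vmap
 * bookkeeping for one exported region.
 */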
struct rkcif_shm_data {
	void *vaddr;
	int vmap_cnt;
	int npages;
	struct page *pages[];
};

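/* Build and map a scatterlist over the reserved pages for an importer. */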
static struct sg_table *rkcif_shm_map_dma_buf(struct dma_buf_attachment *attachment,
					      enum dma_data_direction dir)
{
	struct rkcif_shm_data *data = attachment->dmabuf->priv;
	struct sg_table *table;
	int ret;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table_from_pages(table, data->pages, data->npages, 0,
					data->npages << PAGE_SHIFT, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}

	ret = dma_map_sgtable(attachment->dev, table, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(table);
		kfree(table);
		return ERR_PTR(ret);
	}

	/* Cached so begin/end_cpu_access() can find the mapping. */
	attachment->priv = table;

	return table;
}

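/* Tear down the mapping created in rkcif_shm_map_dma_buf(). */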
static void rkcif_shm_unmap_dma_buf(struct dma_buf_attachment *attachment,
				    struct sg_table *table,
				    enum dma_data_direction dir)
{
	dma_unmap_sgtable(attachment->dev, table, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(table);
	kfree(table);
	attachment->priv = NULL;
}

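/*
 * Kernel mapping helpers; vmap_cnt tracks outstanding mappings so that
 * rkcif_shm_release() can warn about (and undo) a leaked vmap.
 */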
static void *rkcif_shm_vmap(struct dma_buf *dma_buf)
{
	struct rkcif_shm_data *data = dma_buf->priv;

	data->vaddr = vmap(data->pages, data->npages, VM_MAP, PAGE_KERNEL);
	if (!data->vaddr)
		return NULL;
	data->vmap_cnt++;
	return data->vaddr;
}

static void rkcif_shm_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct rkcif_shm_data *data = dma_buf->priv;

	vunmap(data->vaddr);
	data->vaddr = NULL;
	data->vmap_cnt--;
}

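/* Map the reserved pages into user space, one page per remap_pfn_range(). */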
static int rkcif_shm_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct rkcif_shm_data *data = dma_buf->priv;
	unsigned long vm_start = vma->vm_start;
	int ret;
	int i;

	for (i = 0; i < data->npages; i++) {
		ret = remap_pfn_range(vma, vm_start, page_to_pfn(data->pages[i]),
				      PAGE_SIZE, vma->vm_page_prot);
		if (ret)
			return ret;
		vm_start += PAGE_SIZE;
	}

	return 0;
}

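/*
 * CPU access bracketing: sync through the first attachment's mapping.
 * This assumes a single attached device, which appears to be the
 * intended use for the CIF-to-ISP hand-off.
 */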
static int rkcif_shm_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
{
	struct dma_buf_attachment *attachment;
	struct sg_table *table;

	if (list_empty(&dmabuf->attachments))
		return 0;
	attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
	table = attachment->priv;
	if (!table)
		return 0;
	dma_sync_sg_for_cpu(attachment->dev, table->sgl, table->nents, dir);

	return 0;
}

static int rkcif_shm_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
{
	struct dma_buf_attachment *attachment;
	struct sg_table *table;

	if (list_empty(&dmabuf->attachments))
		return 0;
	attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
	table = attachment->priv;
	if (!table)
		return 0;
	dma_sync_sg_for_device(attachment->dev, table->sgl, table->nents, dir);

	return 0;
}

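/* Exporter release: the pages live in reserved memory, so only the bookkeeping is freed. */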
static void rkcif_shm_release(struct dma_buf *dma_buf)
{
	struct rkcif_shm_data *data = dma_buf->priv;

	if (data->vmap_cnt) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		rkcif_shm_vunmap(dma_buf, data->vaddr);
	}
	kfree(data);
}

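/* dma-buf callbacks for the reserved-memory exporter above. */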
static const struct dma_buf_ops rkcif_shm_dmabuf_ops = {
	.map_dma_buf = rkcif_shm_map_dma_buf,
	.unmap_dma_buf = rkcif_shm_unmap_dma_buf,
	.release = rkcif_shm_release,
	.mmap = rkcif_shm_mmap,
	.vmap = rkcif_shm_vmap,
	.vunmap = rkcif_shm_vunmap,
	.begin_cpu_access = rkcif_shm_begin_cpu_access,
	.end_cpu_access = rkcif_shm_end_cpu_access,
};

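/*
 * rkcif_shm_alloc - export one reserved-memory window as a dma-buf.
 *
 * Sketch of a caller, with made-up values for illustration only:
 *
 *	struct rkisp_thunderboot_shmem shmem = {
 *		.shm_start = 0x10000000,	// hypothetical physical base
 *		.shm_size  = SZ_4M,
 *	};
 *	struct dma_buf *dbuf = rkcif_shm_alloc(&shmem);
 *	if (IS_ERR(dbuf))
 *		return PTR_ERR(dbuf);
 */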
static struct dma_buf *rkcif_shm_alloc(struct rkisp_thunderboot_shmem *shmem)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	struct rkcif_shm_data *data;
	int i, npages;

	npages = PAGE_ALIGN(shmem->shm_size) / PAGE_SIZE;
	data = kmalloc(sizeof(*data) + npages * sizeof(struct page *), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);
	data->vaddr = NULL;
	data->vmap_cnt = 0;
	data->npages = npages;
	for (i = 0; i < npages; i++)
		data->pages[i] = phys_to_page(shmem->shm_start + i * PAGE_SIZE);

	exp_info.ops = &rkcif_shm_dmabuf_ops;
	exp_info.size = npages * PAGE_SIZE;
	exp_info.flags = O_RDWR;
	exp_info.priv = data;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		kfree(data);

	return dmabuf;
}

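/*
 * Carve the buf_idx-th slot out of the reserved region, bounds-check it
 * against resmem_size, and export it via rkcif_shm_alloc().
 */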
int rkcif_alloc_reserved_mem_buf(struct rkcif_device *dev, struct rkcif_rx_buffer *buf)
{
	struct rkcif_dummy_buffer *dummy = &buf->dummy;

	dummy->dma_addr = dev->resmem_pa + dummy->size * buf->buf_idx;
	if (dummy->dma_addr + dummy->size > dev->resmem_pa + dev->resmem_size)
		return -EINVAL;
	buf->dbufs.dma = dummy->dma_addr;
	buf->dbufs.is_resmem = true;
	buf->shmem.shm_start = dummy->dma_addr;
	buf->shmem.shm_size = dummy->size;
	dummy->dbuf = rkcif_shm_alloc(&buf->shmem);
	if (IS_ERR(dummy->dbuf))
		return PTR_ERR(dummy->dbuf);
	if (dummy->is_need_vaddr)
		dummy->vaddr = dummy->dbuf->ops->vmap(dummy->dbuf);
	return 0;
}

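/*
 * Release one reserved-memory slot: tell the remote ISP subdev to drop
 * the buffer, undo any kernel mapping, and (on thunderboot builds)
 * return the pages to the page allocator.
 */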
void rkcif_free_reserved_mem_buf(struct rkcif_device *dev, struct rkcif_rx_buffer *buf)
{
	struct rkcif_dummy_buffer *dummy = &buf->dummy;
	struct media_pad *pad = NULL;
	struct v4l2_subdev *sd;

	if (buf->dummy.is_free)
		return;

	if (dev->rdbk_debug)
		v4l2_info(&dev->v4l2_dev,
			  "free reserved mem addr 0x%x\n",
			  (u32)dummy->dma_addr);
	if (dev->sditf[0]) {
		if (dev->sditf[0]->is_combine_mode)
			pad = media_entity_remote_pad(&dev->sditf[0]->pads[1]);
		else
			pad = media_entity_remote_pad(&dev->sditf[0]->pads[0]);
	} else {
		v4l2_info(&dev->v4l2_dev, "sditf not found\n");
		return;
	}
	if (pad) {
		sd = media_entity_to_v4l2_subdev(pad->entity);
	} else {
		v4l2_info(&dev->v4l2_dev, "remote pad not found\n");
		return;
	}
	if (buf->dbufs.is_init)
		v4l2_subdev_call(sd, core, ioctl,
				 RKISP_VICAP_CMD_RX_BUFFER_FREE, &buf->dbufs);
	if (dummy->is_need_vaddr)
		dummy->dbuf->ops->vunmap(dummy->dbuf, dummy->vaddr);
#ifdef CONFIG_VIDEO_ROCKCHIP_THUNDER_BOOT_ISP
	free_reserved_area(phys_to_virt(buf->shmem.shm_start),
			   phys_to_virt(buf->shmem.shm_start + buf->shmem.shm_size),
			   -1, "rkisp_thunderboot");
#endif
	buf->dummy.is_free = true;
}