// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
 * Author: Jacob Chen <jacob-chen@iotwrt.com>
 */

#include <linux/pm_runtime.h>

#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-v4l2.h>

#include "rga-hw.h"
#include "rga.h"
/*
 * The RGA queues are single-planar, so report one plane of f->size
 * bytes; if the caller preset the plane count, only validate the size.
 */
static int
rga_queue_setup(struct vb2_queue *vq,
		unsigned int *nbuffers, unsigned int *nplanes,
		unsigned int sizes[], struct device *alloc_devs[])
{
	struct rga_ctx *ctx = vb2_get_drv_priv(vq);
	struct rga_frame *f = rga_get_frame(ctx, vq->type);

	if (IS_ERR(f))
		return PTR_ERR(f);

	if (*nplanes)
		return sizes[0] < f->size ? -EINVAL : 0;

	sizes[0] = f->size;
	*nplanes = 1;

	return 0;
}

/* Validate a queued buffer and set the expected payload size. */
static int rga_buf_prepare(struct vb2_buffer *vb)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);

	if (IS_ERR(f))
		return PTR_ERR(f);

	vb2_set_plane_payload(vb, 0, f->size);

	return 0;
}

static void rga_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

/* Hand every buffer still owned by the m2m queue back to vb2. */
static void rga_buf_return_buffers(struct vb2_queue *q,
				   enum vb2_buffer_state state)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *vbuf;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vbuf)
			break;
		v4l2_m2m_buf_done(vbuf, state);
	}
}

static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct rockchip_rga *rga = ctx->rga;
	int ret;

	/* Power up the RGA; on failure give the buffers back as QUEUED. */
	ret = pm_runtime_resume_and_get(rga->dev);
	if (ret < 0) {
		rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	return 0;
}

static void rga_buf_stop_streaming(struct vb2_queue *q)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct rockchip_rga *rga = ctx->rga;

	rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
	pm_runtime_put(rga->dev);
}

const struct vb2_ops rga_qops = {
	.queue_setup = rga_queue_setup,
	.buf_prepare = rga_buf_prepare,
	.buf_queue = rga_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = rga_buf_start_streaming,
	.stop_streaming = rga_buf_stop_streaming,
};

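/*
 * Usage note: rga_qops is installed on both m2m queues when a context
 * is opened. A minimal sketch of that wiring, following the usual
 * v4l2-mem2mem pattern (the field values here are illustrative
 * assumptions; this driver's queue_init() in rga.c is authoritative):
 *
 *	static int queue_init(void *priv, struct vb2_queue *src_vq,
 *			      struct vb2_queue *dst_vq)
 *	{
 *		struct rga_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &rga_qops;
 *		src_vq->mem_ops = &vb2_dma_sg_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->rga->mutex;
 *		src_vq->dev = ctx->rga->v4l2_dev.dev;
 *
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		// ...same fields as src_vq, then:
 *		return vb2_queue_init(dst_vq);
 *	}
 */
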
/*
 * The RGA has its own single-level MMU, which can't be driven through
 * the IOMMU API. Instead we fill a flat table with one 32-bit entry
 * per page, each holding that page's physical address, so the table is
 * effectively used as a scatter-gather list.
 */
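/*
 * Caller sketch (illustrative; the real call sites live in rga.c's
 * device_run): both buffers are expected to be mapped right before a
 * hardware job is kicked off:
 *
 *	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *	rga_buf_map(&src->vb2_buf);
 *	rga_buf_map(&dst->vb2_buf);
 *	rga_hw_start(rga);
 */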
void rga_buf_map(struct vb2_buffer *vb)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rockchip_rga *rga = ctx->rga;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned int *pages;
	unsigned int address, len, i, p;
	unsigned int mapped_size = 0;

	/* OUTPUT buffers fill the source table, CAPTURE buffers the destination. */
	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pages = rga->src_mmu_pages;
	else
		pages = rga->dst_mmu_pages;

	/* Create local MMU table for RGA */
	sgt = vb2_plane_cookie(vb, 0);

	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
		len = sg_dma_len(sgl) >> PAGE_SHIFT;
		address = sg_phys(sgl);

		/* One table entry per page of this scatterlist segment. */
		for (p = 0; p < len; p++) {
			dma_addr_t phys = address +
					  ((dma_addr_t)p << PAGE_SHIFT);

			pages[mapped_size + p] = phys;
		}

		mapped_size += len;
	}

	/* Sync the local MMU table so the RGA sees the updated entries. */
	dma_sync_single_for_device(rga->dev, virt_to_phys(pages),
				   8 * PAGE_SIZE, DMA_BIDIRECTIONAL);
}