xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_vdpu1.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * author:
6*4882a593Smuzhiyun  *	Alpha Lin, alpha.lin@rock-chips.com
7*4882a593Smuzhiyun  *	Randy Li, randy.li@rock-chips.com
8*4882a593Smuzhiyun  *	Ding Wei, leo.ding@rock-chips.com
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun #include <asm/cacheflush.h>
12*4882a593Smuzhiyun #include <linux/clk.h>
13*4882a593Smuzhiyun #include <linux/delay.h>
14*4882a593Smuzhiyun #include <linux/iopoll.h>
15*4882a593Smuzhiyun #include <linux/interrupt.h>
16*4882a593Smuzhiyun #include <linux/module.h>
17*4882a593Smuzhiyun #include <linux/types.h>
18*4882a593Smuzhiyun #include <linux/of_platform.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <linux/uaccess.h>
21*4882a593Smuzhiyun #include <linux/regmap.h>
22*4882a593Smuzhiyun #include <linux/proc_fs.h>
23*4882a593Smuzhiyun #include <soc/rockchip/pm_domains.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include "mpp_debug.h"
26*4882a593Smuzhiyun #include "mpp_common.h"
27*4882a593Smuzhiyun #include "mpp_iommu.h"
28*4882a593Smuzhiyun #include <soc/rockchip/rockchip_iommu.h>
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun #define VDPU1_DRIVER_NAME		"mpp_vdpu1"
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #define	VDPU1_SESSION_MAX_BUFFERS	40
33*4882a593Smuzhiyun /* The maximum registers number of all the version */
34*4882a593Smuzhiyun #define VDPU1_REG_NUM			60
35*4882a593Smuzhiyun #define VDPU1_REG_HW_ID_INDEX		0
36*4882a593Smuzhiyun #define VDPU1_REG_START_INDEX		0
37*4882a593Smuzhiyun #define VDPU1_REG_END_INDEX		59
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun #define VDPU1_REG_PP_NUM		101
40*4882a593Smuzhiyun #define VDPU1_REG_PP_START_INDEX	0
41*4882a593Smuzhiyun #define VDPU1_REG_PP_END_INDEX		100
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #define VDPU1_REG_DEC_INT_EN		0x004
44*4882a593Smuzhiyun #define VDPU1_REG_DEC_INT_EN_INDEX	(1)
45*4882a593Smuzhiyun /* B slice detected, used in 8190 decoder and later */
46*4882a593Smuzhiyun #define	VDPU1_INT_PIC_INF		BIT(24)
47*4882a593Smuzhiyun #define	VDPU1_INT_TIMEOUT		BIT(18)
48*4882a593Smuzhiyun #define	VDPU1_INT_SLICE			BIT(17)
49*4882a593Smuzhiyun #define	VDPU1_INT_STRM_ERROR		BIT(16)
50*4882a593Smuzhiyun #define	VDPU1_INT_ASO_ERROR		BIT(15)
51*4882a593Smuzhiyun #define	VDPU1_INT_BUF_EMPTY		BIT(14)
52*4882a593Smuzhiyun #define	VDPU1_INT_BUS_ERROR		BIT(13)
53*4882a593Smuzhiyun #define	VDPU1_DEC_INT			BIT(12)
54*4882a593Smuzhiyun #define	VDPU1_DEC_INT_RAW		BIT(8)
55*4882a593Smuzhiyun #define	VDPU1_DEC_IRQ_DIS		BIT(4)
56*4882a593Smuzhiyun #define	VDPU1_DEC_START			BIT(0)
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun /* NOTE: Don't enable it or decoding AVC would meet problem at rk3288 */
59*4882a593Smuzhiyun #define VDPU1_REG_DEC_EN		0x008
60*4882a593Smuzhiyun #define	VDPU1_CLOCK_GATE_EN		BIT(10)
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun #define VDPU1_REG_SOFT_RESET		0x194
63*4882a593Smuzhiyun #define VDPU1_REG_SOFT_RESET_INDEX	(101)
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun #define VDPU1_REG_SYS_CTRL		0x00c
66*4882a593Smuzhiyun #define VDPU1_REG_SYS_CTRL_INDEX	(3)
67*4882a593Smuzhiyun #define VDPU1_RGE_WIDTH_INDEX		(4)
68*4882a593Smuzhiyun #define	VDPU1_GET_FORMAT(x)		(((x) >> 28) & 0xf)
69*4882a593Smuzhiyun #define VDPU1_GET_PROD_NUM(x)		(((x) >> 16) & 0xffff)
70*4882a593Smuzhiyun #define VDPU1_GET_WIDTH(x)		(((x) & 0xff800000) >> 19)
71*4882a593Smuzhiyun #define	VDPU1_FMT_H264D			0
72*4882a593Smuzhiyun #define	VDPU1_FMT_MPEG4D		1
73*4882a593Smuzhiyun #define	VDPU1_FMT_H263D			2
74*4882a593Smuzhiyun #define	VDPU1_FMT_JPEGD			3
75*4882a593Smuzhiyun #define	VDPU1_FMT_VC1D			4
76*4882a593Smuzhiyun #define	VDPU1_FMT_MPEG2D		5
77*4882a593Smuzhiyun #define	VDPU1_FMT_MPEG1D		6
78*4882a593Smuzhiyun #define	VDPU1_FMT_VP6D			7
79*4882a593Smuzhiyun #define	VDPU1_FMT_RESERVED		8
80*4882a593Smuzhiyun #define	VDPU1_FMT_VP7D			9
81*4882a593Smuzhiyun #define	VDPU1_FMT_VP8D			10
82*4882a593Smuzhiyun #define	VDPU1_FMT_AVSD			11
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun #define VDPU1_REG_STREAM_RLC_BASE	0x030
85*4882a593Smuzhiyun #define VDPU1_REG_STREAM_RLC_BASE_INDEX	(12)
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun #define VDPU1_REG_DIR_MV_BASE		0x0a4
88*4882a593Smuzhiyun #define VDPU1_REG_DIR_MV_BASE_INDEX	(41)
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun #define VDPU1_REG_CLR_CACHE_BASE	0x810
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun #define to_vdpu_task(task)		\
93*4882a593Smuzhiyun 		container_of(task, struct vdpu_task, mpp_task)
94*4882a593Smuzhiyun #define to_vdpu_dev(dev)		\
95*4882a593Smuzhiyun 		container_of(dev, struct vdpu_dev, mpp)
96*4882a593Smuzhiyun 
/* hardware IDs read back from the ID register (VDPU1_REG_HW_ID_INDEX) */
enum VPUD1_HW_ID {
	VDPU1_ID_0102 = 0x0102,
	VDPU1_ID_9190 = 0x6731,
};
101*4882a593Smuzhiyun 
/* per-job state for one decode task submitted by userspace */
struct vdpu_task {
	struct mpp_task mpp_task;
	/* enable of post process */
	bool pp_enable;

	enum MPP_CLOCK_MODE clk_mode;
	/* register image; sized for the larger post-process layout */
	u32 reg[VDPU1_REG_PP_NUM];

	/* extra fd offset info extracted from MPP_CMD_SET_REG_ADDR_OFFSET */
	struct reg_offset_info off_inf;
	/* stream RLC base saved at alloc time, used to compute decoded length */
	u32 strm_addr;
	/* interrupt status latched by the ISR for this task */
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};
119*4882a593Smuzhiyun 
/* per-device state embedding the common mpp_dev core */
struct vdpu_dev {
	struct mpp_dev mpp;

	/* axi and ahb clock handles/rates */
	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	/* per-device procfs directory, NULL when unavailable */
	struct proc_dir_entry *procfs;
#endif
	/* cru reset controls, used when soft reset fails */
	struct reset_control *rst_a;
	struct reset_control *rst_h;
};
131*4882a593Smuzhiyun 
/* register layout for the plain decoder path (no post-process) */
static struct mpp_hw_info vdpu_v1_hw_info = {
	.reg_num = VDPU1_REG_NUM,
	.reg_id = VDPU1_REG_HW_ID_INDEX,
	.reg_start = VDPU1_REG_START_INDEX,
	.reg_end = VDPU1_REG_END_INDEX,
	.reg_en = VDPU1_REG_DEC_INT_EN_INDEX,
};
139*4882a593Smuzhiyun 
/* extended register layout when the post-processor is used */
static struct mpp_hw_info vdpu_pp_v1_hw_info = {
	.reg_num = VDPU1_REG_PP_NUM,
	.reg_id = VDPU1_REG_HW_ID_INDEX,
	.reg_start = VDPU1_REG_PP_START_INDEX,
	.reg_end = VDPU1_REG_PP_END_INDEX,
	.reg_en = VDPU1_REG_DEC_INT_EN_INDEX,
};
147*4882a593Smuzhiyun 
/*
 * file handle translate information
 *
 * Per-format lists of register indexes that carry dma-buf file
 * descriptors which must be translated to device iova addresses
 * before the task is handed to the hardware.
 */
static const u16 trans_tbl_avsd[] = {
	12, 13, 14, 15, 16, 17, 40, 41, 45
};

static const u16 trans_tbl_default[] = {
	12, 13, 14, 15, 16, 17, 40, 41
};

static const u16 trans_tbl_jpegd[] = {
	12, 13, 14, 40, 66, 67
};

static const u16 trans_tbl_h264d[] = {
	12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
	28, 29, 40
};

static const u16 trans_tbl_vc1d[] = {
	12, 13, 14, 15, 16, 17, 27, 41
};

static const u16 trans_tbl_vp6d[] = {
	12, 13, 14, 18, 27, 40
};

static const u16 trans_tbl_vp8d[] = {
	10, 12, 13, 14, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 40
};
179*4882a593Smuzhiyun 
/* fd-translation tables indexed by VDPU1_FMT_* decoding format */
static struct mpp_trans_info vdpu_v1_trans[] = {
	[VDPU1_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[VDPU1_FMT_H263D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_MPEG4D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_JPEGD] = {
		.count = ARRAY_SIZE(trans_tbl_jpegd),
		.table = trans_tbl_jpegd,
	},
	[VDPU1_FMT_VC1D] = {
		.count = ARRAY_SIZE(trans_tbl_vc1d),
		.table = trans_tbl_vc1d,
	},
	[VDPU1_FMT_MPEG2D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_MPEG1D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_VP6D] = {
		.count = ARRAY_SIZE(trans_tbl_vp6d),
		.table = trans_tbl_vp6d,
	},
	/* reserved format value has no fd registers to translate */
	[VDPU1_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VDPU1_FMT_VP7D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_VP8D] = {
		.count = ARRAY_SIZE(trans_tbl_vp8d),
		.table = trans_tbl_vp8d,
	},
	[VDPU1_FMT_AVSD] = {
		.count = ARRAY_SIZE(trans_tbl_avsd),
		.table = trans_tbl_avsd,
	},
};
230*4882a593Smuzhiyun 
/*
 * Translate the dma-buf fds packed into the task registers to device
 * iova addresses.
 *
 * The generic table-driven translation (vdpu_v1_trans, selected by the
 * decoding format) is done first; for H.264 the direct-MV base register
 * additionally packs an offset together with the fd and is fixed up
 * specially below.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int vdpu_process_reg_fd(struct mpp_session *session,
			       struct vdpu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret = 0;
	int fmt = VDPU1_GET_FORMAT(task->reg[VDPU1_REG_SYS_CTRL_INDEX]);

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;
	/*
	 * special offset scale case
	 *
	 * This translation is for fd + offset translation.
	 * One register has 32bits. We need to transfer both buffer file
	 * handle and the start address offset so we packet file handle
	 * and offset together using below format.
	 *
	 *  0~9  bit for buffer file handle range 0 ~ 1023
	 * 10~31 bit for offset range 0 ~ 4M
	 *
	 * But on 4K case the offset can be larger the 4M
	 */
	if (likely(fmt == VDPU1_FMT_H264D)) {
		int fd;
		u32 offset;
		dma_addr_t iova = 0;
		u32 idx = VDPU1_REG_DIR_MV_BASE_INDEX;
		struct mpp_mem_region *mem_region = NULL;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			/* register holds a plain fd, no packed offset */
			fd = task->reg[idx];
			offset = 0;
		} else {
			/* low 10 bits: fd; upper 22 bits: offset scaled by 16 */
			fd = task->reg[idx] & 0x3ff;
			offset = task->reg[idx] >> 10 << 4;
		}
		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
		if (IS_ERR(mem_region)) {
			mpp_err("reg[%03d]: %08x fd %d attach failed\n",
				idx, task->reg[idx], fd);
			goto fail;
		}

		iova = mem_region->iova;
		mpp_debug(DEBUG_IOMMU, "DMV[%3d]: %3d => %pad + offset %10d\n",
			  idx, fd, &iova, offset);
		task->reg[idx] = iova + offset;
	}

	/* apply user-supplied extra offsets to the translated registers */
	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
fail:
	return -EFAULT;
}
288*4882a593Smuzhiyun 
/*
 * Extract the register read/write requests of one task from userspace
 * messages.
 *
 * MPP_CMD_SET_REG_WRITE payloads are copied into task->reg and their
 * descriptors saved in w_reqs so vdpu_run() knows which register ranges
 * to program; MPP_CMD_SET_REG_READ descriptors are saved in r_reqs for
 * readback in vdpu_finish()/vdpu_result().
 *
 * Returns 0 on success, -EIO if the user copy fails.
 */
static int vdpu_extract_task_msg(struct vdpu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			/* out-of-range requests are skipped, not failed */
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			/* extra fd offsets, consumed by vdpu_process_reg_fd() */
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}
342*4882a593Smuzhiyun 
/*
 * Allocate and initialize one decode task from userspace messages.
 *
 * Selects the post-process register layout when the session was opened
 * on the VDPU1_PP device, extracts the register requests, and (unless
 * the caller asked for no translation) resolves buffer fds to iova
 * addresses.
 *
 * Returns the embedded mpp_task on success, NULL on failure.
 */
static void *vdpu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vdpu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	if (session->device_type == MPP_DEVICE_VDPU1_PP) {
		task->pp_enable = true;
		mpp_task->hw_info = &vdpu_pp_v1_hw_info;
	} else {
		mpp_task->hw_info = mpp->var->hw_info;
	}
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	/* remember the stream base so vdpu_finish() can compute the length */
	task->strm_addr = task->reg[VDPU1_REG_STREAM_RLC_BASE_INDEX];
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	/* dump state for debugging, then release everything */
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}
390*4882a593Smuzhiyun 
/*
 * Program the hardware registers of one task and kick off decoding.
 *
 * All registers from the saved write requests are written first; the
 * enable register is passed to mpp_write_req() as reg_en (presumably
 * excluded there — confirm in mpp_common) and written last with the
 * start bit after a write barrier.
 */
static int vdpu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpu_task *task = to_vdpu_task(mpp_task);
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VDPU1_REG_CLR_CACHE_BASE, 1);
	/* set registers for hardware */
	reg_en = mpp_task->hw_info->reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

	/* Flush the register before the start the device */
	wmb();
	mpp_write(mpp, VDPU1_REG_DEC_INT_EN,
		  task->reg[reg_en] | VDPU1_DEC_START);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}
432*4882a593Smuzhiyun 
/*
 * Read back registers after the hardware finished and fix up the
 * values userspace expects.
 *
 * The interrupt-status register is restored from the value latched in
 * the ISR, and the stream-base register is rewritten with the decoded
 * length (current stream pointer minus start address) shifted into the
 * upper bits, mirroring the packed fd+offset layout used on input.
 */
static int vdpu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	u32 dec_get;
	s32 dec_length;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* revert hack for irq status */
	task->reg[VDPU1_REG_DEC_INT_EN_INDEX] = task->irq_status;
	/* revert hack for decoded length */
	dec_get = mpp_read_relaxed(mpp, VDPU1_REG_STREAM_RLC_BASE);
	dec_length = dec_get - task->strm_addr;
	task->reg[VDPU1_REG_STREAM_RLC_BASE_INDEX] = dec_length << 10;
	mpp_debug(DEBUG_REGISTER,
		  "dec_get %08x dec_length %d\n", dec_get, dec_length);

	mpp_debug_leave();

	return 0;
}
465*4882a593Smuzhiyun 
vdpu_result(struct mpp_dev * mpp,struct mpp_task * mpp_task,struct mpp_task_msgs * msgs)466*4882a593Smuzhiyun static int vdpu_result(struct mpp_dev *mpp,
467*4882a593Smuzhiyun 		       struct mpp_task *mpp_task,
468*4882a593Smuzhiyun 		       struct mpp_task_msgs *msgs)
469*4882a593Smuzhiyun {
470*4882a593Smuzhiyun 	u32 i;
471*4882a593Smuzhiyun 	struct mpp_request *req;
472*4882a593Smuzhiyun 	struct vdpu_task *task = to_vdpu_task(mpp_task);
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 	/* FIXME may overflow the kernel */
475*4882a593Smuzhiyun 	for (i = 0; i < task->r_req_cnt; i++) {
476*4882a593Smuzhiyun 		req = &task->r_reqs[i];
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 		if (copy_to_user(req->data,
479*4882a593Smuzhiyun 				 (u8 *)task->reg + req->offset,
480*4882a593Smuzhiyun 				 req->size)) {
481*4882a593Smuzhiyun 			mpp_err("copy_to_user reg fail\n");
482*4882a593Smuzhiyun 			return -EIO;
483*4882a593Smuzhiyun 		}
484*4882a593Smuzhiyun 	}
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 	return 0;
487*4882a593Smuzhiyun }
488*4882a593Smuzhiyun 
/* Release all resources held by a task, then the task object itself. */
static int vdpu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	mpp_task_finalize(session, mpp_task);
	kfree(to_vdpu_task(mpp_task));

	return 0;
}
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
vdpu_procfs_remove(struct mpp_dev * mpp)501*4882a593Smuzhiyun static int vdpu_procfs_remove(struct mpp_dev *mpp)
502*4882a593Smuzhiyun {
503*4882a593Smuzhiyun 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
504*4882a593Smuzhiyun 
505*4882a593Smuzhiyun 	if (dec->procfs) {
506*4882a593Smuzhiyun 		proc_remove(dec->procfs);
507*4882a593Smuzhiyun 		dec->procfs = NULL;
508*4882a593Smuzhiyun 	}
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 	return 0;
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun 
vdpu_procfs_init(struct mpp_dev * mpp)513*4882a593Smuzhiyun static int vdpu_procfs_init(struct mpp_dev *mpp)
514*4882a593Smuzhiyun {
515*4882a593Smuzhiyun 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
516*4882a593Smuzhiyun 
517*4882a593Smuzhiyun 	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
518*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(dec->procfs)) {
519*4882a593Smuzhiyun 		mpp_err("failed on open procfs\n");
520*4882a593Smuzhiyun 		dec->procfs = NULL;
521*4882a593Smuzhiyun 		return -EIO;
522*4882a593Smuzhiyun 	}
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	/* for common mpp_dev options */
525*4882a593Smuzhiyun 	mpp_procfs_create_common(dec->procfs, mpp);
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun 	mpp_procfs_create_u32("aclk", 0644,
528*4882a593Smuzhiyun 			      dec->procfs, &dec->aclk_info.debug_rate_hz);
529*4882a593Smuzhiyun 	mpp_procfs_create_u32("session_buffers", 0644,
530*4882a593Smuzhiyun 			      dec->procfs, &mpp->session_max_buffers);
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun 	return 0;
533*4882a593Smuzhiyun }
534*4882a593Smuzhiyun #else
/* procfs support disabled: no-op stub */
static inline int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}
539*4882a593Smuzhiyun 
/* procfs support disabled: no-op stub */
static inline int vdpu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
544*4882a593Smuzhiyun #endif
545*4882a593Smuzhiyun 
/*
 * One-time device setup: bind the GRF info and look up clocks and
 * reset controls from the device tree.  Missing clocks or resets are
 * only logged, not treated as fatal, so this always returns 0.
 */
static int vdpu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VDPU1];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource define\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource define\n");

	return 0;
}
573*4882a593Smuzhiyun 
vdpu_3036_init(struct mpp_dev * mpp)574*4882a593Smuzhiyun static int vdpu_3036_init(struct mpp_dev *mpp)
575*4882a593Smuzhiyun {
576*4882a593Smuzhiyun 	vdpu_init(mpp);
577*4882a593Smuzhiyun 	set_bit(mpp->var->device_type, &mpp->queue->dev_active_flags);
578*4882a593Smuzhiyun 	return 0;
579*4882a593Smuzhiyun }
580*4882a593Smuzhiyun 
vdpu_clk_on(struct mpp_dev * mpp)581*4882a593Smuzhiyun static int vdpu_clk_on(struct mpp_dev *mpp)
582*4882a593Smuzhiyun {
583*4882a593Smuzhiyun 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	mpp_clk_safe_enable(dec->aclk_info.clk);
586*4882a593Smuzhiyun 	mpp_clk_safe_enable(dec->hclk_info.clk);
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun 	return 0;
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun 
vdpu_clk_off(struct mpp_dev * mpp)591*4882a593Smuzhiyun static int vdpu_clk_off(struct mpp_dev *mpp)
592*4882a593Smuzhiyun {
593*4882a593Smuzhiyun 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 	mpp_clk_safe_disable(dec->aclk_info.clk);
596*4882a593Smuzhiyun 	mpp_clk_safe_disable(dec->hclk_info.clk);
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun 	return 0;
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun 
vdpu_3288_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)601*4882a593Smuzhiyun static int vdpu_3288_get_freq(struct mpp_dev *mpp,
602*4882a593Smuzhiyun 			      struct mpp_task *mpp_task)
603*4882a593Smuzhiyun {
604*4882a593Smuzhiyun 	u32 width;
605*4882a593Smuzhiyun 	struct vdpu_task *task = to_vdpu_task(mpp_task);
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	width = VDPU1_GET_WIDTH(task->reg[VDPU1_RGE_WIDTH_INDEX]);
608*4882a593Smuzhiyun 	if (width > 2560)
609*4882a593Smuzhiyun 		task->clk_mode = CLK_MODE_ADVANCED;
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	return 0;
612*4882a593Smuzhiyun }
613*4882a593Smuzhiyun 
vdpu_3368_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)614*4882a593Smuzhiyun static int vdpu_3368_get_freq(struct mpp_dev *mpp,
615*4882a593Smuzhiyun 			      struct mpp_task *mpp_task)
616*4882a593Smuzhiyun {
617*4882a593Smuzhiyun 	u32 width;
618*4882a593Smuzhiyun 	struct vdpu_task *task = to_vdpu_task(mpp_task);
619*4882a593Smuzhiyun 
620*4882a593Smuzhiyun 	width = VDPU1_GET_WIDTH(task->reg[VDPU1_RGE_WIDTH_INDEX]);
621*4882a593Smuzhiyun 	if (width > 2560)
622*4882a593Smuzhiyun 		task->clk_mode = CLK_MODE_ADVANCED;
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 	return 0;
625*4882a593Smuzhiyun }
626*4882a593Smuzhiyun 
vdpu_set_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)627*4882a593Smuzhiyun static int vdpu_set_freq(struct mpp_dev *mpp,
628*4882a593Smuzhiyun 			 struct mpp_task *mpp_task)
629*4882a593Smuzhiyun {
630*4882a593Smuzhiyun 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
631*4882a593Smuzhiyun 	struct vdpu_task *task = to_vdpu_task(mpp_task);
632*4882a593Smuzhiyun 
633*4882a593Smuzhiyun 	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 	return 0;
636*4882a593Smuzhiyun }
637*4882a593Smuzhiyun 
vdpu_reduce_freq(struct mpp_dev * mpp)638*4882a593Smuzhiyun static int vdpu_reduce_freq(struct mpp_dev *mpp)
639*4882a593Smuzhiyun {
640*4882a593Smuzhiyun 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun 	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun 	return 0;
645*4882a593Smuzhiyun }
646*4882a593Smuzhiyun 
/*
 * Hard-irq handler: latch the interrupt status and quiesce the
 * hardware; task completion happens in the threaded handler
 * (vdpu_isr).  Returns IRQ_NONE when the raw status bit is clear.
 */
static int vdpu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VDPU1_REG_DEC_INT_EN);
	if (!(mpp->irq_status & VDPU1_DEC_INT_RAW))
		return IRQ_NONE;

	/* clear and disable the decoder interrupt */
	mpp_write(mpp, VDPU1_REG_DEC_INT_EN, 0);
	/* set clock gating to save power */
	mpp_write(mpp, VDPU1_REG_DEC_EN, VDPU1_CLOCK_GATE_EN);

	return IRQ_WAKE_THREAD;
}
659*4882a593Smuzhiyun 
/*
 * Threaded interrupt handler: finish the current task and schedule a
 * device reset when the hardware reported an error condition.
 */
static int vdpu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vdpu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpu_task(mpp_task);
	/* status was latched by vdpu_irq() before it cleared the register */
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	/* any of these error bits requests a device reset */
	err_mask = VDPU1_INT_TIMEOUT
		| VDPU1_INT_STRM_ERROR
		| VDPU1_INT_ASO_ERROR
		| VDPU1_INT_BUF_EMPTY
		| VDPU1_INT_BUS_ERROR;

	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}
693*4882a593Smuzhiyun 
vdpu_soft_reset(struct mpp_dev * mpp)694*4882a593Smuzhiyun static int vdpu_soft_reset(struct mpp_dev *mpp)
695*4882a593Smuzhiyun {
696*4882a593Smuzhiyun 	u32 val;
697*4882a593Smuzhiyun 	u32 ret;
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	mpp_write(mpp, VDPU1_REG_SOFT_RESET, 1);
700*4882a593Smuzhiyun 	ret = readl_relaxed_poll_timeout(mpp->reg_base + VDPU1_REG_SOFT_RESET,
701*4882a593Smuzhiyun 					 val, !val, 0, 5);
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	return ret;
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun 
vdpu_reset(struct mpp_dev * mpp)706*4882a593Smuzhiyun static int vdpu_reset(struct mpp_dev *mpp)
707*4882a593Smuzhiyun {
708*4882a593Smuzhiyun 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
709*4882a593Smuzhiyun 	u32 ret = 0;
710*4882a593Smuzhiyun 
711*4882a593Smuzhiyun 	/* soft reset first */
712*4882a593Smuzhiyun 	ret = vdpu_soft_reset(mpp);
713*4882a593Smuzhiyun 	if (ret && dec->rst_a && dec->rst_h) {
714*4882a593Smuzhiyun 		mpp_err("soft reset failed, use cru reset!\n");
715*4882a593Smuzhiyun 		mpp_debug(DEBUG_RESET, "reset in\n");
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 		/* Don't skip this or iommu won't work after reset */
718*4882a593Smuzhiyun 		mpp_pmu_idle_request(mpp, true);
719*4882a593Smuzhiyun 		mpp_safe_reset(dec->rst_a);
720*4882a593Smuzhiyun 		mpp_safe_reset(dec->rst_h);
721*4882a593Smuzhiyun 		udelay(5);
722*4882a593Smuzhiyun 		mpp_safe_unreset(dec->rst_a);
723*4882a593Smuzhiyun 		mpp_safe_unreset(dec->rst_h);
724*4882a593Smuzhiyun 		mpp_pmu_idle_request(mpp, false);
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 		mpp_debug(DEBUG_RESET, "reset out\n");
727*4882a593Smuzhiyun 	}
728*4882a593Smuzhiyun 	mpp_write(mpp, VDPU1_REG_DEC_INT_EN, 0);
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	return 0;
731*4882a593Smuzhiyun }
732*4882a593Smuzhiyun 
/*
 * vdpu_3036_set_grf - apply this device's GRF selection to the shared core.
 *
 * NOTE(review): on this SoC the devices on one taskqueue appear to share
 * a single hardware core muxed via the GRF — confirm against the mpp
 * service code.  When the GRF selection changed, every other active
 * device on the queue is reset and its iommu disabled before this
 * device's GRF setting and iommu are enabled.
 */
static int vdpu_3036_set_grf(struct mpp_dev *mpp)
{
	int grf_changed;
	struct mpp_dev *loop = NULL, *n;
	struct mpp_taskqueue *queue = mpp->queue;
	bool pd_is_on;

	grf_changed = mpp_grf_is_changed(mpp->grf_info);
	if (grf_changed) {

		/*
		 * in this case, devices share the queue also share the same pd&clk,
		 * so use mpp->dev's pd to control all the process is okay
		 */
		pd_is_on = rockchip_pmu_pd_is_on(mpp->dev);
		if (!pd_is_on)
			rockchip_pmu_pd_on(mpp->dev);
		mpp->hw_ops->clk_on(mpp);

		/* quiesce every device currently marked active on the queue */
		list_for_each_entry_safe(loop, n, &queue->dev_list, queue_link) {
			if (test_bit(loop->var->device_type, &queue->dev_active_flags)) {
				mpp_set_grf(loop->grf_info);
				if (loop->hw_ops->clk_on)
					loop->hw_ops->clk_on(loop);
				if (loop->hw_ops->reset)
					loop->hw_ops->reset(loop);
				/* iommu must go down while the core is re-routed */
				rockchip_iommu_disable(loop->dev);
				if (loop->hw_ops->clk_off)
					loop->hw_ops->clk_off(loop);
				clear_bit(loop->var->device_type, &queue->dev_active_flags);
			}
		}

		/* route the core to this device and bring its iommu back up */
		mpp_set_grf(mpp->grf_info);
		rockchip_iommu_enable(mpp->dev);
		set_bit(mpp->var->device_type, &queue->dev_active_flags);

		mpp->hw_ops->clk_off(mpp);
		/* restore the power-domain state found on entry */
		if (!pd_is_on)
			rockchip_pmu_pd_off(mpp->dev);
	}

	return 0;
}
777*4882a593Smuzhiyun 
/* Hardware ops for the generic VDPU1 core (fixed-frequency table). */
static struct mpp_hw_ops vdpu_v1_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
	.set_grf = vdpu_3036_set_grf,
};
787*4882a593Smuzhiyun 
/* RK3036 variant: same as v1 ops except for the SoC-specific init. */
static struct mpp_hw_ops vdpu_3036_hw_ops = {
	.init = vdpu_3036_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
	.set_grf = vdpu_3036_set_grf,
};
797*4882a593Smuzhiyun 
/* RK3288 variant: adds a get_freq hook, no GRF muxing. */
static struct mpp_hw_ops vdpu_3288_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.get_freq = vdpu_3288_get_freq,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};
807*4882a593Smuzhiyun 
/* RK3368 variant: adds a get_freq hook, no GRF muxing. */
static struct mpp_hw_ops vdpu_3368_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.get_freq = vdpu_3368_get_freq,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};
817*4882a593Smuzhiyun 
/* Task lifecycle callbacks shared by every VDPU1 variant. */
static struct mpp_dev_ops vdpu_v1_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};
827*4882a593Smuzhiyun 
/* Device description: generic VDPU1 decoder. */
static const struct mpp_dev_var vdpu_v1_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_v1_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};
835*4882a593Smuzhiyun 
/* Device description: RK3036 VDPU1 (SoC-specific hw ops). */
static const struct mpp_dev_var vdpu_3036_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_3036_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};
843*4882a593Smuzhiyun 
/* Device description: RK3288 VDPU1 (SoC-specific hw ops). */
static const struct mpp_dev_var vdpu_3288_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_3288_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};
851*4882a593Smuzhiyun 
/* Device description: RK3368 VDPU1 (SoC-specific hw ops). */
static const struct mpp_dev_var vdpu_3368_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_3368_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};
859*4882a593Smuzhiyun 
/* Device description: AVS+ decoder built on the same VDPU1 hardware. */
static const struct mpp_dev_var avsd_plus_data = {
	.device_type = MPP_DEVICE_AVSPLUS_DEC,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_v1_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};
867*4882a593Smuzhiyun 
/*
 * Device-tree match table.  SoC-specific entries are compiled in only
 * when the corresponding CPU Kconfig option is enabled.
 */
static const struct of_device_id mpp_vdpu1_dt_match[] = {
	{
		.compatible = "rockchip,vpu-decoder-v1",
		.data = &vdpu_v1_data,
	},
#ifdef CONFIG_CPU_RK3288
	{
		.compatible = "rockchip,vpu-decoder-rk3288",
		.data = &vdpu_3288_data,
	},
#endif
#ifdef CONFIG_CPU_RK3036
	{
		.compatible = "rockchip,vpu-decoder-rk3036",
		.data = &vdpu_3036_data,
	},
#endif
#ifdef CONFIG_CPU_RK3368
	{
		.compatible = "rockchip,vpu-decoder-rk3368",
		.data = &vdpu_3368_data,
	},
#endif
	{
		.compatible = "rockchip,avs-plus-decoder",
		.data = &avsd_plus_data,
	},
	{},
};
897*4882a593Smuzhiyun 
vdpu_probe(struct platform_device * pdev)898*4882a593Smuzhiyun static int vdpu_probe(struct platform_device *pdev)
899*4882a593Smuzhiyun {
900*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
901*4882a593Smuzhiyun 	struct vdpu_dev *dec = NULL;
902*4882a593Smuzhiyun 	struct mpp_dev *mpp = NULL;
903*4882a593Smuzhiyun 	const struct of_device_id *match = NULL;
904*4882a593Smuzhiyun 	int ret = 0;
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun 	dev_info(dev, "probe device\n");
907*4882a593Smuzhiyun 	dec = devm_kzalloc(dev, sizeof(struct vdpu_dev), GFP_KERNEL);
908*4882a593Smuzhiyun 	if (!dec)
909*4882a593Smuzhiyun 		return -ENOMEM;
910*4882a593Smuzhiyun 	mpp = &dec->mpp;
911*4882a593Smuzhiyun 	platform_set_drvdata(pdev, mpp);
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun 	if (pdev->dev.of_node) {
914*4882a593Smuzhiyun 		match = of_match_node(mpp_vdpu1_dt_match, pdev->dev.of_node);
915*4882a593Smuzhiyun 		if (match)
916*4882a593Smuzhiyun 			mpp->var = (struct mpp_dev_var *)match->data;
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 		mpp->core_id = of_alias_get_id(pdev->dev.of_node, "vdpu");
919*4882a593Smuzhiyun 	}
920*4882a593Smuzhiyun 
921*4882a593Smuzhiyun 	ret = mpp_dev_probe(mpp, pdev);
922*4882a593Smuzhiyun 	if (ret) {
923*4882a593Smuzhiyun 		dev_err(dev, "probe sub driver failed\n");
924*4882a593Smuzhiyun 		return -EINVAL;
925*4882a593Smuzhiyun 	}
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun 	ret = devm_request_threaded_irq(dev, mpp->irq,
928*4882a593Smuzhiyun 					mpp_dev_irq,
929*4882a593Smuzhiyun 					mpp_dev_isr_sched,
930*4882a593Smuzhiyun 					IRQF_SHARED,
931*4882a593Smuzhiyun 					dev_name(dev), mpp);
932*4882a593Smuzhiyun 	if (ret) {
933*4882a593Smuzhiyun 		dev_err(dev, "register interrupter runtime failed\n");
934*4882a593Smuzhiyun 		return -EINVAL;
935*4882a593Smuzhiyun 	}
936*4882a593Smuzhiyun 
937*4882a593Smuzhiyun 	if (mpp->var->device_type == MPP_DEVICE_VDPU1) {
938*4882a593Smuzhiyun 		mpp->srv->sub_devices[MPP_DEVICE_VDPU1_PP] = mpp;
939*4882a593Smuzhiyun 		set_bit(MPP_DEVICE_VDPU1_PP, &mpp->srv->hw_support);
940*4882a593Smuzhiyun 	}
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	mpp->session_max_buffers = VDPU1_SESSION_MAX_BUFFERS;
943*4882a593Smuzhiyun 	vdpu_procfs_init(mpp);
944*4882a593Smuzhiyun 	/* register current device to mpp service */
945*4882a593Smuzhiyun 	mpp_dev_register_srv(mpp, mpp->srv);
946*4882a593Smuzhiyun 	dev_info(dev, "probing finish\n");
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	return 0;
949*4882a593Smuzhiyun }
950*4882a593Smuzhiyun 
/*
 * Platform driver remove: detach the device from the mpp core and tear
 * down its procfs entries.  All memory is devm-managed, so nothing is
 * freed explicitly here.
 */
static int vdpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpp_dev *mpp = dev_get_drvdata(dev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(mpp);
	vdpu_procfs_remove(mpp);

	return 0;
}
962*4882a593Smuzhiyun 
/*
 * Platform driver definition.  Exported (not module_platform_driver)
 * so it can be registered externally — presumably by the central mpp
 * service driver; confirm against the mpp_service registration code.
 */
struct platform_driver rockchip_vdpu1_driver = {
	.probe = vdpu_probe,
	.remove = vdpu_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = VDPU1_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpu1_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpu1_driver);
973