// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/proc_fs.h>
#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"
#include "hack/mpp_hack_px30.h"

#define VDPU2_DRIVER_NAME		"mpp_vdpu2"

#define VDPU2_SESSION_MAX_BUFFERS	40
/* The maximum register count across all hardware versions */
#define VDPU2_REG_NUM			159
#define VDPU2_REG_HW_ID_INDEX		-1 /* INVALID */
#define VDPU2_REG_START_INDEX		50
#define VDPU2_REG_END_INDEX		158

#define VDPU2_REG_SYS_CTRL		0x0d4
#define VDPU2_REG_SYS_CTRL_INDEX	(53)
#define VDPU2_GET_FORMAT(x)		((x) & 0xf)
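/*
 * The low 4 bits of SYS_CTRL (reg 53) carry the decoding format, which is
 * used to pick the matching address-translation table defined below.
 */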
#define VDPU2_FMT_H264D			0
#define VDPU2_FMT_MPEG4D		1
#define VDPU2_FMT_H263D			2
#define VDPU2_FMT_JPEGD			3
#define VDPU2_FMT_VC1D			4
#define VDPU2_FMT_MPEG2D		5
#define VDPU2_FMT_MPEG1D		6
#define VDPU2_FMT_VP6D			7
#define VDPU2_FMT_RESERVED		8
#define VDPU2_FMT_VP7D			9
#define VDPU2_FMT_VP8D			10
#define VDPU2_FMT_AVSD			11

#define VDPU2_REG_DEC_INT		0x0dc
#define VDPU2_REG_DEC_INT_INDEX		(55)
#define VDPU2_INT_TIMEOUT		BIT(13)
#define VDPU2_INT_STRM_ERROR		BIT(12)
#define VDPU2_INT_SLICE			BIT(9)
#define VDPU2_INT_ASO_ERROR		BIT(8)
#define VDPU2_INT_BUF_EMPTY		BIT(6)
#define VDPU2_INT_BUS_ERROR		BIT(5)
#define VDPU2_DEC_INT			BIT(4)
#define VDPU2_DEC_IRQ_DIS		BIT(1)
#define VDPU2_DEC_INT_RAW		BIT(0)

#define VDPU2_REG_DEC_EN		0x0e4
#define VDPU2_REG_DEC_EN_INDEX		(57)
#define VDPU2_DEC_CLOCK_GATE_EN		BIT(4)
#define VDPU2_DEC_START			BIT(0)

#define VDPU2_REG_SOFT_RESET		0x0e8
#define VDPU2_REG_SOFT_RESET_INDEX	(58)

#define VDPU2_REG_DIR_MV_BASE		0x0f8
#define VDPU2_REG_DIR_MV_BASE_INDEX	(62)

#define VDPU2_REG_STREAM_RLC_BASE	0x100
#define VDPU2_REG_STREAM_RLC_BASE_INDEX	(64)

#define VDPU2_REG_CLR_CACHE_BASE	0x810

#define to_vdpu_task(task)		\
		container_of(task, struct vdpu_task, mpp_task)
#define to_vdpu_dev(dev)		\
		container_of(dev, struct vdpu_dev, mpp)

struct vdpu_task {
	struct mpp_task mpp_task;

	enum MPP_CLOCK_MODE clk_mode;
	u32 reg[VDPU2_REG_NUM];

	struct reg_offset_info off_inf;
	u32 strm_addr;
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

struct vdpu_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
};

static struct mpp_hw_info vdpu_v2_hw_info = {
	.reg_num = VDPU2_REG_NUM,
	.reg_id = VDPU2_REG_HW_ID_INDEX,
	.reg_start = VDPU2_REG_START_INDEX,
	.reg_end = VDPU2_REG_END_INDEX,
	.reg_en = VDPU2_REG_DEC_EN_INDEX,
};

/*
 * file handle translate information
 */
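/*
 * Each table entry is a register index whose value holds a dma-buf fd that
 * is translated to a device iova before the task is handed to the hardware.
 */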
static const u16 trans_tbl_default[] = {
	61, 62, 63, 64, 131, 134, 135, 148
};

static const u16 trans_tbl_jpegd[] = {
	21, 22, 61, 63, 64, 131
};

static const u16 trans_tbl_h264d[] = {
	61, 63, 64, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
	98, 99
};

static const u16 trans_tbl_vc1d[] = {
	62, 63, 64, 131, 134, 135, 145, 148
};

static const u16 trans_tbl_vp6d[] = {
	61, 63, 64, 131, 136, 145
};

static const u16 trans_tbl_vp8d[] = {
	61, 63, 64, 131, 136, 137, 140, 141, 142, 143, 144, 145, 146, 147, 149
};

static struct mpp_trans_info vdpu_v2_trans[] = {
	[VDPU2_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[VDPU2_FMT_H263D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_MPEG4D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_JPEGD] = {
		.count = ARRAY_SIZE(trans_tbl_jpegd),
		.table = trans_tbl_jpegd,
	},
	[VDPU2_FMT_VC1D] = {
		.count = ARRAY_SIZE(trans_tbl_vc1d),
		.table = trans_tbl_vc1d,
	},
	[VDPU2_FMT_MPEG2D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_MPEG1D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_VP6D] = {
		.count = ARRAY_SIZE(trans_tbl_vp6d),
		.table = trans_tbl_vp6d,
	},
	[VDPU2_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VDPU2_FMT_VP7D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_VP8D] = {
		.count = ARRAY_SIZE(trans_tbl_vp8d),
		.table = trans_tbl_vp8d,
	},
	[VDPU2_FMT_AVSD] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
};

static int vdpu_process_reg_fd(struct mpp_session *session,
			       struct vdpu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret = 0;
	int fmt = VDPU2_GET_FORMAT(task->reg[VDPU2_REG_SYS_CTRL_INDEX]);

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;

	if (likely(fmt == VDPU2_FMT_H264D)) {
		int fd;
		u32 offset;
		dma_addr_t iova = 0;
		struct mpp_mem_region *mem_region = NULL;
		int idx = VDPU2_REG_DIR_MV_BASE_INDEX;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			fd = task->reg[idx];
			offset = 0;
		} else {
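			/*
			 * Packed register layout used by userspace here:
			 * the low 10 bits hold the dma-buf fd and the
			 * remaining upper bits hold an offset in units of
			 * 16 bytes, as decoded below.
			 */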
			fd = task->reg[idx] & 0x3ff;
			offset = task->reg[idx] >> 10 << 4;
		}
		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
		if (IS_ERR(mem_region)) {
			mpp_err("reg[%3d]: %08x fd %d attach failed\n",
				idx, task->reg[idx], fd);
			return -EFAULT;
		}

		iova = mem_region->iova;
		mpp_debug(DEBUG_IOMMU, "DMV[%3d]: %3d => %pad + offset %10d\n",
			  idx, fd, &iova, offset);
		task->reg[idx] = iova + offset;
	}
	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
}

static int vdpu_extract_task_msg(struct vdpu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

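		/*
		 * Stage the request: register writes are copied into
		 * task->reg and queued in w_reqs so vdpu_run() can program
		 * the hardware; register reads are queued in r_reqs so
		 * vdpu_finish()/vdpu_result() can return them to userspace.
		 */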
		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

static void *vdpu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vdpu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
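	/*
	 * Remember the stream base address so vdpu_finish() can work out
	 * how many bytes the hardware consumed.
	 */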
	task->strm_addr = task->reg[VDPU2_REG_STREAM_RLC_BASE_INDEX];
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

static int vdpu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpu_task *task = to_vdpu_task(mpp_task);
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VDPU2_REG_CLR_CACHE_BASE, 1);
	/* set registers for hardware */
	reg_en = mpp_task->hw_info->reg_en;
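	/*
	 * Replay the staged write requests. The enable register index is
	 * passed to mpp_write_req() so the start bit can be left to the
	 * explicit write below (assumption based on the common helper).
	 */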
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

	/* Flush the registers */
	wmb();
	mpp_write(mpp, VDPU2_REG_DEC_EN,
		  task->reg[reg_en] | VDPU2_DEC_START);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}

static int vdpu_px30_run(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	mpp_iommu_flush_tlb(mpp->iommu_info);
	return vdpu_run(mpp, mpp_task);
}

static int vdpu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	u32 dec_get;
	s32 dec_length;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* revert hack for irq status */
	task->reg[VDPU2_REG_DEC_INT_INDEX] = task->irq_status;
	/* revert hack for decoded length */
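	/*
	 * The hardware advances the RLC/stream base register as it consumes
	 * the bitstream; report the consumed byte count back in the upper
	 * bits of that register (mirroring the fd/offset packing used on
	 * input, assumption based on the shift below).
	 */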
	dec_get = mpp_read_relaxed(mpp, VDPU2_REG_STREAM_RLC_BASE);
	dec_length = dec_get - task->strm_addr;
	task->reg[VDPU2_REG_STREAM_RLC_BASE_INDEX] = dec_length << 10;
	mpp_debug(DEBUG_REGISTER,
		  "dec_get %08x dec_length %d\n", dec_get, dec_length);

	mpp_debug_leave();

	return 0;
}

static int vdpu_result(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task,
		       struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	/* FIXME may overflow the kernel */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (copy_to_user(req->data,
				 (u8 *)task->reg + req->offset,
				 req->size)) {
			mpp_err("copy_to_user reg fail\n");
			return -EIO;
		}
	}

	return 0;
}

static int vdpu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	if (dec->procfs) {
		proc_remove(dec->procfs);
		dec->procfs = NULL;
	}

	return 0;
}

static int vdpu_procfs_init(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(dec->procfs)) {
		mpp_err("failed on open procfs\n");
		dec->procfs = NULL;
		return -EIO;
	}

	/* for common mpp_dev options */
	mpp_procfs_create_common(dec->procfs, mpp);

	mpp_procfs_create_u32("aclk", 0644,
			      dec->procfs, &dec->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      dec->procfs, &mpp->session_max_buffers);

	return 0;
}
#else
static inline int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vdpu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

static int vdpu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VDPU2];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource define\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource define\n");

	return 0;
}

static int vdpu_px30_init(struct mpp_dev *mpp)
{
	vdpu_init(mpp);
	return px30_workaround_combo_init(mpp);
}

static int vdpu_clk_on(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_safe_enable(dec->aclk_info.clk);
	mpp_clk_safe_enable(dec->hclk_info.clk);

	return 0;
}

static int vdpu_clk_off(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_safe_disable(dec->aclk_info.clk);
	mpp_clk_safe_disable(dec->hclk_info.clk);

	return 0;
}

static int vdpu_set_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);

	return 0;
}

static int vdpu_reduce_freq(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);

	return 0;
}

static int vdpu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VDPU2_REG_DEC_INT);
	if (!(mpp->irq_status & VDPU2_DEC_INT_RAW))
		return IRQ_NONE;

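	/* clear the interrupt status before waking the threaded handler */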
	mpp_write(mpp, VDPU2_REG_DEC_INT, 0);
	/* set clock gating to save power */
	mpp_write(mpp, VDPU2_REG_DEC_EN, VDPU2_DEC_CLOCK_GATE_EN);

	return IRQ_WAKE_THREAD;
}

static int vdpu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vdpu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpu_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	err_mask = VDPU2_INT_TIMEOUT
		| VDPU2_INT_STRM_ERROR
		| VDPU2_INT_ASO_ERROR
		| VDPU2_INT_BUF_EMPTY
		| VDPU2_INT_BUS_ERROR;

	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

static int vdpu_soft_reset(struct mpp_dev *mpp)
{
	u32 val;
	u32 ret;

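	/* trigger the soft reset, then poll until the register reads back zero (5 us timeout) */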
	mpp_write(mpp, VDPU2_REG_SOFT_RESET, 1);
	ret = readl_relaxed_poll_timeout(mpp->reg_base + VDPU2_REG_SOFT_RESET,
					 val, !val, 0, 5);
	return ret;
}

static int vdpu_reset(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);
	u32 ret = 0;

	mpp_write(mpp, VDPU2_REG_DEC_EN, 0);
	mpp_write(mpp, VDPU2_REG_DEC_INT, 0);

	/* soft reset first */
	ret = vdpu_soft_reset(mpp);
	if (ret && dec->rst_a && dec->rst_h) {
		/* Don't skip this or iommu won't work after reset */
		mpp_err("soft reset failed, use cru reset!\n");
		mpp_debug(DEBUG_RESET, "reset in\n");
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		udelay(5);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_pmu_idle_request(mpp, false);
		mpp_debug(DEBUG_RESET, "reset out\n");
	}

	return 0;
}

static struct mpp_hw_ops vdpu_v2_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

static struct mpp_hw_ops vdpu_px30_hw_ops = {
	.init = vdpu_px30_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
	.set_grf = px30_workaround_combo_switch_grf,
};

static struct mpp_dev_ops vdpu_v2_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};

static struct mpp_dev_ops vdpu_px30_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_px30_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};

static const struct mpp_dev_var vdpu_v2_data = {
	.device_type = MPP_DEVICE_VDPU2,
	.hw_info = &vdpu_v2_hw_info,
	.trans_info = vdpu_v2_trans,
	.hw_ops = &vdpu_v2_hw_ops,
	.dev_ops = &vdpu_v2_dev_ops,
};

static const struct mpp_dev_var vdpu_px30_data = {
	.device_type = MPP_DEVICE_VDPU2,
	.hw_info = &vdpu_v2_hw_info,
	.trans_info = vdpu_v2_trans,
	.hw_ops = &vdpu_px30_hw_ops,
	.dev_ops = &vdpu_px30_dev_ops,
};

static const struct of_device_id mpp_vdpu2_dt_match[] = {
	{
		.compatible = "rockchip,vpu-decoder-v2",
		.data = &vdpu_v2_data,
	},
#ifdef CONFIG_CPU_PX30
	{
		.compatible = "rockchip,vpu-decoder-px30",
		.data = &vdpu_px30_data,
	},
#endif
	{},
};

static int vdpu_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct vdpu_dev *dec = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;

	dev_info(dev, "probe device\n");
	dec = devm_kzalloc(dev, sizeof(struct vdpu_dev), GFP_KERNEL);
	if (!dec)
		return -ENOMEM;
	mpp = &dec->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vdpu2_dt_match,
				      pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;

		mpp->core_id = of_alias_get_id(pdev->dev.of_node, "vdpu");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "register interrupter runtime failed\n");
		return -EINVAL;
	}

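	/*
	 * The decoder's post-processor is served by this same mpp_dev, so
	 * register it as the VDPU2_PP sub-device as well.
	 */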
	if (mpp->var->device_type == MPP_DEVICE_VDPU2) {
		mpp->srv->sub_devices[MPP_DEVICE_VDPU2_PP] = mpp;
		set_bit(MPP_DEVICE_VDPU2_PP, &mpp->srv->hw_support);
	}

	mpp->session_max_buffers = VDPU2_SESSION_MAX_BUFFERS;
	vdpu_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);
	dev_info(dev, "probing finish\n");

	return 0;
}

static int vdpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpp_dev *mpp = dev_get_drvdata(dev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(mpp);
	vdpu_procfs_remove(mpp);

	return 0;
}

struct platform_driver rockchip_vdpu2_driver = {
	.probe = vdpu_probe,
	.remove = vdpu_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = VDPU2_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpu2_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpu2_driver);