// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2022 Rockchip Electronics Co., Ltd
 *
 * author:
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/proc_fs.h>
#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"

#define VDPP_DRIVER_NAME		"mpp_vdpp"

#define VDPP_SESSION_MAX_BUFFERS	15
#define VDPP_REG_WORK_MODE		0x0008
#define VDPP_REG_VDPP_MODE		BIT(1)

#define to_vdpp_info(info)	\
		container_of(info, struct vdpp_hw_info, hw)
#define to_vdpp_task(task)	\
		container_of(task, struct vdpp_task, mpp_task)
#define to_vdpp_dev(dev)	\
		container_of(dev, struct vdpp_dev, mpp)

struct vdpp_hw_info {
	struct mpp_hw_info hw;

	/* register info */
	u32 start_base;
	u32 cfg_base;
	u32 work_mode_base;
	u32 gate_base;
	u32 rst_sta_base;
	u32 int_en_base;
	u32 int_clr_base;
	u32 int_sta_base; /* int_sta = int_raw_sta & int_en */
	u32 int_mask;
	u32 err_mask;
	/* register for zme */
	u32 zme_reg_off;
	u32 zme_reg_num;
	/* for soft reset */
	u32 bit_rst_en;
	u32 bit_rst_done;
};

struct vdpp_task {
	struct mpp_task mpp_task;
	enum MPP_CLOCK_MODE clk_mode;
	u32 *reg;
	u32 *zme_reg;

	struct reg_offset_info off_inf;
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

struct vdpp_dev {
	struct mpp_dev mpp;
	struct vdpp_hw_info *hw_info;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info sclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_s;
	/* for zme */
	void __iomem *zme_base;
};

static struct vdpp_hw_info vdpp_v1_hw_info = {
	.hw = {
		.reg_num = 53,
		.reg_id = 21,
		.reg_en = 0,
		.reg_start = 0,
		.reg_end = 52,
	},
	.start_base = 0x0000,
	.cfg_base = 0x0004,
	.work_mode_base = 0x0008,
	.gate_base = 0x0010,
	.rst_sta_base = 0x0014,
	.int_en_base = 0x0020,
	.int_clr_base = 0x0024,
	.int_sta_base = 0x0028,
	.int_mask = 0x0073,
	.err_mask = 0x0070,
	.zme_reg_off = 0x2000,
	.zme_reg_num = 530,
	.bit_rst_en = BIT(21),
	.bit_rst_done = BIT(0),
};

/*
 * file handle translation information
 */
static const u16 trans_tbl_vdpp[] = {
	24, 25, 26, 27,
};

#define VDPP_FMT_DEFAULT 0
static struct mpp_trans_info vdpp_v1_trans[] = {
	[VDPP_FMT_DEFAULT] = {
		.count = ARRAY_SIZE(trans_tbl_vdpp),
		.table = trans_tbl_vdpp,
	},
};
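/*
 * Added note: trans_tbl_vdpp lists the register indices that carry dma-buf
 * fds from userspace (registers 24..27 here). During alloc_task,
 * mpp_translate_reg_address() swaps each fd for the buffer address the
 * kernel resolves it to; which buffers these indices describe is defined
 * by the hardware register layout and is not spelled out in this file.
 */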

static int vdpp_process_reg_fd(struct mpp_session *session,
			       struct vdpp_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret = 0;

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					VDPP_FMT_DEFAULT, task->reg, &task->off_inf);
	if (ret)
		return ret;

	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
}

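/*
 * Added note: requests whose offset is at or above zme_reg_off (0x2000 on
 * v1 hardware) target the separate zme register bank and are copied into
 * task->zme_reg; everything below that goes to the core VDPP registers in
 * task->reg. Only write and read requests are queued here; the actual
 * hardware access happens later in vdpp_run()/vdpp_finish().
 */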
static int vdpp_extract_task_msg(struct vdpp_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct vdpp_hw_info *hw_info = to_vdpp_info(task->mpp_task.hw_info);

	for (i = 0; i < msgs->req_cnt; i++) {
		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			int req_base;
			int max_size;
			u8 *dst = NULL;

			if (req->offset >= hw_info->zme_reg_off) {
				req_base = hw_info->zme_reg_off;
				max_size = hw_info->zme_reg_num * sizeof(u32);
				dst = (u8 *)task->zme_reg;
			} else {
				req_base = 0;
				max_size = hw_info->hw.reg_num * sizeof(u32);
				dst = (u8 *)task->reg;
			}

			ret = mpp_check_req(req, req_base, max_size, 0, max_size);
			if (ret)
				return ret;

			dst += req->offset - req_base;
			if (copy_from_user(dst, req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++], req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			int req_base;
			int max_size;

			if (req->offset >= hw_info->zme_reg_off) {
				req_base = hw_info->zme_reg_off;
				max_size = hw_info->zme_reg_num * sizeof(u32);
			} else {
				req_base = 0;
				max_size = hw_info->hw.reg_num * sizeof(u32);
			}

			ret = mpp_check_req(req, req_base, max_size, 0, max_size);
			if (ret)
				return ret;

			memcpy(&task->r_reqs[task->r_req_cnt++], req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

static void *vdpp_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	u32 reg_num;
	struct mpp_task *mpp_task = NULL;
	struct vdpp_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;
	struct vdpp_hw_info *hw_info = to_vdpp_info(mpp->var->hw_info);

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;
	/* alloc reg buffer */
	reg_num = hw_info->hw.reg_num + hw_info->zme_reg_num;
	task->reg = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
	if (!task->reg)
		goto free_task;
	task->zme_reg = task->reg + hw_info->hw.reg_num;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpp_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpp_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task->reg);
free_task:
	kfree(task);
	return NULL;
}

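/*
 * Added note: the zme block sits behind its own MMIO mapping
 * (vdpp->zme_base) rather than the main mpp register window, so it is
 * programmed directly with readl/writel_relaxed. start_idx/end_idx are
 * 32-bit word indices relative to the start of the zme register bank.
 */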
static int vdpp_write_req_zme(void __iomem *reg_base,
			      u32 *regs,
			      u32 start_idx, u32 end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		int reg = i * sizeof(u32);

		mpp_debug(DEBUG_SET_REG_L2, "zme_reg[%03d]: %04x: 0x%08x\n", i, reg, regs[i]);
		writel_relaxed(regs[i], reg_base + reg);
	}

	return 0;
}

static int vdpp_read_req_zme(void __iomem *reg_base,
			     u32 *regs,
			     u32 start_idx, u32 end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		int reg = i * sizeof(u32);

		regs[i] = readl_relaxed(reg_base + reg);
		mpp_debug(DEBUG_GET_REG_L2, "zme_reg[%03d]: %04x: 0x%08x\n", i, reg, regs[i]);
	}

	return 0;
}

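/*
 * Added note on the start sequence: vdpp_run() replays the queued write
 * requests (zme registers through zme_base, core registers through
 * mpp_write_req with the enable register index passed so the common
 * helper can treat it specially), flushes the IOMMU TLB, then issues a
 * write barrier before writing the start register so everything is
 * visible to the hardware before it is kicked off.
 */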
static int vdpp_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_task *task = to_vdpp_task(mpp_task);
	struct vdpp_hw_info *hw_info = vdpp->hw_info;
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	reg_en = hw_info->hw.reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];

		if (req->offset >= hw_info->zme_reg_off) {
			/* set registers for zme */
			int off = req->offset - hw_info->zme_reg_off;
			int s = off / sizeof(u32);
			int e = s + req->size / sizeof(u32);

			if (!vdpp->zme_base)
				continue;
			vdpp_write_req_zme(vdpp->zme_base, task->zme_reg, s, e);
		} else {
			/* set registers for vdpp */
			int s = req->offset / sizeof(u32);
			int e = s + req->size / sizeof(u32);

			mpp_write_req(mpp, task->reg, s, e, reg_en);
		}
	}

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
	/* flush the registers before starting the device */
	wmb();
	mpp_write(mpp, hw_info->start_base, task->reg[reg_en]);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}

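/*
 * Added note: vdpp_finish() runs after the interrupt signals completion.
 * It replays the queued read requests, copying zme registers through
 * zme_base and core registers through mpp_read_req into the per-task
 * shadow, then records the interrupt status captured by vdpp_isr() so
 * the result path can hand it back to userspace.
 */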
static int vdpp_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_task *task = to_vdpp_task(mpp_task);
	struct vdpp_hw_info *hw_info = vdpp->hw_info;

	mpp_debug_enter();

	for (i = 0; i < task->r_req_cnt; i++) {
		struct mpp_request *req = &task->r_reqs[i];

		if (req->offset >= hw_info->zme_reg_off) {
			int off = req->offset - hw_info->zme_reg_off;
			int s = off / sizeof(u32);
			int e = s + req->size / sizeof(u32);

			if (!vdpp->zme_base)
				continue;
			vdpp_read_req_zme(vdpp->zme_base, task->zme_reg, s, e);
		} else {
			int s = req->offset / sizeof(u32);
			int e = s + req->size / sizeof(u32);

			mpp_read_req(mpp, task->reg, s, e);
		}
	}
	task->reg[hw_info->int_sta_base] = task->irq_status;

	mpp_debug_leave();

	return 0;
}

static int vdpp_result(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task,
		       struct mpp_task_msgs *msgs)
{
	u32 i;
	struct vdpp_task *task = to_vdpp_task(mpp_task);
	struct vdpp_hw_info *hw_info = to_vdpp_info(mpp_task->hw_info);

	for (i = 0; i < task->r_req_cnt; i++) {
		struct mpp_request *req;

		req = &task->r_reqs[i];
		/* copy back zme (L2) registers */
		if (req->offset >= hw_info->zme_reg_off) {
			struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
			int off = req->offset - hw_info->zme_reg_off;

			if (!vdpp->zme_base)
				continue;
			if (copy_to_user(req->data,
					 (u8 *)task->zme_reg + off,
					 req->size)) {
				mpp_err("copy_to_user reg_l2 failed\n");
				return -EIO;
			}
		} else {
			if (copy_to_user(req->data,
					 (u8 *)task->reg + req->offset,
					 req->size)) {
				mpp_err("copy_to_user reg failed\n");
				return -EIO;
			}
		}
	}

	return 0;
}

static int vdpp_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	struct vdpp_task *task = to_vdpp_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task->reg);
	kfree(task);

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int vdpp_procfs_remove(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	if (vdpp->procfs) {
		proc_remove(vdpp->procfs);
		vdpp->procfs = NULL;
	}

	return 0;
}

static int vdpp_procfs_init(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	vdpp->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(vdpp->procfs)) {
		mpp_err("failed to create procfs dir\n");
		vdpp->procfs = NULL;
		return -EIO;
	}
	mpp_procfs_create_u32("aclk", 0644,
			      vdpp->procfs, &vdpp->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      vdpp->procfs, &mpp->session_max_buffers);
	return 0;
}
#else
static inline int vdpp_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vdpp_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

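/*
 * Added note: vdpp_init() only warns when a clock or reset lookup fails;
 * the handles may be left NULL and the mpp clk and reset helpers are
 * assumed to tolerate that. Only aclk is given an explicit default rate
 * (300 MHz) here.
 */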
static int vdpp_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &vdpp->aclk_info, "aclk");
	if (ret)
		mpp_err("failed on clk_get aclk\n");
	ret = mpp_get_clk_info(mpp, &vdpp->hclk_info, "hclk");
	if (ret)
		mpp_err("failed on clk_get hclk\n");
	ret = mpp_get_clk_info(mpp, &vdpp->sclk_info, "sclk");
	if (ret)
		mpp_err("failed on clk_get sclk\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&vdpp->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	vdpp->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "rst_a");
	if (!vdpp->rst_a)
		mpp_err("No aclk reset resource defined\n");
	vdpp->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "rst_h");
	if (!vdpp->rst_h)
		mpp_err("No hclk reset resource defined\n");
	vdpp->rst_s = mpp_reset_control_get(mpp, RST_TYPE_CORE, "rst_s");
	if (!vdpp->rst_s)
		mpp_err("No sclk reset resource defined\n");

	return 0;
}

static int vdpp_clk_on(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	mpp_clk_safe_enable(vdpp->aclk_info.clk);
	mpp_clk_safe_enable(vdpp->hclk_info.clk);
	mpp_clk_safe_enable(vdpp->sclk_info.clk);

	return 0;
}

static int vdpp_clk_off(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	mpp_clk_safe_disable(vdpp->aclk_info.clk);
	mpp_clk_safe_disable(vdpp->hclk_info.clk);
	mpp_clk_safe_disable(vdpp->sclk_info.clk);

	return 0;
}

static int vdpp_set_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_task *task = to_vdpp_task(mpp_task);

	mpp_clk_set_rate(&vdpp->aclk_info, task->clk_mode);

	return 0;
}

static int vdpp_reduce_freq(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	mpp_clk_set_rate(&vdpp->aclk_info, CLK_MODE_REDUCE);

	return 0;
}

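/*
 * Added note: the IRQ line is shared (IRQF_SHARED in probe), so the hard
 * IRQ handler first checks the WORK_MODE register; if the VDPP mode bit
 * is not set, or no enabled interrupt bit is pending, the interrupt
 * belongs to another device and IRQ_NONE is returned. Otherwise the
 * interrupts are masked, acknowledged, and handling is deferred to the
 * threaded ISR.
 */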
static int vdpp_irq(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_hw_info *hw_info = vdpp->hw_info;
	u32 work_mode = mpp_read(mpp, VDPP_REG_WORK_MODE);

	if (!(work_mode & VDPP_REG_VDPP_MODE))
		return IRQ_NONE;
	mpp->irq_status = mpp_read(mpp, hw_info->int_sta_base);
	if (!(mpp->irq_status & hw_info->int_mask))
		return IRQ_NONE;
	mpp_write(mpp, hw_info->int_en_base, 0);
	mpp_write(mpp, hw_info->int_clr_base, mpp->irq_status);

	/* ensure the hardware is stopped */
	mpp_write(mpp, hw_info->start_base, 0);

	return IRQ_WAKE_THREAD;
}

static int vdpp_isr(struct mpp_dev *mpp)
{
	struct vdpp_task *task = NULL;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpp_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	if (task->irq_status & vdpp->hw_info->err_mask)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

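/*
 * Added note: _vdpp_reset() is the CRU (clock and reset unit) fallback
 * path. The PMU idle request around the reset pulse keeps the power
 * domain quiescent while the block is reset; skipping it is known to
 * leave the IOMMU in a broken state, as the in-line comment warns.
 */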
static int _vdpp_reset(struct mpp_dev *mpp, struct vdpp_dev *vdpp)
{
	if (vdpp->rst_a && vdpp->rst_h && vdpp->rst_s) {
		mpp_debug(DEBUG_RESET, "reset in\n");

		/* Don't skip this or iommu won't work after reset */
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(vdpp->rst_a);
		mpp_safe_reset(vdpp->rst_h);
		mpp_safe_reset(vdpp->rst_s);
		udelay(5);
		mpp_safe_unreset(vdpp->rst_a);
		mpp_safe_unreset(vdpp->rst_h);
		mpp_safe_unreset(vdpp->rst_s);
		mpp_pmu_idle_request(mpp, false);

		mpp_debug(DEBUG_RESET, "reset out\n");
	}

	return 0;
}

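/*
 * Added note: vdpp_reset() first attempts the block's own soft reset by
 * setting the reset-enable bit in the config register and polling the
 * reset-status register (for up to 5 us). Only if that times out does it
 * fall back to the heavier CRU reset in _vdpp_reset().
 */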
static int vdpp_reset(struct mpp_dev *mpp)
{
	int ret = 0;
	u32 rst_status = 0;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_hw_info *hw_info = vdpp->hw_info;

	/* try soft reset first */
	mpp_write(mpp, hw_info->cfg_base, hw_info->bit_rst_en);
	ret = readl_relaxed_poll_timeout(mpp->reg_base + hw_info->rst_sta_base,
					 rst_status,
					 rst_status & hw_info->bit_rst_done,
					 0, 5);
	if (ret) {
		mpp_err("soft reset timeout, use cru reset\n");
		return _vdpp_reset(mpp, vdpp);
	}

	mpp_write(mpp, hw_info->rst_sta_base, 0);

	/* ensure the hardware is stopped */
	mpp_write(mpp, hw_info->start_base, 0);

	return 0;
}

static struct mpp_hw_ops vdpp_v1_hw_ops = {
	.init = vdpp_init,
	.clk_on = vdpp_clk_on,
	.clk_off = vdpp_clk_off,
	.set_freq = vdpp_set_freq,
	.reduce_freq = vdpp_reduce_freq,
	.reset = vdpp_reset,
};

static struct mpp_dev_ops vdpp_v1_dev_ops = {
	.alloc_task = vdpp_alloc_task,
	.run = vdpp_run,
	.irq = vdpp_irq,
	.isr = vdpp_isr,
	.finish = vdpp_finish,
	.result = vdpp_result,
	.free_task = vdpp_free_task,
};

static const struct mpp_dev_var vdpp_v1_data = {
	.device_type = MPP_DEVICE_VDPP,
	.hw_info = &vdpp_v1_hw_info.hw,
	.trans_info = vdpp_v1_trans,
	.hw_ops = &vdpp_v1_hw_ops,
	.dev_ops = &vdpp_v1_dev_ops,
};

static const struct of_device_id mpp_vdpp_dt_match[] = {
	{
		.compatible = "rockchip,vdpp-v1",
		.data = &vdpp_v1_data,
	},
	{},
};
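/*
 * Illustrative device tree fragment (sketch only, not an authoritative
 * binding). The names follow what this driver actually looks up: the
 * "zme_regs" reg entry, the "aclk"/"hclk"/"sclk" clocks and the
 * "rst_a"/"rst_h"/"rst_s" resets; the node address, the first reg-name
 * and all phandles are placeholders.
 *
 *	vdpp: vdpp@... {
 *		compatible = "rockchip,vdpp-v1";
 *		reg = <...>, <...>;
 *		reg-names = "...", "zme_regs";
 *		interrupts = <...>;
 *		clocks = <...>, <...>, <...>;
 *		clock-names = "aclk", "hclk", "sclk";
 *		resets = <...>, <...>, <...>;
 *		reset-names = "rst_a", "rst_h", "rst_s";
 *	};
 */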

static int vdpp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vdpp_dev *vdpp = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;
	struct resource *res;

	dev_info(dev, "probe device\n");
	vdpp = devm_kzalloc(dev, sizeof(struct vdpp_dev), GFP_KERNEL);
	if (!vdpp)
		return -ENOMEM;
	platform_set_drvdata(pdev, vdpp);

	mpp = &vdpp->mpp;
	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vdpp_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
		mpp->core_id = -1;
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}
	/* map zme regs */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "zme_regs");
	if (res) {
		vdpp->zme_base = devm_ioremap(dev, res->start, resource_size(res));
		if (!vdpp->zme_base) {
			dev_err(dev, "ioremap failed for resource %pR\n", res);
			return -ENOMEM;
		}
	}
	/* get irq */
	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "register interrupt handler failed\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = VDPP_SESSION_MAX_BUFFERS;
	vdpp->hw_info = to_vdpp_info(mpp->var->hw_info);
	vdpp_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);

	dev_info(dev, "probe finished\n");

	return 0;
}

static int vdpp_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vdpp_dev *vdpp = platform_get_drvdata(pdev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(&vdpp->mpp);
	vdpp_procfs_remove(&vdpp->mpp);

	return 0;
}

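/*
 * Added note: on shutdown the driver raises the service-wide shutdown
 * request and then polls the outstanding task counter, giving in-flight
 * tasks up to roughly 200 ms to drain before warning and giving up.
 */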
static void vdpp_shutdown(struct platform_device *pdev)
{
	int ret;
	int val;
	struct device *dev = &pdev->dev;
	struct vdpp_dev *vdpp = platform_get_drvdata(pdev);
	struct mpp_dev *mpp = &vdpp->mpp;

	dev_info(dev, "shutdown device\n");

	atomic_inc(&mpp->srv->shutdown_request);
	ret = readx_poll_timeout(atomic_read,
				 &mpp->task_count,
				 val, val == 0, 20000, 200000);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "wait for running tasks timed out\n");
}

struct platform_driver rockchip_vdpp_driver = {
	.probe = vdpp_probe,
	.remove = vdpp_remove,
	.shutdown = vdpp_shutdown,
	.driver = {
		.name = VDPP_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpp_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpp_driver);