xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_vdpp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2022 Rockchip Electronics Co., Ltd
 *
 * author:
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/proc_fs.h>
#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"

#define VDPP_DRIVER_NAME		"mpp_vdpp"

#define VDPP_SESSION_MAX_BUFFERS	15
#define VDPP_REG_WORK_MODE		0x0008
#define VDPP_REG_VDPP_MODE		BIT(1)

#define to_vdpp_info(info)	\
		container_of(info, struct vdpp_hw_info, hw)
#define to_vdpp_task(task)	\
		container_of(task, struct vdpp_task, mpp_task)
#define to_vdpp_dev(dev)	\
		container_of(dev, struct vdpp_dev, mpp)

struct vdpp_hw_info {
	struct mpp_hw_info hw;

	/* register info */
	u32 start_base;
	u32 cfg_base;
	u32 work_mode_base;
	u32 gate_base;
	u32 rst_sta_base;
	u32 int_en_base;
	u32 int_clr_base;
	u32 int_sta_base; /* int_sta = int_raw_sta & int_en */
	u32 int_mask;
	u32 err_mask;
	/* register for zme */
	u32 zme_reg_off;
	u32 zme_reg_num;
	/* for soft reset */
	u32 bit_rst_en;
	u32 bit_rst_done;
};

struct vdpp_task {
	struct mpp_task mpp_task;
	enum MPP_CLOCK_MODE clk_mode;
	u32 *reg;
	u32 *zme_reg;

	struct reg_offset_info off_inf;
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

struct vdpp_dev {
	struct mpp_dev mpp;
	struct vdpp_hw_info *hw_info;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info sclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_s;
	/* for zme */
	void __iomem *zme_base;
};

static struct vdpp_hw_info vdpp_v1_hw_info = {
	.hw = {
		.reg_num = 53,
		.reg_id = 21,
		.reg_en = 0,
		.reg_start = 0,
		.reg_end = 52,
	},
	.start_base = 0x0000,
	.cfg_base = 0x0004,
	.work_mode_base = 0x0008,
	.gate_base = 0x0010,
	.rst_sta_base = 0x0014,
	.int_en_base = 0x0020,
	.int_clr_base = 0x0024,
	.int_sta_base = 0x0028,
	.int_mask = 0x0073,
	.err_mask = 0x0070,
	.zme_reg_off = 0x2000,
	.zme_reg_num = 530,
	.bit_rst_en = BIT(21),
	.bit_rst_done = BIT(0),
};

/*
 * file handle translate information
 */
static const u16 trans_tbl_vdpp[] = {
	24, 25, 26, 27,
};

#define VDPP_FMT_DEFAULT		0
static struct mpp_trans_info vdpp_v1_trans[] = {
	[VDPP_FMT_DEFAULT] = {
		.count = ARRAY_SIZE(trans_tbl_vdpp),
		.table = trans_tbl_vdpp,
	},
};

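/*
 * Translate the dma-buf fds carried in the task registers (the entries
 * listed in trans_tbl_vdpp) into device addresses, then apply any extra
 * offsets the session supplied via MPP_CMD_SET_REG_ADDR_OFFSET.
 */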
static int vdpp_process_reg_fd(struct mpp_session *session,
				 struct vdpp_task *task,
				 struct mpp_task_msgs *msgs)
{
	int ret = 0;

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					VDPP_FMT_DEFAULT, task->reg, &task->off_inf);
	if (ret)
		return ret;

	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
}

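/*
 * Walk the request array of one task: bounds-check each request against
 * either the vdpp register file or the zme window (offsets at or above
 * zme_reg_off), copy write payloads from user space into task->reg /
 * task->zme_reg, and remember read requests for the readback done in
 * vdpp_finish()/vdpp_result().
 */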
static int vdpp_extract_task_msg(struct vdpp_task *task,
				   struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct vdpp_hw_info *hw_info = to_vdpp_info(task->mpp_task.hw_info);

	for (i = 0; i < msgs->req_cnt; i++) {
		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			int req_base;
			int max_size;
			u8 *dst = NULL;

			if (req->offset >= hw_info->zme_reg_off) {
				req_base = hw_info->zme_reg_off;
				max_size = hw_info->zme_reg_num * sizeof(u32);
				dst = (u8 *)task->zme_reg;
			} else {
				req_base = 0;
				max_size = hw_info->hw.reg_num * sizeof(u32);
				dst = (u8 *)task->reg;
			}

			ret = mpp_check_req(req, req_base, max_size, 0, max_size);
			if (ret)
				return ret;

			dst += req->offset - req_base;
			if (copy_from_user(dst, req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++], req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			int req_base;
			int max_size;

			if (req->offset >= hw_info->zme_reg_off) {
				req_base = hw_info->zme_reg_off;
				max_size = hw_info->zme_reg_num * sizeof(u32);
			} else {
				req_base = 0;
				max_size = hw_info->hw.reg_num * sizeof(u32);
			}

			ret = mpp_check_req(req, req_base, max_size, 0, max_size);
			if (ret)
				return ret;

			memcpy(&task->r_reqs[task->r_req_cnt++], req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

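/*
 * Allocate the per-task context. The vdpp and zme register images share a
 * single allocation: task->reg holds hw.reg_num words and task->zme_reg
 * points right behind it. Requests are extracted here and, unless
 * MPP_FLAGS_REG_FD_NO_TRANS is set, buffer fds are translated before the
 * task is queued.
 */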
static void *vdpp_alloc_task(struct mpp_session *session,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	u32 reg_num;
	struct mpp_task *mpp_task = NULL;
	struct vdpp_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;
	struct vdpp_hw_info *hw_info = to_vdpp_info(mpp->var->hw_info);

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;
	/* alloc reg buffer */
	reg_num = hw_info->hw.reg_num + hw_info->zme_reg_num;
	task->reg = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
	if (!task->reg)
		goto free_task;
	task->zme_reg = task->reg + hw_info->hw.reg_num;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpp_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpp_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task->reg);
free_task:
	kfree(task);
	return NULL;
}

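/*
 * The zme block sits in a separately mapped register window (vdpp->zme_base),
 * so it is accessed with plain writel_relaxed()/readl_relaxed() loops instead
 * of the mpp_write()/mpp_read() helpers used for the vdpp register file.
 */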
static int vdpp_write_req_zme(void __iomem *reg_base,
			      u32 *regs,
			      u32 start_idx, u32 end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		int reg = i * sizeof(u32);

		mpp_debug(DEBUG_SET_REG_L2, "zme_reg[%03d]: %04x: 0x%08x\n", i, reg, regs[i]);
		writel_relaxed(regs[i], reg_base + reg);
	}

	return 0;
}

static int vdpp_read_req_zme(void __iomem *reg_base,
			     u32 *regs,
			     u32 start_idx, u32 end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		int reg = i * sizeof(u32);

		regs[i] = readl_relaxed(reg_base + reg);
		mpp_debug(DEBUG_GET_REG_L2, "zme_reg[%03d]: %04x: 0x%08x\n", i, reg, regs[i]);
	}

	return 0;
}

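/*
 * Replay the recorded write requests into the hardware, routing each range
 * to the zme window or the vdpp register file, flush the IOMMU TLB, and
 * finally kick the core by writing the user-supplied start/enable word to
 * start_base after a write barrier so all configuration lands first.
 */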
static int vdpp_run(struct mpp_dev *mpp,
		      struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_task *task = to_vdpp_task(mpp_task);
	struct vdpp_hw_info *hw_info = vdpp->hw_info;
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	reg_en = hw_info->hw.reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];

		if (req->offset >= hw_info->zme_reg_off) {
			/* set registers for zme */
			int off = req->offset - hw_info->zme_reg_off;
			int s = off / sizeof(u32);
			int e = s + req->size / sizeof(u32);

			if (!vdpp->zme_base)
				continue;
			vdpp_write_req_zme(vdpp->zme_base, task->zme_reg, s, e);
		} else {
			/* set registers for vdpp */
			int s = req->offset / sizeof(u32);
			int e = s + req->size / sizeof(u32);

			mpp_write_req(mpp, task->reg, s, e, reg_en);
		}
	}

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
	/* Flush the registers before starting the device */
	wmb();
	mpp_write(mpp, hw_info->start_base, task->reg[reg_en]);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}

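/*
 * Called from the threaded interrupt path once the task is done: read the
 * requested ranges back from the hardware into the task register images and
 * record the interrupt status latched at irq time for user space.
 */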
static int vdpp_finish(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	u32 i;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_task *task = to_vdpp_task(mpp_task);
	struct vdpp_hw_info *hw_info = vdpp->hw_info;

	mpp_debug_enter();

	for (i = 0; i < task->r_req_cnt; i++) {
		struct mpp_request *req = &task->r_reqs[i];

		if (req->offset >= hw_info->zme_reg_off) {
			int off = req->offset - hw_info->zme_reg_off;
			int s = off / sizeof(u32);
			int e = s + req->size / sizeof(u32);

			if (!vdpp->zme_base)
				continue;
			vdpp_read_req_zme(vdpp->zme_base, task->zme_reg, s, e);
		} else {
			int s = req->offset / sizeof(u32);
			int e = s + req->size / sizeof(u32);

			mpp_read_req(mpp, task->reg, s, e);
		}
	}
	task->reg[hw_info->int_sta_base] = task->irq_status;

	mpp_debug_leave();

	return 0;
}

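/*
 * Copy the captured register contents back to the user buffers recorded in
 * r_reqs; offsets in the zme window are served from task->zme_reg.
 */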
static int vdpp_result(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task,
			 struct mpp_task_msgs *msgs)
{
	u32 i;
	struct vdpp_task *task = to_vdpp_task(mpp_task);
	struct vdpp_hw_info *hw_info = to_vdpp_info(mpp_task->hw_info);

	for (i = 0; i < task->r_req_cnt; i++) {
		struct mpp_request *req;

		req = &task->r_reqs[i];
		/* registers in the zme (L2) window */
		if (req->offset >= hw_info->zme_reg_off) {
			struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
			int off = req->offset - hw_info->zme_reg_off;

			if (!vdpp->zme_base)
				continue;
			if (copy_to_user(req->data,
					 (u8 *)task->zme_reg + off,
					 req->size)) {
				mpp_err("copy_to_user reg_l2 fail\n");
				return -EIO;
			}
		} else {
			if (copy_to_user(req->data,
					 (u8 *)task->reg + req->offset,
					 req->size)) {
				mpp_err("copy_to_user reg fail\n");
				return -EIO;
			}
		}
	}

	return 0;
}

static int vdpp_free_task(struct mpp_session *session,
			    struct mpp_task *mpp_task)
{
	struct vdpp_task *task = to_vdpp_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task->reg);
	kfree(task);

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int vdpp_procfs_remove(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	if (vdpp->procfs) {
		proc_remove(vdpp->procfs);
		vdpp->procfs = NULL;
	}

	return 0;
}

static int vdpp_procfs_init(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	vdpp->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(vdpp->procfs)) {
		mpp_err("failed on open procfs\n");
		vdpp->procfs = NULL;
		return -EIO;
	}
	mpp_procfs_create_u32("aclk", 0644,
			      vdpp->procfs, &vdpp->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      vdpp->procfs, &mpp->session_max_buffers);
	return 0;
}
#else
static inline int vdpp_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vdpp_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

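/*
 * Look up the "aclk"/"hclk"/"sclk" clocks and the "rst_a"/"rst_h"/"rst_s"
 * reset controls described in the device tree and pin the default aclk rate
 * to 300 MHz. Missing resources are only warned about, not treated as fatal.
 */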
static int vdpp_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &vdpp->aclk_info, "aclk");
	if (ret)
		mpp_err("failed on clk_get aclk\n");
	ret = mpp_get_clk_info(mpp, &vdpp->hclk_info, "hclk");
	if (ret)
		mpp_err("failed on clk_get hclk\n");
	ret = mpp_get_clk_info(mpp, &vdpp->sclk_info, "sclk");
	if (ret)
		mpp_err("failed on clk_get sclk\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&vdpp->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	vdpp->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "rst_a");
	if (!vdpp->rst_a)
		mpp_err("No aclk reset resource define\n");
	vdpp->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "rst_h");
	if (!vdpp->rst_h)
		mpp_err("No hclk reset resource define\n");
	vdpp->rst_s = mpp_reset_control_get(mpp, RST_TYPE_CORE, "rst_s");
	if (!vdpp->rst_s)
		mpp_err("No sclk reset resource define\n");

	return 0;
}

static int vdpp_clk_on(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	mpp_clk_safe_enable(vdpp->aclk_info.clk);
	mpp_clk_safe_enable(vdpp->hclk_info.clk);
	mpp_clk_safe_enable(vdpp->sclk_info.clk);

	return 0;
}

static int vdpp_clk_off(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	mpp_clk_safe_disable(vdpp->aclk_info.clk);
	mpp_clk_safe_disable(vdpp->hclk_info.clk);
	mpp_clk_safe_disable(vdpp->sclk_info.clk);

	return 0;
}

static int vdpp_set_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_task *task = to_vdpp_task(mpp_task);

	mpp_clk_set_rate(&vdpp->aclk_info, task->clk_mode);

	return 0;
}

static int vdpp_reduce_freq(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);

	mpp_clk_set_rate(&vdpp->aclk_info, CLK_MODE_REDUCE);

	return 0;
}

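/*
 * Hard irq half. The interrupt line is shared (IRQF_SHARED), so first check
 * the work-mode register to confirm the vdpp core raised it, then disable
 * and clear the interrupt, stop the core and defer to the threaded handler.
 */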
static int vdpp_irq(struct mpp_dev *mpp)
{
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_hw_info *hw_info = vdpp->hw_info;
	u32 work_mode = mpp_read(mpp, VDPP_REG_WORK_MODE);

	if (!(work_mode & VDPP_REG_VDPP_MODE))
		return IRQ_NONE;
	mpp->irq_status = mpp_read(mpp, hw_info->int_sta_base);
	if (!(mpp->irq_status & hw_info->int_mask))
		return IRQ_NONE;
	mpp_write(mpp, hw_info->int_en_base, 0);
	mpp_write(mpp, hw_info->int_clr_base, mpp->irq_status);
	/* ensure the hardware is in the off state */
	mpp_write(mpp, hw_info->start_base, 0);

	return IRQ_WAKE_THREAD;
}

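/*
 * Threaded irq half: account the task run time, hand the latched interrupt
 * status to the task, request a reset when an error bit is set and complete
 * the task towards the session.
 */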
static int vdpp_isr(struct mpp_dev *mpp)
{
	struct vdpp_task *task = NULL;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpp_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	if (task->irq_status & vdpp->hw_info->err_mask)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

static int _vdpp_reset(struct mpp_dev *mpp, struct vdpp_dev *vdpp)
{
	if (vdpp->rst_a && vdpp->rst_h && vdpp->rst_s) {
		mpp_debug(DEBUG_RESET, "reset in\n");

		/* Don't skip this or iommu won't work after reset */
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(vdpp->rst_a);
		mpp_safe_reset(vdpp->rst_h);
		mpp_safe_reset(vdpp->rst_s);
		udelay(5);
		mpp_safe_unreset(vdpp->rst_a);
		mpp_safe_unreset(vdpp->rst_h);
		mpp_safe_unreset(vdpp->rst_s);
		mpp_pmu_idle_request(mpp, false);

		mpp_debug(DEBUG_RESET, "reset out\n");
	}

	return 0;
}

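/*
 * Try the on-core soft reset first: assert bit_rst_en in the config register
 * and poll the reset status register for bit_rst_done. If that times out,
 * fall back to the CRU resets in _vdpp_reset(), which needs the PMU idle
 * request so the iommu keeps working afterwards.
 */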
static int vdpp_reset(struct mpp_dev *mpp)
{
	int ret = 0;
	u32 rst_status = 0;
	struct vdpp_dev *vdpp = to_vdpp_dev(mpp);
	struct vdpp_hw_info *hw_info = vdpp->hw_info;

	/* try soft reset first */
	mpp_write(mpp, hw_info->cfg_base, hw_info->bit_rst_en);
	ret = readl_relaxed_poll_timeout(mpp->reg_base + hw_info->rst_sta_base,
					 rst_status,
					 rst_status & hw_info->bit_rst_done,
					 0, 5);
	if (ret) {
		mpp_err("soft reset timeout, use cru reset\n");
		return _vdpp_reset(mpp, vdpp);
	}

	mpp_write(mpp, hw_info->rst_sta_base, 0);

	/* ensure the hardware is in the off state */
	mpp_write(mpp, hw_info->start_base, 0);

	return 0;
}

static struct mpp_hw_ops vdpp_v1_hw_ops = {
	.init = vdpp_init,
	.clk_on = vdpp_clk_on,
	.clk_off = vdpp_clk_off,
	.set_freq = vdpp_set_freq,
	.reduce_freq = vdpp_reduce_freq,
	.reset = vdpp_reset,
};

static struct mpp_dev_ops vdpp_v1_dev_ops = {
	.alloc_task = vdpp_alloc_task,
	.run = vdpp_run,
	.irq = vdpp_irq,
	.isr = vdpp_isr,
	.finish = vdpp_finish,
	.result = vdpp_result,
	.free_task = vdpp_free_task,
};

static const struct mpp_dev_var vdpp_v1_data = {
	.device_type = MPP_DEVICE_VDPP,
	.hw_info = &vdpp_v1_hw_info.hw,
	.trans_info = vdpp_v1_trans,
	.hw_ops = &vdpp_v1_hw_ops,
	.dev_ops = &vdpp_v1_dev_ops,
};

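/*
 * For orientation, a hedged sketch of what a matching device-tree node might
 * look like. Everything except the names that appear in this file
 * ("rockchip,vdpp-v1", "zme_regs", "aclk"/"hclk"/"sclk", "rst_a"/"rst_h"/
 * "rst_s") is a placeholder; the actual binding is defined by the SoC dtsi
 * and the mpp_common/mpp_service helpers, not by this driver alone.
 *
 *	vdpp: vdpp@... {
 *		compatible = "rockchip,vdpp-v1";
 *		reg = <...>, <...>;
 *		reg-names = "...", "zme_regs";
 *		interrupts = <...>;
 *		clocks = <...>, <...>, <...>;
 *		clock-names = "aclk", "hclk", "sclk";
 *		resets = <...>, <...>, <...>;
 *		reset-names = "rst_a", "rst_h", "rst_s";
 *	};
 */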
static const struct of_device_id mpp_vdpp_dt_match[] = {
	{
		.compatible = "rockchip,vdpp-v1",
		.data = &vdpp_v1_data,
	},
	{},
};

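/*
 * Probe: register the common mpp device first, then map the optional
 * "zme_regs" window (all zme accesses are skipped when it is absent), hook
 * up the shared interrupt and finally attach the device to the mpp service.
 */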
static int vdpp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vdpp_dev *vdpp = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;
	struct resource *res;

	dev_info(dev, "probe device\n");
	vdpp = devm_kzalloc(dev, sizeof(struct vdpp_dev), GFP_KERNEL);
	if (!vdpp)
		return -ENOMEM;
	platform_set_drvdata(pdev, vdpp);

	mpp = &vdpp->mpp;
	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vdpp_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
		mpp->core_id = -1;
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}
	/* map zme regs */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "zme_regs");
	if (res) {
		vdpp->zme_base = devm_ioremap(dev, res->start, resource_size(res));
		if (!vdpp->zme_base) {
			dev_err(dev, "ioremap failed for resource %pR\n", res);
			return -ENOMEM;
		}
	}
	/* get irq */
	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "register interrupter runtime failed\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = VDPP_SESSION_MAX_BUFFERS;
	vdpp->hw_info = to_vdpp_info(mpp->var->hw_info);
	vdpp_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);

	dev_info(dev, "probing finish\n");

	return 0;
}

static int vdpp_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vdpp_dev *vdpp = platform_get_drvdata(pdev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(&vdpp->mpp);
	vdpp_procfs_remove(&vdpp->mpp);

	return 0;
}

static void vdpp_shutdown(struct platform_device *pdev)
{
	int ret;
	int val;
	struct device *dev = &pdev->dev;
	struct vdpp_dev *vdpp = platform_get_drvdata(pdev);
	struct mpp_dev *mpp = &vdpp->mpp;

	dev_info(dev, "shutdown device\n");

	atomic_inc(&mpp->srv->shutdown_request);
	ret = readx_poll_timeout(atomic_read,
				 &mpp->task_count,
				 val, val == 0, 20000, 200000);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "wait total running time out\n");
}

struct platform_driver rockchip_vdpp_driver = {
	.probe = vdpp_probe,
	.remove = vdpp_remove,
	.shutdown = vdpp_shutdown,
	.driver = {
		.name = VDPP_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpp_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpp_driver);
785