xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_vdpu2.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/proc_fs.h>
#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"
#include "hack/mpp_hack_px30.h"

#define VDPU2_DRIVER_NAME		"mpp_vdpu2"

#define VDPU2_SESSION_MAX_BUFFERS	40
/* The maximum number of registers across all versions */
#define VDPU2_REG_NUM			159
#define VDPU2_REG_HW_ID_INDEX		-1 /* INVALID */
#define VDPU2_REG_START_INDEX		50
#define VDPU2_REG_END_INDEX		158

#define VDPU2_REG_SYS_CTRL			0x0d4
#define VDPU2_REG_SYS_CTRL_INDEX		(53)
#define VDPU2_GET_FORMAT(x)			((x) & 0xf)
#define VDPU2_FMT_H264D				0
#define VDPU2_FMT_MPEG4D			1
#define VDPU2_FMT_H263D				2
#define VDPU2_FMT_JPEGD				3
#define VDPU2_FMT_VC1D				4
#define VDPU2_FMT_MPEG2D			5
#define VDPU2_FMT_MPEG1D			6
#define VDPU2_FMT_VP6D				7
#define VDPU2_FMT_RESERVED			8
#define VDPU2_FMT_VP7D				9
#define VDPU2_FMT_VP8D				10
#define VDPU2_FMT_AVSD				11

#define VDPU2_REG_DEC_INT			0x0dc
#define VDPU2_REG_DEC_INT_INDEX			(55)
#define VDPU2_INT_TIMEOUT			BIT(13)
#define VDPU2_INT_STRM_ERROR			BIT(12)
#define VDPU2_INT_SLICE				BIT(9)
#define VDPU2_INT_ASO_ERROR			BIT(8)
#define VDPU2_INT_BUF_EMPTY			BIT(6)
#define VDPU2_INT_BUS_ERROR			BIT(5)
#define VDPU2_DEC_INT				BIT(4)
#define VDPU2_DEC_IRQ_DIS			BIT(1)
#define VDPU2_DEC_INT_RAW			BIT(0)

#define VDPU2_REG_DEC_EN			0x0e4
#define VDPU2_REG_DEC_EN_INDEX			(57)
#define VDPU2_DEC_CLOCK_GATE_EN			BIT(4)
#define VDPU2_DEC_START				BIT(0)

#define VDPU2_REG_SOFT_RESET			0x0e8
#define VDPU2_REG_SOFT_RESET_INDEX		(58)

#define VDPU2_REG_DIR_MV_BASE			0x0f8
#define VDPU2_REG_DIR_MV_BASE_INDEX		(62)

#define VDPU2_REG_STREAM_RLC_BASE		0x100
#define VDPU2_REG_STREAM_RLC_BASE_INDEX		(64)

#define VDPU2_REG_CLR_CACHE_BASE		0x810

#define to_vdpu_task(task)		\
		container_of(task, struct vdpu_task, mpp_task)
#define to_vdpu_dev(dev)		\
		container_of(dev, struct vdpu_dev, mpp)

struct vdpu_task {
	struct mpp_task mpp_task;

	enum MPP_CLOCK_MODE clk_mode;
	u32 reg[VDPU2_REG_NUM];

	struct reg_offset_info off_inf;
	u32 strm_addr;
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

struct vdpu_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
};

static struct mpp_hw_info vdpu_v2_hw_info = {
	.reg_num = VDPU2_REG_NUM,
	.reg_id = VDPU2_REG_HW_ID_INDEX,
	.reg_start = VDPU2_REG_START_INDEX,
	.reg_end = VDPU2_REG_END_INDEX,
	.reg_en = VDPU2_REG_DEC_EN_INDEX,
};

/*
 * File handle translation information: per-format lists of register
 * indices whose values carry dma-buf fds that must be translated to
 * device iova addresses before the task is run.
 */
static const u16 trans_tbl_default[] = {
	61, 62, 63, 64, 131, 134, 135, 148
};

static const u16 trans_tbl_jpegd[] = {
	21, 22, 61, 63, 64, 131
};

static const u16 trans_tbl_h264d[] = {
	61, 63, 64, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
	98, 99
};

static const u16 trans_tbl_vc1d[] = {
	62, 63, 64, 131, 134, 135, 145, 148
};

static const u16 trans_tbl_vp6d[] = {
	61, 63, 64, 131, 136, 145
};

static const u16 trans_tbl_vp8d[] = {
	61, 63, 64, 131, 136, 137, 140, 141, 142, 143, 144, 145, 146, 147, 149
};

static struct mpp_trans_info vdpu_v2_trans[] = {
	[VDPU2_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[VDPU2_FMT_H263D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_MPEG4D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_JPEGD] = {
		.count = ARRAY_SIZE(trans_tbl_jpegd),
		.table = trans_tbl_jpegd,
	},
	[VDPU2_FMT_VC1D] = {
		.count = ARRAY_SIZE(trans_tbl_vc1d),
		.table = trans_tbl_vc1d,
	},
	[VDPU2_FMT_MPEG2D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_MPEG1D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_VP6D] = {
		.count = ARRAY_SIZE(trans_tbl_vp6d),
		.table = trans_tbl_vp6d,
	},
	[VDPU2_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VDPU2_FMT_VP7D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_VP8D] = {
		.count = ARRAY_SIZE(trans_tbl_vp8d),
		.table = trans_tbl_vp8d,
	},
	[VDPU2_FMT_AVSD] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
};

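/*
 * Translate dma-buf fds embedded in the register payload into device
 * iova addresses. For H.264 the direct-MV base register packs an fd in
 * bits [9:0] and a 16-byte-unit offset in the upper bits, so it is
 * attached and patched here explicitly.
 */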
static int vdpu_process_reg_fd(struct mpp_session *session,
			       struct vdpu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret = 0;
	int fmt = VDPU2_GET_FORMAT(task->reg[VDPU2_REG_SYS_CTRL_INDEX]);

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;

	if (likely(fmt == VDPU2_FMT_H264D)) {
		int fd;
		u32 offset;
		dma_addr_t iova = 0;
		struct mpp_mem_region *mem_region = NULL;
		int idx = VDPU2_REG_DIR_MV_BASE_INDEX;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			fd = task->reg[idx];
			offset = 0;
		} else {
			fd = task->reg[idx] & 0x3ff;
			offset = task->reg[idx] >> 10 << 4;
		}
		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
		if (IS_ERR(mem_region)) {
			mpp_err("reg[%3d]: %08x fd %d attach failed\n",
				idx, task->reg[idx], fd);
			return -EFAULT;
		}

		iova = mem_region->iova;
		mpp_debug(DEBUG_IOMMU, "DMV[%3d]: %3d => %pad + offset %10d\n",
			  idx, fd, &iova, offset);
		task->reg[idx] = iova + offset;
	}
	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
}

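/*
 * Split the ioctl message array into per-task write/read register
 * requests and optional address-offset info, copying write payloads
 * from user space after bounds checking.
 */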
static int vdpu_extract_task_msg(struct vdpu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

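/*
 * Allocate a decode task from a batch of session messages and translate
 * any fds unless the session asked for MPP_FLAGS_REG_FD_NO_TRANS.
 */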
static void *vdpu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vdpu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->strm_addr = task->reg[VDPU2_REG_STREAM_RLC_BASE_INDEX];
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

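/*
 * Program the hardware for one task: clear the cache, write the queued
 * register requests, flush the IOMMU TLB, then kick decoding by setting
 * VDPU2_DEC_START in the enable register.
 */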
static int vdpu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpu_task *task = to_vdpu_task(mpp_task);
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VDPU2_REG_CLR_CACHE_BASE, 1);
	/* set registers for hardware */
	reg_en = mpp_task->hw_info->reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

	/* Flush the registers */
	wmb();
	mpp_write(mpp, VDPU2_REG_DEC_EN,
		  task->reg[reg_en] | VDPU2_DEC_START);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}

static int vdpu_px30_run(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	mpp_iommu_flush_tlb(mpp->iommu_info);
	return vdpu_run(mpp, mpp_task);
}

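/*
 * Read back the requested registers after the task completes, restore
 * the saved interrupt status into the register payload, and report the
 * consumed stream length (current RLC read pointer minus the start
 * address, stored shifted left by 10).
 */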
static int vdpu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	u32 dec_get;
	s32 dec_length;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* revert hack for irq status */
	task->reg[VDPU2_REG_DEC_INT_INDEX] = task->irq_status;
	/* revert hack for decoded length */
	dec_get = mpp_read_relaxed(mpp, VDPU2_REG_STREAM_RLC_BASE);
	dec_length = dec_get - task->strm_addr;
	task->reg[VDPU2_REG_STREAM_RLC_BASE_INDEX] = dec_length << 10;
	mpp_debug(DEBUG_REGISTER,
		  "dec_get %08x dec_length %d\n", dec_get, dec_length);

	mpp_debug_leave();

	return 0;
}

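/* Copy the read-back register ranges out to the user-space requests. */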
static int vdpu_result(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task,
		       struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	/* FIXME may overflow the kernel */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (copy_to_user(req->data,
				 (u8 *)task->reg + req->offset,
				 req->size)) {
			mpp_err("copy_to_user reg failed\n");
			return -EIO;
		}
	}

	return 0;
}

static int vdpu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	if (dec->procfs) {
		proc_remove(dec->procfs);
		dec->procfs = NULL;
	}

	return 0;
}

static int vdpu_procfs_init(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(dec->procfs)) {
		mpp_err("failed to create procfs dir\n");
		dec->procfs = NULL;
		return -EIO;
	}

	/* for common mpp_dev options */
	mpp_procfs_create_common(dec->procfs, mpp);

	mpp_procfs_create_u32("aclk", 0644,
			      dec->procfs, &dec->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      dec->procfs, &mpp->session_max_buffers);

	return 0;
}
#else
static inline int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vdpu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

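/*
 * One-time setup: look up the GRF info, the aclk/hclk clocks and the
 * optional aclk/hclk reset lines from the device tree. Missing clocks
 * or resets are reported but are not fatal.
 */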
static int vdpu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VDPU2];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource defined\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource defined\n");

	return 0;
}

static int vdpu_px30_init(struct mpp_dev *mpp)
{
	vdpu_init(mpp);
	return px30_workaround_combo_init(mpp);
}

static int vdpu_clk_on(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_safe_enable(dec->aclk_info.clk);
	mpp_clk_safe_enable(dec->hclk_info.clk);

	return 0;
}

static int vdpu_clk_off(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_safe_disable(dec->aclk_info.clk);
	mpp_clk_safe_disable(dec->hclk_info.clk);

	return 0;
}

static int vdpu_set_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);

	return 0;
}

static int vdpu_reduce_freq(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);

	return 0;
}

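/*
 * Hard IRQ half: latch and clear the interrupt status, re-enable clock
 * gating, and defer the real handling to the threaded ISR below.
 */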
static int vdpu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VDPU2_REG_DEC_INT);
	if (!(mpp->irq_status & VDPU2_DEC_INT_RAW))
		return IRQ_NONE;

	mpp_write(mpp, VDPU2_REG_DEC_INT, 0);
	/* set clock gating to save power */
	mpp_write(mpp, VDPU2_REG_DEC_EN, VDPU2_DEC_CLOCK_GATE_EN);

	return IRQ_WAKE_THREAD;
}

static int vdpu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vdpu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpu_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	err_mask = VDPU2_INT_TIMEOUT
		| VDPU2_INT_STRM_ERROR
		| VDPU2_INT_ASO_ERROR
		| VDPU2_INT_BUF_EMPTY
		| VDPU2_INT_BUS_ERROR;

	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

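/*
 * Soft reset via the dedicated register, polling for completion; if it
 * fails, vdpu_reset() falls back to the CRU reset lines.
 */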
static int vdpu_soft_reset(struct mpp_dev *mpp)
{
	u32 val;
	int ret;

	mpp_write(mpp, VDPU2_REG_SOFT_RESET, 1);
	ret = readl_relaxed_poll_timeout(mpp->reg_base + VDPU2_REG_SOFT_RESET,
					 val, !val, 0, 5);
	return ret;
}

static int vdpu_reset(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);
	int ret = 0;

	mpp_write(mpp, VDPU2_REG_DEC_EN, 0);
	mpp_write(mpp, VDPU2_REG_DEC_INT, 0);

	/* soft reset first */
	ret = vdpu_soft_reset(mpp);
	if (ret && dec->rst_a && dec->rst_h) {
		/* Don't skip this or iommu won't work after reset */
		mpp_err("soft reset failed, use cru reset!\n");
		mpp_debug(DEBUG_RESET, "reset in\n");
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		udelay(5);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_pmu_idle_request(mpp, false);
		mpp_debug(DEBUG_RESET, "reset out\n");
	}

	return 0;
}

static struct mpp_hw_ops vdpu_v2_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

static struct mpp_hw_ops vdpu_px30_hw_ops = {
	.init = vdpu_px30_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
	.set_grf = px30_workaround_combo_switch_grf,
};

static struct mpp_dev_ops vdpu_v2_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};

static struct mpp_dev_ops vdpu_px30_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_px30_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};

static const struct mpp_dev_var vdpu_v2_data = {
	.device_type = MPP_DEVICE_VDPU2,
	.hw_info = &vdpu_v2_hw_info,
	.trans_info = vdpu_v2_trans,
	.hw_ops = &vdpu_v2_hw_ops,
	.dev_ops = &vdpu_v2_dev_ops,
};

static const struct mpp_dev_var vdpu_px30_data = {
	.device_type = MPP_DEVICE_VDPU2,
	.hw_info = &vdpu_v2_hw_info,
	.trans_info = vdpu_v2_trans,
	.hw_ops = &vdpu_px30_hw_ops,
	.dev_ops = &vdpu_px30_dev_ops,
};

static const struct of_device_id mpp_vdpu2_dt_match[] = {
	{
		.compatible = "rockchip,vpu-decoder-v2",
		.data = &vdpu_v2_data,
	},
#ifdef CONFIG_CPU_PX30
	{
		.compatible = "rockchip,vpu-decoder-px30",
		.data = &vdpu_px30_data,
	},
#endif
	{},
};

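/*
 * Probe: match the DT node, register with the common mpp core, hook up
 * the shared threaded interrupt, and expose the post-processor
 * (MPP_DEVICE_VDPU2_PP) as a sub-device before registering with the
 * mpp service.
 */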
static int vdpu_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct vdpu_dev *dec = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;

	dev_info(dev, "probe device\n");
	dec = devm_kzalloc(dev, sizeof(struct vdpu_dev), GFP_KERNEL);
	if (!dec)
		return -ENOMEM;
	mpp = &dec->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vdpu2_dt_match,
				      pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;

		mpp->core_id = of_alias_get_id(pdev->dev.of_node, "vdpu");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to register interrupt handler\n");
		return -EINVAL;
	}

	if (mpp->var->device_type == MPP_DEVICE_VDPU2) {
		mpp->srv->sub_devices[MPP_DEVICE_VDPU2_PP] = mpp;
		set_bit(MPP_DEVICE_VDPU2_PP, &mpp->srv->hw_support);
	}

	mpp->session_max_buffers = VDPU2_SESSION_MAX_BUFFERS;
	vdpu_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);
	dev_info(dev, "probe finished\n");

	return 0;
}

static int vdpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpp_dev *mpp = dev_get_drvdata(dev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(mpp);
	vdpu_procfs_remove(mpp);

	return 0;
}

struct platform_driver rockchip_vdpu2_driver = {
	.probe = vdpu_probe,
	.remove = vdpu_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = VDPU2_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpu2_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpu2_driver);