// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/proc_fs.h>
#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"
#include <soc/rockchip/rockchip_iommu.h>

#define VDPU1_DRIVER_NAME		"mpp_vdpu1"

#define	VDPU1_SESSION_MAX_BUFFERS	40
/* The maximum number of registers across all versions */
#define VDPU1_REG_NUM			60
#define VDPU1_REG_HW_ID_INDEX		0
#define VDPU1_REG_START_INDEX		0
#define VDPU1_REG_END_INDEX		59

#define VDPU1_REG_PP_NUM		101
#define VDPU1_REG_PP_START_INDEX	0
#define VDPU1_REG_PP_END_INDEX		100

#define VDPU1_REG_DEC_INT_EN		0x004
#define VDPU1_REG_DEC_INT_EN_INDEX	(1)
/* B slice detected, used in 8190 decoder and later */
#define	VDPU1_INT_PIC_INF		BIT(24)
#define	VDPU1_INT_TIMEOUT		BIT(18)
#define	VDPU1_INT_SLICE			BIT(17)
#define	VDPU1_INT_STRM_ERROR		BIT(16)
#define	VDPU1_INT_ASO_ERROR		BIT(15)
#define	VDPU1_INT_BUF_EMPTY		BIT(14)
#define	VDPU1_INT_BUS_ERROR		BIT(13)
#define	VDPU1_DEC_INT			BIT(12)
#define	VDPU1_DEC_INT_RAW		BIT(8)
#define	VDPU1_DEC_IRQ_DIS		BIT(4)
#define	VDPU1_DEC_START			BIT(0)

/* NOTE: Don't enable this, or AVC decoding will run into trouble on RK3288 */
#define VDPU1_REG_DEC_EN		0x008
#define	VDPU1_CLOCK_GATE_EN		BIT(10)

#define VDPU1_REG_SOFT_RESET		0x194
#define VDPU1_REG_SOFT_RESET_INDEX	(101)

#define VDPU1_REG_SYS_CTRL		0x00c
#define VDPU1_REG_SYS_CTRL_INDEX	(3)
#define VDPU1_REG_WIDTH_INDEX		(4)
#define	VDPU1_GET_FORMAT(x)		(((x) >> 28) & 0xf)
#define VDPU1_GET_PROD_NUM(x)		(((x) >> 16) & 0xffff)
#define VDPU1_GET_WIDTH(x)		(((x) & 0xff800000) >> 19)
#define	VDPU1_FMT_H264D			0
#define	VDPU1_FMT_MPEG4D		1
#define	VDPU1_FMT_H263D			2
#define	VDPU1_FMT_JPEGD			3
#define	VDPU1_FMT_VC1D			4
#define	VDPU1_FMT_MPEG2D		5
#define	VDPU1_FMT_MPEG1D		6
#define	VDPU1_FMT_VP6D			7
#define	VDPU1_FMT_RESERVED		8
#define	VDPU1_FMT_VP7D			9
#define	VDPU1_FMT_VP8D			10
#define	VDPU1_FMT_AVSD			11
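/*
 * Per the helpers above: the decode format lives in bits [31:28] of the
 * SYS_CTRL word, and GET_WIDTH takes bits [31:23] scaled by 16, i.e. the
 * width appears to be stored in 16-pixel (macroblock) units. GET_PROD_NUM
 * presumably applies to the hardware ID register (index 0).
 */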

#define VDPU1_REG_STREAM_RLC_BASE	0x030
#define VDPU1_REG_STREAM_RLC_BASE_INDEX	(12)

#define VDPU1_REG_DIR_MV_BASE		0x0a4
#define VDPU1_REG_DIR_MV_BASE_INDEX	(41)

#define VDPU1_REG_CLR_CACHE_BASE	0x810

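/*
 * container_of helpers: recover the driver-private wrappers from the
 * embedded generic mpp_task / mpp_dev structures.
 */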
#define to_vdpu_task(task)		\
		container_of(task, struct vdpu_task, mpp_task)
#define to_vdpu_dev(dev)		\
		container_of(dev, struct vdpu_dev, mpp)

enum VDPU1_HW_ID {
	VDPU1_ID_0102 = 0x0102,
	VDPU1_ID_9190 = 0x6731,
};
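/*
 * IDs read back from the hardware ID register; 0x6731 is presumably the
 * product number reported by the 9190-generation decoder rather than a
 * literal "9190".
 */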

struct vdpu_task {
	struct mpp_task mpp_task;
	/* whether the post-processor is enabled */
	bool pp_enable;

	enum MPP_CLOCK_MODE clk_mode;
	u32 reg[VDPU1_REG_PP_NUM];

	struct reg_offset_info off_inf;
	u32 strm_addr;
	u32 irq_status;
	/* requests for the current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

struct vdpu_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
};

static struct mpp_hw_info vdpu_v1_hw_info = {
	.reg_num = VDPU1_REG_NUM,
	.reg_id = VDPU1_REG_HW_ID_INDEX,
	.reg_start = VDPU1_REG_START_INDEX,
	.reg_end = VDPU1_REG_END_INDEX,
	.reg_en = VDPU1_REG_DEC_INT_EN_INDEX,
};

static struct mpp_hw_info vdpu_pp_v1_hw_info = {
	.reg_num = VDPU1_REG_PP_NUM,
	.reg_id = VDPU1_REG_HW_ID_INDEX,
	.reg_start = VDPU1_REG_PP_START_INDEX,
	.reg_end = VDPU1_REG_PP_END_INDEX,
	.reg_en = VDPU1_REG_DEC_INT_EN_INDEX,
};
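/*
 * Two views of the same block: the plain decoder map covers registers
 * 0..59, while the decoder-plus-post-processor map extends to registers
 * 0..100. vdpu_alloc_task() picks one based on the session device type.
 */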

/*
 * file handle translation information
 *
 * Each table below lists the register indices that carry a buffer file
 * handle to be translated into a device address; the table is selected
 * by decode format (see vdpu_v1_trans[]).
 */
static const u16 trans_tbl_avsd[] = {
	12, 13, 14, 15, 16, 17, 40, 41, 45
};

static const u16 trans_tbl_default[] = {
	12, 13, 14, 15, 16, 17, 40, 41
};

static const u16 trans_tbl_jpegd[] = {
	12, 13, 14, 40, 66, 67
};

static const u16 trans_tbl_h264d[] = {
	12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
	28, 29, 40
};

static const u16 trans_tbl_vc1d[] = {
	12, 13, 14, 15, 16, 17, 27, 41
};

static const u16 trans_tbl_vp6d[] = {
	12, 13, 14, 18, 27, 40
};

static const u16 trans_tbl_vp8d[] = {
	10, 12, 13, 14, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 40
};

static struct mpp_trans_info vdpu_v1_trans[] = {
	[VDPU1_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[VDPU1_FMT_H263D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_MPEG4D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_JPEGD] = {
		.count = ARRAY_SIZE(trans_tbl_jpegd),
		.table = trans_tbl_jpegd,
	},
	[VDPU1_FMT_VC1D] = {
		.count = ARRAY_SIZE(trans_tbl_vc1d),
		.table = trans_tbl_vc1d,
	},
	[VDPU1_FMT_MPEG2D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_MPEG1D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_VP6D] = {
		.count = ARRAY_SIZE(trans_tbl_vp6d),
		.table = trans_tbl_vp6d,
	},
	[VDPU1_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VDPU1_FMT_VP7D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_VP8D] = {
		.count = ARRAY_SIZE(trans_tbl_vp8d),
		.table = trans_tbl_vp8d,
	},
	[VDPU1_FMT_AVSD] = {
		.count = ARRAY_SIZE(trans_tbl_avsd),
		.table = trans_tbl_avsd,
	},
};

static int vdpu_process_reg_fd(struct mpp_session *session,
			       struct vdpu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret = 0;
	int fmt = VDPU1_GET_FORMAT(task->reg[VDPU1_REG_SYS_CTRL_INDEX]);

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;
	/*
	 * special offset scale case
	 *
	 * This translation is for fd + offset translation.
	 * One register has 32 bits. We need to transfer both the buffer
	 * file handle and the start address offset, so we pack the file
	 * handle and the offset together in the format below:
	 *
	 *  bits  0..9  buffer file handle, range 0 ~ 1023
	 *  bits 10..31 offset, range 0 ~ 4M
	 *
	 * But in the 4K case the offset can be larger than 4M.
	 */
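	/*
	 * Note the unpacking below scales the stored offset by 16 bytes
	 * ((reg >> 10) << 4), which is presumably how offsets beyond 4M
	 * stay reachable. Worked example: reg = 0x2c05 unpacks to
	 * fd = 0x2c05 & 0x3ff = 5, offset = (0x2c05 >> 10) << 4 = 176.
	 */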
	if (likely(fmt == VDPU1_FMT_H264D)) {
		int fd;
		u32 offset;
		dma_addr_t iova = 0;
		u32 idx = VDPU1_REG_DIR_MV_BASE_INDEX;
		struct mpp_mem_region *mem_region = NULL;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			fd = task->reg[idx];
			offset = 0;
		} else {
			fd = task->reg[idx] & 0x3ff;
			offset = task->reg[idx] >> 10 << 4;
		}
		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
		if (IS_ERR(mem_region)) {
			mpp_err("reg[%03d]: %08x fd %d attach failed\n",
				idx, task->reg[idx], fd);
			goto fail;
		}

		iova = mem_region->iova;
		mpp_debug(DEBUG_IOMMU, "DMV[%3d]: %3d => %pad + offset %10d\n",
			  idx, fd, &iova, offset);
		task->reg[idx] = iova + offset;
	}

	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
fail:
	return -EFAULT;
}

static int vdpu_extract_task_msg(struct vdpu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

static void *vdpu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vdpu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	if (session->device_type == MPP_DEVICE_VDPU1_PP) {
		task->pp_enable = true;
		mpp_task->hw_info = &vdpu_pp_v1_hw_info;
	} else {
		mpp_task->hw_info = mpp->var->hw_info;
	}
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
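	/* capture the stream base so vdpu_finish() can compute the consumed length */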
	task->strm_addr = task->reg[VDPU1_REG_STREAM_RLC_BASE_INDEX];
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

static int vdpu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpu_task *task = to_vdpu_task(mpp_task);
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VDPU1_REG_CLR_CACHE_BASE, 1);
	/* set registers for hardware */
	reg_en = mpp_task->hw_info->reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}
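	/*
	 * reg_en is passed so mpp_write_req() can treat the enable/start
	 * register specially; the actual start bit is written last, below,
	 * once everything else is programmed.
	 */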

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

	/* Flush the register writes before starting the device */
	wmb();
	mpp_write(mpp, VDPU1_REG_DEC_INT_EN,
		  task->reg[reg_en] | VDPU1_DEC_START);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}

static int vdpu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	u32 dec_get;
	s32 dec_length;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* the ISR cleared the int register, so report the latched status back */
	task->reg[VDPU1_REG_DEC_INT_EN_INDEX] = task->irq_status;
	/* report the consumed stream length back through the RLC base register */
	dec_get = mpp_read_relaxed(mpp, VDPU1_REG_STREAM_RLC_BASE);
	dec_length = dec_get - task->strm_addr;
	task->reg[VDPU1_REG_STREAM_RLC_BASE_INDEX] = dec_length << 10;
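	/*
	 * The shift left by 10 mirrors the fd + offset packing used in
	 * vdpu_process_reg_fd(): the length lands in the offset field,
	 * so userspace presumably recovers it as (reg >> 10).
	 */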
	mpp_debug(DEBUG_REGISTER,
		  "dec_get %08x dec_length %d\n", dec_get, dec_length);

	mpp_debug_leave();

	return 0;
}

static int vdpu_result(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task,
		       struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	/* FIXME may overflow the kernel */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (copy_to_user(req->data,
				 (u8 *)task->reg + req->offset,
				 req->size)) {
			mpp_err("copy_to_user reg fail\n");
			return -EIO;
		}
	}

	return 0;
}

static int vdpu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	if (dec->procfs) {
		proc_remove(dec->procfs);
		dec->procfs = NULL;
	}

	return 0;
}

static int vdpu_procfs_init(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(dec->procfs)) {
		mpp_err("failed to create procfs dir\n");
		dec->procfs = NULL;
		return -EIO;
	}

	/* for common mpp_dev options */
	mpp_procfs_create_common(dec->procfs, mpp);

	mpp_procfs_create_u32("aclk", 0644,
			      dec->procfs, &dec->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      dec->procfs, &mpp->session_max_buffers);

	return 0;
}
#else
static inline int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vdpu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

static int vdpu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VDPU1];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource defined\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource defined\n");

	return 0;
}

static int vdpu_3036_init(struct mpp_dev *mpp)
{
	vdpu_init(mpp);
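	/* mark this device as the active one on the shared queue; see vdpu_3036_set_grf() */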
	set_bit(mpp->var->device_type, &mpp->queue->dev_active_flags);
	return 0;
}

static int vdpu_clk_on(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_safe_enable(dec->aclk_info.clk);
	mpp_clk_safe_enable(dec->hclk_info.clk);

	return 0;
}

static int vdpu_clk_off(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_safe_disable(dec->aclk_info.clk);
	mpp_clk_safe_disable(dec->hclk_info.clk);

	return 0;
}

static int vdpu_3288_get_freq(struct mpp_dev *mpp,
			      struct mpp_task *mpp_task)
{
	u32 width;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	width = VDPU1_GET_WIDTH(task->reg[VDPU1_REG_WIDTH_INDEX]);
	if (width > 2560)
		task->clk_mode = CLK_MODE_ADVANCED;

	return 0;
}

static int vdpu_3368_get_freq(struct mpp_dev *mpp,
			      struct mpp_task *mpp_task)
{
	u32 width;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	width = VDPU1_GET_WIDTH(task->reg[VDPU1_REG_WIDTH_INDEX]);
	if (width > 2560)
		task->clk_mode = CLK_MODE_ADVANCED;

	return 0;
}

static int vdpu_set_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);

	return 0;
}

static int vdpu_reduce_freq(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);

	return 0;
}

static int vdpu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VDPU1_REG_DEC_INT_EN);
	if (!(mpp->irq_status & VDPU1_DEC_INT_RAW))
		return IRQ_NONE;

	mpp_write(mpp, VDPU1_REG_DEC_INT_EN, 0);
	/* set clock gating to save power */
	mpp_write(mpp, VDPU1_REG_DEC_EN, VDPU1_CLOCK_GATE_EN);

	return IRQ_WAKE_THREAD;
}

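/*
 * Threaded half of the interrupt handling: vdpu_irq() above runs in
 * hard-irq context and only latches and acknowledges the status, while
 * this handler does the task bookkeeping and completion.
 */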
static int vdpu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vdpu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpu_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	err_mask = VDPU1_INT_TIMEOUT
		| VDPU1_INT_STRM_ERROR
		| VDPU1_INT_ASO_ERROR
		| VDPU1_INT_BUF_EMPTY
		| VDPU1_INT_BUS_ERROR;

	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

static int vdpu_soft_reset(struct mpp_dev *mpp)
{
	u32 val;
	u32 ret;

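	/* write the reset bit, then poll up to 5 us for it to read back as zero */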
	mpp_write(mpp, VDPU1_REG_SOFT_RESET, 1);
	ret = readl_relaxed_poll_timeout(mpp->reg_base + VDPU1_REG_SOFT_RESET,
					 val, !val, 0, 5);

	return ret;
}

static int vdpu_reset(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);
	u32 ret = 0;

	/* soft reset first */
	ret = vdpu_soft_reset(mpp);
	if (ret && dec->rst_a && dec->rst_h) {
		mpp_err("soft reset failed, use cru reset!\n");
		mpp_debug(DEBUG_RESET, "reset in\n");

		/* Don't skip this, or the iommu won't work after reset */
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		udelay(5);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_pmu_idle_request(mpp, false);

		mpp_debug(DEBUG_RESET, "reset out\n");
	}
	mpp_write(mpp, VDPU1_REG_DEC_INT_EN, 0);

	return 0;
}

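/*
 * On SoCs where the GRF muxes the shared video block between devices,
 * a GRF change means every previously active device on the queue must
 * be quiesced (reset, iommu disabled) before this device takes over.
 */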
static int vdpu_3036_set_grf(struct mpp_dev *mpp)
{
	int grf_changed;
	struct mpp_dev *loop = NULL, *n;
	struct mpp_taskqueue *queue = mpp->queue;
	bool pd_is_on;

	grf_changed = mpp_grf_is_changed(mpp->grf_info);
	if (grf_changed) {
		/*
		 * In this case the devices that share the queue also share
		 * the same pd & clk, so it is fine to use mpp->dev's pd to
		 * control the whole procedure.
		 */
		pd_is_on = rockchip_pmu_pd_is_on(mpp->dev);
		if (!pd_is_on)
			rockchip_pmu_pd_on(mpp->dev);
		mpp->hw_ops->clk_on(mpp);

		list_for_each_entry_safe(loop, n, &queue->dev_list, queue_link) {
			if (test_bit(loop->var->device_type, &queue->dev_active_flags)) {
				mpp_set_grf(loop->grf_info);
				if (loop->hw_ops->clk_on)
					loop->hw_ops->clk_on(loop);
				if (loop->hw_ops->reset)
					loop->hw_ops->reset(loop);
				rockchip_iommu_disable(loop->dev);
				if (loop->hw_ops->clk_off)
					loop->hw_ops->clk_off(loop);
				clear_bit(loop->var->device_type, &queue->dev_active_flags);
			}
		}

		mpp_set_grf(mpp->grf_info);
		rockchip_iommu_enable(mpp->dev);
		set_bit(mpp->var->device_type, &queue->dev_active_flags);

		mpp->hw_ops->clk_off(mpp);
		if (!pd_is_on)
			rockchip_pmu_pd_off(mpp->dev);
	}

	return 0;
}

static struct mpp_hw_ops vdpu_v1_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
	.set_grf = vdpu_3036_set_grf,
};

static struct mpp_hw_ops vdpu_3036_hw_ops = {
	.init = vdpu_3036_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
	.set_grf = vdpu_3036_set_grf,
};

static struct mpp_hw_ops vdpu_3288_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.get_freq = vdpu_3288_get_freq,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

static struct mpp_hw_ops vdpu_3368_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.get_freq = vdpu_3368_get_freq,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

static struct mpp_dev_ops vdpu_v1_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};

static const struct mpp_dev_var vdpu_v1_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_v1_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var vdpu_3036_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_3036_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var vdpu_3288_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_3288_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var vdpu_3368_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_3368_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var avsd_plus_data = {
	.device_type = MPP_DEVICE_AVSPLUS_DEC,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_v1_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct of_device_id mpp_vdpu1_dt_match[] = {
	{
		.compatible = "rockchip,vpu-decoder-v1",
		.data = &vdpu_v1_data,
	},
#ifdef CONFIG_CPU_RK3288
	{
		.compatible = "rockchip,vpu-decoder-rk3288",
		.data = &vdpu_3288_data,
	},
#endif
#ifdef CONFIG_CPU_RK3036
	{
		.compatible = "rockchip,vpu-decoder-rk3036",
		.data = &vdpu_3036_data,
	},
#endif
#ifdef CONFIG_CPU_RK3368
	{
		.compatible = "rockchip,vpu-decoder-rk3368",
		.data = &vdpu_3368_data,
	},
#endif
	{
		.compatible = "rockchip,avs-plus-decoder",
		.data = &avsd_plus_data,
	},
	{},
};

static int vdpu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vdpu_dev *dec = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;

	dev_info(dev, "probe device\n");
	dec = devm_kzalloc(dev, sizeof(struct vdpu_dev), GFP_KERNEL);
	if (!dec)
		return -ENOMEM;
	mpp = &dec->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vdpu1_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;

		mpp->core_id = of_alias_get_id(pdev->dev.of_node, "vdpu");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to register interrupt handler\n");
		return -EINVAL;
	}

	if (mpp->var->device_type == MPP_DEVICE_VDPU1) {
		mpp->srv->sub_devices[MPP_DEVICE_VDPU1_PP] = mpp;
		set_bit(MPP_DEVICE_VDPU1_PP, &mpp->srv->hw_support);
	}
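	/*
	 * The same hardware instance is also registered as a virtual
	 * MPP_DEVICE_VDPU1_PP device above, so sessions opened against it
	 * run with the post-processor register map (see vdpu_alloc_task()).
	 */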

	mpp->session_max_buffers = VDPU1_SESSION_MAX_BUFFERS;
	vdpu_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);
	dev_info(dev, "probing finished\n");

	return 0;
}

static int vdpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpp_dev *mpp = dev_get_drvdata(dev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(mpp);
	vdpu_procfs_remove(mpp);

	return 0;
}

struct platform_driver rockchip_vdpu1_driver = {
	.probe = vdpu_probe,
	.remove = vdpu_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = VDPU1_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpu1_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpu1_driver);