xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2020 Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Alpha Lin, alpha.lin@rock-chips.com
7  *	Ding Wei, leo.ding@rock-chips.com
8  *
9  */
10 #include <linux/pm_runtime.h>
11 
12 #include "mpp_debug.h"
13 #include "mpp_common.h"
14 #include "mpp_iommu.h"
15 
16 #include "mpp_rkvdec2_link.h"
17 
18 #include "hack/mpp_rkvdec2_hack_rk3568.c"
19 
20 #include <linux/devfreq_cooling.h>
21 #include <soc/rockchip/rockchip_ipa.h>
22 #include <soc/rockchip/rockchip_dmc.h>
23 #include <soc/rockchip/rockchip_opp_select.h>
24 #include <soc/rockchip/rockchip_system_monitor.h>
25 #include <soc/rockchip/rockchip_iommu.h>
26 
27 #ifdef CONFIG_PM_DEVFREQ
28 #include "../drivers/devfreq/governor.h"
29 #endif
30 
31 /*
32  * hardware information
33  */
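/*
 * Note: the three variants below share one register layout and differ only
 * in .link_info, the descriptor used for the hardware link (linked-task)
 * mode on each SoC generation.
 */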
34 static struct mpp_hw_info rkvdec_v2_hw_info = {
35 	.reg_num = RKVDEC_REG_NUM,
36 	.reg_id = RKVDEC_REG_HW_ID_INDEX,
37 	.reg_start = RKVDEC_REG_START_INDEX,
38 	.reg_end = RKVDEC_REG_END_INDEX,
39 	.reg_en = RKVDEC_REG_START_EN_INDEX,
40 	.link_info = &rkvdec_link_v2_hw_info,
41 };
42 
43 static struct mpp_hw_info rkvdec_rk356x_hw_info = {
44 	.reg_num = RKVDEC_REG_NUM,
45 	.reg_id = RKVDEC_REG_HW_ID_INDEX,
46 	.reg_start = RKVDEC_REG_START_INDEX,
47 	.reg_end = RKVDEC_REG_END_INDEX,
48 	.reg_en = RKVDEC_REG_START_EN_INDEX,
49 	.link_info = &rkvdec_link_rk356x_hw_info,
50 };
51 
52 static struct mpp_hw_info rkvdec_vdpu382_hw_info = {
53 	.reg_num = RKVDEC_REG_NUM,
54 	.reg_id = RKVDEC_REG_HW_ID_INDEX,
55 	.reg_start = RKVDEC_REG_START_INDEX,
56 	.reg_end = RKVDEC_REG_END_INDEX,
57 	.reg_en = RKVDEC_REG_START_EN_INDEX,
58 	.link_info = &rkvdec_link_vdpu382_hw_info,
59 };
60 
61 /*
62  * file handle translate information
63  */
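/*
 * Each table lists the register indices that carry dma-buf fd values from
 * userspace for that format; mpp_translate_reg_address() replaces them with
 * device iova addresses before the task reaches the hardware.
 */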
64 static const u16 trans_tbl_h264d[] = {
65 	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
66 	161, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
67 	177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
68 	192, 193, 194, 195, 196, 197, 198, 199
69 };
70 
71 static const u16 trans_tbl_h265d[] = {
72 	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
73 	161, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
74 	177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
75 	192, 193, 194, 195, 196, 197, 198, 199
76 };
77 
78 static const u16 trans_tbl_vp9d[] = {
79 	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
80 	160, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 180, 181, 182, 183,
81 	184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199
82 };
83 
84 static const u16 trans_tbl_avs2d[] = {
85 	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
86 	161, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
87 	177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
88 	192, 193, 194, 195, 196, 197, 198, 199
89 };
90 
91 static struct mpp_trans_info rkvdec_v2_trans[] = {
92 	[RKVDEC_FMT_H265D] = {
93 		.count = ARRAY_SIZE(trans_tbl_h265d),
94 		.table = trans_tbl_h265d,
95 	},
96 	[RKVDEC_FMT_H264D] = {
97 		.count = ARRAY_SIZE(trans_tbl_h264d),
98 		.table = trans_tbl_h264d,
99 	},
100 	[RKVDEC_FMT_VP9D] = {
101 		.count = ARRAY_SIZE(trans_tbl_vp9d),
102 		.table = trans_tbl_vp9d,
103 	},
104 	[RKVDEC_FMT_AVS2] = {
105 		.count = ARRAY_SIZE(trans_tbl_avs2d),
106 		.table = trans_tbl_avs2d,
107 	}
108 };
109 
110 static int mpp_extract_rcb_info(struct rkvdec2_rcb_info *rcb_inf,
111 				struct mpp_request *req)
112 {
113 	int max_size = ARRAY_SIZE(rcb_inf->elem);
114 	int cnt = req->size / sizeof(rcb_inf->elem[0]);
115 
116 	if (req->size > sizeof(rcb_inf->elem)) {
117 		mpp_err("count %d, max_size %d\n", cnt, max_size);
118 		return -EINVAL;
119 	}
120 	if (copy_from_user(rcb_inf->elem, req->data, req->size)) {
121 		mpp_err("copy_from_user failed\n");
122 		return -EINVAL;
123 	}
124 	rcb_inf->cnt = cnt;
125 
126 	return 0;
127 }
128 
129 static int rkvdec2_extract_task_msg(struct mpp_session *session,
130 				    struct rkvdec2_task *task,
131 				    struct mpp_task_msgs *msgs)
132 {
133 	u32 i;
134 	int ret;
135 	struct mpp_request *req;
136 	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;
137 
138 	for (i = 0; i < msgs->req_cnt; i++) {
139 		u32 off_s, off_e;
140 
141 		req = &msgs->reqs[i];
142 		if (!req->size)
143 			continue;
144 
145 		switch (req->cmd) {
146 		case MPP_CMD_SET_REG_WRITE: {
147 			off_s = hw_info->reg_start * sizeof(u32);
148 			off_e = hw_info->reg_end * sizeof(u32);
149 			ret = mpp_check_req(req, 0, sizeof(task->reg), off_s, off_e);
150 			if (ret)
151 				continue;
152 			if (copy_from_user((u8 *)task->reg + req->offset,
153 					   req->data, req->size)) {
154 				mpp_err("copy_from_user reg failed\n");
155 				return -EIO;
156 			}
157 			memcpy(&task->w_reqs[task->w_req_cnt++], req, sizeof(*req));
158 		} break;
159 		case MPP_CMD_SET_REG_READ: {
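			/*
			 * Read-back requests are only validated and recorded
			 * here; the registers are read in rkvdec2_finish()
			 * and copied to userspace in rkvdec2_result().
			 */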
160 			int req_base;
161 			int max_size;
162 
163 			if (req->offset >= RKVDEC_PERF_SEL_OFFSET) {
164 				req_base = RKVDEC_PERF_SEL_OFFSET;
165 				max_size = sizeof(task->reg_sel);
166 			} else {
167 				req_base = 0;
168 				max_size = sizeof(task->reg);
169 			}
170 
171 			ret = mpp_check_req(req, req_base, max_size, 0, max_size);
172 			if (ret)
173 				continue;
174 
175 			memcpy(&task->r_reqs[task->r_req_cnt++], req, sizeof(*req));
176 		} break;
177 		case MPP_CMD_SET_REG_ADDR_OFFSET: {
178 			mpp_extract_reg_offset_info(&task->off_inf, req);
179 		} break;
180 		case MPP_CMD_SET_RCB_INFO: {
181 			struct rkvdec2_session_priv *priv = session->priv;
182 
183 			if (priv)
184 				mpp_extract_rcb_info(&priv->rcb_inf, req);
185 		} break;
186 		default:
187 			break;
188 		}
189 	}
190 	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
191 		  task->w_req_cnt, task->r_req_cnt);
192 
193 	return 0;
194 }
195 
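/*
 * Steer a task's RCB registers into the SRAM-backed region reserved at
 * probe time (see rkvdec2_alloc_rcbbuf). Frames narrower than rcb_min_width
 * keep their original DDR buffers, as does any element that would overflow
 * the reserved region.
 */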
196 int mpp_set_rcbbuf(struct mpp_dev *mpp, struct mpp_session *session,
197 		   struct mpp_task *task)
198 {
199 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
200 	struct rkvdec2_session_priv *priv = session->priv;
201 
202 	mpp_debug_enter();
203 
204 	if (priv && dec->rcb_iova) {
205 		int i;
206 		u32 reg_idx, rcb_size, rcb_offset;
207 		struct rkvdec2_rcb_info *rcb_inf = &priv->rcb_inf;
208 		u32 width = priv->codec_info[DEC_INFO_WIDTH].val;
209 
210 		if (width < dec->rcb_min_width)
211 			goto done;
212 
213 		rcb_offset = 0;
214 		for (i = 0; i < rcb_inf->cnt; i++) {
215 			reg_idx = rcb_inf->elem[i].index;
216 			rcb_size = rcb_inf->elem[i].size;
217 			if ((rcb_offset + rcb_size) > dec->rcb_size) {
218 				mpp_debug(DEBUG_SRAM_INFO,
219 					  "rcb: reg %d use original buffer\n", reg_idx);
220 				continue;
221 			}
222 			mpp_debug(DEBUG_SRAM_INFO, "rcb: reg %d offset %d, size %d\n",
223 				  reg_idx, rcb_offset, rcb_size);
224 			task->reg[reg_idx] = dec->rcb_iova + rcb_offset;
225 			rcb_offset += rcb_size;
226 		}
227 	}
228 done:
229 	mpp_debug_leave();
230 
231 	return 0;
232 }
233 
234 int rkvdec2_task_init(struct mpp_dev *mpp, struct mpp_session *session,
235 		      struct rkvdec2_task *task, struct mpp_task_msgs *msgs)
236 {
237 	int ret;
238 	struct mpp_task *mpp_task = &task->mpp_task;
239 
240 	mpp_debug_enter();
241 
242 	mpp_task_init(session, mpp_task);
243 	mpp_task->hw_info = mpp->var->hw_info;
244 	mpp_task->reg = task->reg;
245 	/* extract reqs for current task */
246 	ret = rkvdec2_extract_task_msg(session, task, msgs);
247 	if (ret)
248 		return ret;
249 
250 	/* process fd in register */
251 	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
252 		u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_FORMAT_INDEX]);
253 
254 		ret = mpp_translate_reg_address(session, mpp_task,
255 						fmt, task->reg, &task->off_inf);
256 		if (ret)
257 			goto fail;
258 
259 		mpp_translate_reg_offset_info(mpp_task, &task->off_inf, task->reg);
260 	}
261 
262 	task->strm_addr = task->reg[RKVDEC_REG_RLC_BASE_INDEX];
263 	task->clk_mode = CLK_MODE_NORMAL;
264 	task->slot_idx = -1;
265 	init_waitqueue_head(&mpp_task->wait);
266 	/* get resolution info */
267 	if (session->priv) {
268 		struct rkvdec2_session_priv *priv = session->priv;
269 		u32 width = priv->codec_info[DEC_INFO_WIDTH].val;
270 		u32 bitdepth = priv->codec_info[DEC_INFO_BITDEPTH].val;
271 
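		/*
		 * For more than 8 bits per pixel, convert the horizontal size
		 * to bytes (width * bitdepth / 8, rounded up) so the
		 * pixel-based load estimate matches the real memory footprint.
		 */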
272 		task->width = (bitdepth > 8) ? ((width * bitdepth + 7) >> 3) : width;
273 		task->height = priv->codec_info[DEC_INFO_HEIGHT].val;
274 		task->pixels = task->width * task->height;
275 		mpp_debug(DEBUG_TASK_INFO, "width=%d, bitdepth=%d, height=%d\n",
276 			  width, bitdepth, task->height);
277 	}
278 
279 	mpp_debug_leave();
280 
281 	return 0;
282 
283 fail:
284 	mpp_task_dump_mem_region(mpp, mpp_task);
285 	mpp_task_dump_reg(mpp, mpp_task);
286 	mpp_task_finalize(session, mpp_task);
287 	return ret;
288 }
289 
290 void *rkvdec2_alloc_task(struct mpp_session *session,
291 			 struct mpp_task_msgs *msgs)
292 {
293 	int ret;
294 	struct rkvdec2_task *task;
295 
296 	task = kzalloc(sizeof(*task), GFP_KERNEL);
297 	if (!task)
298 		return NULL;
299 
300 	ret = rkvdec2_task_init(session->mpp, session, task, msgs);
301 	if (ret) {
302 		kfree(task);
303 		return NULL;
304 	}
305 	mpp_set_rcbbuf(session->mpp, session, &task->mpp_task);
306 
307 	return &task->mpp_task;
308 }
309 
310 static void *rkvdec2_rk3568_alloc_task(struct mpp_session *session,
311 				       struct mpp_task_msgs *msgs)
312 {
313 	u32 fmt;
314 	struct mpp_task *mpp_task = NULL;
315 	struct rkvdec2_task *task = NULL;
316 
317 	mpp_task = rkvdec2_alloc_task(session, msgs);
318 	if (!mpp_task)
319 		return NULL;
320 
321 	task = to_rkvdec2_task(mpp_task);
322 	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_FORMAT_INDEX]);
323 	/* workaround for rk356x: fix the hw bug on cabac/cavlc switching, h264d only */
324 	task->need_hack = (fmt == RKVDEC_FMT_H264D);
325 
326 	return mpp_task;
327 }
328 
329 static int rkvdec2_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
330 {
331 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
332 	u32 timing_en = mpp->srv->timing_en;
333 	u32 reg_en = mpp_task->hw_info->reg_en;
334 	/* set cache size */
335 	u32 reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
336 		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
337 	int i;
338 
339 	mpp_debug_enter();
340 
341 	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
342 		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
343 
344 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
345 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
346 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
347 	/* clear cache */
348 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
349 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
350 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
351 
352 	/* set registers for hardware */
353 	for (i = 0; i < task->w_req_cnt; i++) {
354 		int s, e;
355 		struct mpp_request *req = &task->w_reqs[i];
356 
357 		s = req->offset / sizeof(u32);
358 		e = s + req->size / sizeof(u32);
359 		mpp_write_req(mpp, task->reg, s, e, reg_en);
360 	}
361 
362 	/* flush tlb before starting hardware */
363 	mpp_iommu_flush_tlb(mpp->iommu_info);
364 
365 	/* init current task */
366 	mpp->cur_task = mpp_task;
367 
368 	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
369 
370 	/* Flush the registers before starting the device */
371 	wmb();
372 	mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
373 
374 	mpp_task_run_end(mpp_task, timing_en);
375 
376 	mpp_debug_leave();
377 
378 	return 0;
379 }
380 
381 static int rkvdec2_rk3568_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
382 {
383 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
384 	int ret = 0;
385 
386 	mpp_debug_enter();
387 
388 	/*
389 	 * run the fix before task processing
390 	 * workaround for rk356x: fix the hw bug on cabac/cavlc switching, h264d only
391 	 */
392 	if (task->need_hack)
393 		rkvdec2_3568_hack_fix(mpp);
394 
395 	ret = rkvdec2_run(mpp, mpp_task);
396 
397 	mpp_debug_leave();
398 
399 	return ret;
400 }
401 
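/*
 * Threaded interrupt split: rkvdec2_irq() runs in hard-irq context, latches
 * the status and masks the interrupt, then returns IRQ_WAKE_THREAD so that
 * rkvdec2_isr() can finish the task in thread context.
 */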
402 static int rkvdec2_irq(struct mpp_dev *mpp)
403 {
404 	mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
405 	if (!(mpp->irq_status & RKVDEC_IRQ_RAW))
406 		return IRQ_NONE;
407 
408 	mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
409 
410 	return IRQ_WAKE_THREAD;
411 }
412 
413 static int rkvdec2_isr(struct mpp_dev *mpp)
414 {
415 	u32 err_mask;
416 	struct rkvdec2_task *task = NULL;
417 	struct mpp_task *mpp_task = mpp->cur_task;
418 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
419 
420 	/* FIXME use a spin lock here */
421 	if (!mpp_task) {
422 		dev_err(mpp->dev, "no current task\n");
423 		return IRQ_HANDLED;
424 	}
425 	mpp_task->hw_cycles = mpp_read(mpp, RKVDEC_PERF_WORKING_CNT);
426 	mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
427 	mpp->cur_task = NULL;
428 	task = to_rkvdec2_task(mpp_task);
429 	task->irq_status = mpp->irq_status;
430 
431 	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
432 	err_mask = RKVDEC_COLMV_REF_ERR_STA | RKVDEC_BUF_EMPTY_STA |
433 		   RKVDEC_TIMEOUT_STA | RKVDEC_ERROR_STA;
434 	if (err_mask & task->irq_status) {
435 		atomic_inc(&mpp->reset_request);
436 		if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
437 			mpp_debug(DEBUG_DUMP_ERR_REG, "irq_status: %08x\n", task->irq_status);
438 			mpp_task_dump_hw_reg(mpp);
439 		}
440 	}
441 
442 	mpp_task_finish(mpp_task->session, mpp_task);
443 
444 	mpp_debug_leave();
445 	return IRQ_HANDLED;
446 }
447 
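/*
 * The hardware multiplexes its performance counters through three select
 * slots: program up to three counter ids at once, then read the three value
 * registers back. A zero in the second or third slot marks it unused.
 */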
448 static int rkvdec2_read_perf_sel(struct mpp_dev *mpp, u32 *regs, u32 s, u32 e)
449 {
450 	u32 i;
451 	u32 sel0, sel1, sel2, val;
452 
453 	for (i = s; i < e; i += 3) {
454 		/* set sel */
455 		sel0 = i;
456 		sel1 = ((i + 1) < e) ? (i + 1) : 0;
457 		sel2 = ((i + 2) < e) ? (i + 2) : 0;
458 		val = RKVDEC_SET_PERF_SEL(sel0, sel1, sel2);
459 		writel_relaxed(val, mpp->reg_base + RKVDEC_PERF_SEL_BASE);
460 		/* read data */
461 		regs[sel0] = readl_relaxed(mpp->reg_base + RKVDEC_SEL_VAL0_BASE);
462 		mpp_debug(DEBUG_GET_PERF_VAL, "sel[%d]:%u\n", sel0, regs[sel0]);
463 		if (sel1) {
464 			regs[sel1] = readl_relaxed(mpp->reg_base + RKVDEC_SEL_VAL1_BASE);
465 			mpp_debug(DEBUG_GET_PERF_VAL, "sel[%d]:%u\n", sel1, regs[sel1]);
466 		}
467 		if (sel2) {
468 			regs[sel2] = readl_relaxed(mpp->reg_base + RKVDEC_SEL_VAL2_BASE);
469 			mpp_debug(DEBUG_GET_PERF_VAL, "sel[%d]:%u\n", sel2, regs[sel2]);
470 		}
471 	}
472 
473 	return 0;
474 }
475 
476 static int rkvdec2_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
477 {
478 	u32 i;
479 	u32 dec_get;
480 	s32 dec_length;
481 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
482 	struct mpp_request *req;
483 	u32 s, e;
484 
485 	mpp_debug_enter();
486 
487 	/* read register after running */
488 	/* read registers back after the run */
489 		req = &task->r_reqs[i];
490 		/* read perf register */
491 		if (req->offset >= RKVDEC_PERF_SEL_OFFSET) {
492 			int off = req->offset - RKVDEC_PERF_SEL_OFFSET;
493 
494 			s = off / sizeof(u32);
495 			e = s + req->size / sizeof(u32);
496 			rkvdec2_read_perf_sel(mpp, task->reg_sel, s, e);
497 		} else {
498 			s = req->offset / sizeof(u32);
499 			e = s + req->size / sizeof(u32);
500 			mpp_read_req(mpp, task->reg, s, e);
501 		}
502 	}
503 	/* revert hack for irq status */
504 	task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
505 	/* revert hack for decoded length */
506 	dec_get = mpp_read_relaxed(mpp, RKVDEC_REG_RLC_BASE);
507 	dec_length = dec_get - task->strm_addr;
508 	task->reg[RKVDEC_REG_RLC_BASE_INDEX] = dec_length << 10;
509 	mpp_debug(DEBUG_REGISTER, "dec_get %08x dec_length %d\n", dec_get, dec_length);
510 
511 	if (mpp->srv->timing_en) {
512 		s64 time_diff;
513 
514 		mpp_task->on_finish = ktime_get();
515 		set_bit(TASK_TIMING_FINISH, &mpp_task->state);
516 
517 		time_diff = ktime_us_delta(mpp_task->on_finish, mpp_task->on_create);
518 
519 		if (mpp->timing_check && time_diff > (s64)mpp->timing_check)
520 			mpp_task_dump_timing(mpp_task, time_diff);
521 	}
522 
523 	mpp_debug_leave();
524 
525 	return 0;
526 }
527 
528 int rkvdec2_result(struct mpp_dev *mpp, struct mpp_task *mpp_task,
529 		   struct mpp_task_msgs *msgs)
530 {
531 	u32 i;
532 	struct mpp_request *req;
533 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
534 
535 	for (i = 0; i < task->r_req_cnt; i++) {
536 		req = &task->r_reqs[i];
537 
538 		if (req->offset >= RKVDEC_PERF_SEL_OFFSET) {
539 			int off = req->offset - RKVDEC_PERF_SEL_OFFSET;
540 
541 			if (copy_to_user(req->data,
542 					 (u8 *)task->reg_sel + off,
543 					 req->size)) {
544 				mpp_err("copy_to_user perf_sel fail\n");
545 				return -EIO;
546 			}
547 		} else {
548 			if (copy_to_user(req->data,
549 					 (u8 *)task->reg + req->offset,
550 					 req->size)) {
551 				mpp_err("copy_to_user reg fail\n");
552 				return -EIO;
553 			}
554 		}
555 	}
556 
557 	return 0;
558 }
559 
560 int rkvdec2_free_task(struct mpp_session *session, struct mpp_task *mpp_task)
561 {
562 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
563 
564 	mpp_task_finalize(session, mpp_task);
565 	kfree(task);
566 
567 	return 0;
568 }
569 
570 static int rkvdec2_control(struct mpp_session *session, struct mpp_request *req)
571 {
572 	switch (req->cmd) {
573 	case MPP_CMD_SEND_CODEC_INFO: {
574 		int i;
575 		int cnt;
576 		struct codec_info_elem elem;
577 		struct rkvdec2_session_priv *priv;
578 
579 		if (!session || !session->priv) {
580 			mpp_err("session info null\n");
581 			return -EINVAL;
582 		}
583 		priv = session->priv;
584 
585 		cnt = req->size / sizeof(elem);
586 		cnt = (cnt > DEC_INFO_BUTT) ? DEC_INFO_BUTT : cnt;
587 		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
588 		for (i = 0; i < cnt; i++) {
589 			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
590 				mpp_err("copy_from_user failed\n");
591 				continue;
592 			}
593 			if (elem.type > DEC_INFO_BASE && elem.type < DEC_INFO_BUTT &&
594 			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
595 				elem.type = array_index_nospec(elem.type, DEC_INFO_BUTT);
596 				priv->codec_info[elem.type].flag = elem.flag;
597 				priv->codec_info[elem.type].val = elem.data;
598 			} else {
599 				mpp_err("codec info invalid, type %d, flag %d\n",
600 					elem.type, elem.flag);
601 			}
602 		}
603 	} break;
604 	default: {
605 		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
606 	} break;
607 	}
608 
609 	return 0;
610 }
611 
612 int rkvdec2_free_session(struct mpp_session *session)
613 {
614 	if (session && session->priv) {
615 		kfree(session->priv);
616 		session->priv = NULL;
617 	}
618 
619 	return 0;
620 }
621 
622 static int rkvdec2_init_session(struct mpp_session *session)
623 {
624 	struct rkvdec2_session_priv *priv;
625 
626 	if (!session) {
627 		mpp_err("session is null\n");
628 		return -EINVAL;
629 	}
630 
631 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
632 	if (!priv)
633 		return -ENOMEM;
634 	session->priv = priv;
635 
636 	return 0;
637 }
638 
639 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
640 static int rkvdec2_procfs_remove(struct mpp_dev *mpp)
641 {
642 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
643 
644 	if (dec->procfs) {
645 		proc_remove(dec->procfs);
646 		dec->procfs = NULL;
647 	}
648 
649 	return 0;
650 }
651 
652 static int rkvdec2_show_perf_sel_offset(struct seq_file *file, void *v)
653 {
654 	seq_printf(file, "0x%08x\n", RKVDEC_PERF_SEL_OFFSET);
655 
656 	return 0;
657 }
658 
659 static int rkvdec2_procfs_init(struct mpp_dev *mpp)
660 {
661 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
662 	char name[32];
663 
664 	if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name ||
665 	    !mpp->srv || !mpp->srv->procfs)
666 		return -EINVAL;
667 
668 	snprintf(name, sizeof(name) - 1, "%s%d",
669 		 mpp->dev->of_node->name, mpp->core_id);
670 	dec->procfs = proc_mkdir(name, mpp->srv->procfs);
671 	if (IS_ERR_OR_NULL(dec->procfs)) {
672 		mpp_err("failed on open procfs\n");
673 		dec->procfs = NULL;
674 		return -EIO;
675 	}
676 
677 	/* for common mpp_dev options */
678 	mpp_procfs_create_common(dec->procfs, mpp);
679 
680 	mpp_procfs_create_u32("aclk", 0644,
681 			      dec->procfs, &dec->aclk_info.debug_rate_hz);
682 	mpp_procfs_create_u32("clk_core", 0644,
683 			      dec->procfs, &dec->core_clk_info.debug_rate_hz);
684 	mpp_procfs_create_u32("clk_cabac", 0644,
685 			      dec->procfs, &dec->cabac_clk_info.debug_rate_hz);
686 	mpp_procfs_create_u32("clk_hevc_cabac", 0644,
687 			      dec->procfs, &dec->hevc_cabac_clk_info.debug_rate_hz);
688 	mpp_procfs_create_u32("session_buffers", 0644,
689 			      dec->procfs, &mpp->session_max_buffers);
690 	proc_create_single("perf_sel_offset", 0444,
691 			   dec->procfs, rkvdec2_show_perf_sel_offset);
692 	mpp_procfs_create_u32("task_count", 0644,
693 			      dec->procfs, &mpp->task_index);
694 
695 	return 0;
696 }
697 #else
698 static inline int rkvdec2_procfs_remove(struct mpp_dev *mpp)
699 {
700 	return 0;
701 }
702 
703 static inline int rkvdec2_procfs_init(struct mpp_dev *mpp)
704 {
705 	return 0;
706 }
707 #endif
708 
709 #ifdef CONFIG_PM_DEVFREQ
710 static int rkvdec2_devfreq_target(struct device *dev,
711 				  unsigned long *freq, u32 flags)
712 {
713 	struct dev_pm_opp *opp;
714 	unsigned long target_volt, target_freq;
715 	int ret = 0;
716 
717 	struct rkvdec2_dev *dec = dev_get_drvdata(dev);
718 	struct devfreq *devfreq = dec->devfreq;
719 	struct devfreq_dev_status *stat = &devfreq->last_status;
720 	unsigned long old_clk_rate = stat->current_frequency;
721 
722 	opp = devfreq_recommended_opp(dev, freq, flags);
723 	if (IS_ERR(opp)) {
724 		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
725 		return PTR_ERR(opp);
726 	}
727 	target_freq = dev_pm_opp_get_freq(opp);
728 	target_volt = dev_pm_opp_get_voltage(opp);
729 	dev_pm_opp_put(opp);
730 
731 	if (old_clk_rate == target_freq) {
732 		dec->core_last_rate_hz = target_freq;
733 		if (dec->volt == target_volt)
734 			return ret;
735 		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
736 		if (ret) {
737 			dev_err(dev, "Cannot set voltage %lu uV\n",
738 				target_volt);
739 			return ret;
740 		}
741 		dec->volt = target_volt;
742 		return 0;
743 	}
744 
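	/*
	 * Classic DVFS ordering: raise the voltage before increasing the
	 * clock, and lower it only after the clock has dropped, so the rail
	 * never undershoots the running frequency.
	 */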
745 	if (old_clk_rate < target_freq) {
746 		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
747 		if (ret) {
748 			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
749 			return ret;
750 		}
751 	}
752 
753 	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_freq);
754 	clk_set_rate(dec->core_clk_info.clk, target_freq);
755 	stat->current_frequency = target_freq;
756 	dec->core_last_rate_hz = target_freq;
757 
758 	if (old_clk_rate > target_freq) {
759 		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
760 		if (ret) {
761 			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
762 			return ret;
763 		}
764 	}
765 	dec->volt = target_volt;
766 
767 	return ret;
768 }
769 
770 static int rkvdec2_devfreq_get_dev_status(struct device *dev,
771 					  struct devfreq_dev_status *stat)
772 {
773 	return 0;
774 }
775 
776 static int rkvdec2_devfreq_get_cur_freq(struct device *dev,
777 					unsigned long *freq)
778 {
779 	struct rkvdec2_dev *dec = dev_get_drvdata(dev);
780 
781 	*freq = dec->core_last_rate_hz;
782 
783 	return 0;
784 }
785 
786 static struct devfreq_dev_profile rkvdec2_devfreq_profile = {
787 	.target	= rkvdec2_devfreq_target,
788 	.get_dev_status	= rkvdec2_devfreq_get_dev_status,
789 	.get_cur_freq = rkvdec2_devfreq_get_cur_freq,
790 };
791 
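/*
 * The custom "vdec2_ondemand" governor simply reports dec->core_rate_hz,
 * which mpp_devfreq_set_core_rate() updates per task, so the driver picks
 * the target frequency itself and devfreq handles the OPP/voltage change.
 */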
792 static int devfreq_vdec2_ondemand_func(struct devfreq *df, unsigned long *freq)
793 {
794 	struct rkvdec2_dev *dec = df->data;
795 
796 	if (dec)
797 		*freq = dec->core_rate_hz;
798 	else
799 		*freq = df->previous_freq;
800 
801 	return 0;
802 }
803 
804 static int devfreq_vdec2_ondemand_handler(struct devfreq *devfreq,
805 					  unsigned int event, void *data)
806 {
807 	return 0;
808 }
809 
810 static struct devfreq_governor devfreq_vdec2_ondemand = {
811 	.name = "vdec2_ondemand",
812 	.get_target_freq = devfreq_vdec2_ondemand_func,
813 	.event_handler = devfreq_vdec2_ondemand_handler,
814 };
815 
816 static unsigned long rkvdec2_get_static_power(struct devfreq *devfreq,
817 					      unsigned long voltage)
818 {
819 	struct rkvdec2_dev *dec = devfreq->data;
820 
821 	if (!dec->model_data)
822 		return 0;
823 	else
824 		return rockchip_ipa_get_static_power(dec->model_data,
825 						     voltage);
826 }
827 
828 static struct devfreq_cooling_power vdec2_cooling_power_data = {
829 	.get_static_power = rkvdec2_get_static_power,
830 };
831 
832 static struct monitor_dev_profile vdec2_mdevp = {
833 	.type = MONITOR_TYPE_DEV,
834 	.low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
835 	.high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
836 };
837 
838 static int rkvdec2_devfreq_init(struct mpp_dev *mpp)
839 {
840 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
841 	struct clk *clk_core = dec->core_clk_info.clk;
842 	struct devfreq_cooling_power *vdec2_dcp = &vdec2_cooling_power_data;
843 	int ret = 0;
844 
845 	if (!clk_core)
846 		return 0;
847 
848 	dec->vdd = devm_regulator_get_optional(mpp->dev, "vdec");
849 	if (IS_ERR_OR_NULL(dec->vdd)) {
850 		if (PTR_ERR(dec->vdd) == -EPROBE_DEFER) {
851 			dev_warn(mpp->dev, "vdec regulator not ready, retry\n");
852 
853 			return -EPROBE_DEFER;
854 		}
855 		dev_info(mpp->dev, "no regulator, devfreq is disabled\n");
856 
857 		return 0;
858 	}
859 
860 	ret = rockchip_init_opp_table(mpp->dev, NULL, "leakage", "vdec");
861 	if (ret) {
862 		dev_err(mpp->dev, "failed to init_opp_table\n");
863 		return ret;
864 	}
865 
866 	ret = devfreq_add_governor(&devfreq_vdec2_ondemand);
867 	if (ret) {
868 		dev_err(mpp->dev, "failed to add vdec2_ondemand governor\n");
869 		goto governor_err;
870 	}
871 
872 	rkvdec2_devfreq_profile.initial_freq = clk_get_rate(clk_core);
873 
874 	dec->devfreq = devm_devfreq_add_device(mpp->dev,
875 					       &rkvdec2_devfreq_profile,
876 					       "vdec2_ondemand", (void *)dec);
877 	if (IS_ERR(dec->devfreq)) {
878 		ret = PTR_ERR(dec->devfreq);
879 		dec->devfreq = NULL;
880 		goto devfreq_err;
881 	}
882 	dec->devfreq->last_status.total_time = 1;
883 	dec->devfreq->last_status.busy_time = 1;
884 
885 	devfreq_register_opp_notifier(mpp->dev, dec->devfreq);
886 
887 	of_property_read_u32(mpp->dev->of_node, "dynamic-power-coefficient",
888 			     (u32 *)&vdec2_dcp->dyn_power_coeff);
889 	dec->model_data = rockchip_ipa_power_model_init(mpp->dev,
890 							"vdec_leakage");
891 	if (IS_ERR_OR_NULL(dec->model_data)) {
892 		dec->model_data = NULL;
893 		dev_err(mpp->dev, "failed to initialize power model\n");
894 	} else if (dec->model_data->dynamic_coefficient) {
895 		vdec2_dcp->dyn_power_coeff =
896 			dec->model_data->dynamic_coefficient;
897 	}
898 	if (!vdec2_dcp->dyn_power_coeff) {
899 		dev_err(mpp->dev, "failed to get dynamic-coefficient\n");
900 		goto out;
901 	}
902 
903 	dec->devfreq_cooling =
904 		of_devfreq_cooling_register_power(mpp->dev->of_node,
905 						  dec->devfreq, vdec2_dcp);
906 	if (IS_ERR_OR_NULL(dec->devfreq_cooling))
907 		dev_err(mpp->dev, "failed to register cooling device\n");
908 
909 	vdec2_mdevp.data = dec->devfreq;
910 	dec->mdev_info = rockchip_system_monitor_register(mpp->dev, &vdec2_mdevp);
911 	if (IS_ERR(dec->mdev_info)) {
912 		dev_dbg(mpp->dev, "without system monitor\n");
913 		dec->mdev_info = NULL;
914 	}
915 
916 out:
917 	return 0;
918 
919 devfreq_err:
920 	devfreq_remove_governor(&devfreq_vdec2_ondemand);
921 governor_err:
922 	dev_pm_opp_of_remove_table(mpp->dev);
923 
924 	return ret;
925 }
926 
927 static int rkvdec2_devfreq_remove(struct mpp_dev *mpp)
928 {
929 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
930 
931 	if (dec->mdev_info)
932 		rockchip_system_monitor_unregister(dec->mdev_info);
933 	if (dec->devfreq) {
934 		devfreq_unregister_opp_notifier(mpp->dev, dec->devfreq);
935 		dev_pm_opp_of_remove_table(mpp->dev);
936 		devfreq_remove_governor(&devfreq_vdec2_ondemand);
937 	}
938 
939 	return 0;
940 }
941 
942 void mpp_devfreq_set_core_rate(struct mpp_dev *mpp, enum MPP_CLOCK_MODE mode)
943 {
944 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
945 
946 	if (dec->devfreq) {
947 		unsigned long core_rate_hz;
948 
949 		mutex_lock(&dec->devfreq->lock);
950 		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info, mode);
951 		if (dec->core_rate_hz != core_rate_hz) {
952 			dec->core_rate_hz = core_rate_hz;
953 			update_devfreq(dec->devfreq);
954 		}
955 		mutex_unlock(&dec->devfreq->lock);
956 	}
957 
958 	mpp_clk_set_rate(&dec->core_clk_info, mode);
959 }
960 #else
961 static inline int rkvdec2_devfreq_init(struct mpp_dev *mpp)
962 {
963 	return 0;
964 }
965 
966 static inline int rkvdec2_devfreq_remove(struct mpp_dev *mpp)
967 {
968 	return 0;
969 }
970 
971 void mpp_devfreq_set_core_rate(struct mpp_dev *mpp, enum MPP_CLOCK_MODE mode)
972 {
973 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
974 
975 	mpp_clk_set_rate(&dec->core_clk_info, mode);
976 }
977 #endif
978 
979 static int rkvdec2_init(struct mpp_dev *mpp)
980 {
981 	int ret;
982 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
983 
984 	mutex_init(&dec->sip_reset_lock);
985 	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVDEC];
986 
987 	/* Get clock info from dtsi */
988 	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
989 	if (ret)
990 		mpp_err("failed on clk_get aclk_vcodec\n");
991 	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
992 	if (ret)
993 		mpp_err("failed on clk_get hclk_vcodec\n");
994 	ret = mpp_get_clk_info(mpp, &dec->core_clk_info, "clk_core");
995 	if (ret)
996 		mpp_err("failed on clk_get clk_core\n");
997 	ret = mpp_get_clk_info(mpp, &dec->cabac_clk_info, "clk_cabac");
998 	if (ret)
999 		mpp_err("failed on clk_get clk_cabac\n");
1000 	ret = mpp_get_clk_info(mpp, &dec->hevc_cabac_clk_info, "clk_hevc_cabac");
1001 	if (ret)
1002 		mpp_err("failed on clk_get clk_hevc_cabac\n");
1003 	/* Set default rates */
1004 	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
1005 	mpp_set_clk_info_rate_hz(&dec->core_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
1006 	mpp_set_clk_info_rate_hz(&dec->cabac_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
1007 	mpp_set_clk_info_rate_hz(&dec->hevc_cabac_clk_info, CLK_MODE_DEFAULT, 300 * MHZ);
1008 
1009 	dec->cycle_clk = &dec->aclk_info;
1010 	/* Get normal max workload from dtsi */
1011 	of_property_read_u32(mpp->dev->of_node,
1012 			     "rockchip,default-max-load", &dec->default_max_load);
1013 	/* Get reset control from dtsi */
1014 	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
1015 	if (!dec->rst_a)
1016 		mpp_err("No aclk reset resource defined\n");
1017 	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
1018 	if (!dec->rst_h)
1019 		mpp_err("No hclk reset resource defined\n");
1020 	dec->rst_niu_a = mpp_reset_control_get(mpp, RST_TYPE_NIU_A, "niu_a");
1021 	if (!dec->rst_niu_a)
1022 		mpp_err("No niu aclk reset resource defined\n");
1023 	dec->rst_niu_h = mpp_reset_control_get(mpp, RST_TYPE_NIU_H, "niu_h");
1024 	if (!dec->rst_niu_h)
1025 		mpp_err("No niu hclk reset resource defined\n");
1026 	dec->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
1027 	if (!dec->rst_core)
1028 		mpp_err("No core reset resource defined\n");
1029 	dec->rst_cabac = mpp_reset_control_get(mpp, RST_TYPE_CABAC, "video_cabac");
1030 	if (!dec->rst_cabac)
1031 		mpp_err("No cabac reset resource defined\n");
1032 	dec->rst_hevc_cabac = mpp_reset_control_get(mpp, RST_TYPE_HEVC_CABAC, "video_hevc_cabac");
1033 	if (!dec->rst_hevc_cabac)
1034 		mpp_err("No hevc cabac reset resource defined\n");
1035 
1036 	ret = rkvdec2_devfreq_init(mpp);
1037 	if (ret)
1038 		mpp_err("failed to add vdec devfreq\n");
1039 
1040 	return ret;
1041 }
1042 
1043 static int rkvdec2_rk3568_init(struct mpp_dev *mpp)
1044 {
1045 	int ret;
1046 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1047 
1048 	dec->fix = mpp_dma_alloc(mpp->dev, FIX_RK3568_BUF_SIZE);
1049 	ret = dec->fix ? 0 : -ENOMEM;
1050 	if (!ret)
1051 		rkvdec2_3568_hack_data_setup(dec->fix);
1052 	else
1053 		dev_err(mpp->dev, "failed to create buffer for hack\n");
1054 
1055 	ret = rkvdec2_init(mpp);
1056 
1057 	return ret;
1058 }
1059 
1060 static int rkvdec2_rk3568_exit(struct mpp_dev *mpp)
1061 {
1062 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1063 
1064 	rkvdec2_devfreq_remove(mpp);
1065 
1066 	if (dec->fix)
1067 		mpp_dma_free(dec->fix);
1068 
1069 	return 0;
1070 }
1071 
1072 static int rkvdec2_clk_on(struct mpp_dev *mpp)
1073 {
1074 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1075 
1076 	mpp_clk_safe_enable(dec->aclk_info.clk);
1077 	mpp_clk_safe_enable(dec->hclk_info.clk);
1078 	mpp_clk_safe_enable(dec->core_clk_info.clk);
1079 	mpp_clk_safe_enable(dec->cabac_clk_info.clk);
1080 	mpp_clk_safe_enable(dec->hevc_cabac_clk_info.clk);
1081 
1082 	return 0;
1083 }
1084 
1085 static int rkvdec2_clk_off(struct mpp_dev *mpp)
1086 {
1087 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1088 
1089 	clk_disable_unprepare(dec->aclk_info.clk);
1090 	clk_disable_unprepare(dec->hclk_info.clk);
1091 	clk_disable_unprepare(dec->core_clk_info.clk);
1092 	clk_disable_unprepare(dec->cabac_clk_info.clk);
1093 	clk_disable_unprepare(dec->hevc_cabac_clk_info.clk);
1094 
1095 	return 0;
1096 }
1097 
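/*
 * Pick a clock mode from the estimated workload: sum the pixels of this
 * task plus everything still pending, and switch to the advanced (higher)
 * clock rates once the total exceeds the DT-provided default-max-load.
 */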
1098 static int rkvdec2_get_freq(struct mpp_dev *mpp,
1099 			    struct mpp_task *mpp_task)
1100 {
1101 	u32 task_cnt;
1102 	u32 workload;
1103 	struct mpp_task *loop = NULL, *n;
1104 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1105 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1106 
1107 	/* if max load is not set, assume there is no advanced mode */
1108 	if (!dec->default_max_load || !task->pixels)
1109 		return 0;
1110 
1111 	task_cnt = 1;
1112 	workload = task->pixels;
1113 	/* calc workload in pending list */
1114 	mutex_lock(&mpp->queue->pending_lock);
1115 	list_for_each_entry_safe(loop, n,
1116 				 &mpp->queue->pending_list,
1117 				 queue_link) {
1118 		struct rkvdec2_task *loop_task = to_rkvdec2_task(loop);
1119 
1120 		task_cnt++;
1121 		workload += loop_task->pixels;
1122 	}
1123 	mutex_unlock(&mpp->queue->pending_lock);
1124 
1125 	if (workload > dec->default_max_load)
1126 		task->clk_mode = CLK_MODE_ADVANCED;
1127 
1128 	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
1129 		  task_cnt, workload, task->clk_mode);
1130 
1131 	return 0;
1132 }
1133 
1134 static int rkvdec2_set_freq(struct mpp_dev *mpp,
1135 			    struct mpp_task *mpp_task)
1136 {
1137 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1138 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1139 
1140 	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
1141 	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
1142 	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);
1143 	mpp_devfreq_set_core_rate(mpp, task->clk_mode);
1144 
1145 	return 0;
1146 }
1147 
1148 static int rkvdec2_soft_reset(struct mpp_dev *mpp)
1149 {
1150 	int ret = 0;
1151 
1152 	/*
1153 	 * for rk3528 and rk3562,
1154 	 * use the mmu reset instead of the rkvdec soft reset:
1155 	 * rkvdec is reset together when rkvdec_mmu is force reset
1156 	 */
1157 	ret = rockchip_iommu_force_reset(mpp->dev);
1158 	if (ret)
1159 		mpp_err("soft mmu reset fail, ret %d\n", ret);
1160 	mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1161 
1162 	return ret;
1163 
1164 }
1165 
1166 static int rkvdec2_sip_reset(struct mpp_dev *mpp)
1167 {
1168 	mpp_debug_enter();
1169 
1170 	if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
1171 		/* sip reset */
1172 		rockchip_dmcfreq_lock();
1173 		sip_smc_vpu_reset(0, 0, 0);
1174 		rockchip_dmcfreq_unlock();
1175 	} else {
1176 		rkvdec2_reset(mpp);
1177 	}
1178 
1179 	mpp_debug_leave();
1180 
1181 	return 0;
1182 }
1183 
1184 int rkvdec2_reset(struct mpp_dev *mpp)
1185 {
1186 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1187 	int ret = 0;
1188 
1189 	mpp_debug_enter();
1190 
1191 	/* safe reset first */
1192 	ret = rkvdec2_soft_reset(mpp);
1193 
1194 	/* cru reset */
1195 	if (ret && dec->rst_a && dec->rst_h) {
1196 		mpp_err("soft reset timeout, use cru reset\n");
1197 		mpp_pmu_idle_request(mpp, true);
1198 		mpp_safe_reset(dec->rst_niu_a);
1199 		mpp_safe_reset(dec->rst_niu_h);
1200 		mpp_safe_reset(dec->rst_a);
1201 		mpp_safe_reset(dec->rst_h);
1202 		mpp_safe_reset(dec->rst_core);
1203 		mpp_safe_reset(dec->rst_cabac);
1204 		mpp_safe_reset(dec->rst_hevc_cabac);
1205 		udelay(5);
1206 		mpp_safe_unreset(dec->rst_niu_h);
1207 		mpp_safe_unreset(dec->rst_niu_a);
1208 		mpp_safe_unreset(dec->rst_a);
1209 		mpp_safe_unreset(dec->rst_h);
1210 		mpp_safe_unreset(dec->rst_core);
1211 		mpp_safe_unreset(dec->rst_cabac);
1212 		mpp_safe_unreset(dec->rst_hevc_cabac);
1213 		mpp_pmu_idle_request(mpp, false);
1214 	}
1215 	mpp_debug_leave();
1216 
1217 	return 0;
1218 }
1219 
1220 static struct mpp_hw_ops rkvdec_v2_hw_ops = {
1221 	.init = rkvdec2_init,
1222 	.clk_on = rkvdec2_clk_on,
1223 	.clk_off = rkvdec2_clk_off,
1224 	.get_freq = rkvdec2_get_freq,
1225 	.set_freq = rkvdec2_set_freq,
1226 	.reset = rkvdec2_reset,
1227 };
1228 
1229 static struct mpp_hw_ops rkvdec_rk3568_hw_ops = {
1230 	.init = rkvdec2_rk3568_init,
1231 	.exit = rkvdec2_rk3568_exit,
1232 	.clk_on = rkvdec2_clk_on,
1233 	.clk_off = rkvdec2_clk_off,
1234 	.get_freq = rkvdec2_get_freq,
1235 	.set_freq = rkvdec2_set_freq,
1236 	.reset = rkvdec2_sip_reset,
1237 };
1238 
1239 static struct mpp_hw_ops rkvdec_rk3588_hw_ops = {
1240 	.init = rkvdec2_init,
1241 	.clk_on = rkvdec2_clk_on,
1242 	.clk_off = rkvdec2_clk_off,
1243 	.get_freq = rkvdec2_get_freq,
1244 	.set_freq = rkvdec2_set_freq,
1245 	.reset = rkvdec2_sip_reset,
1246 };
1247 
1248 static struct mpp_dev_ops rkvdec_v2_dev_ops = {
1249 	.alloc_task = rkvdec2_alloc_task,
1250 	.run = rkvdec2_run,
1251 	.irq = rkvdec2_irq,
1252 	.isr = rkvdec2_isr,
1253 	.finish = rkvdec2_finish,
1254 	.result = rkvdec2_result,
1255 	.free_task = rkvdec2_free_task,
1256 	.ioctl = rkvdec2_control,
1257 	.init_session = rkvdec2_init_session,
1258 	.free_session = rkvdec2_free_session,
1259 };
1260 
1261 static struct mpp_dev_ops rkvdec_rk3568_dev_ops = {
1262 	.alloc_task = rkvdec2_rk3568_alloc_task,
1263 	.run = rkvdec2_rk3568_run,
1264 	.irq = rkvdec2_irq,
1265 	.isr = rkvdec2_isr,
1266 	.finish = rkvdec2_finish,
1267 	.result = rkvdec2_result,
1268 	.free_task = rkvdec2_free_task,
1269 	.ioctl = rkvdec2_control,
1270 	.init_session = rkvdec2_init_session,
1271 	.free_session = rkvdec2_free_session,
1272 	.dump_dev = rkvdec_link_dump,
1273 };
1274 
1275 static const struct mpp_dev_var rkvdec_v2_data = {
1276 	.device_type = MPP_DEVICE_RKVDEC,
1277 	.hw_info = &rkvdec_v2_hw_info,
1278 	.trans_info = rkvdec_v2_trans,
1279 	.hw_ops = &rkvdec_v2_hw_ops,
1280 	.dev_ops = &rkvdec_v2_dev_ops,
1281 };
1282 
1283 static const struct mpp_dev_var rkvdec_rk3568_data = {
1284 	.device_type = MPP_DEVICE_RKVDEC,
1285 	.hw_info = &rkvdec_rk356x_hw_info,
1286 	.trans_info = rkvdec_v2_trans,
1287 	.hw_ops = &rkvdec_rk3568_hw_ops,
1288 	.dev_ops = &rkvdec_rk3568_dev_ops,
1289 };
1290 
1291 static const struct mpp_dev_var rkvdec_vdpu382_data = {
1292 	.device_type = MPP_DEVICE_RKVDEC,
1293 	.hw_info = &rkvdec_vdpu382_hw_info,
1294 	.trans_info = rkvdec_v2_trans,
1295 	.hw_ops = &rkvdec_v2_hw_ops,
1296 	.dev_ops = &rkvdec_v2_dev_ops,
1297 };
1298 
1299 static const struct mpp_dev_var rkvdec_rk3588_data = {
1300 	.device_type = MPP_DEVICE_RKVDEC,
1301 	.hw_info = &rkvdec_v2_hw_info,
1302 	.trans_info = rkvdec_v2_trans,
1303 	.hw_ops = &rkvdec_rk3588_hw_ops,
1304 	.dev_ops = &rkvdec_v2_dev_ops,
1305 };
1306 
1307 static const struct of_device_id mpp_rkvdec2_dt_match[] = {
1308 	{
1309 		.compatible = "rockchip,rkv-decoder-v2",
1310 		.data = &rkvdec_v2_data,
1311 	},
1312 #ifdef CONFIG_CPU_RK3568
1313 	{
1314 		.compatible = "rockchip,rkv-decoder-rk3568",
1315 		.data = &rkvdec_rk3568_data,
1316 	},
1317 #endif
1318 #ifdef CONFIG_CPU_RK3588
1319 	{
1320 		.compatible = "rockchip,rkv-decoder-v2-ccu",
1321 		.data = &rkvdec_rk3588_data,
1322 	},
1323 #endif
1324 #ifdef CONFIG_CPU_RK3528
1325 	{
1326 		.compatible = "rockchip,rkv-decoder-rk3528",
1327 		.data = &rkvdec_vdpu382_data,
1328 	},
1329 #endif
1330 #ifdef CONFIG_CPU_RK3562
1331 	{
1332 		.compatible = "rockchip,rkv-decoder-rk3562",
1333 		.data = &rkvdec_vdpu382_data,
1334 	},
1335 #endif
1336 	{},
1337 };
1338 
1339 static int rkvdec2_ccu_remove(struct device *dev)
1340 {
1341 	device_init_wakeup(dev, false);
1342 	pm_runtime_disable(dev);
1343 
1344 	return 0;
1345 }
1346 
1347 static int rkvdec2_ccu_probe(struct platform_device *pdev)
1348 {
1349 	struct rkvdec2_ccu *ccu;
1350 	struct resource *res;
1351 	struct device *dev = &pdev->dev;
1352 	u32 ccu_mode;
1353 
1354 	ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL);
1355 	if (!ccu)
1356 		return -ENOMEM;
1357 
1358 	ccu->dev = dev;
1359 	/* use task-level soft ccu by default */
1360 	ccu->ccu_mode = RKVDEC2_CCU_TASK_SOFT;
1361 	atomic_set(&ccu->power_enabled, 0);
1362 	INIT_LIST_HEAD(&ccu->unused_list);
1363 	INIT_LIST_HEAD(&ccu->used_list);
1364 	platform_set_drvdata(pdev, ccu);
1365 
1366 	if (!of_property_read_u32(dev->of_node, "rockchip,ccu-mode", &ccu_mode)) {
1367 		if (ccu_mode <= RKVDEC2_CCU_MODE_NULL || ccu_mode >= RKVDEC2_CCU_MODE_BUTT)
1368 			ccu_mode = RKVDEC2_CCU_TASK_SOFT;
1369 		ccu->ccu_mode = (enum RKVDEC2_CCU_MODE)ccu_mode;
1370 	}
1371 
1372 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ccu");
1373 	if (!res) {
1374 		dev_err(dev, "no memory resource defined\n");
1375 		return -ENODEV;
1376 	}
1377 
1378 	ccu->reg_base = devm_ioremap(dev, res->start, resource_size(res));
1379 	if (!ccu->reg_base) {
1380 		dev_err(dev, "ioremap failed for resource %pR\n", res);
1381 		return -ENODEV;
1382 	}
1383 
1384 	ccu->aclk_info.clk = devm_clk_get(dev, "aclk_ccu");
1385 	if (IS_ERR_OR_NULL(ccu->aclk_info.clk))
1386 		mpp_err("failed on clk_get ccu aclk\n");
1387 
1388 	ccu->rst_a = devm_reset_control_get(dev, "video_ccu");
1389 	if (!IS_ERR_OR_NULL(ccu->rst_a))
1390 		mpp_safe_unreset(ccu->rst_a);
1391 	else
1392 		mpp_err("failed on clk_get ccu reset\n");
1393 
1394 	/* power domain autosuspend delay 2s */
1395 	pm_runtime_set_autosuspend_delay(dev, 2000);
1396 	pm_runtime_use_autosuspend(dev);
1397 	device_init_wakeup(dev, true);
1398 	pm_runtime_enable(dev);
1399 
1400 	dev_info(dev, "ccu-mode: %d\n", ccu->ccu_mode);
1401 	return 0;
1402 }
1403 
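/*
 * RCB buffer layout: a fixed iova window taken from DT is reserved, its
 * first sram_size bytes are mapped onto on-chip SRAM and any remainder onto
 * freshly allocated DDR pages, so [iova, iova + rcb_size) looks contiguous
 * to the decoder.
 */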
1404 static int rkvdec2_alloc_rcbbuf(struct platform_device *pdev, struct rkvdec2_dev *dec)
1405 {
1406 	int ret;
1407 	u32 vals[2];
1408 	dma_addr_t iova;
1409 	u32 rcb_size, sram_size;
1410 	struct device_node *sram_np;
1411 	struct resource sram_res;
1412 	resource_size_t sram_start, sram_end;
1413 	struct iommu_domain *domain;
1414 	struct device *dev = &pdev->dev;
1415 
1416 	/* get rcb iova start and size */
1417 	ret = device_property_read_u32_array(dev, "rockchip,rcb-iova", vals, 2);
1418 	if (ret) {
1419 		dev_err(dev, "could not find property rcb-iova\n");
1420 		return ret;
1421 	}
1422 	iova = PAGE_ALIGN(vals[0]);
1423 	rcb_size = PAGE_ALIGN(vals[1]);
1424 	if (!rcb_size) {
1425 		dev_err(dev, "rcb_size invalid.\n");
1426 		return -EINVAL;
1427 	}
1428 	/* alloc reserve iova for rcb */
1429 	ret = iommu_dma_reserve_iova(dev, iova, rcb_size);
1430 	if (ret) {
1431 		dev_err(dev, "alloc rcb iova error.\n");
1432 		return ret;
1433 	}
1434 	/* get sram device node */
1435 	sram_np = of_parse_phandle(dev->of_node, "rockchip,sram", 0);
1436 	if (!sram_np) {
1437 		dev_err(dev, "could not find phandle sram\n");
1438 		return -ENODEV;
1439 	}
1440 	/* get sram start and size */
1441 	ret = of_address_to_resource(sram_np, 0, &sram_res);
1442 	of_node_put(sram_np);
1443 	if (ret) {
1444 		dev_err(dev, "find sram res error\n");
1445 		return ret;
1446 	}
1447 	/* check that sram start and size are PAGE_SIZE aligned */
1448 	sram_start = round_up(sram_res.start, PAGE_SIZE);
1449 	sram_end = round_down(sram_res.start + resource_size(&sram_res), PAGE_SIZE);
1450 	if (sram_end <= sram_start) {
1451 		dev_err(dev, "no available sram, phy_start %pa, phy_end %pa\n",
1452 			&sram_start, &sram_end);
1453 		return -ENOMEM;
1454 	}
1455 	sram_size = sram_end - sram_start;
1456 	sram_size = rcb_size < sram_size ? rcb_size : sram_size;
1457 	/* iova map to sram */
1458 	domain = dec->mpp.iommu_info->domain;
1459 	ret = iommu_map(domain, iova, sram_start, sram_size, IOMMU_READ | IOMMU_WRITE);
1460 	if (ret) {
1461 		dev_err(dev, "sram iommu_map error.\n");
1462 		return ret;
1463 	}
1464 	/* alloc dma for the remaining buffer, sram + dma */
1465 	if (sram_size < rcb_size) {
1466 		struct page *page;
1467 		size_t page_size = PAGE_ALIGN(rcb_size - sram_size);
1468 
1469 		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(page_size));
1470 		if (!page) {
1471 			dev_err(dev, "unable to allocate pages\n");
1472 			ret = -ENOMEM;
1473 			goto err_sram_map;
1474 		}
1475 		/* iova map to dma */
1476 		ret = iommu_map(domain, iova + sram_size, page_to_phys(page),
1477 				page_size, IOMMU_READ | IOMMU_WRITE);
1478 		if (ret) {
1479 			dev_err(dev, "page iommu_map error.\n");
1480 			__free_pages(page, get_order(page_size));
1481 			goto err_sram_map;
1482 		}
1483 		dec->rcb_page = page;
1484 	}
1485 	dec->sram_size = sram_size;
1486 	dec->rcb_size = rcb_size;
1487 	dec->rcb_iova = iova;
1488 	dev_info(dev, "sram_start %pa\n", &sram_start);
1489 	dev_info(dev, "rcb_iova %pad\n", &dec->rcb_iova);
1490 	dev_info(dev, "sram_size %u\n", dec->sram_size);
1491 	dev_info(dev, "rcb_size %u\n", dec->rcb_size);
1492 
1493 	ret = of_property_read_u32(dev->of_node, "rockchip,rcb-min-width", &dec->rcb_min_width);
1494 	if (!ret && dec->rcb_min_width)
1495 		dev_info(dev, "min_width %u\n", dec->rcb_min_width);
1496 
1497 	/* read rcb_info if present */
1498 	dec->rcb_info_count = device_property_count_u32(dev, "rockchip,rcb-info");
1499 	if (dec->rcb_info_count > 0 &&
1500 	    dec->rcb_info_count <= (sizeof(dec->rcb_infos) / sizeof(u32))) {
1501 		int i;
1502 
1503 		ret = device_property_read_u32_array(dev, "rockchip,rcb-info",
1504 						     dec->rcb_infos, dec->rcb_info_count);
1505 		if (!ret) {
1506 			dev_info(dev, "rcb_info_count %u\n", dec->rcb_info_count);
1507 			for (i = 0; i < dec->rcb_info_count; i += 2)
1508 				dev_info(dev, "[%u, %u]\n",
1509 					 dec->rcb_infos[i], dec->rcb_infos[i+1]);
1510 		}
1511 	}
1512 
1513 	return 0;
1514 
1515 err_sram_map:
1516 	iommu_unmap(domain, iova, sram_size);
1517 
1518 	return ret;
1519 }
1520 
1521 static int rkvdec2_core_probe(struct platform_device *pdev)
1522 {
1523 	int ret;
1524 	struct rkvdec2_dev *dec;
1525 	struct mpp_dev *mpp;
1526 	struct device *dev = &pdev->dev;
1527 	irq_handler_t irq_proc = NULL;
1528 
1529 	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
1530 	if (!dec)
1531 		return -ENOMEM;
1532 
1533 	mpp = &dec->mpp;
1534 	platform_set_drvdata(pdev, mpp);
1535 	mpp->is_irq_startup = false;
1536 	if (dev->of_node) {
1537 		struct device_node *np = pdev->dev.of_node;
1538 		const struct of_device_id *match;
1539 
1540 		match = of_match_node(mpp_rkvdec2_dt_match, dev->of_node);
1541 		if (match)
1542 			mpp->var = (struct mpp_dev_var *)match->data;
1543 		mpp->core_id = of_alias_get_id(np, "rkvdec");
1544 	}
1545 
1546 	ret = mpp_dev_probe(mpp, pdev);
1547 	if (ret) {
1548 		dev_err(dev, "probe sub driver failed\n");
1549 		return ret;
1550 	}
1551 	dec->mmu_base = ioremap(dec->mpp.io_base + 0x600, 0x80);
1552 	if (!dec->mmu_base)
1553 		dev_err(dev, "mmu base map failed!\n");
1554 
1555 	/* attach core to ccu */
1556 	ret = rkvdec2_attach_ccu(dev, dec);
1557 	if (ret) {
1558 		dev_err(dev, "attach ccu failed\n");
1559 		return ret;
1560 	}
1561 
1562 	/* alloc rcb buffer */
1563 	rkvdec2_alloc_rcbbuf(pdev, dec);
1564 
1565 	/* set device for link */
1566 	ret = rkvdec2_ccu_link_init(pdev, dec);
1567 	if (ret)
1568 		return ret;
1569 
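	/*
	 * Wire up the scheduling mode: with a soft CCU the kernel dispatches
	 * tasks to the cores one by one, while a hard CCU chains tasks
	 * through a hardware link table (allocated once, by core 0).
	 */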
1570 	mpp->dev_ops->alloc_task = rkvdec2_ccu_alloc_task;
1571 	if (dec->ccu->ccu_mode == RKVDEC2_CCU_TASK_SOFT) {
1572 		mpp->dev_ops->task_worker = rkvdec2_soft_ccu_worker;
1573 		irq_proc = rkvdec2_soft_ccu_irq;
1574 	} else if (dec->ccu->ccu_mode == RKVDEC2_CCU_TASK_HARD) {
1575 		if (mpp->core_id == 0 && mpp->task_capacity > 1) {
1576 			dec->link_dec->task_capacity = mpp->task_capacity;
1577 			ret = rkvdec2_ccu_alloc_table(dec, dec->link_dec);
1578 			if (ret)
1579 				return ret;
1580 		}
1581 		mpp->dev_ops->task_worker = rkvdec2_hard_ccu_worker;
1582 		irq_proc = rkvdec2_hard_ccu_irq;
1583 	}
1584 	mpp->iommu_info->hdl = rkvdec2_ccu_iommu_fault_handle;
1585 	kthread_init_work(&mpp->work, mpp->dev_ops->task_worker);
1586 
1587 	/* get irq request */
1588 	ret = devm_request_threaded_irq(dev, mpp->irq, irq_proc, NULL,
1589 					IRQF_SHARED, dev_name(dev), mpp);
1590 	if (ret) {
1591 		dev_err(dev, "failed to register interrupt handler\n");
1592 		return -EINVAL;
1593 	}
1594 	/* make sure mpp->irq is started before it can be enabled/disabled */
1595 	mpp->is_irq_startup = true;
1596 
1597 	mpp->session_max_buffers = RKVDEC_SESSION_MAX_BUFFERS;
1598 	rkvdec2_procfs_init(mpp);
1599 
1600 	/* if this is the main core, register with the mpp service */
1601 	if (mpp->core_id == 0)
1602 		mpp_dev_register_srv(mpp, mpp->srv);
1603 
1604 	return ret;
1605 }
1606 
1607 static int rkvdec2_probe_default(struct platform_device *pdev)
1608 {
1609 	struct device *dev = &pdev->dev;
1610 	struct rkvdec2_dev *dec = NULL;
1611 	struct mpp_dev *mpp = NULL;
1612 	const struct of_device_id *match = NULL;
1613 	int ret = 0;
1614 
1615 	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
1616 	if (!dec)
1617 		return -ENOMEM;
1618 
1619 	mpp = &dec->mpp;
1620 	platform_set_drvdata(pdev, mpp);
1621 
1622 	if (pdev->dev.of_node) {
1623 		match = of_match_node(mpp_rkvdec2_dt_match, pdev->dev.of_node);
1624 		if (match)
1625 			mpp->var = (struct mpp_dev_var *)match->data;
1626 	}
1627 
1628 	ret = mpp_dev_probe(mpp, pdev);
1629 	if (ret) {
1630 		dev_err(dev, "probe sub driver failed\n");
1631 		return ret;
1632 	}
1633 
1634 	rkvdec2_alloc_rcbbuf(pdev, dec);
1635 	rkvdec2_link_init(pdev, dec);
1636 
1637 	if (dec->link_dec) {
1638 		ret = devm_request_threaded_irq(dev, mpp->irq,
1639 						rkvdec2_link_irq_proc, NULL,
1640 						IRQF_SHARED, dev_name(dev), mpp);
1641 		mpp->dev_ops->process_task = rkvdec2_link_process_task;
1642 		mpp->dev_ops->wait_result = rkvdec2_link_wait_result;
1643 		mpp->dev_ops->task_worker = rkvdec2_link_worker;
1644 		mpp->dev_ops->deinit = rkvdec2_link_session_deinit;
1645 		kthread_init_work(&mpp->work, rkvdec2_link_worker);
1646 	} else {
1647 		ret = devm_request_threaded_irq(dev, mpp->irq,
1648 						mpp_dev_irq, mpp_dev_isr_sched,
1649 						IRQF_SHARED, dev_name(dev), mpp);
1650 	}
1651 	if (ret) {
1652 		dev_err(dev, "failed to register interrupt handler\n");
1653 		return -EINVAL;
1654 	}
1655 
1656 	mpp->session_max_buffers = RKVDEC_SESSION_MAX_BUFFERS;
1657 	rkvdec2_procfs_init(mpp);
1658 	rkvdec2_link_procfs_init(mpp);
1659 	/* register current device to mpp service */
1660 	mpp_dev_register_srv(mpp, mpp->srv);
1661 
1662 	return ret;
1663 }
1664 
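/*
 * One driver, three node flavours: "ccu" nodes probe the shared multi-core
 * control unit, "core" nodes attach a decoder core to that ccu, and
 * anything else probes as a standalone decoder.
 */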
1665 static int rkvdec2_probe(struct platform_device *pdev)
1666 {
1667 	int ret;
1668 	struct device *dev = &pdev->dev;
1669 	struct device_node *np = dev->of_node;
1670 
1671 	dev_info(dev, "%s, probing start\n", np->name);
1672 
1673 	if (strstr(np->name, "ccu"))
1674 		ret = rkvdec2_ccu_probe(pdev);
1675 	else if (strstr(np->name, "core"))
1676 		ret = rkvdec2_core_probe(pdev);
1677 	else
1678 		ret = rkvdec2_probe_default(pdev);
1679 
1680 	dev_info(dev, "probing finish\n");
1681 
1682 	return ret;
1683 }
1684 
1685 static int rkvdec2_free_rcbbuf(struct platform_device *pdev, struct rkvdec2_dev *dec)
1686 {
1687 	struct iommu_domain *domain;
1688 
1689 	if (dec->rcb_page) {
1690 		size_t page_size = PAGE_ALIGN(dec->rcb_size - dec->sram_size);
1691 		int order = min(get_order(page_size), MAX_ORDER);
1692 
1693 		__free_pages(dec->rcb_page, order);
1694 	}
1695 	if (dec->rcb_iova) {
1696 		domain = dec->mpp.iommu_info->domain;
1697 		iommu_unmap(domain, dec->rcb_iova, dec->rcb_size);
1698 	}
1699 
1700 	return 0;
1701 }
1702 
1703 static int rkvdec2_remove(struct platform_device *pdev)
1704 {
1705 	struct device *dev = &pdev->dev;
1706 
1707 	if (strstr(dev_name(dev), "ccu")) {
1708 		dev_info(dev, "remove ccu device\n");
1709 		rkvdec2_ccu_remove(dev);
1710 	} else {
1711 		struct mpp_dev *mpp = dev_get_drvdata(dev);
1712 		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1713 
1714 		dev_info(dev, "remove device\n");
1715 		if (dec->mmu_base) {
1716 			iounmap(dec->mmu_base);
1717 			dec->mmu_base = NULL;
1718 		}
1719 		rkvdec2_free_rcbbuf(pdev, dec);
1720 		mpp_dev_remove(mpp);
1721 		rkvdec2_procfs_remove(mpp);
1722 		rkvdec2_link_remove(mpp, dec->link_dec);
1723 	}
1724 
1725 	return 0;
1726 }
1727 
1728 static void rkvdec2_shutdown(struct platform_device *pdev)
1729 {
1730 	struct device *dev = &pdev->dev;
1731 
1732 	if (!strstr(dev_name(dev), "ccu"))
1733 		mpp_dev_shutdown(pdev);
1734 }
1735 
1736 static int __maybe_unused rkvdec2_runtime_suspend(struct device *dev)
1737 {
1738 	if (strstr(dev_name(dev), "ccu")) {
1739 		struct rkvdec2_ccu *ccu = dev_get_drvdata(dev);
1740 
1741 		mpp_clk_safe_disable(ccu->aclk_info.clk);
1742 	} else {
1743 		struct mpp_dev *mpp = dev_get_drvdata(dev);
1744 
1745 		if (mpp->is_irq_startup) {
1746 			/* disable core irq */
1747 			disable_irq(mpp->irq);
1748 			if (mpp->iommu_info && mpp->iommu_info->got_irq)
1749 				/* disable mmu irq */
1750 				disable_irq(mpp->iommu_info->irq);
1751 		}
1752 
1753 		if (mpp->hw_ops->clk_off)
1754 			mpp->hw_ops->clk_off(mpp);
1755 	}
1756 
1757 	return 0;
1758 }
1759 
1760 static int __maybe_unused rkvdec2_runtime_resume(struct device *dev)
1761 {
1762 	if (strstr(dev_name(dev), "ccu")) {
1763 		struct rkvdec2_ccu *ccu = dev_get_drvdata(dev);
1764 
1765 		mpp_clk_safe_enable(ccu->aclk_info.clk);
1766 	} else {
1767 		struct mpp_dev *mpp = dev_get_drvdata(dev);
1768 
1769 		if (mpp->hw_ops->clk_on)
1770 			mpp->hw_ops->clk_on(mpp);
1771 		if (mpp->is_irq_startup) {
1772 			/* enable core irq */
1773 			enable_irq(mpp->irq);
1774 			/* enable mmu irq */
1775 			if (mpp->iommu_info && mpp->iommu_info->got_irq)
1776 				enable_irq(mpp->iommu_info->irq);
1777 		}
1778 
1779 	}
1780 
1781 	return 0;
1782 }
1783 
1784 static const struct dev_pm_ops rkvdec2_pm_ops = {
1785 	SET_RUNTIME_PM_OPS(rkvdec2_runtime_suspend, rkvdec2_runtime_resume, NULL)
1786 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
1787 };
1788 
1789 struct platform_driver rockchip_rkvdec2_driver = {
1790 	.probe = rkvdec2_probe,
1791 	.remove = rkvdec2_remove,
1792 	.shutdown = rkvdec2_shutdown,
1793 	.driver = {
1794 		.name = RKVDEC_DRIVER_NAME,
1795 		.of_match_table = of_match_ptr(mpp_rkvdec2_dt_match),
1796 		.pm = &rkvdec2_pm_ops,
1797 	},
1798 };
1799 EXPORT_SYMBOL(rockchip_rkvdec2_driver);
1800