xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_rkvdec.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Alpha Lin, alpha.lin@rock-chips.com
7  *	Randy Li, randy.li@rock-chips.com
8  *	Ding Wei, leo.ding@rock-chips.com
9  *
10  */
11 #include <asm/cacheflush.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/devfreq.h>
15 #include <linux/devfreq_cooling.h>
16 #include <linux/gfp.h>
17 #include <linux/interrupt.h>
18 #include <linux/iopoll.h>
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/of_platform.h>
22 #include <linux/slab.h>
23 #include <linux/uaccess.h>
24 #include <linux/regmap.h>
25 #include <linux/kernel.h>
26 #include <linux/thermal.h>
27 #include <linux/notifier.h>
28 #include <linux/proc_fs.h>
29 #include <linux/rockchip/rockchip_sip.h>
30 #include <linux/regulator/consumer.h>
31 
32 #include <soc/rockchip/pm_domains.h>
33 #include <soc/rockchip/rockchip_sip.h>
34 #include <soc/rockchip/rockchip_opp_select.h>
35 
36 #include "mpp_debug.h"
37 #include "mpp_common.h"
38 #include "mpp_iommu.h"
39 #include <soc/rockchip/rockchip_iommu.h>
40 
41 #include "hack/mpp_hack_px30.h"
42 
43 #define RKVDEC_DRIVER_NAME		"mpp_rkvdec"
44 
45 #define IOMMU_GET_BUS_ID(x)		(((x) >> 6) & 0x1f)
46 #define IOMMU_PAGE_SIZE			SZ_4K
47 
48 #define	RKVDEC_SESSION_MAX_BUFFERS	40
49 /* The maximum number of registers across all versions */
50 #define HEVC_DEC_REG_NUM		68
51 #define HEVC_DEC_REG_HW_ID_INDEX	0
52 #define HEVC_DEC_REG_START_INDEX	0
53 #define HEVC_DEC_REG_END_INDEX		67
54 
55 #define RKVDEC_V1_REG_NUM		78
56 #define RKVDEC_V1_REG_HW_ID_INDEX	0
57 #define RKVDEC_V1_REG_START_INDEX	0
58 #define RKVDEC_V1_REG_END_INDEX		77
59 
60 #define RKVDEC_V2_REG_NUM		109
61 #define RKVDEC_V2_REG_HW_ID_INDEX	0
62 #define RKVDEC_V2_REG_START_INDEX	0
63 #define RKVDEC_V2_REG_END_INDEX		108
64 
65 #define RKVDEC_REG_INT_EN		0x004
66 #define RKVDEC_REG_INT_EN_INDEX		(1)
67 #define RKVDEC_WR_DDR_ALIGN_EN		BIT(23)
68 #define RKVDEC_FORCE_SOFT_RESET_VALID	BIT(21)
69 #define RKVDEC_SOFTWARE_RESET_EN	BIT(20)
70 #define RKVDEC_INT_COLMV_REF_ERROR	BIT(17)
71 #define RKVDEC_INT_BUF_EMPTY		BIT(16)
72 #define RKVDEC_INT_TIMEOUT		BIT(15)
73 #define RKVDEC_INT_STRM_ERROR		BIT(14)
74 #define RKVDEC_INT_BUS_ERROR		BIT(13)
75 #define RKVDEC_DEC_INT_RAW		BIT(9)
76 #define RKVDEC_DEC_INT			BIT(8)
77 #define RKVDEC_DEC_TIMEOUT_EN		BIT(5)
78 #define RKVDEC_DEC_IRQ_DIS		BIT(4)
79 #define RKVDEC_CLOCK_GATE_EN		BIT(1)
80 #define RKVDEC_DEC_START		BIT(0)
81 
82 #define RKVDEC_REG_SYS_CTRL		0x008
83 #define RKVDEC_REG_SYS_CTRL_INDEX	(2)
84 #define RKVDEC_RGE_WIDTH_INDEX		(3)
85 #define RKVDEC_GET_FORMAT(x)		(((x) >> 20) & 0x3)
86 #define REVDEC_GET_PROD_NUM(x)		(((x) >> 16) & 0xffff)
87 #define RKVDEC_GET_WIDTH(x)		(((x) & 0x3ff) << 4)
88 #define RKVDEC_FMT_H265D		(0)
89 #define RKVDEC_FMT_H264D		(1)
90 #define RKVDEC_FMT_VP9D			(2)
91 
92 #define RKVDEC_REG_RLC_BASE		0x010
93 #define RKVDEC_REG_RLC_BASE_INDEX	(4)
94 
95 #define RKVDEC_RGE_YSTRDE_INDEX		(8)
96 #define RKVDEC_GET_YSTRDE(x)		(((x) & 0x1fffff) << 4)
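/* e.g. a register field value of 120 gives 120 << 4 = 1920 (kept in task->pixels as the ystride) */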
97 
98 #define RKVDEC_REG_PPS_BASE		0x0a0
99 #define RKVDEC_REG_PPS_BASE_INDEX	(42)
100 
101 #define RKVDEC_REG_VP9_REFCOLMV_BASE		0x0d0
102 #define RKVDEC_REG_VP9_REFCOLMV_BASE_INDEX	(52)
103 
104 #define RKVDEC_REG_CACHE0_SIZE_BASE	0x41c
105 #define RKVDEC_REG_CACHE1_SIZE_BASE	0x45c
106 #define RKVDEC_REG_CLR_CACHE0_BASE	0x410
107 #define RKVDEC_REG_CLR_CACHE1_BASE	0x450
108 
109 #define RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS	BIT(0)
110 #define RKVDEC_CACHE_PERMIT_READ_ALLOCATE	BIT(1)
111 #define RKVDEC_CACHE_LINE_SIZE_64_BYTES		BIT(4)
112 
113 #define RKVDEC_POWER_CTL_INDEX		(99)
114 #define RKVDEC_POWER_CTL_BASE		0x018c
115 
116 #define FALLBACK_STATIC_TEMPERATURE	55000
117 
118 #define to_rkvdec_task(task)		\
119 		container_of(task, struct rkvdec_task, mpp_task)
120 #define to_rkvdec_dev(dev)		\
121 		container_of(dev, struct rkvdec_dev, mpp)
122 
123 enum RKVDEC_MODE {
124 	RKVDEC_MODE_NONE,
125 	RKVDEC_MODE_ONEFRAME,
126 	RKVDEC_MODE_BUTT
127 };
128 
129 enum SET_CLK_EVENT {
130 	EVENT_POWER_ON = 0,
131 	EVENT_POWER_OFF,
132 	EVENT_ADJUST,
133 	EVENT_THERMAL,
134 	EVENT_BUTT,
135 };
136 
137 struct rkvdec_task {
138 	struct mpp_task mpp_task;
139 
140 	enum RKVDEC_MODE link_mode;
141 	enum MPP_CLOCK_MODE clk_mode;
142 	u32 reg[RKVDEC_V2_REG_NUM];
143 	struct reg_offset_info off_inf;
144 
145 	u32 strm_addr;
146 	u32 irq_status;
147 	/* req for current task */
148 	u32 w_req_cnt;
149 	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
150 	u32 r_req_cnt;
151 	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
152 	/* ystride info */
153 	u32 pixels;
154 };
155 
156 struct rkvdec_dev {
157 	struct mpp_dev mpp;
158 	/* sip smc reset lock */
159 	struct mutex sip_reset_lock;
160 
161 	struct mpp_clk_info aclk_info;
162 	struct mpp_clk_info hclk_info;
163 	struct mpp_clk_info core_clk_info;
164 	struct mpp_clk_info cabac_clk_info;
165 	struct mpp_clk_info hevc_cabac_clk_info;
166 	u32 default_max_load;
167 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
168 	struct proc_dir_entry *procfs;
169 #endif
170 	struct reset_control *rst_a;
171 	struct reset_control *rst_h;
172 	struct reset_control *rst_niu_a;
173 	struct reset_control *rst_niu_h;
174 	struct reset_control *rst_core;
175 	struct reset_control *rst_cabac;
176 	struct reset_control *rst_hevc_cabac;
177 
178 	unsigned long aux_iova;
179 	struct page *aux_page;
180 #ifdef CONFIG_PM_DEVFREQ
181 	struct regulator *vdd;
182 	struct devfreq *devfreq;
183 	struct devfreq *parent_devfreq;
184 	struct notifier_block devfreq_nb;
185 	struct thermal_cooling_device *devfreq_cooling;
186 	struct thermal_zone_device *thermal_zone;
187 	u32 static_power_coeff;
188 	s32 ts[4];
189 	/* set clk lock */
190 	struct mutex set_clk_lock;
191 	unsigned int thermal_div;
192 	unsigned long volt;
193 	unsigned long devf_aclk_rate_hz;
194 	unsigned long devf_core_rate_hz;
195 	unsigned long devf_cabac_rate_hz;
196 #endif
197 	/* record info of the last task */
198 	u32 last_fmt;
199 	bool had_reset;
200 	bool grf_changed;
201 };
202 
203 /*
204  * hardware information
205  */
206 static struct mpp_hw_info rk_hevcdec_hw_info = {
207 	.reg_num = HEVC_DEC_REG_NUM,
208 	.reg_id = HEVC_DEC_REG_HW_ID_INDEX,
209 	.reg_start = HEVC_DEC_REG_START_INDEX,
210 	.reg_end = HEVC_DEC_REG_END_INDEX,
211 	.reg_en = RKVDEC_REG_INT_EN_INDEX,
212 };
213 
214 static struct mpp_hw_info rkvdec_v1_hw_info = {
215 	.reg_num = RKVDEC_V1_REG_NUM,
216 	.reg_id = RKVDEC_V1_REG_HW_ID_INDEX,
217 	.reg_start = RKVDEC_V1_REG_START_INDEX,
218 	.reg_end = RKVDEC_V1_REG_END_INDEX,
219 	.reg_en = RKVDEC_REG_INT_EN_INDEX,
220 };
221 
222 /*
223  * file handle translation information
224  */
225 static const u16 trans_tbl_h264d[] = {
226 	4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
227 	23, 24, 41, 42, 43, 48, 75
228 };
229 
230 static const u16 trans_tbl_h265d[] = {
231 	4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
232 	23, 24, 42, 43
233 };
234 
235 static const u16 trans_tbl_vp9d[] = {
236 	4, 6, 7, 11, 12, 13, 14, 15, 16
237 };
238 
239 static struct mpp_trans_info rk_hevcdec_trans[] = {
240 	[RKVDEC_FMT_H265D] = {
241 		.count = ARRAY_SIZE(trans_tbl_h265d),
242 		.table = trans_tbl_h265d,
243 	},
244 };
245 
246 static struct mpp_trans_info rkvdec_v1_trans[] = {
247 	[RKVDEC_FMT_H265D] = {
248 		.count = ARRAY_SIZE(trans_tbl_h265d),
249 		.table = trans_tbl_h265d,
250 	},
251 	[RKVDEC_FMT_H264D] = {
252 		.count = ARRAY_SIZE(trans_tbl_h264d),
253 		.table = trans_tbl_h264d,
254 	},
255 	[RKVDEC_FMT_VP9D] = {
256 		.count = ARRAY_SIZE(trans_tbl_vp9d),
257 		.table = trans_tbl_vp9d,
258 	},
259 };
260 
261 #ifdef CONFIG_PM_DEVFREQ
262 static int rkvdec_devf_set_clk(struct rkvdec_dev *dec,
263 			       unsigned long aclk_rate_hz,
264 			       unsigned long core_rate_hz,
265 			       unsigned long cabac_rate_hz,
266 			       unsigned int event)
267 {
268 	struct clk *aclk = dec->aclk_info.clk;
269 	struct clk *clk_core = dec->core_clk_info.clk;
270 	struct clk *clk_cabac = dec->cabac_clk_info.clk;
271 
272 	mutex_lock(&dec->set_clk_lock);
273 
274 	switch (event) {
275 	case EVENT_POWER_ON:
276 		clk_set_rate(aclk, dec->devf_aclk_rate_hz);
277 		clk_set_rate(clk_core, dec->devf_core_rate_hz);
278 		clk_set_rate(clk_cabac, dec->devf_cabac_rate_hz);
279 		dec->thermal_div = 0;
280 		break;
281 	case EVENT_POWER_OFF:
282 		clk_set_rate(aclk, aclk_rate_hz);
283 		clk_set_rate(clk_core, core_rate_hz);
284 		clk_set_rate(clk_cabac, cabac_rate_hz);
285 		dec->thermal_div = 0;
286 		break;
287 	case EVENT_ADJUST:
288 		if (!dec->thermal_div) {
289 			clk_set_rate(aclk, aclk_rate_hz);
290 			clk_set_rate(clk_core, core_rate_hz);
291 			clk_set_rate(clk_cabac, cabac_rate_hz);
292 		} else {
293 			clk_set_rate(aclk,
294 				     aclk_rate_hz / dec->thermal_div);
295 			clk_set_rate(clk_core,
296 				     core_rate_hz / dec->thermal_div);
297 			clk_set_rate(clk_cabac,
298 				     cabac_rate_hz / dec->thermal_div);
299 		}
300 		dec->devf_aclk_rate_hz = aclk_rate_hz;
301 		dec->devf_core_rate_hz = core_rate_hz;
302 		dec->devf_cabac_rate_hz = cabac_rate_hz;
303 		break;
304 	case EVENT_THERMAL:
305 		dec->thermal_div = dec->devf_aclk_rate_hz / aclk_rate_hz;
306 		if (dec->thermal_div > 4)
307 			dec->thermal_div = 4;
308 		if (dec->thermal_div) {
309 			clk_set_rate(aclk,
310 				     dec->devf_aclk_rate_hz / dec->thermal_div);
311 			clk_set_rate(clk_core,
312 				     dec->devf_core_rate_hz / dec->thermal_div);
313 			clk_set_rate(clk_cabac,
314 				     dec->devf_cabac_rate_hz / dec->thermal_div);
315 		}
316 		break;
317 	}
318 
319 	mutex_unlock(&dec->set_clk_lock);
320 
321 	return 0;
322 }
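/*
 * Illustration of the EVENT_THERMAL case above (rates are made up): if the
 * last adjusted aclk rate was 400 MHz and devfreq asks for 100 MHz, then
 * thermal_div = 400 / 100 = 4 (also the cap), and aclk/core/cabac are all
 * throttled to a quarter of their last requested rates until a power or
 * adjust event clears the divider.
 */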
323 
324 static int devfreq_target(struct device *dev,
325 			  unsigned long *freq, u32 flags)
326 {
327 	int ret = 0;
328 	unsigned int clk_event;
329 	struct dev_pm_opp *opp;
330 	unsigned long target_volt, target_freq;
331 	unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;
332 
333 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
334 	struct devfreq *devfreq = dec->devfreq;
335 	struct devfreq_dev_status *stat = &devfreq->last_status;
336 	unsigned long old_clk_rate = stat->current_frequency;
337 
338 	opp = devfreq_recommended_opp(dev, freq, flags);
339 	if (IS_ERR(opp)) {
340 		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
341 		return PTR_ERR(opp);
342 	}
343 	target_freq = dev_pm_opp_get_freq(opp);
344 	target_volt = dev_pm_opp_get_voltage(opp);
345 	dev_pm_opp_put(opp);
346 
347 	if (target_freq < *freq) {
348 		clk_event = EVENT_THERMAL;
349 		aclk_rate_hz = target_freq;
350 		core_rate_hz = target_freq;
351 		cabac_rate_hz = target_freq;
352 	} else {
353 		clk_event = stat->busy_time ? EVENT_POWER_ON : EVENT_POWER_OFF;
354 		aclk_rate_hz = dec->devf_aclk_rate_hz;
355 		core_rate_hz = dec->devf_core_rate_hz;
356 		cabac_rate_hz = dec->devf_cabac_rate_hz;
357 	}
358 
359 	if (old_clk_rate == target_freq) {
360 		if (dec->volt == target_volt)
361 			return ret;
362 		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
363 		if (ret) {
364 			dev_err(dev, "Cannot set voltage %lu uV\n",
365 				target_volt);
366 			return ret;
367 		}
368 		dec->volt = target_volt;
369 		return 0;
370 	}
371 
372 	if (old_clk_rate < target_freq) {
373 		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
374 		if (ret) {
375 			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
376 			return ret;
377 		}
378 	}
379 
380 	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_freq);
381 	rkvdec_devf_set_clk(dec, aclk_rate_hz, core_rate_hz, cabac_rate_hz, clk_event);
382 	stat->current_frequency = target_freq;
383 
384 	if (old_clk_rate > target_freq) {
385 		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
386 		if (ret) {
387 			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
388 			return ret;
389 		}
390 	}
391 	dec->volt = target_volt;
392 
393 	return ret;
394 }
395 
396 static int devfreq_get_cur_freq(struct device *dev,
397 				unsigned long *freq)
398 {
399 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
400 
401 	*freq = clk_get_rate(dec->aclk_info.clk);
402 
403 	return 0;
404 }
405 
406 static int devfreq_get_dev_status(struct device *dev,
407 				  struct devfreq_dev_status *stat)
408 {
409 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
410 	struct devfreq *devfreq = dec->devfreq;
411 
412 	memcpy(stat, &devfreq->last_status, sizeof(*stat));
413 
414 	return 0;
415 }
416 
417 static struct devfreq_dev_profile devfreq_profile = {
418 	.target	= devfreq_target,
419 	.get_cur_freq = devfreq_get_cur_freq,
420 	.get_dev_status	= devfreq_get_dev_status,
421 };
422 
423 static unsigned long
424 model_static_power(struct devfreq *devfreq,
425 		   unsigned long voltage)
426 {
427 	struct device *dev = devfreq->dev.parent;
428 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
429 	struct thermal_zone_device *tz = dec->thermal_zone;
430 
431 	int temperature;
432 	unsigned long temp;
433 	unsigned long temp_squared, temp_cubed, temp_scaling_factor;
434 	const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
435 
436 	if (!IS_ERR_OR_NULL(tz) && tz->ops->get_temp) {
437 		int ret;
438 
439 		ret = tz->ops->get_temp(tz, &temperature);
440 		if (ret) {
441 			dev_warn_ratelimited(dev, "ddr thermal zone failed\n");
442 			temperature = FALLBACK_STATIC_TEMPERATURE;
443 		}
444 	} else {
445 		temperature = FALLBACK_STATIC_TEMPERATURE;
446 	}
447 
448 	/*
449 	 * Calculate the temperature scaling factor. To be applied to the
450 	 * voltage scaled power.
451 	 */
452 	temp = temperature / 1000;
453 	temp_squared = temp * temp;
454 	temp_cubed = temp_squared * temp;
455 	temp_scaling_factor = (dec->ts[3] * temp_cubed)
456 	    + (dec->ts[2] * temp_squared) + (dec->ts[1] * temp) + dec->ts[0];
457 
458 	return (((dec->static_power_coeff * voltage_cubed) >> 20)
459 		* temp_scaling_factor) / 1000000;
460 }
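/*
 * In closed form the model above computes (a sketch; V is the voltage in
 * the unit the OPP table provides, T the temperature in degrees Celsius):
 *
 *   P_static = ((static_power_coeff * (V^3 >> 10)) >> 20)
 *              * (ts[3]*T^3 + ts[2]*T^2 + ts[1]*T + ts[0]) / 1000000
 *
 * with the coefficients taken from the "ts" and "static-power-coefficient"
 * properties parsed in power_model_simple_init() below.
 */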
461 
462 static struct devfreq_cooling_power cooling_power_data = {
463 	.get_static_power = model_static_power,
464 	.dyn_power_coeff = 120,
465 };
466 
467 static int power_model_simple_init(struct mpp_dev *mpp)
468 {
469 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
470 	struct device_node *np = mpp->dev->of_node;
471 
472 	u32 temp;
473 	const char *tz_name;
474 	struct device_node *power_model_node;
475 
476 	power_model_node = of_get_child_by_name(np, "vcodec_power_model");
477 	if (!power_model_node) {
478 		dev_err(mpp->dev, "could not find power_model node\n");
479 		return -ENODEV;
480 	}
481 
482 	if (of_property_read_string(power_model_node,
483 				    "thermal-zone",
484 				    &tz_name)) {
485 		dev_err(mpp->dev, "thermal-zone in power_model not available\n");
486 		return -EINVAL;
487 	}
488 
489 	dec->thermal_zone = thermal_zone_get_zone_by_name(tz_name);
490 	if (IS_ERR(dec->thermal_zone)) {
491 		pr_warn("Error getting ddr thermal zone, not yet ready?\n");
492 		dec->thermal_zone = NULL;
493 		return -EPROBE_DEFER;
494 	}
495 
496 	if (of_property_read_u32(power_model_node,
497 				 "static-power-coefficient",
498 				 &dec->static_power_coeff)) {
499 		dev_err(mpp->dev, "static-power-coefficient not available\n");
500 		return -EINVAL;
501 	}
502 	if (of_property_read_u32(power_model_node,
503 				 "dynamic-power-coefficient",
504 				 &temp)) {
505 		dev_err(mpp->dev, "dynamic-power-coefficient not available\n");
506 		return -EINVAL;
507 	}
508 	cooling_power_data.dyn_power_coeff = (unsigned long)temp;
509 
510 	if (of_property_read_u32_array(power_model_node,
511 				       "ts",
512 				       (u32 *)dec->ts,
513 				       4)) {
514 		dev_err(mpp->dev, "ts in power_model not available\n");
515 		return -EINVAL;
516 	}
517 
518 	return 0;
519 }
520 
521 static int devfreq_notifier_call(struct notifier_block *nb,
522 				 unsigned long event,
523 				 void *data)
524 {
525 	struct rkvdec_dev *dec = container_of(nb,
526 					      struct rkvdec_dev,
527 					      devfreq_nb);
528 
529 	if (!dec)
530 		return NOTIFY_OK;
531 
532 	if (event == DEVFREQ_PRECHANGE)
533 		mutex_lock(&dec->sip_reset_lock);
534 	else if (event == DEVFREQ_POSTCHANGE)
535 		mutex_unlock(&dec->sip_reset_lock);
536 
537 	return NOTIFY_OK;
538 }
539 #endif
540 
541 /*
542  * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer, and
543  * the hardware reads it by the pps id carried in the video stream data.
544  *
545  * So in the iommu case we need to translate that address. The address data
546  * also uses the 10bit fd + 22bit offset mode.
547  * Because the userspace decoder does not pass the pps id in the register
548  * file sets, the kernel driver has to translate every scaling list address
549  * in the pps buffer, which means 256 pps entries for H.264 and 64 for H.265.
550  *
551  * To optimize performance, the kernel driver asks the userspace decoder to
552  * set every scaling list address in the pps buffer to the single one used
553  * by the current decoding task. The kernel driver then only translates the
554  * first address and copies it to the whole pps buffer.
555  */
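/*
 * A sketch of the layout this implies (values from rkvdec_process_scl_fd()
 * below): for H.264 the pps buffer holds 256 entries of 32 bytes with the
 * scaling list address word at byte offset 23 of each entry; for H.265 it
 * holds 64 entries of 80 bytes with the word at byte offset 74. The helper
 * below resolves the fd found in the first entry to an iova and then writes
 * that iova back into every entry.
 */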
556 static int fill_scaling_list_pps(struct rkvdec_task *task,
557 				 int fd, int offset, int count,
558 				 int pps_info_size, int sub_addr_offset)
559 {
560 	struct dma_buf *dmabuf = NULL;
561 	void *vaddr = NULL;
562 	u8 *pps = NULL;
563 	u32 scaling_fd = 0;
564 	int ret = 0;
565 	u32 base = sub_addr_offset;
566 
567 	dmabuf = dma_buf_get(fd);
568 	if (IS_ERR_OR_NULL(dmabuf)) {
569 		mpp_err("invalid pps buffer\n");
570 		return -ENOENT;
571 	}
572 
573 	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
574 	if (ret) {
575 		mpp_err("can't access the pps buffer\n");
576 		goto done;
577 	}
578 
579 	vaddr = dma_buf_vmap(dmabuf);
580 	if (!vaddr) {
581 		mpp_err("can't access the pps buffer\n");
582 		ret = -EIO;
583 		goto done;
584 	}
585 	pps = vaddr + offset;
586 	/* NOTE: the scaling buffer address in pps carries no offset */
587 	memcpy(&scaling_fd, pps + base, sizeof(scaling_fd));
588 	scaling_fd = le32_to_cpu(scaling_fd);
589 	if (scaling_fd > 0) {
590 		struct mpp_mem_region *mem_region = NULL;
591 		u32 tmp = 0;
592 		int i = 0;
593 
594 		mem_region = mpp_task_attach_fd(&task->mpp_task,
595 						scaling_fd);
596 		if (IS_ERR(mem_region)) {
597 			mpp_err("scaling list fd %d attach failed\n", scaling_fd);
598 			ret = PTR_ERR(mem_region);
599 			goto done;
600 		}
601 
602 		tmp = mem_region->iova & 0xffffffff;
603 		tmp = cpu_to_le32(tmp);
604 		mpp_debug(DEBUG_PPS_FILL,
605 			  "pps at %p, scaling fd: %3d => %pad + offset %10d\n",
606 			  pps, scaling_fd, &mem_region->iova, offset);
607 
608 		/* Fill the scaling list address in each pps entries */
609 		for (i = 0; i < count; i++, base += pps_info_size)
610 			memcpy(pps + base, &tmp, sizeof(tmp));
611 	}
612 
613 done:
614 	dma_buf_vunmap(dmabuf, vaddr);
615 	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
616 	dma_buf_put(dmabuf);
617 
618 	return ret;
619 }
620 
621 static int rkvdec_process_scl_fd(struct mpp_session *session,
622 				 struct rkvdec_task *task,
623 				 struct mpp_task_msgs *msgs)
624 {
625 	int ret = 0;
626 	int pps_fd;
627 	u32 pps_offset;
628 	int idx = RKVDEC_REG_PPS_BASE_INDEX;
629 	u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
630 
631 	if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
632 		pps_fd = task->reg[idx];
633 		pps_offset = 0;
634 	} else {
635 		pps_fd = task->reg[idx] & 0x3ff;
636 		pps_offset = task->reg[idx] >> 10;
637 	}
638 
639 	pps_offset += mpp_query_reg_offset_info(&task->off_inf, idx);
640 	if (pps_fd > 0) {
641 		int pps_info_offset;
642 		int pps_info_count;
643 		int pps_info_size;
644 		int scaling_list_addr_offset;
645 
646 		switch (fmt) {
647 		case RKVDEC_FMT_H264D:
648 			pps_info_offset = pps_offset;
649 			pps_info_count = 256;
650 			pps_info_size = 32;
651 			scaling_list_addr_offset = 23;
652 			break;
653 		case RKVDEC_FMT_H265D:
654 			pps_info_offset = pps_offset;
655 			pps_info_count = 64;
656 			pps_info_size = 80;
657 			scaling_list_addr_offset = 74;
658 			break;
659 		default:
660 			pps_info_offset = 0;
661 			pps_info_count = 0;
662 			pps_info_size = 0;
663 			scaling_list_addr_offset = 0;
664 			break;
665 		}
666 
667 		mpp_debug(DEBUG_PPS_FILL,
668 			  "scaling list filling parameter:\n");
669 		mpp_debug(DEBUG_PPS_FILL,
670 			  "pps_info_offset %d\n", pps_info_offset);
671 		mpp_debug(DEBUG_PPS_FILL,
672 			  "pps_info_count  %d\n", pps_info_count);
673 		mpp_debug(DEBUG_PPS_FILL,
674 			  "pps_info_size   %d\n", pps_info_size);
675 		mpp_debug(DEBUG_PPS_FILL,
676 			  "scaling_list_addr_offset %d\n",
677 			  scaling_list_addr_offset);
678 
679 		if (pps_info_count) {
680 			ret = fill_scaling_list_pps(task, pps_fd,
681 						    pps_info_offset,
682 						    pps_info_count,
683 						    pps_info_size,
684 						    scaling_list_addr_offset);
685 			if (ret) {
686 				mpp_err("fill pps failed\n");
687 				goto fail;
688 			}
689 		}
690 	}
691 
692 fail:
693 	return ret;
694 }
695 
696 static int rkvdec_process_reg_fd(struct mpp_session *session,
697 				 struct rkvdec_task *task,
698 				 struct mpp_task_msgs *msgs)
699 {
700 	int ret = 0;
701 	u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
702 
703 	/*
704 	 * special offset scale case
705 	 *
706 	 * This translation is for fd + offset translation.
707 	 * One register has 32 bits. We need to transfer both the buffer file
708 	 * handle and the start address offset, so we pack the file handle
709 	 * and offset together using the format below.
710 	 *
711 	 *  0~9  bit for the buffer file handle, range 0 ~ 1023
712 	 * 10~31 bit for the offset, range 0 ~ 4M
713 	 *
714 	 * But in the 4K case the offset can be larger than 4M,
715 	 * so for the VP9 4K decoder colmv base we scale the offset by 16.
716 	 */
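	/*
	 * Worked example with an illustrative register value: reg = 0x2405
	 * unpacks to fd = reg & 0x3ff = 5 and, for the colmv base below,
	 * offset = (reg >> 10) << 4 = 9 << 4 = 144, i.e. the 22-bit field
	 * carries offset / 16.
	 */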
717 	if (fmt == RKVDEC_FMT_VP9D) {
718 		int fd;
719 		u32 offset;
720 		dma_addr_t iova = 0;
721 		struct mpp_mem_region *mem_region = NULL;
722 		int idx = RKVDEC_REG_VP9_REFCOLMV_BASE_INDEX;
723 
724 		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
725 			fd = task->reg[idx];
726 			offset = 0;
727 		} else {
728 			fd = task->reg[idx] & 0x3ff;
729 			offset = task->reg[idx] >> 10 << 4;
730 		}
731 		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
732 		if (IS_ERR(mem_region)) {
733 			mpp_err("reg[%03d]: %08x fd %d attach failed\n",
734 				idx, task->reg[idx], fd);
735 			return -EFAULT;
736 		}
737 
738 		iova = mem_region->iova;
739 		task->reg[idx] = iova + offset;
740 	}
741 
742 	ret = mpp_translate_reg_address(session, &task->mpp_task,
743 					fmt, task->reg, &task->off_inf);
744 	if (ret)
745 		return ret;
746 
747 	mpp_translate_reg_offset_info(&task->mpp_task,
748 				      &task->off_inf, task->reg);
749 	return 0;
750 }
751 
752 static int rkvdec_extract_task_msg(struct rkvdec_task *task,
753 				   struct mpp_task_msgs *msgs)
754 {
755 	u32 i;
756 	int ret;
757 	struct mpp_request *req;
758 	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;
759 
760 	for (i = 0; i < msgs->req_cnt; i++) {
761 		u32 off_s, off_e;
762 
763 		req = &msgs->reqs[i];
764 		if (!req->size)
765 			continue;
766 
767 		switch (req->cmd) {
768 		case MPP_CMD_SET_REG_WRITE: {
769 			off_s = hw_info->reg_start * sizeof(u32);
770 			off_e = hw_info->reg_end * sizeof(u32);
771 			ret = mpp_check_req(req, 0, sizeof(task->reg),
772 					    off_s, off_e);
773 			if (ret)
774 				continue;
775 			if (copy_from_user((u8 *)task->reg + req->offset,
776 					   req->data, req->size)) {
777 				mpp_err("copy_from_user reg failed\n");
778 				return -EIO;
779 			}
780 			memcpy(&task->w_reqs[task->w_req_cnt++],
781 			       req, sizeof(*req));
782 		} break;
783 		case MPP_CMD_SET_REG_READ: {
784 			off_s = hw_info->reg_start * sizeof(u32);
785 			off_e = hw_info->reg_end * sizeof(u32);
786 			ret = mpp_check_req(req, 0, sizeof(task->reg),
787 					    off_s, off_e);
788 			if (ret)
789 				continue;
790 			memcpy(&task->r_reqs[task->r_req_cnt++],
791 			       req, sizeof(*req));
792 		} break;
793 		case MPP_CMD_SET_REG_ADDR_OFFSET: {
794 			mpp_extract_reg_offset_info(&task->off_inf, req);
795 		} break;
796 		default:
797 			break;
798 		}
799 	}
800 	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
801 		  task->w_req_cnt, task->r_req_cnt);
802 
803 	return 0;
804 }
805 
806 static void *rkvdec_alloc_task(struct mpp_session *session,
807 			       struct mpp_task_msgs *msgs)
808 {
809 	int ret;
810 	struct mpp_task *mpp_task = NULL;
811 	struct rkvdec_task *task = NULL;
812 	struct mpp_dev *mpp = session->mpp;
813 
814 	mpp_debug_enter();
815 
816 	task = kzalloc(sizeof(*task), GFP_KERNEL);
817 	if (!task)
818 		return NULL;
819 
820 	mpp_task = &task->mpp_task;
821 	mpp_task_init(session, mpp_task);
822 	mpp_task->hw_info = mpp->var->hw_info;
823 	mpp_task->reg = task->reg;
824 	/* extract reqs for current task */
825 	ret = rkvdec_extract_task_msg(task, msgs);
826 	if (ret)
827 		goto fail;
828 	/* process fd in pps for 264 and 265 */
829 	if (!(msgs->flags & MPP_FLAGS_SCL_FD_NO_TRANS)) {
830 		ret = rkvdec_process_scl_fd(session, task, msgs);
831 		if (ret)
832 			goto fail;
833 	}
834 	/* process fd in register */
835 	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
836 		ret = rkvdec_process_reg_fd(session, task, msgs);
837 		if (ret)
838 			goto fail;
839 	}
840 	task->strm_addr = task->reg[RKVDEC_REG_RLC_BASE_INDEX];
841 	task->link_mode = RKVDEC_MODE_ONEFRAME;
842 	task->clk_mode = CLK_MODE_NORMAL;
843 
844 	/* get resolution info */
845 	task->pixels = RKVDEC_GET_YSTRDE(task->reg[RKVDEC_RGE_YSTRDE_INDEX]);
846 	mpp_debug(DEBUG_TASK_INFO, "ystride=%d\n", task->pixels);
847 
848 	mpp_debug_leave();
849 
850 	return mpp_task;
851 
852 fail:
853 	mpp_task_dump_mem_region(mpp, mpp_task);
854 	mpp_task_dump_reg(mpp, mpp_task);
855 	mpp_task_finalize(session, mpp_task);
856 	kfree(task);
857 	return NULL;
858 }
859 
860 static void *rkvdec_prepare_with_reset(struct mpp_dev *mpp,
861 				       struct mpp_task *mpp_task)
862 {
863 	unsigned long flags;
864 	struct mpp_task *out_task = NULL;
865 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
866 
867 	spin_lock_irqsave(&mpp->queue->running_lock, flags);
868 	out_task = list_empty(&mpp->queue->running_list) ? mpp_task : NULL;
869 	spin_unlock_irqrestore(&mpp->queue->running_lock, flags);
870 
871 	if (out_task && !dec->had_reset) {
872 		struct rkvdec_task *task = to_rkvdec_task(out_task);
873 		u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
874 
875 		/* on rk3399, rk3228 and rk3229 chips, when decoding switches
876 		 * from h264 to vp9 the hardware will time out and cannot
877 		 * recover, so reset it before the hardware runs on such a switch.
878 		 */
879 		if (dec->last_fmt == RKVDEC_FMT_H264D && fmt == RKVDEC_FMT_VP9D) {
880 			mpp_power_on(mpp);
881 			mpp_dev_reset(mpp);
882 			mpp_power_off(mpp);
883 		}
884 	}
885 
886 	return out_task;
887 }
888 
889 static int rkvdec_run(struct mpp_dev *mpp,
890 		      struct mpp_task *mpp_task)
891 {
892 	int i;
893 	u32 reg_en;
894 	struct rkvdec_task *task = NULL;
895 	u32 timing_en = mpp->srv->timing_en;
896 
897 	mpp_debug_enter();
898 
899 	task = to_rkvdec_task(mpp_task);
900 	reg_en = mpp_task->hw_info->reg_en;
901 	switch (task->link_mode) {
902 	case RKVDEC_MODE_ONEFRAME: {
903 		u32 reg;
904 
905 		/* set cache size */
906 		reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS
907 			| RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
908 		if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
909 			reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
910 
911 		mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
912 		mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
913 		/* clear cache */
914 		mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
915 		mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
916 		/* set registers for hardware */
917 		for (i = 0; i < task->w_req_cnt; i++) {
918 			int s, e;
919 			struct mpp_request *req = &task->w_reqs[i];
920 
921 			s = req->offset / sizeof(u32);
922 			e = s + req->size / sizeof(u32);
923 			mpp_write_req(mpp, task->reg, s, e, reg_en);
924 		}
925 		/* init current task */
926 		mpp->cur_task = mpp_task;
927 		mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
928 		/* Flush the registers before starting the device */
929 		wmb();
930 		mpp_write(mpp, RKVDEC_REG_INT_EN,
931 			  task->reg[reg_en] | RKVDEC_DEC_START);
932 
933 		mpp_task_run_end(mpp_task, timing_en);
934 	} break;
935 	default:
936 		break;
937 	}
938 
939 	mpp_debug_leave();
940 
941 	return 0;
942 }
943 
944 static int rkvdec_3328_run(struct mpp_dev *mpp,
945 			   struct mpp_task *mpp_task)
946 {
947 	u32 fmt = 0;
948 	u32 cfg = 0;
949 	struct rkvdec_task *task = NULL;
950 
951 	mpp_debug_enter();
952 
953 	task = to_rkvdec_task(mpp_task);
954 
955 	/*
956 	 * HW defect workaround: the VP9 and H.265 power save optimization causes
957 	 * decoding corruption, so disable the optimization here.
958 	 */
959 	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
960 	if (fmt == RKVDEC_FMT_VP9D || fmt == RKVDEC_FMT_H265D) {
961 		cfg = task->reg[RKVDEC_POWER_CTL_INDEX] | 0xFFFF;
962 		task->reg[RKVDEC_POWER_CTL_INDEX] = cfg & (~(1 << 12));
963 		mpp_write_relaxed(mpp, RKVDEC_POWER_CTL_BASE,
964 				  task->reg[RKVDEC_POWER_CTL_INDEX]);
965 	}
966 
967 	rkvdec_run(mpp, mpp_task);
968 
969 	mpp_debug_leave();
970 
971 	return 0;
972 }
973 
974 static int rkvdec_1126_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
975 {
976 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
977 
978 	if (task->link_mode == RKVDEC_MODE_ONEFRAME)
979 		mpp_iommu_flush_tlb(mpp->iommu_info);
980 
981 	return rkvdec_run(mpp, mpp_task);
982 }
983 
984 static int rkvdec_px30_run(struct mpp_dev *mpp,
985 		    struct mpp_task *mpp_task)
986 {
987 	mpp_iommu_flush_tlb(mpp->iommu_info);
988 	return rkvdec_run(mpp, mpp_task);
989 }
990 
991 static int rkvdec_irq(struct mpp_dev *mpp)
992 {
993 	mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
994 	if (!(mpp->irq_status & RKVDEC_DEC_INT_RAW))
995 		return IRQ_NONE;
996 
997 	mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
998 
999 	return IRQ_WAKE_THREAD;
1000 }
1001 
1002 static int rkvdec_isr(struct mpp_dev *mpp)
1003 {
1004 	u32 err_mask;
1005 	struct rkvdec_task *task = NULL;
1006 	struct mpp_task *mpp_task = mpp->cur_task;
1007 
1008 	mpp_debug_enter();
1009 	/* FIXME use a spin lock here */
1010 	if (!mpp_task) {
1011 		dev_err(mpp->dev, "no current task\n");
1012 		goto done;
1013 	}
1014 	mpp_time_diff(mpp_task);
1015 	mpp->cur_task = NULL;
1016 	task = to_rkvdec_task(mpp_task);
1017 	task->irq_status = mpp->irq_status;
1018 	switch (task->link_mode) {
1019 	case RKVDEC_MODE_ONEFRAME: {
1020 		mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
1021 
1022 		err_mask = RKVDEC_INT_BUF_EMPTY
1023 			| RKVDEC_INT_BUS_ERROR
1024 			| RKVDEC_INT_COLMV_REF_ERROR
1025 			| RKVDEC_INT_STRM_ERROR
1026 			| RKVDEC_INT_TIMEOUT;
1027 
1028 		if (err_mask & task->irq_status)
1029 			atomic_inc(&mpp->reset_request);
1030 
1031 		mpp_task_finish(mpp_task->session, mpp_task);
1032 	} break;
1033 	default:
1034 		break;
1035 	}
1036 done:
1037 	mpp_debug_leave();
1038 	return IRQ_HANDLED;
1039 }
1040 
1041 static int rkvdec_3328_isr(struct mpp_dev *mpp)
1042 {
1043 	u32 err_mask;
1044 	struct rkvdec_task *task = NULL;
1045 	struct mpp_task *mpp_task = mpp->cur_task;
1046 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1047 
1048 	mpp_debug_enter();
1049 	/* FIXME use a spin lock here */
1050 	if (!mpp_task) {
1051 		dev_err(mpp->dev, "no current task\n");
1052 		goto done;
1053 	}
1054 	mpp_time_diff(mpp_task);
1055 	mpp->cur_task = NULL;
1056 	task = to_rkvdec_task(mpp_task);
1057 	task->irq_status = mpp->irq_status;
1058 	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
1059 
1060 	err_mask = RKVDEC_INT_BUF_EMPTY
1061 		| RKVDEC_INT_BUS_ERROR
1062 		| RKVDEC_INT_COLMV_REF_ERROR
1063 		| RKVDEC_INT_STRM_ERROR
1064 		| RKVDEC_INT_TIMEOUT;
1065 	if (err_mask & task->irq_status)
1066 		atomic_inc(&mpp->reset_request);
1067 
1068 	/* unmap the reserved buffer */
1069 	if (dec->aux_iova != -1) {
1070 		iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
1071 		dec->aux_iova = -1;
1072 	}
1073 
1074 	mpp_task_finish(mpp_task->session, mpp_task);
1075 done:
1076 	mpp_debug_leave();
1077 	return IRQ_HANDLED;
1078 }
1079 
1080 static int rkvdec_finish(struct mpp_dev *mpp,
1081 			 struct mpp_task *mpp_task)
1082 {
1083 	u32 i;
1084 	u32 dec_get;
1085 	s32 dec_length;
1086 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1087 
1088 	mpp_debug_enter();
1089 
1090 	switch (task->link_mode) {
1091 	case RKVDEC_MODE_ONEFRAME: {
1092 		u32 s, e;
1093 		struct mpp_request *req;
1094 
1095 		/* read register after running */
1096 		for (i = 0; i < task->r_req_cnt; i++) {
1097 			req = &task->r_reqs[i];
1098 			s = req->offset / sizeof(u32);
1099 			e = s + req->size / sizeof(u32);
1100 			mpp_read_req(mpp, task->reg, s, e);
1101 		}
1102 		/* revert hack for irq status */
1103 		task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
1104 		/* revert hack for decoded length */
1105 		dec_get = mpp_read_relaxed(mpp, RKVDEC_REG_RLC_BASE);
1106 		dec_length = dec_get - task->strm_addr;
1107 		task->reg[RKVDEC_REG_RLC_BASE_INDEX] = dec_length << 10;
1108 		mpp_debug(DEBUG_REGISTER,
1109 			  "dec_get %08x dec_length %d\n", dec_get, dec_length);
1110 	} break;
1111 	default:
1112 		break;
1113 	}
1114 
1115 	mpp_debug_leave();
1116 
1117 	return 0;
1118 }
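/*
 * Note on the decoded-length hack above: after decoding, the RLC register
 * holds the hardware read pointer, so dec_length = dec_get - strm_addr is
 * the number of bytes consumed; shifting it left by 10 places the value in
 * the 22-bit offset field of the fd + offset register format that userspace
 * expects.
 */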
1119 
1120 static int rkvdec_finish_with_record_info(struct mpp_dev *mpp,
1121 					  struct mpp_task *mpp_task)
1122 {
1123 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1124 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1125 
1126 	rkvdec_finish(mpp, mpp_task);
1127 	dec->last_fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
1128 	dec->had_reset = (atomic_read(&mpp->reset_request) > 0) ? true : false;
1129 
1130 	return 0;
1131 }
1132 
1133 static int rkvdec_result(struct mpp_dev *mpp,
1134 			 struct mpp_task *mpp_task,
1135 			 struct mpp_task_msgs *msgs)
1136 {
1137 	u32 i;
1138 	struct mpp_request *req;
1139 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1140 
1141 	/* FIXME may overflow the kernel */
1142 	for (i = 0; i < task->r_req_cnt; i++) {
1143 		req = &task->r_reqs[i];
1144 
1145 		if (copy_to_user(req->data,
1146 				 (u8 *)task->reg + req->offset,
1147 				 req->size)) {
1148 			mpp_err("copy_to_user reg fail\n");
1149 			return -EIO;
1150 		}
1151 	}
1152 
1153 	return 0;
1154 }
1155 
1156 static int rkvdec_free_task(struct mpp_session *session,
1157 			    struct mpp_task *mpp_task)
1158 {
1159 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1160 
1161 	mpp_task_finalize(session, mpp_task);
1162 	kfree(task);
1163 
1164 	return 0;
1165 }
1166 
1167 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
1168 static int rkvdec_procfs_remove(struct mpp_dev *mpp)
1169 {
1170 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1171 
1172 	if (dec->procfs) {
1173 		proc_remove(dec->procfs);
1174 		dec->procfs = NULL;
1175 	}
1176 
1177 	return 0;
1178 }
1179 
1180 static int rkvdec_procfs_init(struct mpp_dev *mpp)
1181 {
1182 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1183 
1184 	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
1185 	if (IS_ERR_OR_NULL(dec->procfs)) {
1186 		mpp_err("failed to open procfs\n");
1187 		dec->procfs = NULL;
1188 		return -EIO;
1189 	}
1190 
1191 	/* for common mpp_dev options */
1192 	mpp_procfs_create_common(dec->procfs, mpp);
1193 
1194 	mpp_procfs_create_u32("aclk", 0644,
1195 			      dec->procfs, &dec->aclk_info.debug_rate_hz);
1196 	mpp_procfs_create_u32("clk_core", 0644,
1197 			      dec->procfs, &dec->core_clk_info.debug_rate_hz);
1198 	mpp_procfs_create_u32("clk_cabac", 0644,
1199 			      dec->procfs, &dec->cabac_clk_info.debug_rate_hz);
1200 	mpp_procfs_create_u32("clk_hevc_cabac", 0644,
1201 			      dec->procfs, &dec->hevc_cabac_clk_info.debug_rate_hz);
1202 	mpp_procfs_create_u32("session_buffers", 0644,
1203 			      dec->procfs, &mpp->session_max_buffers);
1204 
1205 	return 0;
1206 }
1207 #else
1208 static inline int rkvdec_procfs_remove(struct mpp_dev *mpp)
1209 {
1210 	return 0;
1211 }
1212 
1213 static inline int rkvdec_procfs_init(struct mpp_dev *mpp)
1214 {
1215 	return 0;
1216 }
1217 #endif
1218 
1219 static int rkvdec_init(struct mpp_dev *mpp)
1220 {
1221 	int ret;
1222 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1223 
1224 	mutex_init(&dec->sip_reset_lock);
1225 	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVDEC];
1226 
1227 	/* Get clock info from dtsi */
1228 	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
1229 	if (ret)
1230 		mpp_err("failed on clk_get aclk_vcodec\n");
1231 	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
1232 	if (ret)
1233 		mpp_err("failed on clk_get hclk_vcodec\n");
1234 	ret = mpp_get_clk_info(mpp, &dec->core_clk_info, "clk_core");
1235 	if (ret)
1236 		mpp_err("failed on clk_get clk_core\n");
1237 	ret = mpp_get_clk_info(mpp, &dec->cabac_clk_info, "clk_cabac");
1238 	if (ret)
1239 		mpp_err("failed on clk_get clk_cabac\n");
1240 	ret = mpp_get_clk_info(mpp, &dec->hevc_cabac_clk_info, "clk_hevc_cabac");
1241 	if (ret)
1242 		mpp_err("failed on clk_get clk_hevc_cabac\n");
1243 	/* Set default rates */
1244 	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
1245 	mpp_set_clk_info_rate_hz(&dec->core_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
1246 	mpp_set_clk_info_rate_hz(&dec->cabac_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
1247 	mpp_set_clk_info_rate_hz(&dec->hevc_cabac_clk_info, CLK_MODE_DEFAULT, 300 * MHZ);
1248 
1249 	/* Get normal max workload from dtsi */
1250 	of_property_read_u32(mpp->dev->of_node,
1251 			     "rockchip,default-max-load", &dec->default_max_load);
1252 	/* Get reset control from dtsi */
1253 	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
1254 	if (!dec->rst_a)
1255 		mpp_err("No aclk reset resource defined\n");
1256 	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
1257 	if (!dec->rst_h)
1258 		mpp_err("No hclk reset resource defined\n");
1259 	dec->rst_niu_a = mpp_reset_control_get(mpp, RST_TYPE_NIU_A, "niu_a");
1260 	if (!dec->rst_niu_a)
1261 		mpp_err("No niu aclk reset resource defined\n");
1262 	dec->rst_niu_h = mpp_reset_control_get(mpp, RST_TYPE_NIU_H, "niu_h");
1263 	if (!dec->rst_niu_h)
1264 		mpp_err("No niu hclk reset resource defined\n");
1265 	dec->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
1266 	if (!dec->rst_core)
1267 		mpp_err("No core reset resource defined\n");
1268 	dec->rst_cabac = mpp_reset_control_get(mpp, RST_TYPE_CABAC, "video_cabac");
1269 	if (!dec->rst_cabac)
1270 		mpp_err("No cabac reset resource defined\n");
1271 	dec->rst_hevc_cabac = mpp_reset_control_get(mpp, RST_TYPE_HEVC_CABAC, "video_hevc_cabac");
1272 	if (!dec->rst_hevc_cabac)
1273 		mpp_err("No hevc cabac reset resource defined\n");
1274 
1275 	return 0;
1276 }
1277 
1278 static int rkvdec_px30_init(struct mpp_dev *mpp)
1279 {
1280 	rkvdec_init(mpp);
1281 	return px30_workaround_combo_init(mpp);
1282 }
1283 
1284 static int rkvdec_3036_init(struct mpp_dev *mpp)
1285 {
1286 	rkvdec_init(mpp);
1287 	set_bit(mpp->var->device_type, &mpp->queue->dev_active_flags);
1288 	return 0;
1289 }
1290 
1291 static int rkvdec_3328_iommu_hdl(struct iommu_domain *iommu,
1292 				 struct device *iommu_dev,
1293 				 unsigned long iova,
1294 				 int status, void *arg)
1295 {
1296 	int ret = 0;
1297 	struct mpp_dev *mpp = (struct mpp_dev *)arg;
1298 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1299 
1300 	/*
1301 	 * HW defect workaround: an invalid address is generated when the rk322x
1302 	 * hevc decoder pre-fetches colmv data in tile mode.
1303 	 */
1304 	if (IOMMU_GET_BUS_ID(status) == 2) {
1305 		unsigned long page_iova = 0;
1306 		/* avoid another page fault occur after page fault */
1307 		if (dec->aux_iova != -1) {
1308 			iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
1309 			dec->aux_iova = -1;
1310 		}
1311 
1312 		page_iova = round_down(iova, IOMMU_PAGE_SIZE);
1313 		ret = iommu_map(mpp->iommu_info->domain, page_iova,
1314 				page_to_phys(dec->aux_page), IOMMU_PAGE_SIZE,
1315 				IOMMU_READ | IOMMU_WRITE);
1316 		if (!ret)
1317 			dec->aux_iova = page_iova;
1318 	}
1319 
1320 	return ret;
1321 }
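/*
 * Sketch of the workaround above: the faulting iova is rounded down to a
 * 4K page boundary and backed by the spare aux_page, so the bus-id-2 colmv
 * pre-fetch reads harmless data instead of faulting repeatedly; the mapping
 * is torn down again in rkvdec_3328_isr() and rkvdec_3328_exit().
 */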
1322 
1323 #ifdef CONFIG_PM_DEVFREQ
1324 static int rkvdec_devfreq_remove(struct mpp_dev *mpp)
1325 {
1326 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1327 
1328 	devfreq_unregister_opp_notifier(mpp->dev, dec->devfreq);
1329 	dev_pm_opp_of_remove_table(mpp->dev);
1330 
1331 	return 0;
1332 }
1333 
1334 static int rkvdec_devfreq_init(struct mpp_dev *mpp)
1335 {
1336 	int ret = 0;
1337 	struct devfreq_dev_status *stat;
1338 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1339 
1340 	mutex_init(&dec->set_clk_lock);
1341 	dec->parent_devfreq = devfreq_get_devfreq_by_phandle(mpp->dev, "rkvdec_devfreq", 0);
1342 	if (IS_ERR_OR_NULL(dec->parent_devfreq)) {
1343 		if (PTR_ERR(dec->parent_devfreq) == -EPROBE_DEFER) {
1344 			dev_warn(mpp->dev, "parent devfreq is not ready, retry\n");
1345 
1346 			return -EPROBE_DEFER;
1347 		}
1348 	} else {
1349 		dec->devfreq_nb.notifier_call = devfreq_notifier_call;
1350 		devm_devfreq_register_notifier(mpp->dev,
1351 					       dec->parent_devfreq,
1352 					       &dec->devfreq_nb,
1353 					       DEVFREQ_TRANSITION_NOTIFIER);
1354 	}
1355 
1356 	dec->vdd = devm_regulator_get_optional(mpp->dev, "vcodec");
1357 	if (IS_ERR_OR_NULL(dec->vdd)) {
1358 		if (PTR_ERR(dec->vdd) == -EPROBE_DEFER) {
1359 			dev_warn(mpp->dev, "vcodec regulator not ready, retry\n");
1360 
1361 			return -EPROBE_DEFER;
1362 		}
1363 		dev_warn(mpp->dev, "no regulator for vcodec\n");
1364 
1365 		return 0;
1366 	}
1367 
1368 	ret = rockchip_init_opp_table(mpp->dev, NULL,
1369 				      "rkvdec_leakage", "vcodec");
1370 	if (ret) {
1371 		dev_err(mpp->dev, "Failed to init_opp_table\n");
1372 		goto done;
1373 	}
1374 	dec->devfreq = devm_devfreq_add_device(mpp->dev, &devfreq_profile,
1375 					       "userspace", NULL);
1376 	if (IS_ERR(dec->devfreq)) {
1377 		ret = PTR_ERR(dec->devfreq);
1378 		goto done;
1379 	}
1380 
1381 	stat = &dec->devfreq->last_status;
1382 	stat->current_frequency = clk_get_rate(dec->aclk_info.clk);
1383 
1384 	ret = devfreq_register_opp_notifier(mpp->dev, dec->devfreq);
1385 	if (ret)
1386 		goto done;
1387 
1388 	/* simple power model init */
1389 	ret = power_model_simple_init(mpp);
1390 	if (!ret && dec->devfreq) {
1391 		dec->devfreq_cooling =
1392 			of_devfreq_cooling_register_power(mpp->dev->of_node,
1393 							  dec->devfreq,
1394 							  &cooling_power_data);
1395 		if (IS_ERR_OR_NULL(dec->devfreq_cooling)) {
1396 			ret = -ENXIO;
1397 			dev_err(mpp->dev, "Failed to register cooling\n");
1398 			goto done;
1399 		}
1400 	}
1401 
1402 done:
1403 	return ret;
1404 }
1405 #else
1406 static inline int rkvdec_devfreq_remove(struct mpp_dev *mpp)
1407 {
1408 	return 0;
1409 }
1410 
1411 static inline int rkvdec_devfreq_init(struct mpp_dev *mpp)
1412 {
1413 	return 0;
1414 }
1415 #endif
1416 
1417 static int rkvdec_3328_init(struct mpp_dev *mpp)
1418 {
1419 	int ret = 0;
1420 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1421 
1422 	rkvdec_init(mpp);
1423 
1424 	/* workaround for mmu pagefault */
1425 	dec->aux_page = alloc_page(GFP_KERNEL);
1426 	if (!dec->aux_page) {
1427 		dev_err(mpp->dev, "failed to allocate a page for auxiliary usage\n");
1428 		ret = -ENOMEM;
1429 		goto done;
1430 	}
1431 	dec->aux_iova = -1;
1432 	mpp->iommu_info->hdl = rkvdec_3328_iommu_hdl;
1433 
1434 	ret = rkvdec_devfreq_init(mpp);
1435 done:
1436 	return ret;
1437 }
1438 
1439 static int rkvdec_3328_exit(struct mpp_dev *mpp)
1440 {
1441 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1442 
1443 	if (dec->aux_page)
1444 		__free_page(dec->aux_page);
1445 
1446 	if (dec->aux_iova != -1) {
1447 		iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
1448 		dec->aux_iova = -1;
1449 	}
1450 	rkvdec_devfreq_remove(mpp);
1451 
1452 	return 0;
1453 }
1454 
1455 static int rkvdec_clk_on(struct mpp_dev *mpp)
1456 {
1457 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1458 
1459 	mpp_clk_safe_enable(dec->aclk_info.clk);
1460 	mpp_clk_safe_enable(dec->hclk_info.clk);
1461 	mpp_clk_safe_enable(dec->core_clk_info.clk);
1462 	mpp_clk_safe_enable(dec->cabac_clk_info.clk);
1463 	mpp_clk_safe_enable(dec->hevc_cabac_clk_info.clk);
1464 
1465 	return 0;
1466 }
1467 
1468 static int rkvdec_clk_off(struct mpp_dev *mpp)
1469 {
1470 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1471 
1472 	clk_disable_unprepare(dec->aclk_info.clk);
1473 	clk_disable_unprepare(dec->hclk_info.clk);
1474 	clk_disable_unprepare(dec->core_clk_info.clk);
1475 	clk_disable_unprepare(dec->cabac_clk_info.clk);
1476 	clk_disable_unprepare(dec->hevc_cabac_clk_info.clk);
1477 
1478 	return 0;
1479 }
1480 
1481 static int rkvdec_get_freq(struct mpp_dev *mpp,
1482 			   struct mpp_task *mpp_task)
1483 {
1484 	u32 task_cnt;
1485 	u32 workload;
1486 	struct mpp_task *loop = NULL, *n;
1487 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1488 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1489 
1490 	/* if max load is not set, assume there is no advanced mode */
1491 	if (!dec->default_max_load || !task->pixels)
1492 		return 0;
1493 
1494 	task_cnt = 1;
1495 	workload = task->pixels;
1496 	/* calc workload in pending list */
1497 	mutex_lock(&mpp->queue->pending_lock);
1498 	list_for_each_entry_safe(loop, n,
1499 				 &mpp->queue->pending_list,
1500 				 queue_link) {
1501 		struct rkvdec_task *loop_task = to_rkvdec_task(loop);
1502 
1503 		task_cnt++;
1504 		workload += loop_task->pixels;
1505 	}
1506 	mutex_unlock(&mpp->queue->pending_lock);
1507 
1508 	if (workload > dec->default_max_load)
1509 		task->clk_mode = CLK_MODE_ADVANCED;
1510 
1511 	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
1512 		  task_cnt, workload, task->clk_mode);
1513 
1514 	return 0;
1515 }
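/*
 * Illustration with made-up numbers: if rockchip,default-max-load is 2880
 * and the current task plus two pending tasks each contribute a ystride
 * workload of 1920, the sum 5760 exceeds the limit and the task is bumped
 * to CLK_MODE_ADVANCED.
 */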
1516 
1517 static int rkvdec_3328_get_freq(struct mpp_dev *mpp,
1518 				struct mpp_task *mpp_task)
1519 {
1520 	u32 fmt;
1521 	u32 ddr_align_en;
1522 	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);
1523 
1524 	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
1525 	ddr_align_en = task->reg[RKVDEC_REG_INT_EN_INDEX] & RKVDEC_WR_DDR_ALIGN_EN;
1526 	if (fmt == RKVDEC_FMT_H264D && ddr_align_en)
1527 		task->clk_mode = CLK_MODE_ADVANCED;
1528 	else
1529 		rkvdec_get_freq(mpp, mpp_task);
1530 
1531 	return 0;
1532 }
1533 
1534 static int rkvdec_3368_set_grf(struct mpp_dev *mpp)
1535 {
1536 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1537 
1538 	dec->grf_changed = mpp_grf_is_changed(mpp->grf_info);
1539 	mpp_set_grf(mpp->grf_info);
1540 
1541 	return 0;
1542 }
1543 
1544 static int rkvdec_3036_set_grf(struct mpp_dev *mpp)
1545 {
1546 	int grf_changed;
1547 	struct mpp_dev *loop = NULL, *n;
1548 	struct mpp_taskqueue *queue = mpp->queue;
1549 	bool pd_is_on;
1550 
1551 	grf_changed = mpp_grf_is_changed(mpp->grf_info);
1552 	if (grf_changed) {
1553 
1554 		/*
1555 		 * in this case, devices sharing the queue also share the same pd & clk,
1556 		 * so it is okay to use mpp->dev's pd to control the whole process
1557 		 */
1558 		pd_is_on = rockchip_pmu_pd_is_on(mpp->dev);
1559 		if (!pd_is_on)
1560 			rockchip_pmu_pd_on(mpp->dev);
1561 		mpp->hw_ops->clk_on(mpp);
1562 
1563 		list_for_each_entry_safe(loop, n, &queue->dev_list, queue_link) {
1564 			if (test_bit(loop->var->device_type, &queue->dev_active_flags)) {
1565 				mpp_set_grf(loop->grf_info);
1566 				if (loop->hw_ops->clk_on)
1567 					loop->hw_ops->clk_on(loop);
1568 				if (loop->hw_ops->reset)
1569 					loop->hw_ops->reset(loop);
1570 				rockchip_iommu_disable(loop->dev);
1571 				if (loop->hw_ops->clk_off)
1572 					loop->hw_ops->clk_off(loop);
1573 				clear_bit(loop->var->device_type, &queue->dev_active_flags);
1574 			}
1575 		}
1576 
1577 		mpp_set_grf(mpp->grf_info);
1578 		rockchip_iommu_enable(mpp->dev);
1579 		set_bit(mpp->var->device_type, &queue->dev_active_flags);
1580 
1581 		mpp->hw_ops->clk_off(mpp);
1582 		if (!pd_is_on)
1583 			rockchip_pmu_pd_off(mpp->dev);
1584 	}
1585 
1586 
1587 	return 0;
1588 }
1589 
1590 static int rkvdec_set_freq(struct mpp_dev *mpp,
1591 			   struct mpp_task *mpp_task)
1592 {
1593 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1594 	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);
1595 
1596 	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
1597 	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
1598 	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
1599 	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);
1600 
1601 	return 0;
1602 }
1603 
1604 static int rkvdec_3368_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1605 {
1606 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1607 	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);
1608 
1609 	/* if grf changed, the iommu needs to be reset on rk3368 */
1610 	if (dec->grf_changed) {
1611 		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
1612 		dec->grf_changed = false;
1613 	}
1614 
1615 	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
1616 	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
1617 	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
1618 	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);
1619 
1620 	return 0;
1621 }
1622 
1623 static int rkvdec_3328_set_freq(struct mpp_dev *mpp,
1624 				struct mpp_task *mpp_task)
1625 {
1626 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1627 	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);
1628 
1629 #ifdef CONFIG_PM_DEVFREQ
1630 	if (dec->devfreq) {
1631 		struct devfreq_dev_status *stat;
1632 		unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;
1633 
1634 		stat = &dec->devfreq->last_status;
1635 		stat->busy_time = 1;
1636 		stat->total_time = 1;
1637 		aclk_rate_hz = mpp_get_clk_info_rate_hz(&dec->aclk_info,
1638 							task->clk_mode);
1639 		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1640 							task->clk_mode);
1641 		cabac_rate_hz = mpp_get_clk_info_rate_hz(&dec->cabac_clk_info,
1642 							 task->clk_mode);
1643 		rkvdec_devf_set_clk(dec, aclk_rate_hz,
1644 				    core_rate_hz, cabac_rate_hz,
1645 				    EVENT_ADJUST);
1646 	}
1647 #else
1648 	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
1649 	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
1650 	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
1651 #endif
1652 
1653 	return 0;
1654 }
1655 
1656 static int rkvdec_reduce_freq(struct mpp_dev *mpp)
1657 {
1658 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1659 
1660 	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
1661 	mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_REDUCE);
1662 	mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_REDUCE);
1663 	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_REDUCE);
1664 
1665 	return 0;
1666 }
1667 
1668 static int rkvdec_3328_reduce_freq(struct mpp_dev *mpp)
1669 {
1670 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1671 
1672 #ifdef CONFIG_PM_DEVFREQ
1673 	if (dec->devfreq) {
1674 		struct devfreq_dev_status *stat;
1675 		unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;
1676 
1677 		stat = &dec->devfreq->last_status;
1678 		stat->busy_time = 0;
1679 		stat->total_time = 1;
1680 		aclk_rate_hz = mpp_get_clk_info_rate_hz(&dec->aclk_info,
1681 							CLK_MODE_REDUCE);
1682 		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1683 							CLK_MODE_REDUCE);
1684 		cabac_rate_hz = mpp_get_clk_info_rate_hz(&dec->cabac_clk_info,
1685 							 CLK_MODE_REDUCE);
1686 		rkvdec_devf_set_clk(dec, aclk_rate_hz,
1687 				    core_rate_hz, cabac_rate_hz,
1688 				    EVENT_ADJUST);
1689 	}
1690 #else
1691 	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
1692 	mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_REDUCE);
1693 	mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_REDUCE);
1694 #endif
1695 
1696 	return 0;
1697 }
1698 
1699 static int rkvdec_reset(struct mpp_dev *mpp)
1700 {
1701 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1702 
1703 	mpp_debug_enter();
1704 	if (dec->rst_a && dec->rst_h) {
1705 		mpp_pmu_idle_request(mpp, true);
1706 		mpp_safe_reset(dec->rst_niu_a);
1707 		mpp_safe_reset(dec->rst_niu_h);
1708 		mpp_safe_reset(dec->rst_a);
1709 		mpp_safe_reset(dec->rst_h);
1710 		mpp_safe_reset(dec->rst_core);
1711 		mpp_safe_reset(dec->rst_cabac);
1712 		mpp_safe_reset(dec->rst_hevc_cabac);
1713 		udelay(5);
1714 		mpp_safe_unreset(dec->rst_niu_h);
1715 		mpp_safe_unreset(dec->rst_niu_a);
1716 		mpp_safe_unreset(dec->rst_a);
1717 		mpp_safe_unreset(dec->rst_h);
1718 		mpp_safe_unreset(dec->rst_core);
1719 		mpp_safe_unreset(dec->rst_cabac);
1720 		mpp_safe_unreset(dec->rst_hevc_cabac);
1721 		mpp_pmu_idle_request(mpp, false);
1722 	}
1723 	mpp_debug_leave();
1724 
1725 	return 0;
1726 }
1727 
1728 static int rkvdec_sip_reset(struct mpp_dev *mpp)
1729 {
1730 	if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
1731 		/* The reset flow in arm trustzone firmware */
1732 		struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1733 
1734 		mutex_lock(&dec->sip_reset_lock);
1735 		sip_smc_vpu_reset(0, 0, 0);
1736 		mutex_unlock(&dec->sip_reset_lock);
1737 
1738 		return 0;
1739 	} else {
1740 		return rkvdec_reset(mpp);
1741 	}
1742 }
1743 
1744 static struct mpp_hw_ops rkvdec_v1_hw_ops = {
1745 	.init = rkvdec_init,
1746 	.clk_on = rkvdec_clk_on,
1747 	.clk_off = rkvdec_clk_off,
1748 	.get_freq = rkvdec_get_freq,
1749 	.set_freq = rkvdec_set_freq,
1750 	.reduce_freq = rkvdec_reduce_freq,
1751 	.reset = rkvdec_reset,
1752 };
1753 
1754 static struct mpp_hw_ops rkvdec_px30_hw_ops = {
1755 	.init = rkvdec_px30_init,
1756 	.clk_on = rkvdec_clk_on,
1757 	.clk_off = rkvdec_clk_off,
1758 	.get_freq = rkvdec_get_freq,
1759 	.set_freq = rkvdec_set_freq,
1760 	.reduce_freq = rkvdec_reduce_freq,
1761 	.reset = rkvdec_reset,
1762 	.set_grf = px30_workaround_combo_switch_grf,
1763 };
1764 
1765 static struct mpp_hw_ops rkvdec_3036_hw_ops = {
1766 	.init = rkvdec_3036_init,
1767 	.clk_on = rkvdec_clk_on,
1768 	.clk_off = rkvdec_clk_off,
1769 	.get_freq = rkvdec_get_freq,
1770 	.set_freq = rkvdec_set_freq,
1771 	.reduce_freq = rkvdec_reduce_freq,
1772 	.reset = rkvdec_reset,
1773 	.set_grf = rkvdec_3036_set_grf,
1774 };
1775 
1776 static struct mpp_hw_ops rkvdec_3399_hw_ops = {
1777 	.init = rkvdec_init,
1778 	.clk_on = rkvdec_clk_on,
1779 	.clk_off = rkvdec_clk_off,
1780 	.get_freq = rkvdec_get_freq,
1781 	.set_freq = rkvdec_set_freq,
1782 	.reduce_freq = rkvdec_reduce_freq,
1783 	.reset = rkvdec_reset,
1784 };
1785 
1786 static struct mpp_hw_ops rkvdec_3368_hw_ops = {
1787 	.init = rkvdec_init,
1788 	.clk_on = rkvdec_clk_on,
1789 	.clk_off = rkvdec_clk_off,
1790 	.get_freq = rkvdec_get_freq,
1791 	.set_freq = rkvdec_3368_set_freq,
1792 	.reduce_freq = rkvdec_reduce_freq,
1793 	.reset = rkvdec_reset,
1794 	.set_grf = rkvdec_3368_set_grf,
1795 };
1796 
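/*
 * Per-SoC task ops. The variants differ mainly in the run hook;
 * RK3399 additionally prepares with a reset and records extra info
 * on finish, and RK3328 brings its own isr.
 */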
1797 static struct mpp_dev_ops rkvdec_v1_dev_ops = {
1798 	.alloc_task = rkvdec_alloc_task,
1799 	.run = rkvdec_run,
1800 	.irq = rkvdec_irq,
1801 	.isr = rkvdec_isr,
1802 	.finish = rkvdec_finish,
1803 	.result = rkvdec_result,
1804 	.free_task = rkvdec_free_task,
1805 };
1806 
1807 static struct mpp_dev_ops rkvdec_px30_dev_ops = {
1808 	.alloc_task = rkvdec_alloc_task,
1809 	.run = rkvdec_px30_run,
1810 	.irq = rkvdec_irq,
1811 	.isr = rkvdec_isr,
1812 	.finish = rkvdec_finish,
1813 	.result = rkvdec_result,
1814 	.free_task = rkvdec_free_task,
1815 };
1816 
1817 static struct mpp_hw_ops rkvdec_3328_hw_ops = {
1818 	.init = rkvdec_3328_init,
1819 	.exit = rkvdec_3328_exit,
1820 	.clk_on = rkvdec_clk_on,
1821 	.clk_off = rkvdec_clk_off,
1822 	.get_freq = rkvdec_3328_get_freq,
1823 	.set_freq = rkvdec_3328_set_freq,
1824 	.reduce_freq = rkvdec_3328_reduce_freq,
1825 	.reset = rkvdec_sip_reset,
1826 };
1827 
1828 static struct mpp_dev_ops rkvdec_3328_dev_ops = {
1829 	.alloc_task = rkvdec_alloc_task,
1830 	.run = rkvdec_3328_run,
1831 	.irq = rkvdec_irq,
1832 	.isr = rkvdec_3328_isr,
1833 	.finish = rkvdec_finish,
1834 	.result = rkvdec_result,
1835 	.free_task = rkvdec_free_task,
1836 };
1837 
1838 static struct mpp_dev_ops rkvdec_3399_dev_ops = {
1839 	.alloc_task = rkvdec_alloc_task,
1840 	.prepare = rkvdec_prepare_with_reset,
1841 	.run = rkvdec_run,
1842 	.irq = rkvdec_irq,
1843 	.isr = rkvdec_isr,
1844 	.finish = rkvdec_finish_with_record_info,
1845 	.result = rkvdec_result,
1846 	.free_task = rkvdec_free_task,
1847 };
1848 
1849 static struct mpp_dev_ops rkvdec_1126_dev_ops = {
1850 	.alloc_task = rkvdec_alloc_task,
1851 	.run = rkvdec_1126_run,
1852 	.irq = rkvdec_irq,
1853 	.isr = rkvdec_isr,
1854 	.finish = rkvdec_finish,
1855 	.result = rkvdec_result,
1856 	.free_task = rkvdec_free_task,
1857 };
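
/*
 * Per-variant device data: each entry binds a device type and its
 * register description (hw_info/trans_info) to the matching hw_ops
 * and dev_ops tables. The of_device_id table below selects one of
 * these at probe time.
 */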
1858 static const struct mpp_dev_var rk_hevcdec_data = {
1859 	.device_type = MPP_DEVICE_HEVC_DEC,
1860 	.hw_info = &rk_hevcdec_hw_info,
1861 	.trans_info = rk_hevcdec_trans,
1862 	.hw_ops = &rkvdec_v1_hw_ops,
1863 	.dev_ops = &rkvdec_v1_dev_ops,
1864 };
1865 
1866 static const struct mpp_dev_var rk_hevcdec_3036_data = {
1867 	.device_type = MPP_DEVICE_HEVC_DEC,
1868 	.hw_info = &rk_hevcdec_hw_info,
1869 	.trans_info = rk_hevcdec_trans,
1870 	.hw_ops = &rkvdec_3036_hw_ops,
1871 	.dev_ops = &rkvdec_v1_dev_ops,
1872 };
1873 
1874 static const struct mpp_dev_var rk_hevcdec_3368_data = {
1875 	.device_type = MPP_DEVICE_HEVC_DEC,
1876 	.hw_info = &rk_hevcdec_hw_info,
1877 	.trans_info = rk_hevcdec_trans,
1878 	.hw_ops = &rkvdec_3368_hw_ops,
1879 	.dev_ops = &rkvdec_v1_dev_ops,
1880 };
1881 
1882 static const struct mpp_dev_var rk_hevcdec_px30_data = {
1883 	.device_type = MPP_DEVICE_HEVC_DEC,
1884 	.hw_info = &rk_hevcdec_hw_info,
1885 	.trans_info = rk_hevcdec_trans,
1886 	.hw_ops = &rkvdec_px30_hw_ops,
1887 	.dev_ops = &rkvdec_px30_dev_ops,
1888 };
1889 
1890 static const struct mpp_dev_var rkvdec_v1_data = {
1891 	.device_type = MPP_DEVICE_RKVDEC,
1892 	.hw_info = &rkvdec_v1_hw_info,
1893 	.trans_info = rkvdec_v1_trans,
1894 	.hw_ops = &rkvdec_v1_hw_ops,
1895 	.dev_ops = &rkvdec_v1_dev_ops,
1896 };
1897 
1898 static const struct mpp_dev_var rkvdec_3399_data = {
1899 	.device_type = MPP_DEVICE_RKVDEC,
1900 	.hw_info = &rkvdec_v1_hw_info,
1901 	.trans_info = rkvdec_v1_trans,
1902 	.hw_ops = &rkvdec_3399_hw_ops,
1903 	.dev_ops = &rkvdec_3399_dev_ops,
1904 };
1905 
1906 static const struct mpp_dev_var rkvdec_3328_data = {
1907 	.device_type = MPP_DEVICE_RKVDEC,
1908 	.hw_info = &rkvdec_v1_hw_info,
1909 	.trans_info = rkvdec_v1_trans,
1910 	.hw_ops = &rkvdec_3328_hw_ops,
1911 	.dev_ops = &rkvdec_3328_dev_ops,
1912 };
1913 
1914 static const struct mpp_dev_var rkvdec_1126_data = {
1915 	.device_type = MPP_DEVICE_RKVDEC,
1916 	.hw_info = &rkvdec_v1_hw_info,
1917 	.trans_info = rkvdec_v1_trans,
1918 	.hw_ops = &rkvdec_v1_hw_ops,
1919 	.dev_ops = &rkvdec_1126_dev_ops,
1920 };
1921 
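/*
 * Device tree match table. SoC-specific compatibles are guarded by
 * the corresponding CONFIG_CPU_* options, so variant data for other
 * SoCs is compiled out of a single-SoC kernel.
 */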
1922 static const struct of_device_id mpp_rkvdec_dt_match[] = {
1923 	{
1924 		.compatible = "rockchip,hevc-decoder",
1925 		.data = &rk_hevcdec_data,
1926 	},
1927 #ifdef CONFIG_CPU_PX30
1928 	{
1929 		.compatible = "rockchip,hevc-decoder-px30",
1930 		.data = &rk_hevcdec_px30_data,
1931 	},
1932 #endif
1933 #ifdef CONFIG_CPU_RK3036
1934 	{
1935 		.compatible = "rockchip,hevc-decoder-rk3036",
1936 		.data = &rk_hevcdec_3036_data,
1937 	},
1938 #endif
1939 #ifdef CONFIG_CPU_RK3368
1940 	{
1941 		.compatible = "rockchip,hevc-decoder-rk3368",
1942 		.data = &rk_hevcdec_3368_data,
1943 	},
1944 #endif
1945 	{
1946 		.compatible = "rockchip,rkv-decoder-v1",
1947 		.data = &rkvdec_v1_data,
1948 	},
1949 #ifdef CONFIG_CPU_RK3399
1950 	{
1951 		.compatible = "rockchip,rkv-decoder-rk3399",
1952 		.data = &rkvdec_3399_data,
1953 	},
1954 #endif
1955 #ifdef CONFIG_CPU_RK3328
1956 	{
1957 		.compatible = "rockchip,rkv-decoder-rk3328",
1958 		.data = &rkvdec_3328_data,
1959 	},
1960 #endif
1961 #ifdef CONFIG_CPU_RV1126
1962 	{
1963 		.compatible = "rockchip,rkv-decoder-rv1126",
1964 		.data = &rkvdec_1126_data,
1965 	},
1966 #endif
1967 	{},
1968 };
1969 
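/*
 * Probe: allocate the rkvdec device, pick the variant data from the
 * device tree match, run the common mpp probe, then hook up the
 * threaded interrupt handler and register the device with the mpp
 * service.
 */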
1970 static int rkvdec_probe(struct platform_device *pdev)
1971 {
1972 	struct device *dev = &pdev->dev;
1973 	struct rkvdec_dev *dec = NULL;
1974 	struct mpp_dev *mpp = NULL;
1975 	const struct of_device_id *match = NULL;
1976 	int ret = 0;
1977 
1978 	dev_info(dev, "probing start\n");
1979 	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
1980 	if (!dec)
1981 		return -ENOMEM;
1982 
1983 	mpp = &dec->mpp;
1984 	platform_set_drvdata(pdev, mpp);
1985 
1986 	if (pdev->dev.of_node) {
1987 		match = of_match_node(mpp_rkvdec_dt_match,
1988 				      pdev->dev.of_node);
1989 		if (match)
1990 			mpp->var = (struct mpp_dev_var *)match->data;
1991 	}
1992 
1993 	ret = mpp_dev_probe(mpp, pdev);
1994 	if (ret) {
1995 		dev_err(dev, "probe sub driver failed\n");
1996 		return ret;
1997 	}
1998 
1999 	ret = devm_request_threaded_irq(dev, mpp->irq,
2000 					mpp_dev_irq,
2001 					mpp_dev_isr_sched,
2002 					IRQF_SHARED,
2003 					dev_name(dev), mpp);
2004 	if (ret) {
2005 		dev_err(dev, "register interrupt handler failed\n");
2006 		return ret;
2007 	}
2008 
2009 	mpp->session_max_buffers = RKVDEC_SESSION_MAX_BUFFERS;
2010 	rkvdec_procfs_init(mpp);
2011 	/* register current device to mpp service */
2012 	mpp_dev_register_srv(mpp, mpp->srv);
2013 	dev_info(dev, "probing finish\n");
2014 
2015 	return 0;
2016 }
2017 
2018 static int rkvdec_remove(struct platform_device *pdev)
2019 {
2020 	struct device *dev = &pdev->dev;
2021 	struct rkvdec_dev *dec = to_rkvdec_dev(platform_get_drvdata(pdev));
2022 
2023 	dev_info(dev, "remove device\n");
2024 	mpp_dev_remove(&dec->mpp);
2025 	rkvdec_procfs_remove(&dec->mpp);
2026 
2027 	return 0;
2028 }
2029 
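/*
 * The platform driver is only exported here; presumably the mpp
 * service core registers rockchip_rkvdec_driver together with the
 * other mpp sub-drivers.
 */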
2030 struct platform_driver rockchip_rkvdec_driver = {
2031 	.probe = rkvdec_probe,
2032 	.remove = rkvdec_remove,
2033 	.shutdown = mpp_dev_shutdown,
2034 	.driver = {
2035 		.name = RKVDEC_DRIVER_NAME,
2036 		.of_match_table = of_match_ptr(mpp_rkvdec_dt_match),
2037 	},
2038 };
2039 EXPORT_SYMBOL(rockchip_rkvdec_driver);
2040