xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_rkvdec.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * author:
6*4882a593Smuzhiyun  *	Alpha Lin, alpha.lin@rock-chips.com
7*4882a593Smuzhiyun  *	Randy Li, randy.li@rock-chips.com
8*4882a593Smuzhiyun  *	Ding Wei, leo.ding@rock-chips.com
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun #include <asm/cacheflush.h>
12*4882a593Smuzhiyun #include <linux/clk.h>
13*4882a593Smuzhiyun #include <linux/delay.h>
14*4882a593Smuzhiyun #include <linux/devfreq.h>
15*4882a593Smuzhiyun #include <linux/devfreq_cooling.h>
16*4882a593Smuzhiyun #include <linux/gfp.h>
17*4882a593Smuzhiyun #include <linux/interrupt.h>
18*4882a593Smuzhiyun #include <linux/iopoll.h>
19*4882a593Smuzhiyun #include <linux/module.h>
20*4882a593Smuzhiyun #include <linux/types.h>
21*4882a593Smuzhiyun #include <linux/of_platform.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <linux/uaccess.h>
24*4882a593Smuzhiyun #include <linux/regmap.h>
25*4882a593Smuzhiyun #include <linux/kernel.h>
26*4882a593Smuzhiyun #include <linux/thermal.h>
27*4882a593Smuzhiyun #include <linux/notifier.h>
28*4882a593Smuzhiyun #include <linux/proc_fs.h>
29*4882a593Smuzhiyun #include <linux/rockchip/rockchip_sip.h>
30*4882a593Smuzhiyun #include <linux/regulator/consumer.h>
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #include <soc/rockchip/pm_domains.h>
33*4882a593Smuzhiyun #include <soc/rockchip/rockchip_sip.h>
34*4882a593Smuzhiyun #include <soc/rockchip/rockchip_opp_select.h>
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #include "mpp_debug.h"
37*4882a593Smuzhiyun #include "mpp_common.h"
38*4882a593Smuzhiyun #include "mpp_iommu.h"
39*4882a593Smuzhiyun #include <soc/rockchip/rockchip_iommu.h>
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun #include "hack/mpp_hack_px30.h"
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #define RKVDEC_DRIVER_NAME		"mpp_rkvdec"
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #define IOMMU_GET_BUS_ID(x)		(((x) >> 6) & 0x1f)
46*4882a593Smuzhiyun #define IOMMU_PAGE_SIZE			SZ_4K
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #define	RKVDEC_SESSION_MAX_BUFFERS	40
49*4882a593Smuzhiyun /* The maximum registers number of all the version */
50*4882a593Smuzhiyun #define HEVC_DEC_REG_NUM		68
51*4882a593Smuzhiyun #define HEVC_DEC_REG_HW_ID_INDEX	0
52*4882a593Smuzhiyun #define HEVC_DEC_REG_START_INDEX	0
53*4882a593Smuzhiyun #define HEVC_DEC_REG_END_INDEX		67
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun #define RKVDEC_V1_REG_NUM		78
56*4882a593Smuzhiyun #define RKVDEC_V1_REG_HW_ID_INDEX	0
57*4882a593Smuzhiyun #define RKVDEC_V1_REG_START_INDEX	0
58*4882a593Smuzhiyun #define RKVDEC_V1_REG_END_INDEX		77
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun #define RKVDEC_V2_REG_NUM		109
61*4882a593Smuzhiyun #define RKVDEC_V2_REG_HW_ID_INDEX	0
62*4882a593Smuzhiyun #define RKVDEC_V2_REG_START_INDEX	0
63*4882a593Smuzhiyun #define RKVDEC_V2_REG_END_INDEX		108
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun #define RKVDEC_REG_INT_EN		0x004
66*4882a593Smuzhiyun #define RKVDEC_REG_INT_EN_INDEX		(1)
67*4882a593Smuzhiyun #define RKVDEC_WR_DDR_ALIGN_EN		BIT(23)
68*4882a593Smuzhiyun #define RKVDEC_FORCE_SOFT_RESET_VALID	BIT(21)
69*4882a593Smuzhiyun #define RKVDEC_SOFTWARE_RESET_EN	BIT(20)
70*4882a593Smuzhiyun #define RKVDEC_INT_COLMV_REF_ERROR	BIT(17)
71*4882a593Smuzhiyun #define RKVDEC_INT_BUF_EMPTY		BIT(16)
72*4882a593Smuzhiyun #define RKVDEC_INT_TIMEOUT		BIT(15)
73*4882a593Smuzhiyun #define RKVDEC_INT_STRM_ERROR		BIT(14)
74*4882a593Smuzhiyun #define RKVDEC_INT_BUS_ERROR		BIT(13)
75*4882a593Smuzhiyun #define RKVDEC_DEC_INT_RAW		BIT(9)
76*4882a593Smuzhiyun #define RKVDEC_DEC_INT			BIT(8)
77*4882a593Smuzhiyun #define RKVDEC_DEC_TIMEOUT_EN		BIT(5)
78*4882a593Smuzhiyun #define RKVDEC_DEC_IRQ_DIS		BIT(4)
79*4882a593Smuzhiyun #define RKVDEC_CLOCK_GATE_EN		BIT(1)
80*4882a593Smuzhiyun #define RKVDEC_DEC_START		BIT(0)
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun #define RKVDEC_REG_SYS_CTRL		0x008
83*4882a593Smuzhiyun #define RKVDEC_REG_SYS_CTRL_INDEX	(2)
84*4882a593Smuzhiyun #define RKVDEC_RGE_WIDTH_INDEX		(3)
85*4882a593Smuzhiyun #define RKVDEC_GET_FORMAT(x)		(((x) >> 20) & 0x3)
86*4882a593Smuzhiyun #define REVDEC_GET_PROD_NUM(x)		(((x) >> 16) & 0xffff)
87*4882a593Smuzhiyun #define RKVDEC_GET_WIDTH(x)		(((x) & 0x3ff) << 4)
88*4882a593Smuzhiyun #define RKVDEC_FMT_H265D		(0)
89*4882a593Smuzhiyun #define RKVDEC_FMT_H264D		(1)
90*4882a593Smuzhiyun #define RKVDEC_FMT_VP9D			(2)
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun #define RKVDEC_REG_RLC_BASE		0x010
93*4882a593Smuzhiyun #define RKVDEC_REG_RLC_BASE_INDEX	(4)
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun #define RKVDEC_RGE_YSTRDE_INDEX		(8)
96*4882a593Smuzhiyun #define RKVDEC_GET_YSTRDE(x)		(((x) & 0x1fffff) << 4)
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun #define RKVDEC_REG_PPS_BASE		0x0a0
99*4882a593Smuzhiyun #define RKVDEC_REG_PPS_BASE_INDEX	(42)
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun #define RKVDEC_REG_VP9_REFCOLMV_BASE		0x0d0
102*4882a593Smuzhiyun #define RKVDEC_REG_VP9_REFCOLMV_BASE_INDEX	(52)
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun #define RKVDEC_REG_CACHE0_SIZE_BASE	0x41c
105*4882a593Smuzhiyun #define RKVDEC_REG_CACHE1_SIZE_BASE	0x45c
106*4882a593Smuzhiyun #define RKVDEC_REG_CLR_CACHE0_BASE	0x410
107*4882a593Smuzhiyun #define RKVDEC_REG_CLR_CACHE1_BASE	0x450
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun #define RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS	BIT(0)
110*4882a593Smuzhiyun #define RKVDEC_CACHE_PERMIT_READ_ALLOCATE	BIT(1)
111*4882a593Smuzhiyun #define RKVDEC_CACHE_LINE_SIZE_64_BYTES		BIT(4)
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun #define RKVDEC_POWER_CTL_INDEX		(99)
114*4882a593Smuzhiyun #define RKVDEC_POWER_CTL_BASE		0x018c
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun #define FALLBACK_STATIC_TEMPERATURE	55000
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun #define to_rkvdec_task(task)		\
119*4882a593Smuzhiyun 		container_of(task, struct rkvdec_task, mpp_task)
120*4882a593Smuzhiyun #define to_rkvdec_dev(dev)		\
121*4882a593Smuzhiyun 		container_of(dev, struct rkvdec_dev, mpp)
122*4882a593Smuzhiyun 
/* Task linking mode; this hardware only supports one-frame-at-a-time mode. */
enum RKVDEC_MODE {
	RKVDEC_MODE_NONE,
	RKVDEC_MODE_ONEFRAME,
	RKVDEC_MODE_BUTT
};

/* Events passed to rkvdec_devf_set_clk() to select a clock policy. */
enum SET_CLK_EVENT {
	EVENT_POWER_ON = 0,	/* restore the last recorded devfreq rates */
	EVENT_POWER_OFF,	/* set the rates given by the caller */
	EVENT_ADJUST,		/* record new rates, apply thermal divider if active */
	EVENT_THERMAL,		/* throttle: derive a divider from the target aclk rate */
	EVENT_BUTT,
};
136*4882a593Smuzhiyun 
/* Per-task state for one decode job submitted through the mpp framework. */
struct rkvdec_task {
	struct mpp_task mpp_task;

	enum RKVDEC_MODE link_mode;
	enum MPP_CLOCK_MODE clk_mode;
	/* register image sized for the largest variant (rkvdec v2) */
	u32 reg[RKVDEC_V2_REG_NUM];
	struct reg_offset_info off_inf;

	/* stream base address captured at task start */
	u32 strm_addr;
	/* interrupt status latched by the irq handler */
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
	/* ystride info */
	u32 pixels;
};
155*4882a593Smuzhiyun 
/* Device state for one rkvdec/rkhevc decoder instance. */
struct rkvdec_dev {
	struct mpp_dev mpp;
	/* sip smc reset lock */
	struct mutex sip_reset_lock;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info core_clk_info;
	struct mpp_clk_info cabac_clk_info;
	struct mpp_clk_info hevc_cabac_clk_info;
	u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	/* reset controls; niu/core/cabac lines are optional per SoC */
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_niu_a;
	struct reset_control *rst_niu_h;
	struct reset_control *rst_core;
	struct reset_control *rst_cabac;
	struct reset_control *rst_hevc_cabac;

	/* auxiliary page mapped into the iommu (presumably for fault handling
	 * of bad hardware addresses — confirm against the iommu fault handler)
	 */
	unsigned long aux_iova;
	struct page *aux_page;
#ifdef CONFIG_PM_DEVFREQ
	struct regulator *vdd;
	struct devfreq *devfreq;
	struct devfreq *parent_devfreq;
	struct notifier_block devfreq_nb;
	struct thermal_cooling_device *devfreq_cooling;
	struct thermal_zone_device *thermal_zone;
	/* coefficients for the static power model (see model_static_power) */
	u32 static_power_coeff;
	s32 ts[4];
	/* set clk lock */
	struct mutex set_clk_lock;
	/* thermal throttling divider; 0 means no throttling active */
	unsigned int thermal_div;
	/* last voltage programmed on vdd, in uV */
	unsigned long volt;
	/* rates recorded by the last EVENT_ADJUST, restored on power on */
	unsigned long devf_aclk_rate_hz;
	unsigned long devf_core_rate_hz;
	unsigned long devf_cabac_rate_hz;
#endif
	/* record last infos */
	u32 last_fmt;
	bool had_reset;
	bool grf_changed;
};
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun /*
204*4882a593Smuzhiyun  * hardware information
205*4882a593Smuzhiyun  */
/* Register-file geometry for the HEVC-only decoder variant (68 registers). */
static struct mpp_hw_info rk_hevcdec_hw_info = {
	.reg_num = HEVC_DEC_REG_NUM,
	.reg_id = HEVC_DEC_REG_HW_ID_INDEX,
	.reg_start = HEVC_DEC_REG_START_INDEX,
	.reg_end = HEVC_DEC_REG_END_INDEX,
	.reg_en = RKVDEC_REG_INT_EN_INDEX,
};

/* Register-file geometry for rkvdec v1 (78 registers). */
static struct mpp_hw_info rkvdec_v1_hw_info = {
	.reg_num = RKVDEC_V1_REG_NUM,
	.reg_id = RKVDEC_V1_REG_HW_ID_INDEX,
	.reg_start = RKVDEC_V1_REG_START_INDEX,
	.reg_end = RKVDEC_V1_REG_END_INDEX,
	.reg_en = RKVDEC_REG_INT_EN_INDEX,
};
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun /*
223*4882a593Smuzhiyun  * file handle translate information
224*4882a593Smuzhiyun  */
/*
 * Per-format lists of register indices that hold fd-based buffer addresses
 * and therefore need fd -> iova translation before the job is started.
 */
static const u16 trans_tbl_h264d[] = {
	4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
	23, 24, 41, 42, 43, 48, 75
};

static const u16 trans_tbl_h265d[] = {
	4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
	23, 24, 42, 43
};

static const u16 trans_tbl_vp9d[] = {
	4, 6, 7, 11, 12, 13, 14, 15, 16
};

/* HEVC-only variant: only the H.265 table applies. */
static struct mpp_trans_info rk_hevcdec_trans[] = {
	[RKVDEC_FMT_H265D] = {
		.count = ARRAY_SIZE(trans_tbl_h265d),
		.table = trans_tbl_h265d,
	},
};

/* Full rkvdec v1: H.265, H.264 and VP9, indexed by RKVDEC_FMT_*. */
static struct mpp_trans_info rkvdec_v1_trans[] = {
	[RKVDEC_FMT_H265D] = {
		.count = ARRAY_SIZE(trans_tbl_h265d),
		.table = trans_tbl_h265d,
	},
	[RKVDEC_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[RKVDEC_FMT_VP9D] = {
		.count = ARRAY_SIZE(trans_tbl_vp9d),
		.table = trans_tbl_vp9d,
	},
};
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun #ifdef CONFIG_PM_DEVFREQ
rkvdec_devf_set_clk(struct rkvdec_dev * dec,unsigned long aclk_rate_hz,unsigned long core_rate_hz,unsigned long cabac_rate_hz,unsigned int event)262*4882a593Smuzhiyun static int rkvdec_devf_set_clk(struct rkvdec_dev *dec,
263*4882a593Smuzhiyun 			       unsigned long aclk_rate_hz,
264*4882a593Smuzhiyun 			       unsigned long core_rate_hz,
265*4882a593Smuzhiyun 			       unsigned long cabac_rate_hz,
266*4882a593Smuzhiyun 			       unsigned int event)
267*4882a593Smuzhiyun {
268*4882a593Smuzhiyun 	struct clk *aclk = dec->aclk_info.clk;
269*4882a593Smuzhiyun 	struct clk *clk_core = dec->core_clk_info.clk;
270*4882a593Smuzhiyun 	struct clk *clk_cabac = dec->cabac_clk_info.clk;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	mutex_lock(&dec->set_clk_lock);
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	switch (event) {
275*4882a593Smuzhiyun 	case EVENT_POWER_ON:
276*4882a593Smuzhiyun 		clk_set_rate(aclk, dec->devf_aclk_rate_hz);
277*4882a593Smuzhiyun 		clk_set_rate(clk_core, dec->devf_core_rate_hz);
278*4882a593Smuzhiyun 		clk_set_rate(clk_cabac, dec->devf_cabac_rate_hz);
279*4882a593Smuzhiyun 		dec->thermal_div = 0;
280*4882a593Smuzhiyun 		break;
281*4882a593Smuzhiyun 	case EVENT_POWER_OFF:
282*4882a593Smuzhiyun 		clk_set_rate(aclk, aclk_rate_hz);
283*4882a593Smuzhiyun 		clk_set_rate(clk_core, core_rate_hz);
284*4882a593Smuzhiyun 		clk_set_rate(clk_cabac, cabac_rate_hz);
285*4882a593Smuzhiyun 		dec->thermal_div = 0;
286*4882a593Smuzhiyun 		break;
287*4882a593Smuzhiyun 	case EVENT_ADJUST:
288*4882a593Smuzhiyun 		if (!dec->thermal_div) {
289*4882a593Smuzhiyun 			clk_set_rate(aclk, aclk_rate_hz);
290*4882a593Smuzhiyun 			clk_set_rate(clk_core, core_rate_hz);
291*4882a593Smuzhiyun 			clk_set_rate(clk_cabac, cabac_rate_hz);
292*4882a593Smuzhiyun 		} else {
293*4882a593Smuzhiyun 			clk_set_rate(aclk,
294*4882a593Smuzhiyun 				     aclk_rate_hz / dec->thermal_div);
295*4882a593Smuzhiyun 			clk_set_rate(clk_core,
296*4882a593Smuzhiyun 				     core_rate_hz / dec->thermal_div);
297*4882a593Smuzhiyun 			clk_set_rate(clk_cabac,
298*4882a593Smuzhiyun 				     cabac_rate_hz / dec->thermal_div);
299*4882a593Smuzhiyun 		}
300*4882a593Smuzhiyun 		dec->devf_aclk_rate_hz = aclk_rate_hz;
301*4882a593Smuzhiyun 		dec->devf_core_rate_hz = core_rate_hz;
302*4882a593Smuzhiyun 		dec->devf_cabac_rate_hz = cabac_rate_hz;
303*4882a593Smuzhiyun 		break;
304*4882a593Smuzhiyun 	case EVENT_THERMAL:
305*4882a593Smuzhiyun 		dec->thermal_div = dec->devf_aclk_rate_hz / aclk_rate_hz;
306*4882a593Smuzhiyun 		if (dec->thermal_div > 4)
307*4882a593Smuzhiyun 			dec->thermal_div = 4;
308*4882a593Smuzhiyun 		if (dec->thermal_div) {
309*4882a593Smuzhiyun 			clk_set_rate(aclk,
310*4882a593Smuzhiyun 				     dec->devf_aclk_rate_hz / dec->thermal_div);
311*4882a593Smuzhiyun 			clk_set_rate(clk_core,
312*4882a593Smuzhiyun 				     dec->devf_core_rate_hz / dec->thermal_div);
313*4882a593Smuzhiyun 			clk_set_rate(clk_cabac,
314*4882a593Smuzhiyun 				     dec->devf_cabac_rate_hz / dec->thermal_div);
315*4882a593Smuzhiyun 		}
316*4882a593Smuzhiyun 		break;
317*4882a593Smuzhiyun 	}
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun 	mutex_unlock(&dec->set_clk_lock);
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	return 0;
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun 
/*
 * devfreq .target callback: move the decoder to the requested frequency,
 * sequencing the regulator so that voltage is always sufficient for the
 * running frequency (raise volt before freq, lower volt after freq).
 */
static int devfreq_target(struct device *dev,
			  unsigned long *freq, u32 flags)
{
	int ret = 0;
	unsigned int clk_event;
	struct dev_pm_opp *opp;
	unsigned long target_volt, target_freq;
	unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

	struct rkvdec_dev *dec = dev_get_drvdata(dev);
	struct devfreq *devfreq = dec->devfreq;
	struct devfreq_dev_status *stat = &devfreq->last_status;
	unsigned long old_clk_rate = stat->current_frequency;

	/* Map the requested frequency onto a supported OPP. */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	target_freq = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	if (target_freq < *freq) {
		/* OPP below the request: treat it as thermal throttling. */
		clk_event = EVENT_THERMAL;
		aclk_rate_hz = target_freq;
		core_rate_hz = target_freq;
		cabac_rate_hz = target_freq;
	} else {
		/* busy -> full rates; idle -> the rates passed through */
		clk_event = stat->busy_time ? EVENT_POWER_ON : EVENT_POWER_OFF;
		aclk_rate_hz = dec->devf_aclk_rate_hz;
		core_rate_hz = dec->devf_core_rate_hz;
		cabac_rate_hz = dec->devf_cabac_rate_hz;
	}

	/* Frequency unchanged: only the voltage may need adjusting. */
	if (old_clk_rate == target_freq) {
		if (dec->volt == target_volt)
			return ret;
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			return ret;
		}
		dec->volt = target_volt;
		return 0;
	}

	/* Scaling up: raise the voltage before raising the frequency. */
	if (old_clk_rate < target_freq) {
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "set voltage %lu uV\n", target_volt);
			return ret;
		}
	}

	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_freq);
	rkvdec_devf_set_clk(dec, aclk_rate_hz, core_rate_hz, cabac_rate_hz, clk_event);
	stat->current_frequency = target_freq;

	/* Scaling down: lower the voltage after the frequency dropped. */
	if (old_clk_rate > target_freq) {
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "set vol %lu uV\n", target_volt);
			return ret;
		}
	}
	dec->volt = target_volt;

	return ret;
}
395*4882a593Smuzhiyun 
devfreq_get_cur_freq(struct device * dev,unsigned long * freq)396*4882a593Smuzhiyun static int devfreq_get_cur_freq(struct device *dev,
397*4882a593Smuzhiyun 				unsigned long *freq)
398*4882a593Smuzhiyun {
399*4882a593Smuzhiyun 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 	*freq = clk_get_rate(dec->aclk_info.clk);
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	return 0;
404*4882a593Smuzhiyun }
405*4882a593Smuzhiyun 
devfreq_get_dev_status(struct device * dev,struct devfreq_dev_status * stat)406*4882a593Smuzhiyun static int devfreq_get_dev_status(struct device *dev,
407*4882a593Smuzhiyun 				  struct devfreq_dev_status *stat)
408*4882a593Smuzhiyun {
409*4882a593Smuzhiyun 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
410*4882a593Smuzhiyun 	struct devfreq *devfreq = dec->devfreq;
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	memcpy(stat, &devfreq->last_status, sizeof(*stat));
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 	return 0;
415*4882a593Smuzhiyun }
416*4882a593Smuzhiyun 
/* Callbacks registered with the devfreq core for this decoder. */
static struct devfreq_dev_profile devfreq_profile = {
	.target	= devfreq_target,
	.get_cur_freq = devfreq_get_cur_freq,
	.get_dev_status	= devfreq_get_dev_status,
};
422*4882a593Smuzhiyun 
/*
 * Static (leakage) power model for devfreq cooling:
 *   P_static ~ coeff * V^3 * f(T), with f(T) a cubic polynomial in the
 * temperature (coefficients dec->ts[], from the device tree).
 * NOTE(review): voltage is presumably in mV so that V^3 >> 10 and the
 * final / 1000000 yield mW — confirm against devfreq_cooling callers.
 */
static unsigned long
model_static_power(struct devfreq *devfreq,
		   unsigned long voltage)
{
	struct device *dev = devfreq->dev.parent;
	struct rkvdec_dev *dec = dev_get_drvdata(dev);
	struct thermal_zone_device *tz = dec->thermal_zone;

	int temperature;
	unsigned long temp;
	unsigned long temp_squared, temp_cubed, temp_scaling_factor;
	const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;

	if (!IS_ERR_OR_NULL(tz) && tz->ops->get_temp) {
		int ret;

		ret = tz->ops->get_temp(tz, &temperature);
		if (ret) {
			/* fall back to a fixed temperature on read failure */
			dev_warn_ratelimited(dev, "ddr thermal zone failed\n");
			temperature = FALLBACK_STATIC_TEMPERATURE;
		}
	} else {
		temperature = FALLBACK_STATIC_TEMPERATURE;
	}

	/*
	 * Calculate the temperature scaling factor. To be applied to the
	 * voltage scaled power.
	 */
	temp = temperature / 1000;	/* millidegrees -> degrees C */
	temp_squared = temp * temp;
	temp_cubed = temp_squared * temp;
	temp_scaling_factor = (dec->ts[3] * temp_cubed)
	    + (dec->ts[2] * temp_squared) + (dec->ts[1] * temp) + dec->ts[0];

	return (((dec->static_power_coeff * voltage_cubed) >> 20)
		* temp_scaling_factor) / 1000000;
}
461*4882a593Smuzhiyun 
/*
 * Power model handed to devfreq_cooling; dyn_power_coeff is a default and
 * is overridden from the device tree in power_model_simple_init().
 */
static struct devfreq_cooling_power cooling_power_data = {
	.get_static_power = model_static_power,
	.dyn_power_coeff = 120,
};
466*4882a593Smuzhiyun 
power_model_simple_init(struct mpp_dev * mpp)467*4882a593Smuzhiyun static int power_model_simple_init(struct mpp_dev *mpp)
468*4882a593Smuzhiyun {
469*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
470*4882a593Smuzhiyun 	struct device_node *np = mpp->dev->of_node;
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun 	u32 temp;
473*4882a593Smuzhiyun 	const char *tz_name;
474*4882a593Smuzhiyun 	struct device_node *power_model_node;
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	power_model_node = of_get_child_by_name(np, "vcodec_power_model");
477*4882a593Smuzhiyun 	if (!power_model_node) {
478*4882a593Smuzhiyun 		dev_err(mpp->dev, "could not find power_model node\n");
479*4882a593Smuzhiyun 		return -ENODEV;
480*4882a593Smuzhiyun 	}
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	if (of_property_read_string(power_model_node,
483*4882a593Smuzhiyun 				    "thermal-zone",
484*4882a593Smuzhiyun 				    &tz_name)) {
485*4882a593Smuzhiyun 		dev_err(mpp->dev, "ts in power_model not available\n");
486*4882a593Smuzhiyun 		return -EINVAL;
487*4882a593Smuzhiyun 	}
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	dec->thermal_zone = thermal_zone_get_zone_by_name(tz_name);
490*4882a593Smuzhiyun 	if (IS_ERR(dec->thermal_zone)) {
491*4882a593Smuzhiyun 		pr_warn("Error getting ddr thermal zone, not yet ready?\n");
492*4882a593Smuzhiyun 		dec->thermal_zone = NULL;
493*4882a593Smuzhiyun 		return -EPROBE_DEFER;
494*4882a593Smuzhiyun 	}
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun 	if (of_property_read_u32(power_model_node,
497*4882a593Smuzhiyun 				 "static-power-coefficient",
498*4882a593Smuzhiyun 				 &dec->static_power_coeff)) {
499*4882a593Smuzhiyun 		dev_err(mpp->dev, "static-power-coefficient not available\n");
500*4882a593Smuzhiyun 		return -EINVAL;
501*4882a593Smuzhiyun 	}
502*4882a593Smuzhiyun 	if (of_property_read_u32(power_model_node,
503*4882a593Smuzhiyun 				 "dynamic-power-coefficient",
504*4882a593Smuzhiyun 				 &temp)) {
505*4882a593Smuzhiyun 		dev_err(mpp->dev, "dynamic-power-coefficient not available\n");
506*4882a593Smuzhiyun 		return -EINVAL;
507*4882a593Smuzhiyun 	}
508*4882a593Smuzhiyun 	cooling_power_data.dyn_power_coeff = (unsigned long)temp;
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 	if (of_property_read_u32_array(power_model_node,
511*4882a593Smuzhiyun 				       "ts",
512*4882a593Smuzhiyun 				       (u32 *)dec->ts,
513*4882a593Smuzhiyun 				       4)) {
514*4882a593Smuzhiyun 		dev_err(mpp->dev, "ts in power_model not available\n");
515*4882a593Smuzhiyun 		return -EINVAL;
516*4882a593Smuzhiyun 	}
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun 	return 0;
519*4882a593Smuzhiyun }
520*4882a593Smuzhiyun 
devfreq_notifier_call(struct notifier_block * nb,unsigned long event,void * data)521*4882a593Smuzhiyun static int devfreq_notifier_call(struct notifier_block *nb,
522*4882a593Smuzhiyun 				 unsigned long event,
523*4882a593Smuzhiyun 				 void *data)
524*4882a593Smuzhiyun {
525*4882a593Smuzhiyun 	struct rkvdec_dev *dec = container_of(nb,
526*4882a593Smuzhiyun 					      struct rkvdec_dev,
527*4882a593Smuzhiyun 					      devfreq_nb);
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	if (!dec)
530*4882a593Smuzhiyun 		return NOTIFY_OK;
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun 	if (event == DEVFREQ_PRECHANGE)
533*4882a593Smuzhiyun 		mutex_lock(&dec->sip_reset_lock);
534*4882a593Smuzhiyun 	else if (event == DEVFREQ_POSTCHANGE)
535*4882a593Smuzhiyun 		mutex_unlock(&dec->sip_reset_lock);
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	return NOTIFY_OK;
538*4882a593Smuzhiyun }
539*4882a593Smuzhiyun #endif
540*4882a593Smuzhiyun 
/*
 * NOTE: rkvdec/rkhevc store the scaling list address in the pps buffer;
 * the hardware reads it using the pps id found in the video stream data.
 *
 * So we need to translate that address in the iommu case. The address data
 * also uses the 10bit fd + 22bit offset layout.
 * Because the userspace decoder does not provide the pps id in the register
 * file sets, the kernel driver must translate each scaling list address in
 * the pps buffer, which means 256 pps entries for H.264 and 64 for H.265.
 *
 * In order to optimize performance, the kernel driver asks the userspace
 * decoder to set every scaling list address in the pps buffer to the same
 * one that will be used by the current decoding task. The kernel driver
 * then only has to translate the first address and copy it to all pps
 * entries.
 */
fill_scaling_list_pps(struct rkvdec_task * task,int fd,int offset,int count,int pps_info_size,int sub_addr_offset)556*4882a593Smuzhiyun static int fill_scaling_list_pps(struct rkvdec_task *task,
557*4882a593Smuzhiyun 				 int fd, int offset, int count,
558*4882a593Smuzhiyun 				 int pps_info_size, int sub_addr_offset)
559*4882a593Smuzhiyun {
560*4882a593Smuzhiyun 	struct dma_buf *dmabuf = NULL;
561*4882a593Smuzhiyun 	void *vaddr = NULL;
562*4882a593Smuzhiyun 	u8 *pps = NULL;
563*4882a593Smuzhiyun 	u32 scaling_fd = 0;
564*4882a593Smuzhiyun 	int ret = 0;
565*4882a593Smuzhiyun 	u32 base = sub_addr_offset;
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	dmabuf = dma_buf_get(fd);
568*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(dmabuf)) {
569*4882a593Smuzhiyun 		mpp_err("invliad pps buffer\n");
570*4882a593Smuzhiyun 		return -ENOENT;
571*4882a593Smuzhiyun 	}
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
574*4882a593Smuzhiyun 	if (ret) {
575*4882a593Smuzhiyun 		mpp_err("can't access the pps buffer\n");
576*4882a593Smuzhiyun 		goto done;
577*4882a593Smuzhiyun 	}
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun 	vaddr = dma_buf_vmap(dmabuf);
580*4882a593Smuzhiyun 	if (!vaddr) {
581*4882a593Smuzhiyun 		mpp_err("can't access the pps buffer\n");
582*4882a593Smuzhiyun 		ret = -EIO;
583*4882a593Smuzhiyun 		goto done;
584*4882a593Smuzhiyun 	}
585*4882a593Smuzhiyun 	pps = vaddr + offset;
586*4882a593Smuzhiyun 	/* NOTE: scaling buffer in pps, have no offset */
587*4882a593Smuzhiyun 	memcpy(&scaling_fd, pps + base, sizeof(scaling_fd));
588*4882a593Smuzhiyun 	scaling_fd = le32_to_cpu(scaling_fd);
589*4882a593Smuzhiyun 	if (scaling_fd > 0) {
590*4882a593Smuzhiyun 		struct mpp_mem_region *mem_region = NULL;
591*4882a593Smuzhiyun 		u32 tmp = 0;
592*4882a593Smuzhiyun 		int i = 0;
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 		mem_region = mpp_task_attach_fd(&task->mpp_task,
595*4882a593Smuzhiyun 						scaling_fd);
596*4882a593Smuzhiyun 		if (IS_ERR(mem_region)) {
597*4882a593Smuzhiyun 			mpp_err("scaling list fd %d attach failed\n", scaling_fd);
598*4882a593Smuzhiyun 			ret = PTR_ERR(mem_region);
599*4882a593Smuzhiyun 			goto done;
600*4882a593Smuzhiyun 		}
601*4882a593Smuzhiyun 
602*4882a593Smuzhiyun 		tmp = mem_region->iova & 0xffffffff;
603*4882a593Smuzhiyun 		tmp = cpu_to_le32(tmp);
604*4882a593Smuzhiyun 		mpp_debug(DEBUG_PPS_FILL,
605*4882a593Smuzhiyun 			  "pps at %p, scaling fd: %3d => %pad + offset %10d\n",
606*4882a593Smuzhiyun 			  pps, scaling_fd, &mem_region->iova, offset);
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun 		/* Fill the scaling list address in each pps entries */
609*4882a593Smuzhiyun 		for (i = 0; i < count; i++, base += pps_info_size)
610*4882a593Smuzhiyun 			memcpy(pps + base, &tmp, sizeof(tmp));
611*4882a593Smuzhiyun 	}
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun done:
614*4882a593Smuzhiyun 	dma_buf_vunmap(dmabuf, vaddr);
615*4882a593Smuzhiyun 	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
616*4882a593Smuzhiyun 	dma_buf_put(dmabuf);
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun 	return ret;
619*4882a593Smuzhiyun }
620*4882a593Smuzhiyun 
rkvdec_process_scl_fd(struct mpp_session * session,struct rkvdec_task * task,struct mpp_task_msgs * msgs)621*4882a593Smuzhiyun static int rkvdec_process_scl_fd(struct mpp_session *session,
622*4882a593Smuzhiyun 				 struct rkvdec_task *task,
623*4882a593Smuzhiyun 				 struct mpp_task_msgs *msgs)
624*4882a593Smuzhiyun {
625*4882a593Smuzhiyun 	int ret = 0;
626*4882a593Smuzhiyun 	int pps_fd;
627*4882a593Smuzhiyun 	u32 pps_offset;
628*4882a593Smuzhiyun 	int idx = RKVDEC_REG_PPS_BASE_INDEX;
629*4882a593Smuzhiyun 	u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
632*4882a593Smuzhiyun 		pps_fd = task->reg[idx];
633*4882a593Smuzhiyun 		pps_offset = 0;
634*4882a593Smuzhiyun 	} else {
635*4882a593Smuzhiyun 		pps_fd = task->reg[idx] & 0x3ff;
636*4882a593Smuzhiyun 		pps_offset = task->reg[idx] >> 10;
637*4882a593Smuzhiyun 	}
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 	pps_offset += mpp_query_reg_offset_info(&task->off_inf, idx);
640*4882a593Smuzhiyun 	if (pps_fd > 0) {
641*4882a593Smuzhiyun 		int pps_info_offset;
642*4882a593Smuzhiyun 		int pps_info_count;
643*4882a593Smuzhiyun 		int pps_info_size;
644*4882a593Smuzhiyun 		int scaling_list_addr_offset;
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun 		switch (fmt) {
647*4882a593Smuzhiyun 		case RKVDEC_FMT_H264D:
648*4882a593Smuzhiyun 			pps_info_offset = pps_offset;
649*4882a593Smuzhiyun 			pps_info_count = 256;
650*4882a593Smuzhiyun 			pps_info_size = 32;
651*4882a593Smuzhiyun 			scaling_list_addr_offset = 23;
652*4882a593Smuzhiyun 			break;
653*4882a593Smuzhiyun 		case RKVDEC_FMT_H265D:
654*4882a593Smuzhiyun 			pps_info_offset = pps_offset;
655*4882a593Smuzhiyun 			pps_info_count = 64;
656*4882a593Smuzhiyun 			pps_info_size = 80;
657*4882a593Smuzhiyun 			scaling_list_addr_offset = 74;
658*4882a593Smuzhiyun 			break;
659*4882a593Smuzhiyun 		default:
660*4882a593Smuzhiyun 			pps_info_offset = 0;
661*4882a593Smuzhiyun 			pps_info_count = 0;
662*4882a593Smuzhiyun 			pps_info_size = 0;
663*4882a593Smuzhiyun 			scaling_list_addr_offset = 0;
664*4882a593Smuzhiyun 			break;
665*4882a593Smuzhiyun 		}
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 		mpp_debug(DEBUG_PPS_FILL,
668*4882a593Smuzhiyun 			  "scaling list filling parameter:\n");
669*4882a593Smuzhiyun 		mpp_debug(DEBUG_PPS_FILL,
670*4882a593Smuzhiyun 			  "pps_info_offset %d\n", pps_info_offset);
671*4882a593Smuzhiyun 		mpp_debug(DEBUG_PPS_FILL,
672*4882a593Smuzhiyun 			  "pps_info_count  %d\n", pps_info_count);
673*4882a593Smuzhiyun 		mpp_debug(DEBUG_PPS_FILL,
674*4882a593Smuzhiyun 			  "pps_info_size   %d\n", pps_info_size);
675*4882a593Smuzhiyun 		mpp_debug(DEBUG_PPS_FILL,
676*4882a593Smuzhiyun 			  "scaling_list_addr_offset %d\n",
677*4882a593Smuzhiyun 			  scaling_list_addr_offset);
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 		if (pps_info_count) {
680*4882a593Smuzhiyun 			ret = fill_scaling_list_pps(task, pps_fd,
681*4882a593Smuzhiyun 						    pps_info_offset,
682*4882a593Smuzhiyun 						    pps_info_count,
683*4882a593Smuzhiyun 						    pps_info_size,
684*4882a593Smuzhiyun 						    scaling_list_addr_offset);
685*4882a593Smuzhiyun 			if (ret) {
686*4882a593Smuzhiyun 				mpp_err("fill pps failed\n");
687*4882a593Smuzhiyun 				goto fail;
688*4882a593Smuzhiyun 			}
689*4882a593Smuzhiyun 		}
690*4882a593Smuzhiyun 	}
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun fail:
693*4882a593Smuzhiyun 	return ret;
694*4882a593Smuzhiyun }
695*4882a593Smuzhiyun 
rkvdec_process_reg_fd(struct mpp_session * session,struct rkvdec_task * task,struct mpp_task_msgs * msgs)696*4882a593Smuzhiyun static int rkvdec_process_reg_fd(struct mpp_session *session,
697*4882a593Smuzhiyun 				 struct rkvdec_task *task,
698*4882a593Smuzhiyun 				 struct mpp_task_msgs *msgs)
699*4882a593Smuzhiyun {
700*4882a593Smuzhiyun 	int ret = 0;
701*4882a593Smuzhiyun 	u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	/*
704*4882a593Smuzhiyun 	 * special offset scale case
705*4882a593Smuzhiyun 	 *
706*4882a593Smuzhiyun 	 * This translation is for fd + offset translation.
707*4882a593Smuzhiyun 	 * One register has 32bits. We need to transfer both buffer file
708*4882a593Smuzhiyun 	 * handle and the start address offset so we packet file handle
709*4882a593Smuzhiyun 	 * and offset together using below format.
710*4882a593Smuzhiyun 	 *
711*4882a593Smuzhiyun 	 *  0~9  bit for buffer file handle range 0 ~ 1023
712*4882a593Smuzhiyun 	 * 10~31 bit for offset range 0 ~ 4M
713*4882a593Smuzhiyun 	 *
714*4882a593Smuzhiyun 	 * But on 4K case the offset can be larger the 4M
715*4882a593Smuzhiyun 	 * So on VP9 4K decoder colmv base we scale the offset by 16
716*4882a593Smuzhiyun 	 */
717*4882a593Smuzhiyun 	if (fmt == RKVDEC_FMT_VP9D) {
718*4882a593Smuzhiyun 		int fd;
719*4882a593Smuzhiyun 		u32 offset;
720*4882a593Smuzhiyun 		dma_addr_t iova = 0;
721*4882a593Smuzhiyun 		struct mpp_mem_region *mem_region = NULL;
722*4882a593Smuzhiyun 		int idx = RKVDEC_REG_VP9_REFCOLMV_BASE_INDEX;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
725*4882a593Smuzhiyun 			fd = task->reg[idx];
726*4882a593Smuzhiyun 			offset = 0;
727*4882a593Smuzhiyun 		} else {
728*4882a593Smuzhiyun 			fd = task->reg[idx] & 0x3ff;
729*4882a593Smuzhiyun 			offset = task->reg[idx] >> 10 << 4;
730*4882a593Smuzhiyun 		}
731*4882a593Smuzhiyun 		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
732*4882a593Smuzhiyun 		if (IS_ERR(mem_region)) {
733*4882a593Smuzhiyun 			mpp_err("reg[%03d]: %08x fd %d attach failed\n",
734*4882a593Smuzhiyun 				idx, task->reg[idx], fd);
735*4882a593Smuzhiyun 			return -EFAULT;
736*4882a593Smuzhiyun 		}
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 		iova = mem_region->iova;
739*4882a593Smuzhiyun 		task->reg[idx] = iova + offset;
740*4882a593Smuzhiyun 	}
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun 	ret = mpp_translate_reg_address(session, &task->mpp_task,
743*4882a593Smuzhiyun 					fmt, task->reg, &task->off_inf);
744*4882a593Smuzhiyun 	if (ret)
745*4882a593Smuzhiyun 		return ret;
746*4882a593Smuzhiyun 
747*4882a593Smuzhiyun 	mpp_translate_reg_offset_info(&task->mpp_task,
748*4882a593Smuzhiyun 				      &task->off_inf, task->reg);
749*4882a593Smuzhiyun 	return 0;
750*4882a593Smuzhiyun }
751*4882a593Smuzhiyun 
rkvdec_extract_task_msg(struct rkvdec_task * task,struct mpp_task_msgs * msgs)752*4882a593Smuzhiyun static int rkvdec_extract_task_msg(struct rkvdec_task *task,
753*4882a593Smuzhiyun 				   struct mpp_task_msgs *msgs)
754*4882a593Smuzhiyun {
755*4882a593Smuzhiyun 	u32 i;
756*4882a593Smuzhiyun 	int ret;
757*4882a593Smuzhiyun 	struct mpp_request *req;
758*4882a593Smuzhiyun 	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	for (i = 0; i < msgs->req_cnt; i++) {
761*4882a593Smuzhiyun 		u32 off_s, off_e;
762*4882a593Smuzhiyun 
763*4882a593Smuzhiyun 		req = &msgs->reqs[i];
764*4882a593Smuzhiyun 		if (!req->size)
765*4882a593Smuzhiyun 			continue;
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 		switch (req->cmd) {
768*4882a593Smuzhiyun 		case MPP_CMD_SET_REG_WRITE: {
769*4882a593Smuzhiyun 			off_s = hw_info->reg_start * sizeof(u32);
770*4882a593Smuzhiyun 			off_e = hw_info->reg_end * sizeof(u32);
771*4882a593Smuzhiyun 			ret = mpp_check_req(req, 0, sizeof(task->reg),
772*4882a593Smuzhiyun 					    off_s, off_e);
773*4882a593Smuzhiyun 			if (ret)
774*4882a593Smuzhiyun 				continue;
775*4882a593Smuzhiyun 			if (copy_from_user((u8 *)task->reg + req->offset,
776*4882a593Smuzhiyun 					   req->data, req->size)) {
777*4882a593Smuzhiyun 				mpp_err("copy_from_user reg failed\n");
778*4882a593Smuzhiyun 				return -EIO;
779*4882a593Smuzhiyun 			}
780*4882a593Smuzhiyun 			memcpy(&task->w_reqs[task->w_req_cnt++],
781*4882a593Smuzhiyun 			       req, sizeof(*req));
782*4882a593Smuzhiyun 		} break;
783*4882a593Smuzhiyun 		case MPP_CMD_SET_REG_READ: {
784*4882a593Smuzhiyun 			off_s = hw_info->reg_start * sizeof(u32);
785*4882a593Smuzhiyun 			off_e = hw_info->reg_end * sizeof(u32);
786*4882a593Smuzhiyun 			ret = mpp_check_req(req, 0, sizeof(task->reg),
787*4882a593Smuzhiyun 					    off_s, off_e);
788*4882a593Smuzhiyun 			if (ret)
789*4882a593Smuzhiyun 				continue;
790*4882a593Smuzhiyun 			memcpy(&task->r_reqs[task->r_req_cnt++],
791*4882a593Smuzhiyun 			       req, sizeof(*req));
792*4882a593Smuzhiyun 		} break;
793*4882a593Smuzhiyun 		case MPP_CMD_SET_REG_ADDR_OFFSET: {
794*4882a593Smuzhiyun 			mpp_extract_reg_offset_info(&task->off_inf, req);
795*4882a593Smuzhiyun 		} break;
796*4882a593Smuzhiyun 		default:
797*4882a593Smuzhiyun 			break;
798*4882a593Smuzhiyun 		}
799*4882a593Smuzhiyun 	}
800*4882a593Smuzhiyun 	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
801*4882a593Smuzhiyun 		  task->w_req_cnt, task->r_req_cnt);
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun 	return 0;
804*4882a593Smuzhiyun }
805*4882a593Smuzhiyun 
/*
 * Allocate and prepare one decode task from user-space messages.
 *
 * Copies register writes from the messages, translates scaling-list and
 * buffer fds to device addresses (unless the flags opt out), and records
 * the stream base address used later to compute the decoded length.
 * Returns the embedded struct mpp_task on success, NULL on failure
 * (task memory is released on the failure path).
 */
static void *rkvdec_alloc_task(struct mpp_session *session,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct rkvdec_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = rkvdec_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in pps for 264 and 265 */
	if (!(msgs->flags & MPP_FLAGS_SCL_FD_NO_TRANS)) {
		ret = rkvdec_process_scl_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = rkvdec_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	/* stream base is saved so rkvdec_finish() can compute decoded length */
	task->strm_addr = task->reg[RKVDEC_REG_RLC_BASE_INDEX];
	task->link_mode = RKVDEC_MODE_ONEFRAME;
	task->clk_mode = CLK_MODE_NORMAL;

	/* get resolution info */
	task->pixels = RKVDEC_GET_YSTRDE(task->reg[RKVDEC_RGE_YSTRDE_INDEX]);
	mpp_debug(DEBUG_TASK_INFO, "ystride=%d\n", task->pixels);

	mpp_debug_leave();

	return mpp_task;

fail:
	/* dump state for debugging, then release attached buffers and memory */
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}
859*4882a593Smuzhiyun 
rkvdec_prepare_with_reset(struct mpp_dev * mpp,struct mpp_task * mpp_task)860*4882a593Smuzhiyun static void *rkvdec_prepare_with_reset(struct mpp_dev *mpp,
861*4882a593Smuzhiyun 				       struct mpp_task *mpp_task)
862*4882a593Smuzhiyun {
863*4882a593Smuzhiyun 	unsigned long flags;
864*4882a593Smuzhiyun 	struct mpp_task *out_task = NULL;
865*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 	spin_lock_irqsave(&mpp->queue->running_lock, flags);
868*4882a593Smuzhiyun 	out_task = list_empty(&mpp->queue->running_list) ? mpp_task : NULL;
869*4882a593Smuzhiyun 	spin_unlock_irqrestore(&mpp->queue->running_lock, flags);
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	if (out_task && !dec->had_reset) {
872*4882a593Smuzhiyun 		struct rkvdec_task *task = to_rkvdec_task(out_task);
873*4882a593Smuzhiyun 		u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 		/* in 3399 3228 and 3229 chips, when 264 switch vp9,
876*4882a593Smuzhiyun 		 * hardware will timeout, and can't recover problem.
877*4882a593Smuzhiyun 		 * so reset it when 264 switch vp9, before hardware run.
878*4882a593Smuzhiyun 		 */
879*4882a593Smuzhiyun 		if (dec->last_fmt == RKVDEC_FMT_H264D && fmt == RKVDEC_FMT_VP9D) {
880*4882a593Smuzhiyun 			mpp_power_on(mpp);
881*4882a593Smuzhiyun 			mpp_dev_reset(mpp);
882*4882a593Smuzhiyun 			mpp_power_off(mpp);
883*4882a593Smuzhiyun 		}
884*4882a593Smuzhiyun 	}
885*4882a593Smuzhiyun 
886*4882a593Smuzhiyun 	return out_task;
887*4882a593Smuzhiyun }
888*4882a593Smuzhiyun 
/*
 * Program the hardware with the task's register set and kick off decoding.
 *
 * Writes cache configuration, replays all queued user register writes,
 * then starts the core by writing the interrupt-enable register with
 * RKVDEC_DEC_START set. The register write order (relaxed writes, then
 * wmb(), then the start write) must be preserved.
 */
static int rkvdec_run(struct mpp_dev *mpp,
		      struct mpp_task *mpp_task)
{
	int i;
	u32 reg_en;
	struct rkvdec_task *task = NULL;
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	task = to_rkvdec_task(mpp_task);
	reg_en = mpp_task->hw_info->reg_en;
	switch (task->link_mode) {
	case RKVDEC_MODE_ONEFRAME: {
		u32 reg;

		/* set cache size */
		reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS
			| RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
		if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
			reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;

		mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
		mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
		/* clear cache */
		mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
		mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
		/* set registers for hardware */
		for (i = 0; i < task->w_req_cnt; i++) {
			int s, e;
			struct mpp_request *req = &task->w_reqs[i];

			s = req->offset / sizeof(u32);
			e = s + req->size / sizeof(u32);
			mpp_write_req(mpp, task->reg, s, e, reg_en);
		}
		/* init current task */
		mpp->cur_task = mpp_task;
		mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
		/* Flush the register before the start the device */
		wmb();
		/* non-relaxed write: starts the decoder */
		mpp_write(mpp, RKVDEC_REG_INT_EN,
			  task->reg[reg_en] | RKVDEC_DEC_START);

		mpp_task_run_end(mpp_task, timing_en);
	} break;
	default:
		break;
	}

	mpp_debug_leave();

	return 0;
}
943*4882a593Smuzhiyun 
rkvdec_3328_run(struct mpp_dev * mpp,struct mpp_task * mpp_task)944*4882a593Smuzhiyun static int rkvdec_3328_run(struct mpp_dev *mpp,
945*4882a593Smuzhiyun 			   struct mpp_task *mpp_task)
946*4882a593Smuzhiyun {
947*4882a593Smuzhiyun 	u32 fmt = 0;
948*4882a593Smuzhiyun 	u32 cfg = 0;
949*4882a593Smuzhiyun 	struct rkvdec_task *task = NULL;
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	mpp_debug_enter();
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	task = to_rkvdec_task(mpp_task);
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 	/*
956*4882a593Smuzhiyun 	 * HW defeat workaround: VP9 and H.265 power save optimization cause decoding
957*4882a593Smuzhiyun 	 * corruption, disable optimization here.
958*4882a593Smuzhiyun 	 */
959*4882a593Smuzhiyun 	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
960*4882a593Smuzhiyun 	if (fmt == RKVDEC_FMT_VP9D || fmt == RKVDEC_FMT_H265D) {
961*4882a593Smuzhiyun 		cfg = task->reg[RKVDEC_POWER_CTL_INDEX] | 0xFFFF;
962*4882a593Smuzhiyun 		task->reg[RKVDEC_POWER_CTL_INDEX] = cfg & (~(1 << 12));
963*4882a593Smuzhiyun 		mpp_write_relaxed(mpp, RKVDEC_POWER_CTL_BASE,
964*4882a593Smuzhiyun 				  task->reg[RKVDEC_POWER_CTL_INDEX]);
965*4882a593Smuzhiyun 	}
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 	rkvdec_run(mpp, mpp_task);
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 	mpp_debug_leave();
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun 	return 0;
972*4882a593Smuzhiyun }
973*4882a593Smuzhiyun 
/*
 * RV1126 run hook: flush the IOMMU TLB before a one-frame run, then use
 * the common run path.
 */
static int rkvdec_1126_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	if (task->link_mode == RKVDEC_MODE_ONEFRAME)
		mpp_iommu_flush_tlb(mpp->iommu_info);

	return rkvdec_run(mpp, mpp_task);
}
983*4882a593Smuzhiyun 
/*
 * PX30 run hook: unconditionally flush the IOMMU TLB, then use the
 * common run path.
 */
static int rkvdec_px30_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	mpp_iommu_flush_tlb(mpp->iommu_info);
	return rkvdec_run(mpp, mpp_task);
}
990*4882a593Smuzhiyun 
/*
 * Hard IRQ handler (top half): latch the interrupt status, bail out if
 * the raw decode interrupt is not pending, otherwise mask further
 * interrupts and defer the real work to the threaded handler.
 */
static int rkvdec_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
	if (!(mpp->irq_status & RKVDEC_DEC_INT_RAW))
		return IRQ_NONE;

	/* writing 0 disables/acks the interrupt enable register */
	mpp_write(mpp, RKVDEC_REG_INT_EN, 0);

	return IRQ_WAKE_THREAD;
}
1001*4882a593Smuzhiyun 
rkvdec_isr(struct mpp_dev * mpp)1002*4882a593Smuzhiyun static int rkvdec_isr(struct mpp_dev *mpp)
1003*4882a593Smuzhiyun {
1004*4882a593Smuzhiyun 	u32 err_mask;
1005*4882a593Smuzhiyun 	struct rkvdec_task *task = NULL;
1006*4882a593Smuzhiyun 	struct mpp_task *mpp_task = mpp->cur_task;
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 	mpp_debug_enter();
1009*4882a593Smuzhiyun 	/* FIXME use a spin lock here */
1010*4882a593Smuzhiyun 	if (!mpp_task) {
1011*4882a593Smuzhiyun 		dev_err(mpp->dev, "no current task\n");
1012*4882a593Smuzhiyun 		goto done;
1013*4882a593Smuzhiyun 	}
1014*4882a593Smuzhiyun 	mpp_time_diff(mpp_task);
1015*4882a593Smuzhiyun 	mpp->cur_task = NULL;
1016*4882a593Smuzhiyun 	task = to_rkvdec_task(mpp_task);
1017*4882a593Smuzhiyun 	task->irq_status = mpp->irq_status;
1018*4882a593Smuzhiyun 	switch (task->link_mode) {
1019*4882a593Smuzhiyun 	case RKVDEC_MODE_ONEFRAME: {
1020*4882a593Smuzhiyun 		mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun 		err_mask = RKVDEC_INT_BUF_EMPTY
1023*4882a593Smuzhiyun 			| RKVDEC_INT_BUS_ERROR
1024*4882a593Smuzhiyun 			| RKVDEC_INT_COLMV_REF_ERROR
1025*4882a593Smuzhiyun 			| RKVDEC_INT_STRM_ERROR
1026*4882a593Smuzhiyun 			| RKVDEC_INT_TIMEOUT;
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 		if (err_mask & task->irq_status)
1029*4882a593Smuzhiyun 			atomic_inc(&mpp->reset_request);
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 		mpp_task_finish(mpp_task->session, mpp_task);
1032*4882a593Smuzhiyun 	} break;
1033*4882a593Smuzhiyun 	default:
1034*4882a593Smuzhiyun 		break;
1035*4882a593Smuzhiyun 	}
1036*4882a593Smuzhiyun done:
1037*4882a593Smuzhiyun 	mpp_debug_leave();
1038*4882a593Smuzhiyun 	return IRQ_HANDLED;
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun 
rkvdec_3328_isr(struct mpp_dev * mpp)1041*4882a593Smuzhiyun static int rkvdec_3328_isr(struct mpp_dev *mpp)
1042*4882a593Smuzhiyun {
1043*4882a593Smuzhiyun 	u32 err_mask;
1044*4882a593Smuzhiyun 	struct rkvdec_task *task = NULL;
1045*4882a593Smuzhiyun 	struct mpp_task *mpp_task = mpp->cur_task;
1046*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	mpp_debug_enter();
1049*4882a593Smuzhiyun 	/* FIXME use a spin lock here */
1050*4882a593Smuzhiyun 	if (!mpp_task) {
1051*4882a593Smuzhiyun 		dev_err(mpp->dev, "no current task\n");
1052*4882a593Smuzhiyun 		goto done;
1053*4882a593Smuzhiyun 	}
1054*4882a593Smuzhiyun 	mpp_time_diff(mpp_task);
1055*4882a593Smuzhiyun 	mpp->cur_task = NULL;
1056*4882a593Smuzhiyun 	task = to_rkvdec_task(mpp_task);
1057*4882a593Smuzhiyun 	task->irq_status = mpp->irq_status;
1058*4882a593Smuzhiyun 	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 	err_mask = RKVDEC_INT_BUF_EMPTY
1061*4882a593Smuzhiyun 		| RKVDEC_INT_BUS_ERROR
1062*4882a593Smuzhiyun 		| RKVDEC_INT_COLMV_REF_ERROR
1063*4882a593Smuzhiyun 		| RKVDEC_INT_STRM_ERROR
1064*4882a593Smuzhiyun 		| RKVDEC_INT_TIMEOUT;
1065*4882a593Smuzhiyun 	if (err_mask & task->irq_status)
1066*4882a593Smuzhiyun 		atomic_inc(&mpp->reset_request);
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	/* unmap reserve buffer */
1069*4882a593Smuzhiyun 	if (dec->aux_iova != -1) {
1070*4882a593Smuzhiyun 		iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
1071*4882a593Smuzhiyun 		dec->aux_iova = -1;
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	mpp_task_finish(mpp_task->session, mpp_task);
1075*4882a593Smuzhiyun done:
1076*4882a593Smuzhiyun 	mpp_debug_leave();
1077*4882a593Smuzhiyun 	return IRQ_HANDLED;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun 
/*
 * Post-run processing: read back the registers requested by user space
 * and rewrite two registers with software-derived values ("hacks"):
 * the interrupt-enable slot gets the latched irq status, and the RLC
 * base slot gets the consumed stream length (shifted left by 10 to
 * match the register's packed fd/offset layout).
 */
static int rkvdec_finish(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	u32 i;
	u32 dec_get;
	s32 dec_length;
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVDEC_MODE_ONEFRAME: {
		u32 s, e;
		struct mpp_request *req;

		/* read register after running */
		for (i = 0; i < task->r_req_cnt; i++) {
			req = &task->r_reqs[i];
			s = req->offset / sizeof(u32);
			e = s + req->size / sizeof(u32);
			mpp_read_req(mpp, task->reg, s, e);
		}
		/* revert hack for irq status */
		task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
		/* revert hack for decoded length */
		dec_get = mpp_read_relaxed(mpp, RKVDEC_REG_RLC_BASE);
		/* strm_addr was recorded at task-alloc time */
		dec_length = dec_get - task->strm_addr;
		task->reg[RKVDEC_REG_RLC_BASE_INDEX] = dec_length << 10;
		mpp_debug(DEBUG_REGISTER,
			  "dec_get %08x dec_length %d\n", dec_get, dec_length);
	} break;
	default:
		break;
	}

	mpp_debug_leave();

	return 0;
}
1119*4882a593Smuzhiyun 
rkvdec_finish_with_record_info(struct mpp_dev * mpp,struct mpp_task * mpp_task)1120*4882a593Smuzhiyun static int rkvdec_finish_with_record_info(struct mpp_dev *mpp,
1121*4882a593Smuzhiyun 					  struct mpp_task *mpp_task)
1122*4882a593Smuzhiyun {
1123*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1124*4882a593Smuzhiyun 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	rkvdec_finish(mpp, mpp_task);
1127*4882a593Smuzhiyun 	dec->last_fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
1128*4882a593Smuzhiyun 	dec->had_reset = (atomic_read(&mpp->reset_request) > 0) ? true : false;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	return 0;
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun 
rkvdec_result(struct mpp_dev * mpp,struct mpp_task * mpp_task,struct mpp_task_msgs * msgs)1133*4882a593Smuzhiyun static int rkvdec_result(struct mpp_dev *mpp,
1134*4882a593Smuzhiyun 			 struct mpp_task *mpp_task,
1135*4882a593Smuzhiyun 			 struct mpp_task_msgs *msgs)
1136*4882a593Smuzhiyun {
1137*4882a593Smuzhiyun 	u32 i;
1138*4882a593Smuzhiyun 	struct mpp_request *req;
1139*4882a593Smuzhiyun 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	/* FIXME may overflow the kernel */
1142*4882a593Smuzhiyun 	for (i = 0; i < task->r_req_cnt; i++) {
1143*4882a593Smuzhiyun 		req = &task->r_reqs[i];
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 		if (copy_to_user(req->data,
1146*4882a593Smuzhiyun 				 (u8 *)task->reg + req->offset,
1147*4882a593Smuzhiyun 				 req->size)) {
1148*4882a593Smuzhiyun 			mpp_err("copy_to_user reg fail\n");
1149*4882a593Smuzhiyun 			return -EIO;
1150*4882a593Smuzhiyun 		}
1151*4882a593Smuzhiyun 	}
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	return 0;
1154*4882a593Smuzhiyun }
1155*4882a593Smuzhiyun 
/*
 * Release a task: detach its buffers via mpp_task_finalize(), then free
 * the containing rkvdec_task allocation.
 */
static int rkvdec_free_task(struct mpp_session *session,
			    struct mpp_task *mpp_task)
{
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
rkvdec_procfs_remove(struct mpp_dev * mpp)1168*4882a593Smuzhiyun static int rkvdec_procfs_remove(struct mpp_dev *mpp)
1169*4882a593Smuzhiyun {
1170*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	if (dec->procfs) {
1173*4882a593Smuzhiyun 		proc_remove(dec->procfs);
1174*4882a593Smuzhiyun 		dec->procfs = NULL;
1175*4882a593Smuzhiyun 	}
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	return 0;
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun 
rkvdec_procfs_init(struct mpp_dev * mpp)1180*4882a593Smuzhiyun static int rkvdec_procfs_init(struct mpp_dev *mpp)
1181*4882a593Smuzhiyun {
1182*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
1185*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(dec->procfs)) {
1186*4882a593Smuzhiyun 		mpp_err("failed on open procfs\n");
1187*4882a593Smuzhiyun 		dec->procfs = NULL;
1188*4882a593Smuzhiyun 		return -EIO;
1189*4882a593Smuzhiyun 	}
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	/* for common mpp_dev options */
1192*4882a593Smuzhiyun 	mpp_procfs_create_common(dec->procfs, mpp);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	mpp_procfs_create_u32("aclk", 0644,
1195*4882a593Smuzhiyun 			      dec->procfs, &dec->aclk_info.debug_rate_hz);
1196*4882a593Smuzhiyun 	mpp_procfs_create_u32("clk_core", 0644,
1197*4882a593Smuzhiyun 			      dec->procfs, &dec->core_clk_info.debug_rate_hz);
1198*4882a593Smuzhiyun 	mpp_procfs_create_u32("clk_cabac", 0644,
1199*4882a593Smuzhiyun 			      dec->procfs, &dec->cabac_clk_info.debug_rate_hz);
1200*4882a593Smuzhiyun 	mpp_procfs_create_u32("clk_hevc_cabac", 0644,
1201*4882a593Smuzhiyun 			      dec->procfs, &dec->hevc_cabac_clk_info.debug_rate_hz);
1202*4882a593Smuzhiyun 	mpp_procfs_create_u32("session_buffers", 0644,
1203*4882a593Smuzhiyun 			      dec->procfs, &mpp->session_max_buffers);
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	return 0;
1206*4882a593Smuzhiyun }
1207*4882a593Smuzhiyun #else
/* Stub when CONFIG_ROCKCHIP_MPP_PROC_FS is disabled. */
static inline int rkvdec_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}
1212*4882a593Smuzhiyun 
/* Stub when CONFIG_ROCKCHIP_MPP_PROC_FS is disabled. */
static inline int rkvdec_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
1217*4882a593Smuzhiyun #endif
1218*4882a593Smuzhiyun 
/*
 * Common device init: grab GRF info, clocks, default clock rates, the
 * optional max-load DT property and the reset controls.
 *
 * Missing clocks/resets are only logged, never fatal — this function
 * always returns 0.
 */
static int rkvdec_init(struct mpp_dev *mpp)
{
	int ret;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mutex_init(&dec->sip_reset_lock);
	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVDEC];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->core_clk_info, "clk_core");
	if (ret)
		mpp_err("failed on clk_get clk_core\n");
	ret = mpp_get_clk_info(mpp, &dec->cabac_clk_info, "clk_cabac");
	if (ret)
		mpp_err("failed on clk_get clk_cabac\n");
	ret = mpp_get_clk_info(mpp, &dec->hevc_cabac_clk_info, "clk_hevc_cabac");
	if (ret)
		mpp_err("failed on clk_get clk_hevc_cabac\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->core_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->cabac_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->hevc_cabac_clk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load", &dec->default_max_load);
	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource define\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource define\n");
	dec->rst_niu_a = mpp_reset_control_get(mpp, RST_TYPE_NIU_A, "niu_a");
	if (!dec->rst_niu_a)
		mpp_err("No niu aclk reset resource define\n");
	dec->rst_niu_h = mpp_reset_control_get(mpp, RST_TYPE_NIU_H, "niu_h");
	if (!dec->rst_niu_h)
		mpp_err("No niu hclk reset resource define\n");
	dec->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
	if (!dec->rst_core)
		mpp_err("No core reset resource define\n");
	dec->rst_cabac = mpp_reset_control_get(mpp, RST_TYPE_CABAC, "video_cabac");
	if (!dec->rst_cabac)
		mpp_err("No cabac reset resource define\n");
	dec->rst_hevc_cabac = mpp_reset_control_get(mpp, RST_TYPE_HEVC_CABAC, "video_hevc_cabac");
	if (!dec->rst_hevc_cabac)
		mpp_err("No hevc cabac reset resource define\n");

	return 0;
}
1277*4882a593Smuzhiyun 
/*
 * PX30 init: run the common init, then apply the PX30 combo workaround.
 *
 * Fix: the return value of rkvdec_init() was silently discarded; it is
 * now propagated (currently always 0, so behavior is unchanged, but a
 * future failure will no longer be masked).
 */
static int rkvdec_px30_init(struct mpp_dev *mpp)
{
	int ret = rkvdec_init(mpp);

	if (ret)
		return ret;

	return px30_workaround_combo_init(mpp);
}
1283*4882a593Smuzhiyun 
rkvdec_3036_init(struct mpp_dev * mpp)1284*4882a593Smuzhiyun static int rkvdec_3036_init(struct mpp_dev *mpp)
1285*4882a593Smuzhiyun {
1286*4882a593Smuzhiyun 	rkvdec_init(mpp);
1287*4882a593Smuzhiyun 	set_bit(mpp->var->device_type, &mpp->queue->dev_active_flags);
1288*4882a593Smuzhiyun 	return 0;
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun 
/*
 * IOMMU fault handler for rk3328: when the decoder pre-fetches colmv data
 * past the mapped region (bus id 2), map a spare "aux" page at the faulting
 * address so the access is absorbed instead of stalling the hardware.
 * @arg carries the struct mpp_dev registered with the handler.
 * Returns 0 on success or the iommu_map() error code.
 */
static int rkvdec_3328_iommu_hdl(struct iommu_domain *iommu,
				 struct device *iommu_dev,
				 unsigned long iova,
				 int status, void *arg)
{
	int ret = 0;
	struct mpp_dev *mpp = (struct mpp_dev *)arg;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	/*
	 * defeat workaround, invalidate address generated when rk322x
	 * hevc decoder tile mode pre-fetch colmv data.
	 */
	if (IOMMU_GET_BUS_ID(status) == 2) {
		unsigned long page_iova = 0;
		/* avoid another page fault occur after page fault */
		if (dec->aux_iova != -1) {
			/* Drop the mapping installed by a previous fault. */
			iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
			dec->aux_iova = -1;
		}

		/* Map the aux page at the page-aligned faulting address. */
		page_iova = round_down(iova, IOMMU_PAGE_SIZE);
		ret = iommu_map(mpp->iommu_info->domain, page_iova,
				page_to_phys(dec->aux_page), IOMMU_PAGE_SIZE,
				IOMMU_READ | IOMMU_WRITE);
		if (!ret)
			/* Remember it so the next fault (or exit) can unmap. */
			dec->aux_iova = page_iova;
	}

	return ret;
}
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun #ifdef CONFIG_PM_DEVFREQ
/*
 * Tear down what rkvdec_devfreq_init() registered: the OPP notifier on the
 * devfreq device and the device's OPP table.
 */
static int rkvdec_devfreq_remove(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	devfreq_unregister_opp_notifier(mpp->dev, dec->devfreq);
	dev_pm_opp_of_remove_table(mpp->dev);

	return 0;
}
1333*4882a593Smuzhiyun 
/*
 * Set up devfreq for the decoder: hook the optional parent devfreq's
 * transition notifier, acquire the "vcodec" regulator, build the OPP table,
 * register a userspace-governed devfreq device, and (if the power model
 * initializes) register devfreq cooling.
 *
 * Returns 0 on success (including the no-regulator case, which is treated
 * as "devfreq not used"), -EPROBE_DEFER when a dependency is not ready yet,
 * or a negative errno on other failures.
 */
static int rkvdec_devfreq_init(struct mpp_dev *mpp)
{
	int ret = 0;
	struct devfreq_dev_status *stat;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mutex_init(&dec->set_clk_lock);
	/* Parent devfreq is optional; only defer when it explicitly says so. */
	dec->parent_devfreq = devfreq_get_devfreq_by_phandle(mpp->dev, "rkvdec_devfreq", 0);
	if (IS_ERR_OR_NULL(dec->parent_devfreq)) {
		if (PTR_ERR(dec->parent_devfreq) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "parent devfreq is not ready, retry\n");

			return -EPROBE_DEFER;
		}
	} else {
		dec->devfreq_nb.notifier_call = devfreq_notifier_call;
		devm_devfreq_register_notifier(mpp->dev,
					       dec->parent_devfreq,
					       &dec->devfreq_nb,
					       DEVFREQ_TRANSITION_NOTIFIER);
	}

	/* Without a vcodec regulator, skip devfreq entirely (not an error). */
	dec->vdd = devm_regulator_get_optional(mpp->dev, "vcodec");
	if (IS_ERR_OR_NULL(dec->vdd)) {
		if (PTR_ERR(dec->vdd) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "vcodec regulator not ready, retry\n");

			return -EPROBE_DEFER;
		}
		dev_warn(mpp->dev, "no regulator for vcodec\n");

		return 0;
	}

	ret = rockchip_init_opp_table(mpp->dev, NULL,
				      "rkvdec_leakage", "vcodec");
	if (ret) {
		dev_err(mpp->dev, "Failed to init_opp_table\n");
		goto done;
	}
	dec->devfreq = devm_devfreq_add_device(mpp->dev, &devfreq_profile,
					       "userspace", NULL);
	if (IS_ERR(dec->devfreq)) {
		ret = PTR_ERR(dec->devfreq);
		goto done;
	}

	/* Seed the reported frequency from the current aclk rate. */
	stat = &dec->devfreq->last_status;
	stat->current_frequency = clk_get_rate(dec->aclk_info.clk);

	ret = devfreq_register_opp_notifier(mpp->dev, dec->devfreq);
	if (ret)
		goto done;

	/* power model simple init */
	ret = power_model_simple_init(mpp);
	if (!ret && dec->devfreq) {
		dec->devfreq_cooling =
			of_devfreq_cooling_register_power(mpp->dev->of_node,
							  dec->devfreq,
							  &cooling_power_data);
		if (IS_ERR_OR_NULL(dec->devfreq_cooling)) {
			ret = -ENXIO;
			dev_err(mpp->dev, "Failed to register cooling\n");
			goto done;
		}
	}

done:
	return ret;
}
1405*4882a593Smuzhiyun #else
/* Stub when CONFIG_PM_DEVFREQ is disabled: nothing to tear down. */
static inline int rkvdec_devfreq_remove(struct mpp_dev *mpp)
{
	return 0;
}
1410*4882a593Smuzhiyun 
/* Stub when CONFIG_PM_DEVFREQ is disabled: devfreq is simply not used. */
static inline int rkvdec_devfreq_init(struct mpp_dev *mpp)
{
	return 0;
}
1415*4882a593Smuzhiyun #endif
1416*4882a593Smuzhiyun 
rkvdec_3328_init(struct mpp_dev * mpp)1417*4882a593Smuzhiyun static int rkvdec_3328_init(struct mpp_dev *mpp)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun 	int ret = 0;
1420*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	rkvdec_init(mpp);
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	/* warkaround for mmu pagefault */
1425*4882a593Smuzhiyun 	dec->aux_page = alloc_page(GFP_KERNEL);
1426*4882a593Smuzhiyun 	if (!dec->aux_page) {
1427*4882a593Smuzhiyun 		dev_err(mpp->dev, "allocate a page for auxiliary usage\n");
1428*4882a593Smuzhiyun 		ret = -ENOMEM;
1429*4882a593Smuzhiyun 		goto done;
1430*4882a593Smuzhiyun 	}
1431*4882a593Smuzhiyun 	dec->aux_iova = -1;
1432*4882a593Smuzhiyun 	mpp->iommu_info->hdl = rkvdec_3328_iommu_hdl;
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	ret = rkvdec_devfreq_init(mpp);
1435*4882a593Smuzhiyun done:
1436*4882a593Smuzhiyun 	return ret;
1437*4882a593Smuzhiyun }
1438*4882a593Smuzhiyun 
rkvdec_3328_exit(struct mpp_dev * mpp)1439*4882a593Smuzhiyun static int rkvdec_3328_exit(struct mpp_dev *mpp)
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	if (dec->aux_page)
1444*4882a593Smuzhiyun 		__free_page(dec->aux_page);
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	if (dec->aux_iova != -1) {
1447*4882a593Smuzhiyun 		iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
1448*4882a593Smuzhiyun 		dec->aux_iova = -1;
1449*4882a593Smuzhiyun 	}
1450*4882a593Smuzhiyun 	rkvdec_devfreq_remove(mpp);
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	return 0;
1453*4882a593Smuzhiyun }
1454*4882a593Smuzhiyun 
/*
 * Enable all decoder clocks (bus aclk/hclk first, then core and the two
 * cabac clocks). mpp_clk_safe_enable() presumably tolerates NULL clocks on
 * SoCs that lack some of them — TODO confirm against mpp_common.
 */
static int rkvdec_clk_on(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_clk_safe_enable(dec->aclk_info.clk);
	mpp_clk_safe_enable(dec->hclk_info.clk);
	mpp_clk_safe_enable(dec->core_clk_info.clk);
	mpp_clk_safe_enable(dec->cabac_clk_info.clk);
	mpp_clk_safe_enable(dec->hevc_cabac_clk_info.clk);

	return 0;
}
1467*4882a593Smuzhiyun 
/*
 * Disable all decoder clocks enabled by rkvdec_clk_on().
 * clk_disable_unprepare() is a no-op for NULL clocks.
 */
static int rkvdec_clk_off(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	clk_disable_unprepare(dec->aclk_info.clk);
	clk_disable_unprepare(dec->hclk_info.clk);
	clk_disable_unprepare(dec->core_clk_info.clk);
	clk_disable_unprepare(dec->cabac_clk_info.clk);
	clk_disable_unprepare(dec->hevc_cabac_clk_info.clk);

	return 0;
}
1480*4882a593Smuzhiyun 
rkvdec_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)1481*4882a593Smuzhiyun static int rkvdec_get_freq(struct mpp_dev *mpp,
1482*4882a593Smuzhiyun 			   struct mpp_task *mpp_task)
1483*4882a593Smuzhiyun {
1484*4882a593Smuzhiyun 	u32 task_cnt;
1485*4882a593Smuzhiyun 	u32 workload;
1486*4882a593Smuzhiyun 	struct mpp_task *loop = NULL, *n;
1487*4882a593Smuzhiyun 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1488*4882a593Smuzhiyun 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	/* if not set max load, consider not have advanced mode */
1491*4882a593Smuzhiyun 	if (!dec->default_max_load || !task->pixels)
1492*4882a593Smuzhiyun 		return 0;
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	task_cnt = 1;
1495*4882a593Smuzhiyun 	workload = task->pixels;
1496*4882a593Smuzhiyun 	/* calc workload in pending list */
1497*4882a593Smuzhiyun 	mutex_lock(&mpp->queue->pending_lock);
1498*4882a593Smuzhiyun 	list_for_each_entry_safe(loop, n,
1499*4882a593Smuzhiyun 				 &mpp->queue->pending_list,
1500*4882a593Smuzhiyun 				 queue_link) {
1501*4882a593Smuzhiyun 		struct rkvdec_task *loop_task = to_rkvdec_task(loop);
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 		task_cnt++;
1504*4882a593Smuzhiyun 		workload += loop_task->pixels;
1505*4882a593Smuzhiyun 	}
1506*4882a593Smuzhiyun 	mutex_unlock(&mpp->queue->pending_lock);
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	if (workload > dec->default_max_load)
1509*4882a593Smuzhiyun 		task->clk_mode = CLK_MODE_ADVANCED;
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
1512*4882a593Smuzhiyun 		  task_cnt, workload, task->clk_mode);
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	return 0;
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun 
rkvdec_3328_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)1517*4882a593Smuzhiyun static int rkvdec_3328_get_freq(struct mpp_dev *mpp,
1518*4882a593Smuzhiyun 				struct mpp_task *mpp_task)
1519*4882a593Smuzhiyun {
1520*4882a593Smuzhiyun 	u32 fmt;
1521*4882a593Smuzhiyun 	u32 ddr_align_en;
1522*4882a593Smuzhiyun 	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
1525*4882a593Smuzhiyun 	ddr_align_en = task->reg[RKVDEC_REG_INT_EN_INDEX] & RKVDEC_WR_DDR_ALIGN_EN;
1526*4882a593Smuzhiyun 	if (fmt == RKVDEC_FMT_H264D && ddr_align_en)
1527*4882a593Smuzhiyun 		task->clk_mode = CLK_MODE_ADVANCED;
1528*4882a593Smuzhiyun 	else
1529*4882a593Smuzhiyun 		rkvdec_get_freq(mpp, mpp_task);
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	return 0;
1532*4882a593Smuzhiyun }
1533*4882a593Smuzhiyun 
/*
 * rk3368 GRF switch: record whether the GRF selection changed (consumed by
 * rkvdec_3368_set_freq() to refresh the IOMMU), then program the GRF.
 */
static int rkvdec_3368_set_grf(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	dec->grf_changed = mpp_grf_is_changed(mpp->grf_info);
	mpp_set_grf(mpp->grf_info);

	return 0;
}
1543*4882a593Smuzhiyun 
/*
 * rk3036 GRF switch: when the GRF selection changes, hand the shared
 * hardware over to this device. Every previously active device on the
 * queue is reset and has its IOMMU disabled under its own GRF setting,
 * then this device's GRF is programmed and its IOMMU enabled.
 * Power-domain/clock state is restored to what it was on entry.
 */
static int rkvdec_3036_set_grf(struct mpp_dev *mpp)
{
	int grf_changed;
	struct mpp_dev *loop = NULL, *n;
	struct mpp_taskqueue *queue = mpp->queue;
	bool pd_is_on;

	grf_changed = mpp_grf_is_changed(mpp->grf_info);
	if (grf_changed) {

		/*
		 * in this case, devices share the queue also share the same pd&clk,
		 * so use mpp->dev's pd to control all the process is okay
		 */
		pd_is_on = rockchip_pmu_pd_is_on(mpp->dev);
		if (!pd_is_on)
			rockchip_pmu_pd_on(mpp->dev);
		mpp->hw_ops->clk_on(mpp);

		/* Deactivate whichever device currently owns the hardware. */
		list_for_each_entry_safe(loop, n, &queue->dev_list, queue_link) {
			if (test_bit(loop->var->device_type, &queue->dev_active_flags)) {
				mpp_set_grf(loop->grf_info);
				if (loop->hw_ops->clk_on)
					loop->hw_ops->clk_on(loop);
				if (loop->hw_ops->reset)
					loop->hw_ops->reset(loop);
				rockchip_iommu_disable(loop->dev);
				if (loop->hw_ops->clk_off)
					loop->hw_ops->clk_off(loop);
				clear_bit(loop->var->device_type, &queue->dev_active_flags);
			}
		}

		/* Take ownership: program our GRF and enable our IOMMU. */
		mpp_set_grf(mpp->grf_info);
		rockchip_iommu_enable(mpp->dev);
		set_bit(mpp->var->device_type, &queue->dev_active_flags);

		mpp->hw_ops->clk_off(mpp);
		if (!pd_is_on)
			rockchip_pmu_pd_off(mpp->dev);
	}


	return 0;
}
1589*4882a593Smuzhiyun 
/*
 * Apply the task's selected clock mode (see rkvdec_get_freq) to all rate-
 * controlled clocks. hclk is intentionally absent here, matching the clock
 * set used by the other set_freq variants in this file.
 */
static int rkvdec_set_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);

	return 0;
}
1603*4882a593Smuzhiyun 
/*
 * rk3368 set_freq: same clock programming as rkvdec_set_freq(), but first
 * refreshes the IOMMU if rkvdec_3368_set_grf() flagged a GRF change.
 */
static int rkvdec_3368_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);

	/* if grf changed, need reset iommu for rk3368 */
	if (dec->grf_changed) {
		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
		dec->grf_changed = false;
	}

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);

	return 0;
}
1622*4882a593Smuzhiyun 
/*
 * rk3328 set_freq: with devfreq available, report the device as busy and
 * route the rate request through the devfreq helper (which coordinates with
 * the regulator); otherwise set the clock rates directly.
 * NOTE(review): hevc_cabac clk is not touched here, unlike rkvdec_set_freq()
 * — presumably correct for rk3328's clock tree; confirm against the SoC TRM.
 */
static int rkvdec_3328_set_freq(struct mpp_dev *mpp,
				struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);

#ifdef CONFIG_PM_DEVFREQ
	if (dec->devfreq) {
		struct devfreq_dev_status *stat;
		unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

		/* busy_time == total_time marks the device fully loaded. */
		stat = &dec->devfreq->last_status;
		stat->busy_time = 1;
		stat->total_time = 1;
		aclk_rate_hz = mpp_get_clk_info_rate_hz(&dec->aclk_info,
							task->clk_mode);
		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
							task->clk_mode);
		cabac_rate_hz = mpp_get_clk_info_rate_hz(&dec->cabac_clk_info,
							 task->clk_mode);
		rkvdec_devf_set_clk(dec, aclk_rate_hz,
				    core_rate_hz, cabac_rate_hz,
				    EVENT_ADJUST);
	}
#else
	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
#endif

	return 0;
}
1655*4882a593Smuzhiyun 
/*
 * Drop all rate-controlled clocks to their low-power (reduced) rates,
 * used while the decoder is idle.
 */
static int rkvdec_reduce_freq(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_REDUCE);

	return 0;
}
1667*4882a593Smuzhiyun 
/*
 * rk3328 idle path: mirror of rkvdec_3328_set_freq() but with the device
 * reported idle (busy_time = 0) and all rates set to the reduced mode.
 */
static int rkvdec_3328_reduce_freq(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

#ifdef CONFIG_PM_DEVFREQ
	if (dec->devfreq) {
		struct devfreq_dev_status *stat;
		unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

		/* busy_time == 0 marks the device idle for the governor. */
		stat = &dec->devfreq->last_status;
		stat->busy_time = 0;
		stat->total_time = 1;
		aclk_rate_hz = mpp_get_clk_info_rate_hz(&dec->aclk_info,
							CLK_MODE_REDUCE);
		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
							CLK_MODE_REDUCE);
		cabac_rate_hz = mpp_get_clk_info_rate_hz(&dec->cabac_clk_info,
							 CLK_MODE_REDUCE);
		rkvdec_devf_set_clk(dec, aclk_rate_hz,
				    core_rate_hz, cabac_rate_hz,
				    EVENT_ADJUST);
	}
#else
	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_REDUCE);
#endif

	return 0;
}
1698*4882a593Smuzhiyun 
/*
 * Hard-reset the decoder through the CRU: request PMU idle, assert every
 * reset line (NIU first), wait, then deassert in the mirrored order and
 * release the idle request. Skipped entirely if the basic aclk/hclk resets
 * were not found at init time. The assert/deassert ordering is deliberate —
 * do not reorder.
 */
static int rkvdec_reset(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_debug_enter();
	if (dec->rst_a && dec->rst_h) {
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_niu_a);
		mpp_safe_reset(dec->rst_niu_h);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		mpp_safe_reset(dec->rst_core);
		mpp_safe_reset(dec->rst_cabac);
		mpp_safe_reset(dec->rst_hevc_cabac);
		udelay(5);
		mpp_safe_unreset(dec->rst_niu_h);
		mpp_safe_unreset(dec->rst_niu_a);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_safe_unreset(dec->rst_core);
		mpp_safe_unreset(dec->rst_cabac);
		mpp_safe_unreset(dec->rst_hevc_cabac);
		mpp_pmu_idle_request(mpp, false);
	}
	mpp_debug_leave();

	return 0;
}
1727*4882a593Smuzhiyun 
rkvdec_sip_reset(struct mpp_dev * mpp)1728*4882a593Smuzhiyun static int rkvdec_sip_reset(struct mpp_dev *mpp)
1729*4882a593Smuzhiyun {
1730*4882a593Smuzhiyun 	if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
1731*4882a593Smuzhiyun 		/* The reset flow in arm trustzone firmware */
1732*4882a593Smuzhiyun 		struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 		mutex_lock(&dec->sip_reset_lock);
1735*4882a593Smuzhiyun 		sip_smc_vpu_reset(0, 0, 0);
1736*4882a593Smuzhiyun 		mutex_unlock(&dec->sip_reset_lock);
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 		return 0;
1739*4882a593Smuzhiyun 	} else {
1740*4882a593Smuzhiyun 		return rkvdec_reset(mpp);
1741*4882a593Smuzhiyun 	}
1742*4882a593Smuzhiyun }
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun static struct mpp_hw_ops rkvdec_v1_hw_ops = {
1745*4882a593Smuzhiyun 	.init = rkvdec_init,
1746*4882a593Smuzhiyun 	.clk_on = rkvdec_clk_on,
1747*4882a593Smuzhiyun 	.clk_off = rkvdec_clk_off,
1748*4882a593Smuzhiyun 	.get_freq = rkvdec_get_freq,
1749*4882a593Smuzhiyun 	.set_freq = rkvdec_set_freq,
1750*4882a593Smuzhiyun 	.reduce_freq = rkvdec_reduce_freq,
1751*4882a593Smuzhiyun 	.reset = rkvdec_reset,
1752*4882a593Smuzhiyun };
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun static struct mpp_hw_ops rkvdec_px30_hw_ops = {
1755*4882a593Smuzhiyun 	.init = rkvdec_px30_init,
1756*4882a593Smuzhiyun 	.clk_on = rkvdec_clk_on,
1757*4882a593Smuzhiyun 	.clk_off = rkvdec_clk_off,
1758*4882a593Smuzhiyun 	.get_freq = rkvdec_get_freq,
1759*4882a593Smuzhiyun 	.set_freq = rkvdec_set_freq,
1760*4882a593Smuzhiyun 	.reduce_freq = rkvdec_reduce_freq,
1761*4882a593Smuzhiyun 	.reset = rkvdec_reset,
1762*4882a593Smuzhiyun 	.set_grf = px30_workaround_combo_switch_grf,
1763*4882a593Smuzhiyun };
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun static struct mpp_hw_ops rkvdec_3036_hw_ops = {
1766*4882a593Smuzhiyun 	.init = rkvdec_3036_init,
1767*4882a593Smuzhiyun 	.clk_on = rkvdec_clk_on,
1768*4882a593Smuzhiyun 	.clk_off = rkvdec_clk_off,
1769*4882a593Smuzhiyun 	.get_freq = rkvdec_get_freq,
1770*4882a593Smuzhiyun 	.set_freq = rkvdec_set_freq,
1771*4882a593Smuzhiyun 	.reduce_freq = rkvdec_reduce_freq,
1772*4882a593Smuzhiyun 	.reset = rkvdec_reset,
1773*4882a593Smuzhiyun 	.set_grf = rkvdec_3036_set_grf,
1774*4882a593Smuzhiyun };
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun static struct mpp_hw_ops rkvdec_3399_hw_ops = {
1777*4882a593Smuzhiyun 	.init = rkvdec_init,
1778*4882a593Smuzhiyun 	.clk_on = rkvdec_clk_on,
1779*4882a593Smuzhiyun 	.clk_off = rkvdec_clk_off,
1780*4882a593Smuzhiyun 	.get_freq = rkvdec_get_freq,
1781*4882a593Smuzhiyun 	.set_freq = rkvdec_set_freq,
1782*4882a593Smuzhiyun 	.reduce_freq = rkvdec_reduce_freq,
1783*4882a593Smuzhiyun 	.reset = rkvdec_reset,
1784*4882a593Smuzhiyun };
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun static struct mpp_hw_ops rkvdec_3368_hw_ops = {
1787*4882a593Smuzhiyun 	.init = rkvdec_init,
1788*4882a593Smuzhiyun 	.clk_on = rkvdec_clk_on,
1789*4882a593Smuzhiyun 	.clk_off = rkvdec_clk_off,
1790*4882a593Smuzhiyun 	.get_freq = rkvdec_get_freq,
1791*4882a593Smuzhiyun 	.set_freq = rkvdec_3368_set_freq,
1792*4882a593Smuzhiyun 	.reduce_freq = rkvdec_reduce_freq,
1793*4882a593Smuzhiyun 	.reset = rkvdec_reset,
1794*4882a593Smuzhiyun 	.set_grf = rkvdec_3368_set_grf,
1795*4882a593Smuzhiyun };
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun static struct mpp_dev_ops rkvdec_v1_dev_ops = {
1798*4882a593Smuzhiyun 	.alloc_task = rkvdec_alloc_task,
1799*4882a593Smuzhiyun 	.run = rkvdec_run,
1800*4882a593Smuzhiyun 	.irq = rkvdec_irq,
1801*4882a593Smuzhiyun 	.isr = rkvdec_isr,
1802*4882a593Smuzhiyun 	.finish = rkvdec_finish,
1803*4882a593Smuzhiyun 	.result = rkvdec_result,
1804*4882a593Smuzhiyun 	.free_task = rkvdec_free_task,
1805*4882a593Smuzhiyun };
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun static struct mpp_dev_ops rkvdec_px30_dev_ops = {
1808*4882a593Smuzhiyun 	.alloc_task = rkvdec_alloc_task,
1809*4882a593Smuzhiyun 	.run = rkvdec_px30_run,
1810*4882a593Smuzhiyun 	.irq = rkvdec_irq,
1811*4882a593Smuzhiyun 	.isr = rkvdec_isr,
1812*4882a593Smuzhiyun 	.finish = rkvdec_finish,
1813*4882a593Smuzhiyun 	.result = rkvdec_result,
1814*4882a593Smuzhiyun 	.free_task = rkvdec_free_task,
1815*4882a593Smuzhiyun };
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun static struct mpp_hw_ops rkvdec_3328_hw_ops = {
1818*4882a593Smuzhiyun 	.init = rkvdec_3328_init,
1819*4882a593Smuzhiyun 	.exit = rkvdec_3328_exit,
1820*4882a593Smuzhiyun 	.clk_on = rkvdec_clk_on,
1821*4882a593Smuzhiyun 	.clk_off = rkvdec_clk_off,
1822*4882a593Smuzhiyun 	.get_freq = rkvdec_3328_get_freq,
1823*4882a593Smuzhiyun 	.set_freq = rkvdec_3328_set_freq,
1824*4882a593Smuzhiyun 	.reduce_freq = rkvdec_3328_reduce_freq,
1825*4882a593Smuzhiyun 	.reset = rkvdec_sip_reset,
1826*4882a593Smuzhiyun };
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun static struct mpp_dev_ops rkvdec_3328_dev_ops = {
1829*4882a593Smuzhiyun 	.alloc_task = rkvdec_alloc_task,
1830*4882a593Smuzhiyun 	.run = rkvdec_3328_run,
1831*4882a593Smuzhiyun 	.irq = rkvdec_irq,
1832*4882a593Smuzhiyun 	.isr = rkvdec_3328_isr,
1833*4882a593Smuzhiyun 	.finish = rkvdec_finish,
1834*4882a593Smuzhiyun 	.result = rkvdec_result,
1835*4882a593Smuzhiyun 	.free_task = rkvdec_free_task,
1836*4882a593Smuzhiyun };
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun static struct mpp_dev_ops rkvdec_3399_dev_ops = {
1839*4882a593Smuzhiyun 	.alloc_task = rkvdec_alloc_task,
1840*4882a593Smuzhiyun 	.prepare = rkvdec_prepare_with_reset,
1841*4882a593Smuzhiyun 	.run = rkvdec_run,
1842*4882a593Smuzhiyun 	.irq = rkvdec_irq,
1843*4882a593Smuzhiyun 	.isr = rkvdec_isr,
1844*4882a593Smuzhiyun 	.finish = rkvdec_finish_with_record_info,
1845*4882a593Smuzhiyun 	.result = rkvdec_result,
1846*4882a593Smuzhiyun 	.free_task = rkvdec_free_task,
1847*4882a593Smuzhiyun };
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun static struct mpp_dev_ops rkvdec_1126_dev_ops = {
1850*4882a593Smuzhiyun 	.alloc_task = rkvdec_alloc_task,
1851*4882a593Smuzhiyun 	.run = rkvdec_1126_run,
1852*4882a593Smuzhiyun 	.irq = rkvdec_irq,
1853*4882a593Smuzhiyun 	.isr = rkvdec_isr,
1854*4882a593Smuzhiyun 	.finish = rkvdec_finish,
1855*4882a593Smuzhiyun 	.result = rkvdec_result,
1856*4882a593Smuzhiyun 	.free_task = rkvdec_free_task,
1857*4882a593Smuzhiyun };
1858*4882a593Smuzhiyun static const struct mpp_dev_var rk_hevcdec_data = {
1859*4882a593Smuzhiyun 	.device_type = MPP_DEVICE_HEVC_DEC,
1860*4882a593Smuzhiyun 	.hw_info = &rk_hevcdec_hw_info,
1861*4882a593Smuzhiyun 	.trans_info = rk_hevcdec_trans,
1862*4882a593Smuzhiyun 	.hw_ops = &rkvdec_v1_hw_ops,
1863*4882a593Smuzhiyun 	.dev_ops = &rkvdec_v1_dev_ops,
1864*4882a593Smuzhiyun };
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun static const struct mpp_dev_var rk_hevcdec_3036_data = {
1867*4882a593Smuzhiyun 	.device_type = MPP_DEVICE_HEVC_DEC,
1868*4882a593Smuzhiyun 	.hw_info = &rk_hevcdec_hw_info,
1869*4882a593Smuzhiyun 	.trans_info = rk_hevcdec_trans,
1870*4882a593Smuzhiyun 	.hw_ops = &rkvdec_3036_hw_ops,
1871*4882a593Smuzhiyun 	.dev_ops = &rkvdec_v1_dev_ops,
1872*4882a593Smuzhiyun };
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun static const struct mpp_dev_var rk_hevcdec_3368_data = {
1875*4882a593Smuzhiyun 	.device_type = MPP_DEVICE_HEVC_DEC,
1876*4882a593Smuzhiyun 	.hw_info = &rk_hevcdec_hw_info,
1877*4882a593Smuzhiyun 	.trans_info = rk_hevcdec_trans,
1878*4882a593Smuzhiyun 	.hw_ops = &rkvdec_3368_hw_ops,
1879*4882a593Smuzhiyun 	.dev_ops = &rkvdec_v1_dev_ops,
1880*4882a593Smuzhiyun };
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun static const struct mpp_dev_var rk_hevcdec_px30_data = {
1883*4882a593Smuzhiyun 	.device_type = MPP_DEVICE_HEVC_DEC,
1884*4882a593Smuzhiyun 	.hw_info = &rk_hevcdec_hw_info,
1885*4882a593Smuzhiyun 	.trans_info = rk_hevcdec_trans,
1886*4882a593Smuzhiyun 	.hw_ops = &rkvdec_px30_hw_ops,
1887*4882a593Smuzhiyun 	.dev_ops = &rkvdec_px30_dev_ops,
1888*4882a593Smuzhiyun };
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun static const struct mpp_dev_var rkvdec_v1_data = {
1891*4882a593Smuzhiyun 	.device_type = MPP_DEVICE_RKVDEC,
1892*4882a593Smuzhiyun 	.hw_info = &rkvdec_v1_hw_info,
1893*4882a593Smuzhiyun 	.trans_info = rkvdec_v1_trans,
1894*4882a593Smuzhiyun 	.hw_ops = &rkvdec_v1_hw_ops,
1895*4882a593Smuzhiyun 	.dev_ops = &rkvdec_v1_dev_ops,
1896*4882a593Smuzhiyun };
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun static const struct mpp_dev_var rkvdec_3399_data = {
1899*4882a593Smuzhiyun 	.device_type = MPP_DEVICE_RKVDEC,
1900*4882a593Smuzhiyun 	.hw_info = &rkvdec_v1_hw_info,
1901*4882a593Smuzhiyun 	.trans_info = rkvdec_v1_trans,
1902*4882a593Smuzhiyun 	.hw_ops = &rkvdec_3399_hw_ops,
1903*4882a593Smuzhiyun 	.dev_ops = &rkvdec_3399_dev_ops,
1904*4882a593Smuzhiyun };
1905*4882a593Smuzhiyun 
1906*4882a593Smuzhiyun static const struct mpp_dev_var rkvdec_3328_data = {
1907*4882a593Smuzhiyun 	.device_type = MPP_DEVICE_RKVDEC,
1908*4882a593Smuzhiyun 	.hw_info = &rkvdec_v1_hw_info,
1909*4882a593Smuzhiyun 	.trans_info = rkvdec_v1_trans,
1910*4882a593Smuzhiyun 	.hw_ops = &rkvdec_3328_hw_ops,
1911*4882a593Smuzhiyun 	.dev_ops = &rkvdec_3328_dev_ops,
1912*4882a593Smuzhiyun };
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun static const struct mpp_dev_var rkvdec_1126_data = {
1915*4882a593Smuzhiyun 	.device_type = MPP_DEVICE_RKVDEC,
1916*4882a593Smuzhiyun 	.hw_info = &rkvdec_v1_hw_info,
1917*4882a593Smuzhiyun 	.trans_info = rkvdec_v1_trans,
1918*4882a593Smuzhiyun 	.hw_ops = &rkvdec_v1_hw_ops,
1919*4882a593Smuzhiyun 	.dev_ops = &rkvdec_1126_dev_ops,
1920*4882a593Smuzhiyun };
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun static const struct of_device_id mpp_rkvdec_dt_match[] = {
1923*4882a593Smuzhiyun 	{
1924*4882a593Smuzhiyun 		.compatible = "rockchip,hevc-decoder",
1925*4882a593Smuzhiyun 		.data = &rk_hevcdec_data,
1926*4882a593Smuzhiyun 	},
1927*4882a593Smuzhiyun #ifdef CONFIG_CPU_PX30
1928*4882a593Smuzhiyun 	{
1929*4882a593Smuzhiyun 		.compatible = "rockchip,hevc-decoder-px30",
1930*4882a593Smuzhiyun 		.data = &rk_hevcdec_px30_data,
1931*4882a593Smuzhiyun 	},
1932*4882a593Smuzhiyun #endif
1933*4882a593Smuzhiyun #ifdef CONFIG_CPU_RK3036
1934*4882a593Smuzhiyun 	{
1935*4882a593Smuzhiyun 		.compatible = "rockchip,hevc-decoder-rk3036",
1936*4882a593Smuzhiyun 		.data = &rk_hevcdec_3036_data,
1937*4882a593Smuzhiyun 	},
1938*4882a593Smuzhiyun #endif
1939*4882a593Smuzhiyun #ifdef CONFIG_CPU_RK3368
1940*4882a593Smuzhiyun 	{
1941*4882a593Smuzhiyun 		.compatible = "rockchip,hevc-decoder-rk3368",
1942*4882a593Smuzhiyun 		.data = &rk_hevcdec_3368_data,
1943*4882a593Smuzhiyun 	},
1944*4882a593Smuzhiyun #endif
1945*4882a593Smuzhiyun 	{
1946*4882a593Smuzhiyun 		.compatible = "rockchip,rkv-decoder-v1",
1947*4882a593Smuzhiyun 		.data = &rkvdec_v1_data,
1948*4882a593Smuzhiyun 	},
1949*4882a593Smuzhiyun #ifdef CONFIG_CPU_RK3399
1950*4882a593Smuzhiyun 	{
1951*4882a593Smuzhiyun 		.compatible = "rockchip,rkv-decoder-rk3399",
1952*4882a593Smuzhiyun 		.data = &rkvdec_3399_data,
1953*4882a593Smuzhiyun 	},
1954*4882a593Smuzhiyun #endif
1955*4882a593Smuzhiyun #ifdef CONFIG_CPU_RK3328
1956*4882a593Smuzhiyun 	{
1957*4882a593Smuzhiyun 		.compatible = "rockchip,rkv-decoder-rk3328",
1958*4882a593Smuzhiyun 		.data = &rkvdec_3328_data,
1959*4882a593Smuzhiyun 	},
1960*4882a593Smuzhiyun #endif
1961*4882a593Smuzhiyun #ifdef CONFIG_CPU_RV1126
1962*4882a593Smuzhiyun 	{
1963*4882a593Smuzhiyun 		.compatible = "rockchip,rkv-decoder-rv1126",
1964*4882a593Smuzhiyun 		.data = &rkvdec_1126_data,
1965*4882a593Smuzhiyun 	},
1966*4882a593Smuzhiyun #endif
1967*4882a593Smuzhiyun 	{},
1968*4882a593Smuzhiyun };
1969*4882a593Smuzhiyun 
rkvdec_probe(struct platform_device * pdev)1970*4882a593Smuzhiyun static int rkvdec_probe(struct platform_device *pdev)
1971*4882a593Smuzhiyun {
1972*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
1973*4882a593Smuzhiyun 	struct rkvdec_dev *dec = NULL;
1974*4882a593Smuzhiyun 	struct mpp_dev *mpp = NULL;
1975*4882a593Smuzhiyun 	const struct of_device_id *match = NULL;
1976*4882a593Smuzhiyun 	int ret = 0;
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 	dev_info(dev, "probing start\n");
1979*4882a593Smuzhiyun 	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
1980*4882a593Smuzhiyun 	if (!dec)
1981*4882a593Smuzhiyun 		return -ENOMEM;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	mpp = &dec->mpp;
1984*4882a593Smuzhiyun 	platform_set_drvdata(pdev, mpp);
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	if (pdev->dev.of_node) {
1987*4882a593Smuzhiyun 		match = of_match_node(mpp_rkvdec_dt_match,
1988*4882a593Smuzhiyun 				      pdev->dev.of_node);
1989*4882a593Smuzhiyun 		if (match)
1990*4882a593Smuzhiyun 			mpp->var = (struct mpp_dev_var *)match->data;
1991*4882a593Smuzhiyun 	}
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	ret = mpp_dev_probe(mpp, pdev);
1994*4882a593Smuzhiyun 	if (ret) {
1995*4882a593Smuzhiyun 		dev_err(dev, "probe sub driver failed\n");
1996*4882a593Smuzhiyun 		return ret;
1997*4882a593Smuzhiyun 	}
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun 	ret = devm_request_threaded_irq(dev, mpp->irq,
2000*4882a593Smuzhiyun 					mpp_dev_irq,
2001*4882a593Smuzhiyun 					mpp_dev_isr_sched,
2002*4882a593Smuzhiyun 					IRQF_SHARED,
2003*4882a593Smuzhiyun 					dev_name(dev), mpp);
2004*4882a593Smuzhiyun 	if (ret) {
2005*4882a593Smuzhiyun 		dev_err(dev, "register interrupter runtime failed\n");
2006*4882a593Smuzhiyun 		return -EINVAL;
2007*4882a593Smuzhiyun 	}
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	mpp->session_max_buffers = RKVDEC_SESSION_MAX_BUFFERS;
2010*4882a593Smuzhiyun 	rkvdec_procfs_init(mpp);
2011*4882a593Smuzhiyun 	/* register current device to mpp service */
2012*4882a593Smuzhiyun 	mpp_dev_register_srv(mpp, mpp->srv);
2013*4882a593Smuzhiyun 	dev_info(dev, "probing finish\n");
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 	return 0;
2016*4882a593Smuzhiyun }
2017*4882a593Smuzhiyun 
rkvdec_remove(struct platform_device * pdev)2018*4882a593Smuzhiyun static int rkvdec_remove(struct platform_device *pdev)
2019*4882a593Smuzhiyun {
2020*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
2021*4882a593Smuzhiyun 	struct rkvdec_dev *dec = platform_get_drvdata(pdev);
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 	dev_info(dev, "remove device\n");
2024*4882a593Smuzhiyun 	mpp_dev_remove(&dec->mpp);
2025*4882a593Smuzhiyun 	rkvdec_procfs_remove(&dec->mpp);
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	return 0;
2028*4882a593Smuzhiyun }
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun struct platform_driver rockchip_rkvdec_driver = {
2031*4882a593Smuzhiyun 	.probe = rkvdec_probe,
2032*4882a593Smuzhiyun 	.remove = rkvdec_remove,
2033*4882a593Smuzhiyun 	.shutdown = mpp_dev_shutdown,
2034*4882a593Smuzhiyun 	.driver = {
2035*4882a593Smuzhiyun 		.name = RKVDEC_DRIVER_NAME,
2036*4882a593Smuzhiyun 		.of_match_table = of_match_ptr(mpp_rkvdec_dt_match),
2037*4882a593Smuzhiyun 	},
2038*4882a593Smuzhiyun };
2039*4882a593Smuzhiyun EXPORT_SYMBOL(rockchip_rkvdec_driver);
2040