1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * author:
6*4882a593Smuzhiyun * Alpha Lin, alpha.lin@rock-chips.com
7*4882a593Smuzhiyun * Randy Li, randy.li@rock-chips.com
8*4882a593Smuzhiyun * Ding Wei, leo.ding@rock-chips.com
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun */
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include <asm/cacheflush.h>
13*4882a593Smuzhiyun #include <linux/delay.h>
14*4882a593Smuzhiyun #include <linux/devfreq.h>
15*4882a593Smuzhiyun #include <linux/devfreq_cooling.h>
16*4882a593Smuzhiyun #include <linux/iopoll.h>
17*4882a593Smuzhiyun #include <linux/interrupt.h>
18*4882a593Smuzhiyun #include <linux/kernel.h>
19*4882a593Smuzhiyun #include <linux/module.h>
20*4882a593Smuzhiyun #include <linux/types.h>
21*4882a593Smuzhiyun #include <linux/of_platform.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <linux/seq_file.h>
24*4882a593Smuzhiyun #include <linux/uaccess.h>
25*4882a593Smuzhiyun #include <linux/regmap.h>
26*4882a593Smuzhiyun #include <linux/regulator/consumer.h>
27*4882a593Smuzhiyun #include <linux/proc_fs.h>
28*4882a593Smuzhiyun #include <linux/nospec.h>
29*4882a593Smuzhiyun #include <linux/workqueue.h>
30*4882a593Smuzhiyun #include <soc/rockchip/pm_domains.h>
31*4882a593Smuzhiyun #include <soc/rockchip/rockchip_iommu.h>
32*4882a593Smuzhiyun #include <soc/rockchip/rockchip_ipa.h>
33*4882a593Smuzhiyun #include <soc/rockchip/rockchip_opp_select.h>
34*4882a593Smuzhiyun #include <soc/rockchip/rockchip_system_monitor.h>
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun #ifdef CONFIG_PM_DEVFREQ
37*4882a593Smuzhiyun #include "../../../devfreq/governor.h"
38*4882a593Smuzhiyun #endif
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun #include "mpp_debug.h"
41*4882a593Smuzhiyun #include "mpp_iommu.h"
42*4882a593Smuzhiyun #include "mpp_common.h"
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun #define RKVENC_DRIVER_NAME "mpp_rkvenc"
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun #define IOMMU_GET_BUS_ID(x) (((x) >> 6) & 0x1f)
47*4882a593Smuzhiyun #define IOMMU_PAGE_SIZE SZ_4K
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun #define RKVENC_SESSION_MAX_BUFFERS 40
50*4882a593Smuzhiyun /* The maximum registers number of all the version */
51*4882a593Smuzhiyun #define RKVENC_REG_L1_NUM 780
52*4882a593Smuzhiyun #define RKVENC_REG_L2_NUM 320
53*4882a593Smuzhiyun #define RKVENC_REG_START_INDEX 0
54*4882a593Smuzhiyun #define RKVENC_REG_END_INDEX 131
55*4882a593Smuzhiyun /* rkvenc register info */
56*4882a593Smuzhiyun #define RKVENC_REG_NUM 112
57*4882a593Smuzhiyun #define RKVENC_REG_HW_ID_INDEX 0
58*4882a593Smuzhiyun #define RKVENC_REG_CLR_CACHE_BASE 0x884
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun #define RKVENC_ENC_START_INDEX 1
61*4882a593Smuzhiyun #define RKVENC_ENC_START_BASE 0x004
62*4882a593Smuzhiyun #define RKVENC_LKT_NUM(x) ((x) & 0xff)
63*4882a593Smuzhiyun #define RKVENC_CMD(x) (((x) & 0x3) << 8)
64*4882a593Smuzhiyun #define RKVENC_CLK_GATE_EN BIT(16)
65*4882a593Smuzhiyun #define RKVENC_CLR_BASE 0x008
66*4882a593Smuzhiyun #define RKVENC_SAFE_CLR_BIT BIT(0)
67*4882a593Smuzhiyun #define RKVENC_FORCE_CLR_BIT BIT(1)
68*4882a593Smuzhiyun #define RKVENC_LKT_ADDR_BASE 0x00c
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun #define RKVENC_INT_EN_INDEX 4
71*4882a593Smuzhiyun #define RKVENC_INT_EN_BASE 0x010
72*4882a593Smuzhiyun #define RKVENC_INT_MSK_BASE 0x014
73*4882a593Smuzhiyun #define RKVENC_INT_CLR_BASE 0x018
74*4882a593Smuzhiyun #define RKVENC_INT_STATUS_INDEX 7
75*4882a593Smuzhiyun #define RKVENC_INT_STATUS_BASE 0x01c
76*4882a593Smuzhiyun /* bit for int mask clr status */
77*4882a593Smuzhiyun #define RKVENC_BIT_ONE_FRAME BIT(0)
78*4882a593Smuzhiyun #define RKVENC_BIT_LINK_TABLE BIT(1)
79*4882a593Smuzhiyun #define RKVENC_BIT_SAFE_CLEAR BIT(2)
80*4882a593Smuzhiyun #define RKVENC_BIT_ONE_SLICE BIT(3)
81*4882a593Smuzhiyun #define RKVENC_BIT_STREAM_OVERFLOW BIT(4)
82*4882a593Smuzhiyun #define RKVENC_BIT_AXI_WRITE_FIFO_FULL BIT(5)
83*4882a593Smuzhiyun #define RKVENC_BIT_AXI_WRITE_CHANNEL BIT(6)
84*4882a593Smuzhiyun #define RKVENC_BIT_AXI_READ_CHANNEL BIT(7)
85*4882a593Smuzhiyun #define RKVENC_BIT_TIMEOUT BIT(8)
86*4882a593Smuzhiyun #define RKVENC_INT_ERROR_BITS ((RKVENC_BIT_STREAM_OVERFLOW) |\
87*4882a593Smuzhiyun (RKVENC_BIT_AXI_WRITE_FIFO_FULL) |\
88*4882a593Smuzhiyun (RKVENC_BIT_AXI_WRITE_CHANNEL) |\
89*4882a593Smuzhiyun (RKVENC_BIT_AXI_READ_CHANNEL) |\
90*4882a593Smuzhiyun (RKVENC_BIT_TIMEOUT))
91*4882a593Smuzhiyun #define RKVENC_ENC_RSL_INDEX 12
92*4882a593Smuzhiyun #define RKVENC_ENC_PIC_INDEX 13
93*4882a593Smuzhiyun #define RKVENC_ENC_PIC_BASE 0x034
94*4882a593Smuzhiyun #define RKVENC_GET_FORMAT(x) ((x) & 0x1)
95*4882a593Smuzhiyun #define RKVENC_ENC_PIC_NODE_INT_EN BIT(31)
96*4882a593Smuzhiyun #define RKVENC_ENC_WDG_BASE 0x038
97*4882a593Smuzhiyun #define RKVENC_PPLN_ENC_LMT(x) ((x) & 0xf)
98*4882a593Smuzhiyun #define RKVENC_OSD_CFG_BASE 0x1c0
99*4882a593Smuzhiyun #define RKVENC_OSD_PLT_TYPE BIT(17)
100*4882a593Smuzhiyun #define RKVENC_OSD_CLK_SEL_BIT BIT(16)
101*4882a593Smuzhiyun #define RKVENC_STATUS_BASE(i) (0x210 + (4 * (i)))
102*4882a593Smuzhiyun #define RKVENC_BSL_STATUS_BASE 0x210
103*4882a593Smuzhiyun #define RKVENC_BITSTREAM_LENGTH(x) ((x) & 0x7FFFFFF)
104*4882a593Smuzhiyun #define RKVENC_ENC_STATUS_BASE 0x220
105*4882a593Smuzhiyun #define RKVENC_ENC_STATUS_ENC(x) (((x) >> 0) & 0x3)
106*4882a593Smuzhiyun #define RKVENC_LKT_STATUS_BASE 0x224
107*4882a593Smuzhiyun #define RKVENC_LKT_STATUS_FNUM_ENC(x) (((x) >> 0) & 0xff)
108*4882a593Smuzhiyun #define RKVENC_LKT_STATUS_FNUM_CFG(x) (((x) >> 8) & 0xff)
109*4882a593Smuzhiyun #define RKVENC_LKT_STATUS_FNUM_INT(x) (((x) >> 16) & 0xff)
110*4882a593Smuzhiyun #define RKVENC_OSD_PLT_BASE(i) (0x400 + (4 * (i)))
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun #define RKVENC_L2_OFFSET (0x10000)
113*4882a593Smuzhiyun #define RKVENC_L2_ADDR_BASE (0x3f0)
114*4882a593Smuzhiyun #define RKVENC_L2_WRITE_BASE (0x3f4)
115*4882a593Smuzhiyun #define RKVENC_L2_READ_BASE (0x3f8)
116*4882a593Smuzhiyun #define RKVENC_L2_BURST_TYPE BIT(0)
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun #define RKVENC_GET_WIDTH(x) (((x & 0x1ff) + 1) << 3)
119*4882a593Smuzhiyun #define RKVENC_GET_HEIGHT(x) ((((x >> 16) & 0x1ff) + 1) << 3)
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun #define to_rkvenc_task(ctx) \
122*4882a593Smuzhiyun container_of(ctx, struct rkvenc_task, mpp_task)
123*4882a593Smuzhiyun #define to_rkvenc_dev(dev) \
124*4882a593Smuzhiyun container_of(dev, struct rkvenc_dev, mpp)
125*4882a593Smuzhiyun
/*
 * Coding formats handled by this encoder; also the index into
 * trans_rk_rkvenc[] for fd-to-address register translation.
 */
enum rkvenc_format_type {
	RKVENC_FMT_H264E = 0,
	RKVENC_FMT_H265E = 1,
	RKVENC_FMT_BUTT,
};
131*4882a593Smuzhiyun
/*
 * Hardware start modes: one frame per kick, or link-table (chained frame
 * descriptor) operation. Only ONEFRAME is implemented in this file; the
 * link-table modes fall through to an error in run/finish/result.
 */
enum RKVENC_MODE {
	RKVENC_MODE_NONE,
	RKVENC_MODE_ONEFRAME,
	RKVENC_MODE_LINKTABLE_FIX,
	RKVENC_MODE_LINKTABLE_UPDATE,
	RKVENC_MODE_BUTT
};
139*4882a593Smuzhiyun
/*
 * Per-job state for one encode task, embedding the generic mpp_task
 * (recovered with to_rkvenc_task()).
 */
struct rkvenc_task {
	struct mpp_task mpp_task;

	int link_flags;
	/* coding format, an enum rkvenc_format_type value */
	int fmt;
	enum RKVENC_MODE link_mode;

	/* level 1 register setting */
	u32 reg_offset;
	u32 reg_num;
	u32 reg[RKVENC_REG_L1_NUM];
	/* frame geometry decoded from RKVENC_ENC_RSL_INDEX */
	u32 width;
	u32 height;
	u32 pixels;
	/* level 2 register setting (indirect window at RKVENC_L2_OFFSET) */
	u32 reg_l2_offset;
	u32 reg_l2_num;
	u32 reg_l2[RKVENC_REG_L2_NUM];
	/* register offset info */
	struct reg_offset_info off_inf;

	enum MPP_CLOCK_MODE clk_mode;
	/* status latched from hardware by the isr */
	u32 irq_status;
	/* req for current task: write reqs replayed in run, read reqs in finish */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};
169*4882a593Smuzhiyun
/* Per-session private data, filled via MPP_CMD_SEND_CODEC_INFO. */
struct rkvenc_session_priv {
	/* guards codec_info[] — NOTE(review): writers in rkvenc_control do not
	 * appear to take it in this chunk; confirm locking at the readers */
	struct rw_semaphore rw_sem;
	/* codec info from user */
	struct {
		/* show mode */
		u32 flag;
		/* item data */
		u64 val;
	} codec_info[ENC_INFO_BUTT];
};
180*4882a593Smuzhiyun
/*
 * Device state for one rkvenc instance, embedding the generic mpp_dev
 * (recovered with to_rkvenc_dev()).
 */
struct rkvenc_dev {
	struct mpp_dev mpp;

	/* axi / ahb / core encoder clocks */
	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info core_clk_info;
	/* presumably a fallback max pixel load when DT gives none — confirm */
	u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	/* resets for the axi, ahb and core domains */
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_core;

#ifdef CONFIG_PM_DEVFREQ
	struct regulator *vdd;
	struct devfreq *devfreq;
	unsigned long volt;
	unsigned long core_rate_hz;
	unsigned long core_last_rate_hz;
	struct ipa_power_model_data *model_data;
	struct thermal_cooling_device *devfreq_cooling;
	struct monitor_dev_info *mdev_info;
#endif
	/* for iommu pagefault handle */
	struct work_struct iommu_work;
	struct workqueue_struct *iommu_wq;
	/* spare page mapped over the faulting iova so the hw can drain */
	struct page *aux_page;
	/* iova the aux page is mapped at, or -1 when unmapped (see rkvenc_isr) */
	unsigned long aux_iova;
	unsigned long fault_iova;
};
212*4882a593Smuzhiyun
/*
 * One link-table descriptor buffer (dma + cpu view). NOTE(review): no user
 * is visible in this chunk — presumably for the LINKTABLE modes; confirm.
 */
struct link_table_elem {
	dma_addr_t lkt_dma_addr;
	void *lkt_cpu_addr;
	u32 lkt_index;
	struct list_head list;
};
219*4882a593Smuzhiyun
/* Register-layout description handed to the mpp core (index positions of
 * hw id, start/enable trigger and the readable L1 register span). */
static struct mpp_hw_info rkvenc_hw_info = {
	.reg_num = RKVENC_REG_NUM,
	.reg_id = RKVENC_REG_HW_ID_INDEX,
	.reg_en = RKVENC_ENC_START_INDEX,
	.reg_start = RKVENC_REG_START_INDEX,
	.reg_end = RKVENC_REG_END_INDEX,
};
227*4882a593Smuzhiyun
/*
 * file handle translate information
 *
 * Per-format lists of L1 register indices whose values are dma-buf fds from
 * user space; mpp_translate_reg_address() rewrites them to device addresses
 * (selected by task->fmt in rkvenc_alloc_task()).
 */
static const u16 trans_tbl_h264e[] = {
	70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	80, 81, 82, 83, 84, 85, 86, 124, 125,
	126, 127, 128, 129, 130, 131
};

/* h265e uses the h264e set plus registers 95 and 96 */
static const u16 trans_tbl_h265e[] = {
	70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	80, 81, 82, 83, 84, 85, 86, 124, 125,
	126, 127, 128, 129, 130, 131, 95, 96
};

static struct mpp_trans_info trans_rk_rkvenc[] = {
	[RKVENC_FMT_H264E] = {
		.count = ARRAY_SIZE(trans_tbl_h264e),
		.table = trans_tbl_h264e,
	},
	[RKVENC_FMT_H265E] = {
		.count = ARRAY_SIZE(trans_tbl_h265e),
		.table = trans_tbl_h265e,
	},
};
253*4882a593Smuzhiyun
/*
 * Pull the register read/write requests for one task out of the user
 * message bundle.
 *
 * For each request:
 *  - MPP_CMD_SET_REG_WRITE: validate offset/size against the L1 (task->reg)
 *    or L2 (task->reg_l2, offsets >= RKVENC_L2_OFFSET) window, copy the user
 *    payload into that buffer, and record the request so rkvenc_run() can
 *    replay it to hardware.
 *  - MPP_CMD_SET_REG_READ: validate the window only and record the request
 *    for read-back in rkvenc_finish()/rkvenc_result().
 *  - MPP_CMD_SET_REG_ADDR_OFFSET: stash in task->off_inf for later fd
 *    address patching.
 *
 * Returns 0 on success, a negative errno from mpp_check_req(), or -EIO on
 * copy_from_user() failure.
 *
 * NOTE(review): w_reqs[]/r_reqs[] are sized MPP_MAX_MSG_NUM and indexed by
 * unchecked counters here — assumes msgs->req_cnt is already bounded by the
 * mpp core; verify at the caller.
 */
static int rkvenc_extract_task_msg(struct rkvenc_task *task,
				   struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;

	for (i = 0; i < msgs->req_cnt; i++) {
		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			int req_base;
			int max_size;
			u8 *dst = NULL;

			/* pick the register window the offset falls in */
			if (req->offset >= RKVENC_L2_OFFSET) {
				req_base = RKVENC_L2_OFFSET;
				max_size = sizeof(task->reg_l2);
				dst = (u8 *)task->reg_l2;
			} else {
				req_base = 0;
				max_size = sizeof(task->reg);
				dst = (u8 *)task->reg;
			}

			ret = mpp_check_req(req, req_base, max_size,
					    0, max_size);
			if (ret)
				return ret;

			/* byte offset relative to the window base */
			dst += req->offset - req_base;
			if (copy_from_user(dst, req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			int req_base;
			int max_size;

			if (req->offset >= RKVENC_L2_OFFSET) {
				req_base = RKVENC_L2_OFFSET;
				max_size = sizeof(task->reg_l2);
			} else {
				req_base = 0;
				max_size = sizeof(task->reg);
			}

			ret = mpp_check_req(req, req_base, max_size,
					    0, max_size);
			if (ret)
				return ret;

			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt=%d, r_req_cnt=%d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}
327*4882a593Smuzhiyun
/*
 * Build one encode task from the user-space message bundle.
 *
 * Copies register payloads via rkvenc_extract_task_msg(), translates
 * dma-buf fds embedded in the L1 registers to device addresses (unless
 * MPP_FLAGS_REG_FD_NO_TRANS is set), and caches format and resolution for
 * later use.
 *
 * Returns the embedded &task->mpp_task on success; on failure dumps the
 * partial task state for debugging, frees it and returns NULL.
 */
static void *rkvenc_alloc_task(struct mpp_session *session,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct rkvenc_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = rkvenc_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	task->fmt = RKVENC_GET_FORMAT(task->reg[RKVENC_ENC_PIC_INDEX]);
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = mpp_translate_reg_address(session,
						mpp_task, task->fmt,
						task->reg, &task->off_inf);
		if (ret)
			goto fail;
		mpp_translate_reg_offset_info(mpp_task,
					      &task->off_inf, task->reg);
	}
	task->link_mode = RKVENC_MODE_ONEFRAME;
	task->clk_mode = CLK_MODE_NORMAL;
	/* get resolution info */
	task->width = RKVENC_GET_WIDTH(task->reg[RKVENC_ENC_RSL_INDEX]);
	task->height = RKVENC_GET_HEIGHT(task->reg[RKVENC_ENC_RSL_INDEX]);
	task->pixels = task->width * task->height;
	mpp_debug(DEBUG_TASK_INFO, "width=%d, height=%d\n", task->width, task->height);

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}
380*4882a593Smuzhiyun
/*
 * Write a run of level-2 registers [start_idx, end_idx) through the
 * indirect address/data register pair.
 */
static int rkvenc_write_req_l2(struct mpp_dev *mpp,
			       u32 *regs,
			       u32 start_idx, u32 end_idx)
{
	int idx;

	for (idx = start_idx; idx < end_idx; idx++) {
		int byte_off = idx * sizeof(u32);

		mpp_debug(DEBUG_SET_REG_L2, "reg[%03d]: %04x: 0x%08x\n",
			  idx, byte_off, regs[idx]);
		/* latch the target offset, then push the value */
		writel_relaxed(byte_off, mpp->reg_base + RKVENC_L2_ADDR_BASE);
		writel_relaxed(regs[idx], mpp->reg_base + RKVENC_L2_WRITE_BASE);
	}

	return 0;
}
397*4882a593Smuzhiyun
/*
 * Read back a run of level-2 registers [start_idx, end_idx) through the
 * indirect address/data register pair into @regs.
 */
static int rkvenc_read_req_l2(struct mpp_dev *mpp,
			      u32 *regs,
			      u32 start_idx, u32 end_idx)
{
	int idx;

	for (idx = start_idx; idx < end_idx; idx++) {
		int byte_off = idx * sizeof(u32);

		/* select the source offset, then fetch the value */
		writel_relaxed(byte_off, mpp->reg_base + RKVENC_L2_ADDR_BASE);
		regs[idx] = readl_relaxed(mpp->reg_base + RKVENC_L2_READ_BASE);
		mpp_debug(DEBUG_GET_REG_L2, "reg[%03d]: %04x: 0x%08x\n",
			  idx, byte_off, regs[idx]);
	}

	return 0;
}
414*4882a593Smuzhiyun
/*
 * Write L1 registers [start_idx, end_idx) in DESCENDING index order,
 * skipping the enable/start register @en_idx (written last, separately,
 * to kick the hardware).
 */
static int rkvenc_write_req_backward(struct mpp_dev *mpp, u32 *regs,
				     s32 start_idx, s32 end_idx, s32 en_idx)
{
	s32 idx = end_idx;

	while (--idx >= start_idx) {
		if (idx != en_idx)
			mpp_write_relaxed(mpp, idx * sizeof(u32), regs[idx]);
	}

	return 0;
}
428*4882a593Smuzhiyun
/*
 * Program the hardware with one task's registers and start encoding.
 *
 * Only RKVENC_MODE_ONEFRAME is supported: every recorded write request is
 * replayed (L2 via the indirect window, L1 backward with the start register
 * held back), the iommu tlb is flushed, and finally the start register is
 * written after a wmb() so all configuration lands first.
 *
 * Always returns 0; unsupported link modes only log an error.
 */
static int rkvenc_run(struct mpp_dev *mpp,
		      struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, RKVENC_REG_CLR_CACHE_BASE, 1);
	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		int i;
		struct mpp_request *req;
		u32 reg_en = mpp_task->hw_info->reg_en;
		u32 timing_en = mpp->srv->timing_en;

		/*
		 * Tips: ensure osd plt clock is 0 before setting register,
		 * otherwise, osd setting will not work
		 */
		mpp_write_relaxed(mpp, RKVENC_OSD_CFG_BASE, 0);
		/* ensure clear finish */
		wmb();
		for (i = 0; i < task->w_req_cnt; i++) {
			int s, e;

			req = &task->w_reqs[i];
			/* set register L2 */
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				/* convert byte offset/size to u32 index span */
				s = off / sizeof(u32);
				e = s + req->size / sizeof(u32);
				rkvenc_write_req_l2(mpp, task->reg_l2, s, e);
			} else {
				/* set register L1 */
				s = req->offset / sizeof(u32);
				e = s + req->size / sizeof(u32);
				/* NOTE: for rkvenc, register should set backward */
				rkvenc_write_req_backward(mpp, task->reg, s, e, reg_en);
			}
		}

		/* flush tlb before starting hardware */
		mpp_iommu_flush_tlb(mpp->iommu_info);

		/* init current task */
		mpp->cur_task = mpp_task;

		mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

		/* Flush the register before the start the device */
		wmb();
		mpp_write(mpp, RKVENC_ENC_START_BASE, task->reg[reg_en]);

		mpp_task_run_end(mpp_task, timing_en);
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("link_mode %d failed.\n", task->link_mode);
	} break;
	}

	mpp_debug_leave();

	return 0;
}
497*4882a593Smuzhiyun
rkvenc_irq(struct mpp_dev * mpp)498*4882a593Smuzhiyun static int rkvenc_irq(struct mpp_dev *mpp)
499*4882a593Smuzhiyun {
500*4882a593Smuzhiyun mpp_debug_enter();
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun mpp->irq_status = mpp_read(mpp, RKVENC_INT_STATUS_BASE);
503*4882a593Smuzhiyun if (!mpp->irq_status)
504*4882a593Smuzhiyun return IRQ_NONE;
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun mpp_write(mpp, RKVENC_INT_MSK_BASE, 0x100);
507*4882a593Smuzhiyun mpp_write(mpp, RKVENC_INT_CLR_BASE, 0xffffffff);
508*4882a593Smuzhiyun mpp_write(mpp, RKVENC_INT_STATUS_BASE, 0);
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun mpp_debug_leave();
511*4882a593Smuzhiyun
512*4882a593Smuzhiyun return IRQ_WAKE_THREAD;
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun
/*
 * Threaded (bottom-half) interrupt handler.
 *
 * Latches the saved irq status into the current task, requests a device
 * reset when an error bit is set, tears down the auxiliary page mapping
 * installed by the iommu pagefault path (aux_iova == -1 means unmapped),
 * and completes the task.
 *
 * Always returns IRQ_HANDLED, even with no current task (logged as error).
 */
static int rkvenc_isr(struct mpp_dev *mpp)
{
	struct rkvenc_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_debug_enter();

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}

	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_rkvenc_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);

	if (task->irq_status & RKVENC_INT_ERROR_BITS) {
		/* ask the core to reset the hardware before the next task */
		atomic_inc(&mpp->reset_request);
		if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
			/* dump error register */
			mpp_debug(DEBUG_DUMP_ERR_REG, "irq_status: %08x\n", task->irq_status);
			mpp_task_dump_hw_reg(mpp);
		}
	}

	/* unmap reserve buffer */
	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}
556*4882a593Smuzhiyun
/*
 * Read the hardware results back into the task's register shadows.
 *
 * For each recorded read request, reads the L2 window through the indirect
 * registers or the L1 window directly, then overwrites the L1 status slot
 * with the irq status latched by the isr (the live register was already
 * cleared in the top half).
 *
 * Always returns 0; unsupported link modes only log an error.
 */
static int rkvenc_finish(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		u32 i;
		struct mpp_request *req;

		for (i = 0; i < task->r_req_cnt; i++) {
			int s, e;

			req = &task->r_reqs[i];
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				/* byte offset/size -> u32 index span */
				s = off / sizeof(u32);
				e = s + req->size / sizeof(u32);
				rkvenc_read_req_l2(mpp, task->reg_l2, s, e);
			} else {
				s = req->offset / sizeof(u32);
				e = s + req->size / sizeof(u32);
				mpp_read_req(mpp, task->reg, s, e);
			}
		}
		/* report the latched status, not the (cleared) live register */
		task->reg[RKVENC_INT_STATUS_INDEX] = task->irq_status;
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("link_mode %d failed.\n", task->link_mode);
	} break;
	}

	mpp_debug_leave();

	return 0;
}
598*4882a593Smuzhiyun
/*
 * Copy the task's result registers back to user space.
 *
 * For each recorded read request, copies the corresponding byte span of the
 * L2 or L1 register shadow (filled in rkvenc_finish()) to the user buffer.
 *
 * Returns 0 on success or -EIO on the first copy_to_user() failure;
 * unsupported link modes only log an error.
 */
static int rkvenc_result(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task,
			 struct mpp_task_msgs *msgs)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		u32 i;
		struct mpp_request *req;

		for (i = 0; i < task->r_req_cnt; i++) {
			req = &task->r_reqs[i];
			/* set register L2 */
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				if (copy_to_user(req->data,
						 (u8 *)task->reg_l2 + off,
						 req->size)) {
					mpp_err("copy_to_user reg_l2 fail\n");
					return -EIO;
				}
			} else {
				/* req->offset is a byte offset into the L1 shadow */
				if (copy_to_user(req->data,
						 (u8 *)task->reg + req->offset,
						 req->size)) {
					mpp_err("copy_to_user reg fail\n");
					return -EIO;
				}
			}
		}
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("link_mode %d failed.\n", task->link_mode);
	} break;
	}

	return 0;
}
643*4882a593Smuzhiyun
/*
 * Release a finished task: drop its resources via mpp_task_finalize() and
 * free the containing rkvenc_task. Always returns 0.
 */
static int rkvenc_free_task(struct mpp_session *session,
			    struct mpp_task *mpp_task)
{
	struct rkvenc_task *enc_task = to_rkvenc_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(enc_task);

	return 0;
}
654*4882a593Smuzhiyun
/*
 * Session-level ioctl dispatcher.
 *
 * MPP_CMD_SEND_CODEC_INFO: copies up to ENC_INFO_BUTT codec_info_elem
 * entries from user space into the session's codec_info[] table; elem.type
 * is range-checked and sanitized with array_index_nospec() before use as an
 * index (Spectre-v1 hardening). Invalid elements are logged and skipped.
 *
 * Returns 0 for all commands (including unknown ones, which are logged),
 * -EINVAL only when the session or its priv is missing.
 */
static int rkvenc_control(struct mpp_session *session, struct mpp_request *req)
{
	switch (req->cmd) {
	case MPP_CMD_SEND_CODEC_INFO: {
		int i;
		int cnt;
		struct codec_info_elem elem;
		struct rkvenc_session_priv *priv;

		if (!session || !session->priv) {
			mpp_err("session info null\n");
			return -EINVAL;
		}
		priv = session->priv;

		/* clamp the element count to the table size */
		cnt = req->size / sizeof(elem);
		cnt = (cnt > ENC_INFO_BUTT) ? ENC_INFO_BUTT : cnt;
		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
		for (i = 0; i < cnt; i++) {
			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
				mpp_err("copy_from_user failed\n");
				continue;
			}
			if (elem.type > ENC_INFO_BASE && elem.type < ENC_INFO_BUTT &&
			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
				/* block speculative out-of-bounds indexing */
				elem.type = array_index_nospec(elem.type, ENC_INFO_BUTT);
				priv->codec_info[elem.type].flag = elem.flag;
				priv->codec_info[elem.type].val = elem.data;
			} else {
				mpp_err("codec info invalid, type %d, flag %d\n",
					elem.type, elem.flag);
			}
		}
	} break;
	default: {
		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
	} break;
	}

	return 0;
}
696*4882a593Smuzhiyun
rkvenc_free_session(struct mpp_session * session)697*4882a593Smuzhiyun static int rkvenc_free_session(struct mpp_session *session)
698*4882a593Smuzhiyun {
699*4882a593Smuzhiyun if (session && session->priv) {
700*4882a593Smuzhiyun kfree(session->priv);
701*4882a593Smuzhiyun session->priv = NULL;
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun
704*4882a593Smuzhiyun return 0;
705*4882a593Smuzhiyun }
706*4882a593Smuzhiyun
rkvenc_init_session(struct mpp_session * session)707*4882a593Smuzhiyun static int rkvenc_init_session(struct mpp_session *session)
708*4882a593Smuzhiyun {
709*4882a593Smuzhiyun struct rkvenc_session_priv *priv;
710*4882a593Smuzhiyun
711*4882a593Smuzhiyun if (!session) {
712*4882a593Smuzhiyun mpp_err("session is null\n");
713*4882a593Smuzhiyun return -EINVAL;
714*4882a593Smuzhiyun }
715*4882a593Smuzhiyun
716*4882a593Smuzhiyun priv = kzalloc(sizeof(*priv), GFP_KERNEL);
717*4882a593Smuzhiyun if (!priv)
718*4882a593Smuzhiyun return -ENOMEM;
719*4882a593Smuzhiyun
720*4882a593Smuzhiyun init_rwsem(&priv->rw_sem);
721*4882a593Smuzhiyun session->priv = priv;
722*4882a593Smuzhiyun
723*4882a593Smuzhiyun return 0;
724*4882a593Smuzhiyun }
725*4882a593Smuzhiyun
726*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
rkvenc_procfs_remove(struct mpp_dev * mpp)727*4882a593Smuzhiyun static int rkvenc_procfs_remove(struct mpp_dev *mpp)
728*4882a593Smuzhiyun {
729*4882a593Smuzhiyun struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
730*4882a593Smuzhiyun
731*4882a593Smuzhiyun if (enc->procfs) {
732*4882a593Smuzhiyun proc_remove(enc->procfs);
733*4882a593Smuzhiyun enc->procfs = NULL;
734*4882a593Smuzhiyun }
735*4882a593Smuzhiyun
736*4882a593Smuzhiyun return 0;
737*4882a593Smuzhiyun }
738*4882a593Smuzhiyun
rkvenc_dump_session(struct mpp_session * session,struct seq_file * seq)739*4882a593Smuzhiyun static int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
740*4882a593Smuzhiyun {
741*4882a593Smuzhiyun int i;
742*4882a593Smuzhiyun struct rkvenc_session_priv *priv = session->priv;
743*4882a593Smuzhiyun
744*4882a593Smuzhiyun down_read(&priv->rw_sem);
745*4882a593Smuzhiyun /* item name */
746*4882a593Smuzhiyun seq_puts(seq, "------------------------------------------------------");
747*4882a593Smuzhiyun seq_puts(seq, "------------------------------------------------------\n");
748*4882a593Smuzhiyun seq_printf(seq, "|%8s|", (const char *)"session");
749*4882a593Smuzhiyun seq_printf(seq, "%8s|", (const char *)"device");
750*4882a593Smuzhiyun for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
751*4882a593Smuzhiyun bool show = priv->codec_info[i].flag;
752*4882a593Smuzhiyun
753*4882a593Smuzhiyun if (show)
754*4882a593Smuzhiyun seq_printf(seq, "%8s|", enc_info_item_name[i]);
755*4882a593Smuzhiyun }
756*4882a593Smuzhiyun seq_puts(seq, "\n");
757*4882a593Smuzhiyun /* item data*/
758*4882a593Smuzhiyun seq_printf(seq, "|%8d|", session->index);
759*4882a593Smuzhiyun seq_printf(seq, "%8s|", mpp_device_name[session->device_type]);
760*4882a593Smuzhiyun for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
761*4882a593Smuzhiyun u32 flag = priv->codec_info[i].flag;
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun if (!flag)
764*4882a593Smuzhiyun continue;
765*4882a593Smuzhiyun if (flag == CODEC_INFO_FLAG_NUMBER) {
766*4882a593Smuzhiyun u32 data = priv->codec_info[i].val;
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun seq_printf(seq, "%8d|", data);
769*4882a593Smuzhiyun } else if (flag == CODEC_INFO_FLAG_STRING) {
770*4882a593Smuzhiyun const char *name = (const char *)&priv->codec_info[i].val;
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun seq_printf(seq, "%8s|", name);
773*4882a593Smuzhiyun } else {
774*4882a593Smuzhiyun seq_printf(seq, "%8s|", (const char *)"null");
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun }
777*4882a593Smuzhiyun seq_puts(seq, "\n");
778*4882a593Smuzhiyun up_read(&priv->rw_sem);
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun return 0;
781*4882a593Smuzhiyun }
782*4882a593Smuzhiyun
rkvenc_show_session_info(struct seq_file * seq,void * offset)783*4882a593Smuzhiyun static int rkvenc_show_session_info(struct seq_file *seq, void *offset)
784*4882a593Smuzhiyun {
785*4882a593Smuzhiyun struct mpp_session *session = NULL, *n;
786*4882a593Smuzhiyun struct mpp_dev *mpp = seq->private;
787*4882a593Smuzhiyun
788*4882a593Smuzhiyun mutex_lock(&mpp->srv->session_lock);
789*4882a593Smuzhiyun list_for_each_entry_safe(session, n,
790*4882a593Smuzhiyun &mpp->srv->session_list,
791*4882a593Smuzhiyun service_link) {
792*4882a593Smuzhiyun if (session->device_type != MPP_DEVICE_RKVENC)
793*4882a593Smuzhiyun continue;
794*4882a593Smuzhiyun if (!session->priv)
795*4882a593Smuzhiyun continue;
796*4882a593Smuzhiyun if (mpp->dev_ops->dump_session)
797*4882a593Smuzhiyun mpp->dev_ops->dump_session(session, seq);
798*4882a593Smuzhiyun }
799*4882a593Smuzhiyun mutex_unlock(&mpp->srv->session_lock);
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun return 0;
802*4882a593Smuzhiyun }
803*4882a593Smuzhiyun
rkvenc_procfs_init(struct mpp_dev * mpp)804*4882a593Smuzhiyun static int rkvenc_procfs_init(struct mpp_dev *mpp)
805*4882a593Smuzhiyun {
806*4882a593Smuzhiyun struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
807*4882a593Smuzhiyun
808*4882a593Smuzhiyun enc->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
809*4882a593Smuzhiyun if (IS_ERR_OR_NULL(enc->procfs)) {
810*4882a593Smuzhiyun mpp_err("failed on open procfs\n");
811*4882a593Smuzhiyun enc->procfs = NULL;
812*4882a593Smuzhiyun return -EIO;
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun
815*4882a593Smuzhiyun /* for common mpp_dev options */
816*4882a593Smuzhiyun mpp_procfs_create_common(enc->procfs, mpp);
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun /* for debug */
819*4882a593Smuzhiyun mpp_procfs_create_u32("aclk", 0644,
820*4882a593Smuzhiyun enc->procfs, &enc->aclk_info.debug_rate_hz);
821*4882a593Smuzhiyun mpp_procfs_create_u32("clk_core", 0644,
822*4882a593Smuzhiyun enc->procfs, &enc->core_clk_info.debug_rate_hz);
823*4882a593Smuzhiyun mpp_procfs_create_u32("session_buffers", 0644,
824*4882a593Smuzhiyun enc->procfs, &mpp->session_max_buffers);
825*4882a593Smuzhiyun /* for show session info */
826*4882a593Smuzhiyun proc_create_single_data("sessions-info", 0444,
827*4882a593Smuzhiyun enc->procfs, rkvenc_show_session_info, mpp);
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun return 0;
830*4882a593Smuzhiyun }
831*4882a593Smuzhiyun #else
/* CONFIG_ROCKCHIP_MPP_PROC_FS disabled: keep the interface, do nothing. */
static inline int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvenc_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	return 0;
}
846*4882a593Smuzhiyun #endif
847*4882a593Smuzhiyun
848*4882a593Smuzhiyun #ifdef CONFIG_PM_DEVFREQ
/*
 * devfreq target callback: move the encoder core clock and supply to the
 * OPP nearest *freq.
 *
 * The sequencing is deliberate: when scaling up, the voltage is raised
 * before the clock; when scaling down, the clock is lowered first. If the
 * frequency already matches, only the regulator is adjusted.
 */
static int rkvenc_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;
	unsigned long target_volt, target_freq;
	int ret = 0;

	struct rkvenc_dev *enc = dev_get_drvdata(dev);
	struct devfreq *devfreq = enc->devfreq;
	struct devfreq_dev_status *stat = &devfreq->last_status;
	unsigned long old_clk_rate = stat->current_frequency;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	target_freq = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	/* frequency unchanged: only the voltage may need adjusting */
	if (old_clk_rate == target_freq) {
		enc->core_last_rate_hz = target_freq;
		if (enc->volt == target_volt)
			return ret;
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			return ret;
		}
		enc->volt = target_volt;
		return 0;
	}

	/* scaling up: raise the supply before raising the clock */
	if (old_clk_rate < target_freq) {
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "set voltage %lu uV\n", target_volt);
			return ret;
		}
	}

	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_freq);
	clk_set_rate(enc->core_clk_info.clk, target_freq);
	stat->current_frequency = target_freq;
	enc->core_last_rate_hz = target_freq;

	/* scaling down: lower the supply only after the clock is reduced */
	if (old_clk_rate > target_freq) {
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "set vol %lu uV\n", target_volt);
			return ret;
		}
	}
	enc->volt = target_volt;

	return ret;
}
908*4882a593Smuzhiyun
/* devfreq status callback: no load statistics are collected for this device. */
static int rkvenc_devfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat)
{
	return 0;
}
914*4882a593Smuzhiyun
rkvenc_devfreq_get_cur_freq(struct device * dev,unsigned long * freq)915*4882a593Smuzhiyun static int rkvenc_devfreq_get_cur_freq(struct device *dev,
916*4882a593Smuzhiyun unsigned long *freq)
917*4882a593Smuzhiyun {
918*4882a593Smuzhiyun struct rkvenc_dev *enc = dev_get_drvdata(dev);
919*4882a593Smuzhiyun
920*4882a593Smuzhiyun *freq = enc->core_last_rate_hz;
921*4882a593Smuzhiyun
922*4882a593Smuzhiyun return 0;
923*4882a593Smuzhiyun }
924*4882a593Smuzhiyun
/* devfreq profile; initial_freq is filled in by rkvenc_devfreq_init() */
static struct devfreq_dev_profile rkvenc_devfreq_profile = {
	.target = rkvenc_devfreq_target,
	.get_dev_status = rkvenc_devfreq_get_dev_status,
	.get_cur_freq = rkvenc_devfreq_get_cur_freq,
};
930*4882a593Smuzhiyun
devfreq_venc_ondemand_func(struct devfreq * df,unsigned long * freq)931*4882a593Smuzhiyun static int devfreq_venc_ondemand_func(struct devfreq *df, unsigned long *freq)
932*4882a593Smuzhiyun {
933*4882a593Smuzhiyun struct rkvenc_dev *enc = df->data;
934*4882a593Smuzhiyun
935*4882a593Smuzhiyun if (enc)
936*4882a593Smuzhiyun *freq = enc->core_rate_hz;
937*4882a593Smuzhiyun else
938*4882a593Smuzhiyun *freq = df->previous_freq;
939*4882a593Smuzhiyun
940*4882a593Smuzhiyun return 0;
941*4882a593Smuzhiyun }
942*4882a593Smuzhiyun
/* Governor event handler: no per-event state to manage. */
static int devfreq_venc_ondemand_handler(struct devfreq *devfreq,
					 unsigned int event, void *data)
{
	return 0;
}

/*
 * Custom governor that simply targets the frequency computed by
 * devfreq_venc_ondemand_func(); registered in rkvenc_devfreq_init().
 */
static struct devfreq_governor devfreq_venc_ondemand = {
	.name = "venc_ondemand",
	.get_target_freq = devfreq_venc_ondemand_func,
	.event_handler = devfreq_venc_ondemand_handler,
};
954*4882a593Smuzhiyun
rkvenc_get_static_power(struct devfreq * devfreq,unsigned long voltage)955*4882a593Smuzhiyun static unsigned long rkvenc_get_static_power(struct devfreq *devfreq,
956*4882a593Smuzhiyun unsigned long voltage)
957*4882a593Smuzhiyun {
958*4882a593Smuzhiyun struct rkvenc_dev *enc = devfreq->data;
959*4882a593Smuzhiyun
960*4882a593Smuzhiyun if (!enc->model_data)
961*4882a593Smuzhiyun return 0;
962*4882a593Smuzhiyun else
963*4882a593Smuzhiyun return rockchip_ipa_get_static_power(enc->model_data,
964*4882a593Smuzhiyun voltage);
965*4882a593Smuzhiyun }
966*4882a593Smuzhiyun
/* devfreq-cooling hooks; dyn_power_coeff is filled in at devfreq init */
static struct devfreq_cooling_power venc_cooling_power_data = {
	.get_static_power = rkvenc_get_static_power,
};

/* thermal/system-monitor profile used when registering the device */
static struct monitor_dev_profile enc_mdevp = {
	.type = MONITOR_TYPE_DEV,
	.low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
	.high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
};
976*4882a593Smuzhiyun
rv1126_get_soc_info(struct device * dev,struct device_node * np,int * bin,int * process)977*4882a593Smuzhiyun static int __maybe_unused rv1126_get_soc_info(struct device *dev,
978*4882a593Smuzhiyun struct device_node *np,
979*4882a593Smuzhiyun int *bin, int *process)
980*4882a593Smuzhiyun {
981*4882a593Smuzhiyun int ret = 0;
982*4882a593Smuzhiyun u8 value = 0;
983*4882a593Smuzhiyun
984*4882a593Smuzhiyun if (of_property_match_string(np, "nvmem-cell-names", "performance") >= 0) {
985*4882a593Smuzhiyun ret = rockchip_nvmem_cell_read_u8(np, "performance", &value);
986*4882a593Smuzhiyun if (ret) {
987*4882a593Smuzhiyun dev_err(dev, "Failed to get soc performance value\n");
988*4882a593Smuzhiyun return ret;
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun if (value == 0x1)
991*4882a593Smuzhiyun *bin = 1;
992*4882a593Smuzhiyun else
993*4882a593Smuzhiyun *bin = 0;
994*4882a593Smuzhiyun }
995*4882a593Smuzhiyun if (*bin >= 0)
996*4882a593Smuzhiyun dev_info(dev, "bin=%d\n", *bin);
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun return ret;
999*4882a593Smuzhiyun }
1000*4882a593Smuzhiyun
static const struct rockchip_opp_data __maybe_unused rv1126_rkvenc_opp_data = {
	.get_soc_info = rv1126_get_soc_info,
};

/*
 * Matched by rockchip_get_opp_data() in rkvenc_devfreq_init() to select
 * per-SoC OPP data; not used as the platform driver's match table.
 */
static const struct of_device_id rockchip_rkvenc_of_match[] = {
#ifdef CONFIG_CPU_RV1126
	{
		.compatible = "rockchip,rv1109",
		.data = (void *)&rv1126_rkvenc_opp_data,
	},
	{
		.compatible = "rockchip,rv1126",
		.data = (void *)&rv1126_rkvenc_opp_data,
	},
#endif
	{},
};
1018*4882a593Smuzhiyun
/*
 * Set up devfreq for the encoder core clock: regulator, OPP table, custom
 * governor, devfreq device, IPA power model, cooling device and system
 * monitor. Missing regulator disables devfreq silently (returns 0);
 * power-model/cooling/monitor failures are logged but non-fatal.
 */
static int rkvenc_devfreq_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct clk *clk_core = enc->core_clk_info.clk;
	struct devfreq_cooling_power *venc_dcp = &venc_cooling_power_data;
	struct rockchip_opp_info opp_info = {0};
	int ret = 0;

	/* no core clock means nothing to scale */
	if (!clk_core)
		return 0;

	enc->vdd = devm_regulator_get_optional(mpp->dev, "venc");
	if (IS_ERR_OR_NULL(enc->vdd)) {
		if (PTR_ERR(enc->vdd) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "venc regulator not ready, retry\n");

			return -EPROBE_DEFER;
		}
		dev_info(mpp->dev, "no regulator, devfreq is disabled\n");

		return 0;
	}

	rockchip_get_opp_data(rockchip_rkvenc_of_match, &opp_info);
	ret = rockchip_init_opp_table(mpp->dev, &opp_info, "leakage", "venc");
	if (ret) {
		dev_err(mpp->dev, "failed to init_opp_table\n");
		return ret;
	}

	ret = devfreq_add_governor(&devfreq_venc_ondemand);
	if (ret) {
		dev_err(mpp->dev, "failed to add venc_ondemand governor\n");
		goto governor_err;
	}

	rkvenc_devfreq_profile.initial_freq = clk_get_rate(clk_core);

	enc->devfreq = devm_devfreq_add_device(mpp->dev,
					       &rkvenc_devfreq_profile,
					       "venc_ondemand", (void *)enc);
	if (IS_ERR(enc->devfreq)) {
		ret = PTR_ERR(enc->devfreq);
		enc->devfreq = NULL;
		goto devfreq_err;
	}
	/* non-zero so simple_ondemand-style math never divides by zero */
	enc->devfreq->last_status.total_time = 1;
	enc->devfreq->last_status.busy_time = 1;

	devfreq_register_opp_notifier(mpp->dev, enc->devfreq);

	/* DT coefficient is the fallback; the IPA model overrides it */
	of_property_read_u32(mpp->dev->of_node, "dynamic-power-coefficient",
			     (u32 *)&venc_dcp->dyn_power_coeff);
	enc->model_data = rockchip_ipa_power_model_init(mpp->dev,
							"venc_leakage");
	if (IS_ERR_OR_NULL(enc->model_data)) {
		enc->model_data = NULL;
		dev_err(mpp->dev, "failed to initialize power model\n");
	} else if (enc->model_data->dynamic_coefficient) {
		venc_dcp->dyn_power_coeff =
			enc->model_data->dynamic_coefficient;
	}
	/* without a coefficient the cooling device cannot be registered */
	if (!venc_dcp->dyn_power_coeff) {
		dev_err(mpp->dev, "failed to get dynamic-coefficient\n");
		goto out;
	}

	enc->devfreq_cooling =
		of_devfreq_cooling_register_power(mpp->dev->of_node,
						  enc->devfreq, venc_dcp);
	if (IS_ERR_OR_NULL(enc->devfreq_cooling))
		dev_err(mpp->dev, "failed to register cooling device\n");

	enc_mdevp.data = enc->devfreq;
	enc->mdev_info = rockchip_system_monitor_register(mpp->dev, &enc_mdevp);
	if (IS_ERR(enc->mdev_info)) {
		dev_dbg(mpp->dev, "without system monitor\n");
		enc->mdev_info = NULL;
	}

out:

	return 0;

devfreq_err:
	devfreq_remove_governor(&devfreq_venc_ondemand);
governor_err:
	dev_pm_opp_of_remove_table(mpp->dev);

	return ret;
}
1110*4882a593Smuzhiyun
rkvenc_devfreq_remove(struct mpp_dev * mpp)1111*4882a593Smuzhiyun static int rkvenc_devfreq_remove(struct mpp_dev *mpp)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun if (enc->mdev_info)
1116*4882a593Smuzhiyun rockchip_system_monitor_unregister(enc->mdev_info);
1117*4882a593Smuzhiyun if (enc->devfreq) {
1118*4882a593Smuzhiyun devfreq_unregister_opp_notifier(mpp->dev, enc->devfreq);
1119*4882a593Smuzhiyun dev_pm_opp_of_remove_table(mpp->dev);
1120*4882a593Smuzhiyun devfreq_remove_governor(&devfreq_venc_ondemand);
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun
1123*4882a593Smuzhiyun return 0;
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun #endif
1126*4882a593Smuzhiyun
/*
 * Deferred IOMMU page-fault handler: map a spare physical page at the
 * faulting IOVA so the hardware can make forward progress, then unmask
 * the IOMMU irq that the fault handler masked. Only one auxiliary
 * mapping is kept; any previous one is torn down first.
 */
static void rkvenc_iommu_handle_work(struct work_struct *work_s)
{
	int ret = 0;
	struct rkvenc_dev *enc = container_of(work_s, struct rkvenc_dev, iommu_work);
	struct mpp_dev *mpp = &enc->mpp;
	unsigned long page_iova = 0;

	mpp_debug_enter();

	/* avoid another page fault occur after page fault */
	mpp_iommu_down_write(mpp->iommu_info);

	/* drop the previous auxiliary mapping, if any */
	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	/* back the faulting page-aligned IOVA with the spare page */
	page_iova = round_down(enc->fault_iova, SZ_4K);
	ret = iommu_map(mpp->iommu_info->domain, page_iova,
			page_to_phys(enc->aux_page), IOMMU_PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		mpp_err("iommu_map iova %lx error.\n", page_iova);
	else
		enc->aux_iova = page_iova;

	/* re-enable the irq masked in rkvenc_iommu_fault_handle() */
	rockchip_iommu_unmask_irq(mpp->dev);
	mpp_iommu_up_write(mpp->iommu_info);

	mpp_debug_leave();
}
1158*4882a593Smuzhiyun
rkvenc_iommu_fault_handle(struct iommu_domain * iommu,struct device * iommu_dev,unsigned long iova,int status,void * arg)1159*4882a593Smuzhiyun static int rkvenc_iommu_fault_handle(struct iommu_domain *iommu,
1160*4882a593Smuzhiyun struct device *iommu_dev,
1161*4882a593Smuzhiyun unsigned long iova, int status, void *arg)
1162*4882a593Smuzhiyun {
1163*4882a593Smuzhiyun struct mpp_dev *mpp = (struct mpp_dev *)arg;
1164*4882a593Smuzhiyun struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1165*4882a593Smuzhiyun
1166*4882a593Smuzhiyun mpp_debug_enter();
1167*4882a593Smuzhiyun mpp_debug(DEBUG_IOMMU, "IOMMU_GET_BUS_ID(status)=%d\n", IOMMU_GET_BUS_ID(status));
1168*4882a593Smuzhiyun if (IOMMU_GET_BUS_ID(status)) {
1169*4882a593Smuzhiyun enc->fault_iova = iova;
1170*4882a593Smuzhiyun rockchip_iommu_mask_irq(mpp->dev);
1171*4882a593Smuzhiyun queue_work(enc->iommu_wq, &enc->iommu_work);
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun mpp_debug_leave();
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun return 0;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun
rkvenc_init(struct mpp_dev * mpp)1178*4882a593Smuzhiyun static int rkvenc_init(struct mpp_dev *mpp)
1179*4882a593Smuzhiyun {
1180*4882a593Smuzhiyun struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1181*4882a593Smuzhiyun int ret = 0;
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVENC];
1184*4882a593Smuzhiyun
1185*4882a593Smuzhiyun /* Get clock info from dtsi */
1186*4882a593Smuzhiyun ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec");
1187*4882a593Smuzhiyun if (ret)
1188*4882a593Smuzhiyun mpp_err("failed on clk_get aclk_vcodec\n");
1189*4882a593Smuzhiyun ret = mpp_get_clk_info(mpp, &enc->hclk_info, "hclk_vcodec");
1190*4882a593Smuzhiyun if (ret)
1191*4882a593Smuzhiyun mpp_err("failed on clk_get hclk_vcodec\n");
1192*4882a593Smuzhiyun ret = mpp_get_clk_info(mpp, &enc->core_clk_info, "clk_core");
1193*4882a593Smuzhiyun if (ret)
1194*4882a593Smuzhiyun mpp_err("failed on clk_get clk_core\n");
1195*4882a593Smuzhiyun /* Get normal max workload from dtsi */
1196*4882a593Smuzhiyun of_property_read_u32(mpp->dev->of_node,
1197*4882a593Smuzhiyun "rockchip,default-max-load",
1198*4882a593Smuzhiyun &enc->default_max_load);
1199*4882a593Smuzhiyun /* Set default rates */
1200*4882a593Smuzhiyun mpp_set_clk_info_rate_hz(&enc->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
1201*4882a593Smuzhiyun mpp_set_clk_info_rate_hz(&enc->core_clk_info, CLK_MODE_DEFAULT, 600 * MHZ);
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun /* Get reset control from dtsi */
1204*4882a593Smuzhiyun enc->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
1205*4882a593Smuzhiyun if (!enc->rst_a)
1206*4882a593Smuzhiyun mpp_err("No aclk reset resource define\n");
1207*4882a593Smuzhiyun enc->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
1208*4882a593Smuzhiyun if (!enc->rst_h)
1209*4882a593Smuzhiyun mpp_err("No hclk reset resource define\n");
1210*4882a593Smuzhiyun enc->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
1211*4882a593Smuzhiyun if (!enc->rst_core)
1212*4882a593Smuzhiyun mpp_err("No core reset resource define\n");
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun #ifdef CONFIG_PM_DEVFREQ
1215*4882a593Smuzhiyun ret = rkvenc_devfreq_init(mpp);
1216*4882a593Smuzhiyun if (ret)
1217*4882a593Smuzhiyun mpp_err("failed to add venc devfreq\n");
1218*4882a593Smuzhiyun #endif
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun /* for mmu pagefault */
1221*4882a593Smuzhiyun enc->aux_page = alloc_page(GFP_KERNEL);
1222*4882a593Smuzhiyun if (!enc->aux_page) {
1223*4882a593Smuzhiyun dev_err(mpp->dev, "allocate a page for auxiliary usage\n");
1224*4882a593Smuzhiyun return -ENOMEM;
1225*4882a593Smuzhiyun }
1226*4882a593Smuzhiyun enc->aux_iova = -1;
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun enc->iommu_wq = create_singlethread_workqueue("iommu_wq");
1229*4882a593Smuzhiyun if (!enc->iommu_wq) {
1230*4882a593Smuzhiyun mpp_err("failed to create workqueue\n");
1231*4882a593Smuzhiyun return -ENOMEM;
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun INIT_WORK(&enc->iommu_work, rkvenc_iommu_handle_work);
1234*4882a593Smuzhiyun
1235*4882a593Smuzhiyun mpp->iommu_info->hdl = rkvenc_iommu_fault_handle;
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun return ret;
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun
/*
 * Device teardown: remove devfreq, release the auxiliary fault page, drop
 * any auxiliary IOMMU mapping and destroy the fault workqueue.
 */
static int rkvenc_exit(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

#ifdef CONFIG_PM_DEVFREQ
	rkvenc_devfreq_remove(mpp);
#endif

	if (enc->aux_page)
		__free_page(enc->aux_page);

	/* tear down the mapping created by rkvenc_iommu_handle_work() */
	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	if (enc->iommu_wq) {
		destroy_workqueue(enc->iommu_wq);
		enc->iommu_wq = NULL;
	}

	return 0;
}
1263*4882a593Smuzhiyun
/*
 * Full encoder reset: drop clocks to the reduced rate, perform the "safe"
 * register-level reset (mask + clear + ack interrupts), then pulse the CRU
 * resets with the PMU held idle. devfreq is locked out for the duration so
 * it cannot rescale the clocks mid-reset.
 */
static int rkvenc_reset(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_debug_enter();

#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq)
		mutex_lock(&enc->devfreq->lock);
#endif
	mpp_clk_set_rate(&enc->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&enc->core_clk_info, CLK_MODE_REDUCE);
	/* safe reset */
	mpp_write(mpp, RKVENC_INT_MSK_BASE, 0x1FF);
	mpp_write(mpp, RKVENC_CLR_BASE, RKVENC_SAFE_CLR_BIT);
	udelay(5);
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", mpp_read(mpp, RKVENC_INT_STATUS_BASE));
	mpp_write(mpp, RKVENC_INT_CLR_BASE, 0xffffffff);
	mpp_write(mpp, RKVENC_INT_STATUS_BASE, 0);
	/* cru reset */
	if (enc->rst_a && enc->rst_h && enc->rst_core) {
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(enc->rst_a);
		mpp_safe_reset(enc->rst_h);
		mpp_safe_reset(enc->rst_core);
		udelay(5);
		mpp_safe_unreset(enc->rst_a);
		mpp_safe_unreset(enc->rst_h);
		mpp_safe_unreset(enc->rst_core);
		mpp_pmu_idle_request(mpp, false);
	}
#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq)
		mutex_unlock(&enc->devfreq->lock);
#endif

	mpp_debug_leave();

	return 0;
}
1304*4882a593Smuzhiyun
rkvenc_clk_on(struct mpp_dev * mpp)1305*4882a593Smuzhiyun static int rkvenc_clk_on(struct mpp_dev *mpp)
1306*4882a593Smuzhiyun {
1307*4882a593Smuzhiyun struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun mpp_clk_safe_enable(enc->aclk_info.clk);
1310*4882a593Smuzhiyun mpp_clk_safe_enable(enc->hclk_info.clk);
1311*4882a593Smuzhiyun mpp_clk_safe_enable(enc->core_clk_info.clk);
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun return 0;
1314*4882a593Smuzhiyun }
1315*4882a593Smuzhiyun
rkvenc_clk_off(struct mpp_dev * mpp)1316*4882a593Smuzhiyun static int rkvenc_clk_off(struct mpp_dev *mpp)
1317*4882a593Smuzhiyun {
1318*4882a593Smuzhiyun struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun clk_disable_unprepare(enc->aclk_info.clk);
1321*4882a593Smuzhiyun clk_disable_unprepare(enc->hclk_info.clk);
1322*4882a593Smuzhiyun clk_disable_unprepare(enc->core_clk_info.clk);
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun return 0;
1325*4882a593Smuzhiyun }
1326*4882a593Smuzhiyun
rkvenc_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)1327*4882a593Smuzhiyun static int rkvenc_get_freq(struct mpp_dev *mpp,
1328*4882a593Smuzhiyun struct mpp_task *mpp_task)
1329*4882a593Smuzhiyun {
1330*4882a593Smuzhiyun u32 task_cnt;
1331*4882a593Smuzhiyun u32 workload;
1332*4882a593Smuzhiyun struct mpp_task *loop = NULL, *n;
1333*4882a593Smuzhiyun struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1334*4882a593Smuzhiyun struct rkvenc_task *task = to_rkvenc_task(mpp_task);
1335*4882a593Smuzhiyun
1336*4882a593Smuzhiyun /* if not set max load, consider not have advanced mode */
1337*4882a593Smuzhiyun if (!enc->default_max_load)
1338*4882a593Smuzhiyun return 0;
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun task_cnt = 1;
1341*4882a593Smuzhiyun workload = task->pixels;
1342*4882a593Smuzhiyun /* calc workload in pending list */
1343*4882a593Smuzhiyun mutex_lock(&mpp->queue->pending_lock);
1344*4882a593Smuzhiyun list_for_each_entry_safe(loop, n,
1345*4882a593Smuzhiyun &mpp->queue->pending_list,
1346*4882a593Smuzhiyun queue_link) {
1347*4882a593Smuzhiyun struct rkvenc_task *loop_task = to_rkvenc_task(loop);
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun task_cnt++;
1350*4882a593Smuzhiyun workload += loop_task->pixels;
1351*4882a593Smuzhiyun }
1352*4882a593Smuzhiyun mutex_unlock(&mpp->queue->pending_lock);
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun if (workload > enc->default_max_load)
1355*4882a593Smuzhiyun task->clk_mode = CLK_MODE_ADVANCED;
1356*4882a593Smuzhiyun
1357*4882a593Smuzhiyun mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
1358*4882a593Smuzhiyun task_cnt, workload, task->clk_mode);
1359*4882a593Smuzhiyun
1360*4882a593Smuzhiyun return 0;
1361*4882a593Smuzhiyun }
1362*4882a593Smuzhiyun
/*
 * Apply the clock rates selected for a task (see rkvenc_get_freq()).
 *
 * The AXI clock is always set directly from the clk_mode rate table.
 * For the core clock there are two paths:
 *  - with devfreq: publish the target rate in enc->core_rate_hz and let
 *    update_devfreq() drive the change, so the devfreq governor stays in
 *    charge of the actual frequency;
 *  - without devfreq: set the core clock rate directly.
 *
 * Always returns 0 (mpp_hw_ops contract).
 */
static int rkvenc_set_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_clk_set_rate(&enc->aclk_info, task->clk_mode);

#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq) {
		unsigned long core_rate_hz;

		/*
		 * devfreq->lock serializes against the governor; it must be
		 * held around update_devfreq() per the devfreq API.
		 */
		mutex_lock(&enc->devfreq->lock);
		core_rate_hz = mpp_get_clk_info_rate_hz(&enc->core_clk_info, task->clk_mode);
		if (enc->core_rate_hz != core_rate_hz) {
			/* New target: record it and re-evaluate devfreq */
			enc->core_rate_hz = core_rate_hz;
			update_devfreq(enc->devfreq);
		} else {
			/*
			 * Restore frequency when frequency is changed by
			 * rkvenc_reduce_freq()
			 */
			clk_set_rate(enc->core_clk_info.clk, enc->core_last_rate_hz);
		}
		mutex_unlock(&enc->devfreq->lock);
		return 0;
	}
#endif
	/* No devfreq instance: program the core clock rate directly */
	mpp_clk_set_rate(&enc->core_clk_info, task->clk_mode);

	return 0;
}
1395*4882a593Smuzhiyun
/* Hardware-level callbacks wired into the common mpp core. */
static struct mpp_hw_ops rkvenc_hw_ops = {
	.init = rkvenc_init,
	.exit = rkvenc_exit,
	.clk_on = rkvenc_clk_on,
	.clk_off = rkvenc_clk_off,
	.get_freq = rkvenc_get_freq,
	.set_freq = rkvenc_set_freq,
	.reset = rkvenc_reset,
};
1405*4882a593Smuzhiyun
/* Task/session-level callbacks wired into the common mpp core. */
static struct mpp_dev_ops rkvenc_dev_ops = {
	.alloc_task = rkvenc_alloc_task,
	.run = rkvenc_run,
	.irq = rkvenc_irq,
	.isr = rkvenc_isr,
	.finish = rkvenc_finish,
	.result = rkvenc_result,
	.free_task = rkvenc_free_task,
	.ioctl = rkvenc_control,
	.init_session = rkvenc_init_session,
	.free_session = rkvenc_free_session,
	.dump_session = rkvenc_dump_session,
};
1419*4882a593Smuzhiyun
/* Per-variant description for the v1 rkvenc hardware (see DT match below). */
static const struct mpp_dev_var rkvenc_v1_data = {
	.device_type = MPP_DEVICE_RKVENC,
	.hw_info = &rkvenc_hw_info,
	.trans_info = trans_rk_rkvenc,
	.hw_ops = &rkvenc_hw_ops,
	.dev_ops = &rkvenc_dev_ops,
};
1427*4882a593Smuzhiyun
/* OF match table; .data carries the per-variant mpp_dev_var. */
static const struct of_device_id mpp_rkvenc_dt_match[] = {
	{
		.compatible = "rockchip,rkv-encoder-v1",
		.data = &rkvenc_v1_data,
	},
	{},
};
1435*4882a593Smuzhiyun
rkvenc_probe(struct platform_device * pdev)1436*4882a593Smuzhiyun static int rkvenc_probe(struct platform_device *pdev)
1437*4882a593Smuzhiyun {
1438*4882a593Smuzhiyun int ret = 0;
1439*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1440*4882a593Smuzhiyun struct rkvenc_dev *enc = NULL;
1441*4882a593Smuzhiyun struct mpp_dev *mpp = NULL;
1442*4882a593Smuzhiyun const struct of_device_id *match = NULL;
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun dev_info(dev, "probing start\n");
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
1447*4882a593Smuzhiyun if (!enc)
1448*4882a593Smuzhiyun return -ENOMEM;
1449*4882a593Smuzhiyun mpp = &enc->mpp;
1450*4882a593Smuzhiyun platform_set_drvdata(pdev, mpp);
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun if (pdev->dev.of_node) {
1453*4882a593Smuzhiyun match = of_match_node(mpp_rkvenc_dt_match, pdev->dev.of_node);
1454*4882a593Smuzhiyun if (match)
1455*4882a593Smuzhiyun mpp->var = (struct mpp_dev_var *)match->data;
1456*4882a593Smuzhiyun }
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun ret = mpp_dev_probe(mpp, pdev);
1459*4882a593Smuzhiyun if (ret)
1460*4882a593Smuzhiyun return ret;
1461*4882a593Smuzhiyun
1462*4882a593Smuzhiyun ret = devm_request_threaded_irq(dev, mpp->irq,
1463*4882a593Smuzhiyun mpp_dev_irq,
1464*4882a593Smuzhiyun mpp_dev_isr_sched,
1465*4882a593Smuzhiyun IRQF_SHARED,
1466*4882a593Smuzhiyun dev_name(dev), mpp);
1467*4882a593Smuzhiyun if (ret) {
1468*4882a593Smuzhiyun dev_err(dev, "register interrupter runtime failed\n");
1469*4882a593Smuzhiyun goto failed_get_irq;
1470*4882a593Smuzhiyun }
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
1473*4882a593Smuzhiyun rkvenc_procfs_init(mpp);
1474*4882a593Smuzhiyun /* register current device to mpp service */
1475*4882a593Smuzhiyun mpp_dev_register_srv(mpp, mpp->srv);
1476*4882a593Smuzhiyun dev_info(dev, "probing finish\n");
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun return 0;
1479*4882a593Smuzhiyun
1480*4882a593Smuzhiyun failed_get_irq:
1481*4882a593Smuzhiyun mpp_dev_remove(mpp);
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun return ret;
1484*4882a593Smuzhiyun }
1485*4882a593Smuzhiyun
rkvenc_remove(struct platform_device * pdev)1486*4882a593Smuzhiyun static int rkvenc_remove(struct platform_device *pdev)
1487*4882a593Smuzhiyun {
1488*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1489*4882a593Smuzhiyun struct mpp_dev *mpp = dev_get_drvdata(dev);
1490*4882a593Smuzhiyun
1491*4882a593Smuzhiyun dev_info(dev, "remove device\n");
1492*4882a593Smuzhiyun mpp_dev_remove(mpp);
1493*4882a593Smuzhiyun rkvenc_procfs_remove(mpp);
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun return 0;
1496*4882a593Smuzhiyun }
1497*4882a593Smuzhiyun
/*
 * Platform driver for the rkvenc encoder. Deliberately non-static:
 * presumably registered by the common mpp service core rather than via
 * module_platform_driver() here — confirm against mpp_service.
 */
struct platform_driver rockchip_rkvenc_driver = {
	.probe = rkvenc_probe,
	.remove = rkvenc_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = RKVENC_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_rkvenc_dt_match),
	},
};
1507