/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 * Alpha Lin, alpha.lin@rock-chips.com
 * Randy Li, randy.li@rock-chips.com
 * Ding Wei, leo.ding@rock-chips.com
 *
 */
#ifndef __ROCKCHIP_MPP_COMMON_H__
#define __ROCKCHIP_MPP_COMMON_H__

#include <linux/cdev.h>
#include <linux/clk.h>
#include <linux/dma-buf.h>
#include <linux/kfifo.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/reset.h>
#include <linux/irqreturn.h>
#include <linux/poll.h>
#include <linux/platform_device.h>
#include <soc/rockchip/pm_domains.h>

#define MHZ (1000 * 1000)
#define MPP_WORK_TIMEOUT_DELAY (500)

#define MPP_MAX_MSG_NUM (16)
#define MPP_MAX_REG_TRANS_NUM (60)
#define MPP_MAX_TASK_CAPACITY (16)
/* define flags for mpp_request */
#define MPP_FLAGS_MULTI_MSG (0x00000001)
#define MPP_FLAGS_LAST_MSG (0x00000002)
#define MPP_FLAGS_REG_FD_NO_TRANS (0x00000004)
#define MPP_FLAGS_SCL_FD_NO_TRANS (0x00000008)
#define MPP_FLAGS_REG_NO_OFFSET (0x00000010)
#define MPP_FLAGS_SECURE_MODE (0x00010000)

/* GRF mask for reading the value */
#define MPP_GRF_VAL_MASK (0xFFFF)

/* max 4 cores supported */
#define MPP_MAX_CORE_NUM (4)

/**
 * Device type: classified by hardware feature
 */
enum MPP_DEVICE_TYPE {
	MPP_DEVICE_VDPU1 = 0, /* 0x00000001 */
	MPP_DEVICE_VDPU2 = 1, /* 0x00000002 */
	MPP_DEVICE_VDPU1_PP = 2, /* 0x00000004 */
	MPP_DEVICE_VDPU2_PP = 3, /* 0x00000008 */
	MPP_DEVICE_AV1DEC = 4, /* 0x00000010 */

	MPP_DEVICE_HEVC_DEC = 8, /* 0x00000100 */
	MPP_DEVICE_RKVDEC = 9, /* 0x00000200 */
	MPP_DEVICE_AVSPLUS_DEC = 12, /* 0x00001000 */
	MPP_DEVICE_RKJPEGD = 13, /* 0x00002000 */

	MPP_DEVICE_RKVENC = 16, /* 0x00010000 */
	MPP_DEVICE_VEPU1 = 17, /* 0x00020000 */
	MPP_DEVICE_VEPU2 = 18, /* 0x00040000 */
	MPP_DEVICE_VEPU2_JPEG = 19, /* 0x00080000 */
	MPP_DEVICE_VEPU22 = 24, /* 0x01000000 */

	MPP_DEVICE_IEP2 = 28, /* 0x10000000 */
	MPP_DEVICE_VDPP = 29, /* 0x20000000 */
	MPP_DEVICE_BUTT,
};
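
/*
 * Each MPP_DEVICE_* value is a bit index, which is why every enumerator
 * above is annotated with its corresponding mask. A minimal sketch of how
 * such an index can be tested against the hw_support bitmap kept in
 * struct mpp_service further below (how hw_support gets populated is
 * driver specific and only assumed here):
 *
 *	if (test_bit(MPP_DEVICE_RKVDEC, &srv->hw_support))
 *		dev_info(srv->dev, "rkvdec hardware is available\n");
 */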

/**
 * Driver type: classified by driver
 */
enum MPP_DRIVER_TYPE {
	MPP_DRIVER_NULL = 0,
	MPP_DRIVER_VDPU1,
	MPP_DRIVER_VEPU1,
	MPP_DRIVER_VDPU2,
	MPP_DRIVER_VEPU2,
	MPP_DRIVER_VEPU22,
	MPP_DRIVER_RKVDEC,
	MPP_DRIVER_RKVENC,
	MPP_DRIVER_IEP,
	MPP_DRIVER_IEP2,
	MPP_DRIVER_JPGDEC,
	MPP_DRIVER_RKVDEC2,
	MPP_DRIVER_RKVENC2,
	MPP_DRIVER_AV1DEC,
	MPP_DRIVER_VDPP,
	MPP_DRIVER_BUTT,
};

/**
 * Command type: keep the same as user space
 */
enum MPP_DEV_COMMAND_TYPE {
	MPP_CMD_QUERY_BASE = 0,
	MPP_CMD_QUERY_HW_SUPPORT = MPP_CMD_QUERY_BASE + 0,
	MPP_CMD_QUERY_HW_ID = MPP_CMD_QUERY_BASE + 1,
	MPP_CMD_QUERY_CMD_SUPPORT = MPP_CMD_QUERY_BASE + 2,
	MPP_CMD_QUERY_BUTT,

	MPP_CMD_INIT_BASE = 0x100,
	MPP_CMD_INIT_CLIENT_TYPE = MPP_CMD_INIT_BASE + 0,
	MPP_CMD_INIT_DRIVER_DATA = MPP_CMD_INIT_BASE + 1,
	MPP_CMD_INIT_TRANS_TABLE = MPP_CMD_INIT_BASE + 2,
	MPP_CMD_INIT_BUTT,

	MPP_CMD_SEND_BASE = 0x200,
	MPP_CMD_SET_REG_WRITE = MPP_CMD_SEND_BASE + 0,
	MPP_CMD_SET_REG_READ = MPP_CMD_SEND_BASE + 1,
	MPP_CMD_SET_REG_ADDR_OFFSET = MPP_CMD_SEND_BASE + 2,
	MPP_CMD_SET_RCB_INFO = MPP_CMD_SEND_BASE + 3,
	MPP_CMD_SET_SESSION_FD = MPP_CMD_SEND_BASE + 4,
	MPP_CMD_SEND_BUTT,

	MPP_CMD_POLL_BASE = 0x300,
	MPP_CMD_POLL_HW_FINISH = MPP_CMD_POLL_BASE + 0,
	MPP_CMD_POLL_HW_IRQ = MPP_CMD_POLL_BASE + 1,
	MPP_CMD_POLL_BUTT,

	MPP_CMD_CONTROL_BASE = 0x400,
	MPP_CMD_RESET_SESSION = MPP_CMD_CONTROL_BASE + 0,
	MPP_CMD_TRANS_FD_TO_IOVA = MPP_CMD_CONTROL_BASE + 1,
	MPP_CMD_RELEASE_FD = MPP_CMD_CONTROL_BASE + 2,
	MPP_CMD_SEND_CODEC_INFO = MPP_CMD_CONTROL_BASE + 3,
	MPP_CMD_CONTROL_BUTT,

	MPP_CMD_BUTT,
};
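
/*
 * Commands are grouped into 0x100-wide ranges that end at a *_BUTT marker.
 * A hedged sketch of a range check (mpp_cmd_is_query() is a hypothetical
 * helper used for illustration, it is not part of this header):
 *
 *	static inline bool mpp_cmd_is_query(__u32 cmd)
 *	{
 *		return cmd >= MPP_CMD_QUERY_BASE && cmd < MPP_CMD_QUERY_BUTT;
 *	}
 */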

enum MPP_CLOCK_MODE {
	CLK_MODE_BASE = 0,
	CLK_MODE_DEFAULT = CLK_MODE_BASE,
	CLK_MODE_DEBUG,
	CLK_MODE_REDUCE,
	CLK_MODE_NORMAL,
	CLK_MODE_ADVANCED,
	CLK_MODE_BUTT,
};

enum MPP_RESET_TYPE {
	RST_TYPE_BASE = 0,
	RST_TYPE_A = RST_TYPE_BASE,
	RST_TYPE_H,
	RST_TYPE_NIU_A,
	RST_TYPE_NIU_H,
	RST_TYPE_CORE,
	RST_TYPE_CABAC,
	RST_TYPE_HEVC_CABAC,
	RST_TYPE_BUTT,
};

enum ENC_INFO_TYPE {
	ENC_INFO_BASE = 0,
	ENC_INFO_WIDTH,
	ENC_INFO_HEIGHT,
	ENC_INFO_FORMAT,
	ENC_INFO_FPS_IN,
	ENC_INFO_FPS_OUT,
	ENC_INFO_RC_MODE,
	ENC_INFO_BITRATE,
	ENC_INFO_GOP_SIZE,
	ENC_INFO_FPS_CALC,
	ENC_INFO_PROFILE,

	ENC_INFO_BUTT,
};

enum DEC_INFO_TYPE {
	DEC_INFO_BASE = 0,
	DEC_INFO_WIDTH,
	DEC_INFO_HEIGHT,
	DEC_INFO_FORMAT,
	DEC_INFO_BITDEPTH,
	DEC_INFO_FPS,

	DEC_INFO_BUTT,
};

enum CODEC_INFO_FLAGS {
	CODEC_INFO_FLAG_NULL = 0,
	CODEC_INFO_FLAG_NUMBER,
	CODEC_INFO_FLAG_STRING,

	CODEC_INFO_FLAG_BUTT,
};

struct mpp_task;
struct mpp_session;
struct mpp_dma_session;
struct mpp_taskqueue;
struct iommu_domain;

/* common data struct parsed out of the ioctl request */
struct mpp_request {
	__u32 cmd;
	__u32 flags;
	__u32 size;
	__u32 offset;
	void __user *data;
};
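
/*
 * A hedged sketch of how user space could batch several requests in one
 * ioctl payload: the driver keeps reading struct mpp_request entries until
 * it sees MPP_FLAGS_LAST_MSG. The flag usage below is an assumption drawn
 * from the flag names, not a documented ABI description, and regs/reg_size
 * are placeholder user-space variables:
 *
 *	struct mpp_request reqs[2] = {
 *		{ .cmd = MPP_CMD_SET_REG_WRITE, .flags = MPP_FLAGS_MULTI_MSG,
 *		  .size = reg_size, .data = regs, },
 *		{ .cmd = MPP_CMD_POLL_HW_FINISH, .flags = MPP_FLAGS_LAST_MSG, },
 *	};
 */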

/* struct used to collect task set and poll messages */
struct mpp_task_msgs {
	/* for ioctl msgs batch processing */
	struct list_head list;
	struct list_head list_session;

	struct mpp_session *session;
	struct mpp_taskqueue *queue;
	struct mpp_task *task;
	struct mpp_dev *mpp;

	/* for fd reference */
	int ext_fd;
	struct fd f;

	u32 flags;
	u32 req_cnt;
	u32 set_cnt;
	u32 poll_cnt;

	struct mpp_request reqs[MPP_MAX_MSG_NUM];
	struct mpp_request *poll_req;
};

struct mpp_grf_info {
	u32 offset;
	u32 val;
	struct regmap *grf;
};

/**
 * struct for hardware info
 */
struct mpp_hw_info {
	/* register number */
	u32 reg_num;
	/* hardware id */
	int reg_id;
	u32 hw_id;
	/* start index of register */
	u32 reg_start;
	/* end index of register */
	u32 reg_end;
	/* register of enable hardware */
	int reg_en;
	void *link_info;
};

struct mpp_trans_info {
	const int count;
	const u16 * const table;
};

struct reg_offset_elem {
	u32 index;
	u32 offset;
};

struct reg_offset_info {
	u32 cnt;
	struct reg_offset_elem elem[MPP_MAX_REG_TRANS_NUM];
};

struct codec_info_elem {
	__u32 type;
	__u32 flag;
	__u64 data;
};

struct mpp_clk_info {
	struct clk *clk;

	/* debug rate, from debug */
	u32 debug_rate_hz;
	/* normal rate, from dtsi */
	u32 normal_rate_hz;
	/* high performance rate, from dtsi */
	u32 advanced_rate_hz;

	u32 default_rate_hz;
	u32 reduce_rate_hz;
	/* record last used rate */
	u32 used_rate_hz;
	u32 real_rate_hz;
};
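
/*
 * A minimal usage sketch, assuming the per-mode rates were already filled
 * in from the dtsi via mpp_get_clk_info() declared below: drop to the
 * reduced rate while the hardware is idle and switch back to the normal
 * rate before starting a task. enc->aclk_info is a hypothetical per-device
 * mpp_clk_info instance.
 *
 *	mpp_clk_set_rate(&enc->aclk_info, CLK_MODE_REDUCE);
 *	...
 *	mpp_clk_set_rate(&enc->aclk_info, CLK_MODE_NORMAL);
 */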

struct mpp_dev_var {
	enum MPP_DEVICE_TYPE device_type;

	/* info for each hardware */
	struct mpp_hw_info *hw_info;
	struct mpp_trans_info *trans_info;
	struct mpp_hw_ops *hw_ops;
	struct mpp_dev_ops *dev_ops;
};

struct mpp_mem_region {
	struct list_head reg_link;
	/* address for iommu */
	dma_addr_t iova;
	unsigned long len;
	u32 reg_idx;
	void *hdl;
	int fd;
	/* whether this is a duplicated import entry */
	bool is_dup;
};

struct mpp_dev {
	struct device *dev;
	const struct mpp_dev_var *var;
	struct mpp_hw_ops *hw_ops;
	struct mpp_dev_ops *dev_ops;

	/* per-device work for attached taskqueue */
	struct kthread_work work;
	/* the flag for get/set/reduce freq */
	bool auto_freq_en;
	/* the flag for pmu idle request before device reset */
	bool skip_idle;

	/*
	 * The task capacity is the task queue length that the hardware can
	 * accept. The default of 1 means normal hardware can only accept one
	 * task at a time.
	 */
	u32 task_capacity;
	/*
	 * The message capacity is the maximum number of messages processed in
	 * parallel. The default of 1 means normal hardware can only accept
	 * one message per ioctl.
	 * Multi-core hardware can accept more messages in one ioctl.
	 */
	u32 msgs_cap;

	int irq;
	bool is_irq_startup;
	u32 irq_status;

	void __iomem *reg_base;
	struct mpp_grf_info *grf_info;
	struct mpp_iommu_info *iommu_info;
	int (*fault_handler)(struct iommu_domain *iommu, struct device *iommu_dev,
			     unsigned long iova, int status, void *arg);
	resource_size_t io_base;

	atomic_t reset_request;
	atomic_t session_index;
	atomic_t task_count;
	atomic_t task_index;
	/* the task currently running */
	struct mpp_task *cur_task;
	/* max buffers set for the session */
	u32 session_max_buffers;
	struct mpp_taskqueue *queue;
	struct mpp_reset_group *reset_group;
	/* point to MPP Service */
	struct mpp_service *srv;

	/* multi-core data */
	struct list_head queue_link;
	s32 core_id;

	/* common per-device procfs */
	u32 disable;
	u32 timing_check;
};

struct mpp_session {
	enum MPP_DEVICE_TYPE device_type;
	u32 index;
	/* the session related device private data */
	struct mpp_service *srv;
	struct mpp_dev *mpp;
	struct mpp_dma_session *dma;

	/* lock for session task pending list */
	struct mutex pending_lock;
	/* task pending list in session */
	struct list_head pending_list;

	pid_t pid;
	atomic_t task_count;
	atomic_t release_request;
	/* trans info set by user */
	int trans_count;
	u16 trans_table[MPP_MAX_REG_TRANS_NUM];
	u32 msg_flags;
	/* link to mpp_service session_list */
	struct list_head service_link;
	/* link to mpp_taskqueue session_attach / session_detach */
	struct list_head session_link;
	/* private data */
	void *priv;

	/*
	 * session handlers from mpp_dev_ops
	 * process_task - handle messages of sending task
	 * wait_result - handle messages of polling task
	 * deinit - handle session deinit
	 */
	int (*process_task)(struct mpp_session *session,
			    struct mpp_task_msgs *msgs);
	int (*wait_result)(struct mpp_session *session,
			   struct mpp_task_msgs *msgs);
	void (*deinit)(struct mpp_session *session);

	/* max message count */
	int msgs_cnt;
	struct list_head list_msgs;
	struct list_head list_msgs_idle;
	spinlock_t lock_msgs;
};

/* task state in work thread */
enum mpp_task_state {
	TASK_STATE_PENDING = 0,
	TASK_STATE_RUNNING = 1,
	TASK_STATE_START = 2,
	TASK_STATE_HANDLE = 3,
	TASK_STATE_IRQ = 4,
	TASK_STATE_FINISH = 5,
	TASK_STATE_TIMEOUT = 6,
	TASK_STATE_DONE = 7,

	TASK_STATE_PREPARE = 8,
	TASK_STATE_ABORT = 9,
	TASK_STATE_ABORT_READY = 10,
	TASK_STATE_PROC_DONE = 11,

	/* timing debug state */
	TASK_TIMING_CREATE = 16,
	TASK_TIMING_CREATE_END = 17,
	TASK_TIMING_PENDING = 18,
	TASK_TIMING_RUN = 19,
	TASK_TIMING_TO_SCHED = 20,
	TASK_TIMING_RUN_END = 21,
	TASK_TIMING_IRQ = 22,
	TASK_TIMING_TO_CANCEL = 23,
	TASK_TIMING_ISR = 24,
	TASK_TIMING_FINISH = 25,
};
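
/*
 * struct mpp_task below keeps these states in an unsigned long bitmap, so
 * several of them can be set at the same time. A hedged sketch of how a
 * transition could be recorded and checked (the real call sites live in
 * the driver .c files; this is only an illustration):
 *
 *	set_bit(TASK_STATE_RUNNING, &task->state);
 *	if (test_bit(TASK_STATE_ABORT, &task->state))
 *		return;		// session is going away, skip the hw start
 */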

/* The context for a task */
struct mpp_task {
	/* the context this task belongs to */
	struct mpp_session *session;

	/* link to pending list in session */
	struct list_head pending_link;
	/* link to done list in session */
	struct list_head done_link;
	/* link to list in taskqueue */
	struct list_head queue_link;
	/* the DMA buffers used in this task */
	struct list_head mem_region_list;
	u32 mem_count;
	struct mpp_mem_region mem_regions[MPP_MAX_REG_TRANS_NUM];

	/* state in the taskqueue */
	unsigned long state;
	atomic_t abort_request;
	/* delayed work for hardware timeout */
	struct delayed_work timeout_work;
	struct kref ref;

	/* record context running start time */
	ktime_t start;
	ktime_t part;

	/* debug timing */
	ktime_t on_create;
	ktime_t on_create_end;
	ktime_t on_pending;
	ktime_t on_run;
	ktime_t on_sched_timeout;
	ktime_t on_run_end;
	ktime_t on_irq;
	ktime_t on_cancel_timeout;
	ktime_t on_isr;
	ktime_t on_finish;

	/* hardware info for current task */
	struct mpp_hw_info *hw_info;
	u32 task_index;
	u32 task_id;
	u32 *reg;
	/* event for session wait thread */
	wait_queue_head_t wait;

	/* for multi-core */
	struct mpp_dev *mpp;
	s32 core_id;
	/* hw cycles */
	u32 hw_cycles;
};

struct mpp_taskqueue {
	/* kworker for attached taskqueue */
	struct kthread_worker worker;
	/* task for work queue */
	struct task_struct *kworker_task;

	/* lock for session_attach and session_detach */
	struct mutex session_lock;
	/* link to session session_link for attached sessions */
	struct list_head session_attach;
	/* link to session session_link for detached sessions */
	struct list_head session_detach;
	atomic_t detach_count;

	atomic_t task_id;
	/* lock for pending list */
	struct mutex pending_lock;
	struct list_head pending_list;
	/* lock for running list */
	spinlock_t running_lock;
	struct list_head running_list;

	/* point to MPP Service */
	struct mpp_service *srv;
	/* lock for mmu list */
	struct mutex mmu_lock;
	struct list_head mmu_list;
	/* lock for dev list */
	struct mutex dev_lock;
	struct list_head dev_list;
	/*
	 * task_capacity of the taskqueue is the minimum of the task
	 * capacities of the devices attached to the taskqueue
	 */
	u32 task_capacity;

	/* multi-core task distribution */
	atomic_t reset_request;
	struct mpp_dev *cores[MPP_MAX_CORE_NUM];
	unsigned long core_idle;
	u32 core_id_max;
	u32 core_count;
	unsigned long dev_active_flags;
	u32 iommu_fault;
};
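
/*
 * A hedged sketch of how the multi-core fields could cooperate: core_idle
 * is treated here as a bitmap of free cores, so picking a core for the
 * next task might look like the following (illustration only, the real
 * scheduling policy lives in the core drivers; queue and task are
 * hypothetical pointers):
 *
 *	unsigned long core_id = find_first_bit(&queue->core_idle,
 *					       queue->core_id_max + 1);
 *
 *	if (core_id <= queue->core_id_max) {
 *		clear_bit(core_id, &queue->core_idle);
 *		task->mpp = queue->cores[core_id];
 *	}
 */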

struct mpp_reset_group {
	/* the flag for whether to use rw_sem */
	u32 rw_sem_on;
	struct rw_semaphore rw_sem;
	struct reset_control *resets[RST_TYPE_BUTT];
	/* for setting rw_sem */
	struct mpp_taskqueue *queue;
};

struct mpp_service {
	struct class *cls;
	struct device *dev;
	dev_t dev_id;
	struct cdev mpp_cdev;
	struct device *child_dev;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	unsigned long hw_support;
	atomic_t shutdown_request;
	/* the following are used for device probe */
	struct mpp_grf_info grf_infos[MPP_DRIVER_BUTT];
	struct platform_driver *sub_drivers[MPP_DRIVER_BUTT];
	/* the following are used for attaching to the service */
	struct mpp_dev *sub_devices[MPP_DEVICE_BUTT];
	u32 taskqueue_cnt;
	struct mpp_taskqueue *task_queues[MPP_DEVICE_BUTT];
	u32 reset_group_cnt;
	struct mpp_reset_group *reset_groups[MPP_DEVICE_BUTT];

	/* lock for session list */
	struct mutex session_lock;
	struct list_head session_list;
	u32 session_count;

	/* global timing record flag */
	u32 timing_en;
};

/*
 * struct mpp_hw_ops - context specific operations for the device
 * @init Do something when the hardware is probed.
 * @exit Do something when the hardware is removed.
 * @clk_on Enable clocks.
 * @clk_off Disable clocks.
 * @get_freq Get the specific frequency to set.
 * @set_freq Set the frequency to the hardware.
 * @reduce_freq Reduce the frequency when the hardware is not running.
 * @reset Reset the hardware on error.
 */
struct mpp_hw_ops {
	int (*init)(struct mpp_dev *mpp);
	int (*exit)(struct mpp_dev *mpp);
	int (*clk_on)(struct mpp_dev *mpp);
	int (*clk_off)(struct mpp_dev *mpp);
	int (*get_freq)(struct mpp_dev *mpp,
			struct mpp_task *mpp_task);
	int (*set_freq)(struct mpp_dev *mpp,
			struct mpp_task *mpp_task);
	int (*reduce_freq)(struct mpp_dev *mpp);
	int (*reset)(struct mpp_dev *mpp);
	int (*set_grf)(struct mpp_dev *mpp);
};

/*
 * struct mpp_dev_ops - context specific operations for tasks
 * @alloc_task Allocate and set up a task.
 * @prepare Check the HW status to determine whether to run the next task.
 * @run Start a single {en,de}coding run. Set registers to hardware.
 * @irq Deal with the hardware interrupt top-half.
 * @isr Deal with the hardware interrupt bottom-half.
 * @finish Read back processing results and additional data from hardware.
 * @result Read the status back to userspace.
 * @free_task Release the resources allocated by alloc_task.
 * @ioctl Special command from userspace.
 * @init_session Extra initialization on session init.
 * @free_session Extra cleanup on session deinit.
 * @dump_session Information dump for a session.
 * @dump_dev Information dump for the hardware device.
 */
struct mpp_dev_ops {
	int (*process_task)(struct mpp_session *session,
			    struct mpp_task_msgs *msgs);
	int (*wait_result)(struct mpp_session *session,
			   struct mpp_task_msgs *msgs);
	void (*deinit)(struct mpp_session *session);
	void (*task_worker)(struct kthread_work *work_s);

	void *(*alloc_task)(struct mpp_session *session,
			    struct mpp_task_msgs *msgs);
	void *(*prepare)(struct mpp_dev *mpp, struct mpp_task *task);
	int (*run)(struct mpp_dev *mpp, struct mpp_task *task);
	int (*irq)(struct mpp_dev *mpp);
	int (*isr)(struct mpp_dev *mpp);
	int (*finish)(struct mpp_dev *mpp, struct mpp_task *task);
	int (*result)(struct mpp_dev *mpp, struct mpp_task *task,
		      struct mpp_task_msgs *msgs);
	int (*free_task)(struct mpp_session *session,
			 struct mpp_task *task);
	int (*ioctl)(struct mpp_session *session, struct mpp_request *req);
	int (*init_session)(struct mpp_session *session);
	int (*free_session)(struct mpp_session *session);
	int (*dump_session)(struct mpp_session *session, struct seq_file *seq);
	int (*dump_dev)(struct mpp_dev *mpp);
};

struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev);

struct mpp_mem_region *
mpp_task_attach_fd(struct mpp_task *task, int fd);
int mpp_translate_reg_address(struct mpp_session *session,
			      struct mpp_task *task, int fmt,
			      u32 *reg, struct reg_offset_info *off_inf);

int mpp_check_req(struct mpp_request *req, int base,
		  int max_size, u32 off_s, u32 off_e);
int mpp_extract_reg_offset_info(struct reg_offset_info *off_inf,
				struct mpp_request *req);
int mpp_query_reg_offset_info(struct reg_offset_info *off_inf,
			      u32 index);
int mpp_translate_reg_offset_info(struct mpp_task *task,
				  struct reg_offset_info *off_inf,
				  u32 *reg);
int mpp_task_init(struct mpp_session *session,
		  struct mpp_task *task);
int mpp_task_finish(struct mpp_session *session,
		    struct mpp_task *task);
void mpp_task_run_begin(struct mpp_task *task, u32 timing_en, u32 timeout);
void mpp_task_run_end(struct mpp_task *task, u32 timing_en);
int mpp_task_finalize(struct mpp_session *session,
		      struct mpp_task *task);
int mpp_task_dump_mem_region(struct mpp_dev *mpp,
			     struct mpp_task *task);
int mpp_task_dump_reg(struct mpp_dev *mpp,
		      struct mpp_task *task);
int mpp_task_dump_hw_reg(struct mpp_dev *mpp);
void mpp_task_dump_timing(struct mpp_task *task, s64 time_diff);

void mpp_reg_show(struct mpp_dev *mpp, u32 offset);
void mpp_reg_show_range(struct mpp_dev *mpp, u32 start, u32 end);
void mpp_free_task(struct kref *ref);

void mpp_session_deinit(struct mpp_session *session);
void mpp_session_cleanup_detach(struct mpp_taskqueue *queue,
				struct kthread_work *work);

int mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, struct mpp_task *task);

int mpp_dev_probe(struct mpp_dev *mpp,
		  struct platform_device *pdev);
int mpp_dev_remove(struct mpp_dev *mpp);
void mpp_dev_shutdown(struct platform_device *pdev);
int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv);

int mpp_power_on(struct mpp_dev *mpp);
int mpp_power_off(struct mpp_dev *mpp);
int mpp_dev_reset(struct mpp_dev *mpp);

irqreturn_t mpp_dev_irq(int irq, void *param);
irqreturn_t mpp_dev_isr_sched(int irq, void *param);

struct reset_control *mpp_reset_control_get(struct mpp_dev *mpp,
					    enum MPP_RESET_TYPE type,
					    const char *name);

u32 mpp_get_grf(struct mpp_grf_info *grf_info);
bool mpp_grf_is_changed(struct mpp_grf_info *grf_info);
int mpp_set_grf(struct mpp_grf_info *grf_info);

int mpp_time_record(struct mpp_task *task);
int mpp_time_diff(struct mpp_task *task);
int mpp_time_diff_with_hw_time(struct mpp_task *task, u32 clk_hz);
int mpp_time_part_diff(struct mpp_task *task);

int mpp_write_req(struct mpp_dev *mpp, u32 *regs,
		  u32 start_idx, u32 end_idx, u32 en_idx);
int mpp_read_req(struct mpp_dev *mpp, u32 *regs,
		 u32 start_idx, u32 end_idx);

int mpp_get_clk_info(struct mpp_dev *mpp,
		     struct mpp_clk_info *clk_info,
		     const char *name);
int mpp_set_clk_info_rate_hz(struct mpp_clk_info *clk_info,
			     enum MPP_CLOCK_MODE mode,
			     unsigned long val);
unsigned long mpp_get_clk_info_rate_hz(struct mpp_clk_info *clk_info,
				       enum MPP_CLOCK_MODE mode);
int mpp_clk_set_rate(struct mpp_clk_info *clk_info,
		     enum MPP_CLOCK_MODE mode);

static inline int mpp_write(struct mpp_dev *mpp, u32 reg, u32 val)
{
	int idx = reg / sizeof(u32);

	mpp_debug(DEBUG_SET_REG,
		  "write reg[%03d]: %04x: 0x%08x\n", idx, reg, val);
	writel(val, mpp->reg_base + reg);

	return 0;
}

static inline int mpp_write_relaxed(struct mpp_dev *mpp, u32 reg, u32 val)
{
	int idx = reg / sizeof(u32);

	mpp_debug(DEBUG_SET_REG,
		  "write reg[%03d]: %04x: 0x%08x\n", idx, reg, val);
	writel_relaxed(val, mpp->reg_base + reg);

	return 0;
}

static inline u32 mpp_read(struct mpp_dev *mpp, u32 reg)
{
	u32 val = 0;
	int idx = reg / sizeof(u32);

	val = readl(mpp->reg_base + reg);
	mpp_debug(DEBUG_GET_REG,
		  "read reg[%03d]: %04x: 0x%08x\n", idx, reg, val);

	return val;
}

static inline u32 mpp_read_relaxed(struct mpp_dev *mpp, u32 reg)
{
	u32 val = 0;
	int idx = reg / sizeof(u32);

	val = readl_relaxed(mpp->reg_base + reg);
	mpp_debug(DEBUG_GET_REG,
		  "read reg[%03d] %04x: 0x%08x\n", idx, reg, val);

	return val;
}
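
/*
 * The reg argument of the helpers above is a byte offset into the register
 * file, while the debug print shows the word index (reg / 4). A hedged
 * sketch of what a register block write driven by mpp_write_req() could
 * look like, with the enable register written last to kick the hardware
 * (an assumption based on the declaration above, not a description of the
 * actual implementation; i, regs, start_idx, end_idx and en_idx mirror the
 * mpp_write_req() parameters):
 *
 *	for (i = start_idx; i < end_idx; i++) {
 *		if (i == en_idx)
 *			continue;
 *		mpp_write_relaxed(mpp, i * sizeof(u32), regs[i]);
 *	}
 *	mpp_write(mpp, en_idx * sizeof(u32), regs[en_idx]);
 */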

static inline int mpp_safe_reset(struct reset_control *rst)
{
	if (rst)
		reset_control_assert(rst);

	return 0;
}

static inline int mpp_safe_unreset(struct reset_control *rst)
{
	if (rst)
		reset_control_deassert(rst);

	return 0;
}

static inline int mpp_clk_safe_enable(struct clk *clk)
{
	if (clk)
		clk_prepare_enable(clk);

	return 0;
}

static inline int mpp_clk_safe_disable(struct clk *clk)
{
	if (clk)
		clk_disable_unprepare(clk);

	return 0;
}

static inline int mpp_reset_down_read(struct mpp_reset_group *group)
{
	if (group && group->rw_sem_on)
		down_read(&group->rw_sem);

	return 0;
}

static inline int mpp_reset_up_read(struct mpp_reset_group *group)
{
	if (group && group->rw_sem_on)
		up_read(&group->rw_sem);

	return 0;
}

static inline int mpp_reset_down_write(struct mpp_reset_group *group)
{
	if (group && group->rw_sem_on)
		down_write(&group->rw_sem);

	return 0;
}

static inline int mpp_reset_up_write(struct mpp_reset_group *group)
{
	if (group && group->rw_sem_on)
		up_write(&group->rw_sem);

	return 0;
}

static inline int mpp_pmu_idle_request(struct mpp_dev *mpp, bool idle)
{
	if (mpp->skip_idle)
		return 0;

	return rockchip_pmu_idle_request(mpp->dev, idle);
}

static inline struct mpp_dev *
mpp_get_task_used_device(const struct mpp_task *task,
			 const struct mpp_session *session)
{
	return task->mpp ? task->mpp : session->mpp;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
struct proc_dir_entry *
mpp_procfs_create_u32(const char *name, umode_t mode,
		      struct proc_dir_entry *parent, void *data);
void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp);
#else
static inline struct proc_dir_entry *
mpp_procfs_create_u32(const char *name, umode_t mode,
		      struct proc_dir_entry *parent, void *data)
{
	return NULL;
}
static inline void
mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp)
{
}
#endif

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
extern const char *mpp_device_name[MPP_DEVICE_BUTT];
extern const char *enc_info_item_name[ENC_INFO_BUTT];
#endif

extern const struct file_operations rockchip_mpp_fops;

extern struct platform_driver rockchip_rkvdec_driver;
extern struct platform_driver rockchip_rkvenc_driver;
extern struct platform_driver rockchip_vdpu1_driver;
extern struct platform_driver rockchip_vepu1_driver;
extern struct platform_driver rockchip_vdpu2_driver;
extern struct platform_driver rockchip_vepu2_driver;
extern struct platform_driver rockchip_vepu22_driver;
extern struct platform_driver rockchip_iep2_driver;
extern struct platform_driver rockchip_jpgdec_driver;
extern struct platform_driver rockchip_rkvdec2_driver;
extern struct platform_driver rockchip_rkvenc2_driver;
extern struct platform_driver rockchip_av1dec_driver;
extern struct platform_driver rockchip_av1_iommu_driver;

extern int av1dec_driver_register(struct platform_driver *drv);
extern void av1dec_driver_unregister(struct platform_driver *drv);
extern struct bus_type av1dec_bus;
extern struct platform_driver rockchip_vdpp_driver;

#endif