/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) Rockchip Electronics Co., Ltd.
 *
 * Author: Huang Lee <Putin.li@rock-chips.com>
 */

#ifndef __LINUX_RGA_DRV_H_
#define __LINUX_RGA_DRV_H_

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include <linux/wait.h>
#include <linux/pm_runtime.h>
#include <linux/sched/mm.h>
#include <linux/string_helpers.h>

#include <asm/cacheflush.h>

#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/pagemap.h>

#ifdef CONFIG_DMABUF_CACHE
#include <linux/dma-buf-cache.h>
#else
#include <linux/dma-buf.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
#include <linux/dma-map-ops.h>
#endif

#include <linux/hrtimer.h>

#include "rga.h"

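/* Address offset between the per-core register windows. */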
#define RGA_CORE_REG_OFFSET	0x10000

/* load interval: 1000ms */
#define RGA_LOAD_INTERVAL_US	1000000

/* timer interval: 1000ms */
#define RGA_TIMER_INTERVAL_NS	1000000000

#if ((defined(CONFIG_RK_IOMMU) || defined(CONFIG_ROCKCHIP_IOMMU)) \
	&& defined(CONFIG_ION_ROCKCHIP))
#define CONFIG_RGA_IOMMU
#endif

/* Driver information */
#define DRIVER_DESC	"RGA multicore Device Driver"
#define DRIVER_NAME	"rga_multicore"

#define STR_HELPER(x)	#x
#define STR(x)		STR_HELPER(x)

#define DRIVER_MAJOR_VERSION		1
#define DRIVER_MINOR_VERSION		2
#define DRIVER_REVISION_VERSION		27
#define DRIVER_PATCH_VERSION

#define DRIVER_VERSION (STR(DRIVER_MAJOR_VERSION) "." STR(DRIVER_MINOR_VERSION) \
			"." STR(DRIVER_REVISION_VERSION) STR(DRIVER_PATCH_VERSION))

/* time limit */
#define RGA_JOB_TIMEOUT_DELAY	HZ
#define RGA_RESET_TIMEOUT	1000

#define RGA_MAX_SCHEDULER	3
#define RGA_MAX_BUS_CLK		10

#define RGA_BUFFER_POOL_MAX_SIZE	64

#ifndef ABS
#define ABS(X) (((X) < 0) ? (-(X)) : (X))
#endif

#ifndef CLIP
#define CLIP(x, a, b) (((x) < (a)) \
	? (a) : (((x) > (b)) ? (b) : (x)))
#endif

extern struct rga_drvdata_t *rga_drvdata;

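/* Scheduler core IDs, used as a bitmask (RGA_CORE_MASK covers all cores). */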
enum {
	RGA3_SCHEDULER_CORE0	= 1 << 0,
	RGA3_SCHEDULER_CORE1	= 1 << 1,
	RGA2_SCHEDULER_CORE0	= 1 << 2,
	RGA_CORE_MASK		= 0x7,
	RGA_NONE_CORE		= 0x0,
};

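/*
 * Command mode: in slave mode the CPU programs the registers directly;
 * in master mode the hardware fetches a command buffer from memory
 * (interpretation based on the usual RGA naming).
 */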
enum {
	RGA_CMD_SLAVE	= 1,
	RGA_CMD_MASTER	= 2,
};

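/*
 * Local mirror of the kernel's private struct iommu_dma_cookie
 * (drivers/iommu/dma-iommu.c), so the driver can reach the iova_domain
 * behind an iommu_domain.
 */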
enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

enum rga_scheduler_status {
	RGA_SCHEDULER_IDLE = 0,
	RGA_SCHEDULER_WORKING,
	RGA_SCHEDULER_ABORT,
};

enum rga_job_state {
	RGA_JOB_STATE_PENDING = 0,
	RGA_JOB_STATE_PREPARE,
	RGA_JOB_STATE_RUNNING,
	RGA_JOB_STATE_FINISH,
	RGA_JOB_STATE_DONE,
	RGA_JOB_STATE_INTR_ERR,
	RGA_JOB_STATE_HW_TIMEOUT,
	RGA_JOB_STATE_ABORT,
};

struct rga_iommu_dma_cookie {
	enum iommu_dma_cookie_type type;

	/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
	struct iova_domain iovad;
};

struct rga_iommu_info {
	struct device *dev;
	struct device *default_dev;	/* for the dma-buf API */
	struct iommu_domain *domain;
	struct iommu_group *group;
};

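/* A dma-buf imported by the driver and mapped for device access. */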
struct rga_dma_buffer {
	/* DMABUF information */
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	void *vmap_ptr;

	struct iommu_domain *domain;

	enum dma_data_direction dir;

	dma_addr_t iova;
	unsigned long size;
	/*
	 * The offset within the first page of the sgt. Since iova
	 * allocations must be page-aligned, this offset is tracked
	 * separately.
	 */
	size_t offset;

	/* The scheduler this buffer is mapped through */
	struct rga_scheduler_t *scheduler;
};

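/* Pinned user pages backing a userspace virtual address range. */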
struct rga_virt_addr {
	uint64_t addr;

	struct page **pages;
	int pages_order;
	int page_count;
	unsigned long size;

	/* The offset of the first page of the virtual address */
	size_t offset;

	int result;
};

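/*
 * Unified, refcounted descriptor for an imported buffer: backed by a
 * dma-buf, pinned user pages, or a raw physical address, and looked up
 * by @handle.
 */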
struct rga_internal_buffer {
	/* DMA buffer */
	struct rga_dma_buffer *dma_buffer;

	/* virtual address */
	struct rga_virt_addr *virt_addr;

	/* physical address */
	uint64_t phys_addr;

	/* buffer size */
	unsigned long size;

	struct rga_memory_parm memory_parm;

	struct mm_struct *current_mm;

	/* memory type. */
	uint32_t type;

	uint32_t handle;

	uint32_t mm_flag;

	struct kref refcount;
	struct rga_session *session;
};

struct rga_scheduler_t;

struct rga_session {
	int id;

	pid_t tgid;

	char *pname;
};

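/*
 * Buffers attached to one job; the unions overlay the per-plane
 * (Y/UV/V) view with the single-address view.
 */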
struct rga_job_buffer {
	union {
		struct {
			struct rga_external_buffer *ex_y_addr;
			struct rga_external_buffer *ex_uv_addr;
			struct rga_external_buffer *ex_v_addr;
		};
		struct rga_external_buffer *ex_addr;
	};

	union {
		struct {
			struct rga_internal_buffer *y_addr;
			struct rga_internal_buffer *uv_addr;
			struct rga_internal_buffer *v_addr;
		};
		struct rga_internal_buffer *addr;
	};

	uint32_t *page_table;
	int order;
	int page_count;
};

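/* One hardware operation, queued to and executed on a scheduler core. */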
struct rga_job {
	struct list_head head;

	struct rga_scheduler_t *scheduler;
	struct rga_session *session;

	struct rga_req rga_command_base;
	uint32_t cmd_reg[32 * 8];
	struct rga_full_csc full_csc;
	struct rga_pre_intr_info pre_intr_info;

	struct rga_job_buffer src_buffer;
	struct rga_job_buffer src1_buffer;
	struct rga_job_buffer dst_buffer;
	/* used by rga2 */
	struct rga_job_buffer els_buffer;

	/* for RGA2 virtual address */
	struct mm_struct *mm;

	/* job time stamp */
	ktime_t timestamp;
	/* The time when the job actually starts running on the hardware */
	ktime_t hw_running_time;
	/* Timestamp used only by the hrtimer to calculate hardware load */
	ktime_t hw_recoder_time;
	unsigned int flags;
	int request_id;
	int priority;
	int core;
	int ret;
	pid_t pid;
	bool use_batch_mode;

	struct kref refcount;
	unsigned long state;
	uint32_t intr_status;
	uint32_t hw_status;
	uint32_t cmd_status;
};

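/* Hardware backend hooks, implemented per RGA generation. */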
struct rga_backend_ops {
	int (*get_version)(struct rga_scheduler_t *scheduler);
	int (*set_reg)(struct rga_job *job, struct rga_scheduler_t *scheduler);
	int (*init_reg)(struct rga_job *job);
	void (*soft_reset)(struct rga_scheduler_t *scheduler);
	int (*read_back_reg)(struct rga_job *job, struct rga_scheduler_t *scheduler);
	int (*irq)(struct rga_scheduler_t *scheduler);
	int (*isr_thread)(struct rga_job *job, struct rga_scheduler_t *scheduler);
};

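/* Accumulated hardware busy time, sampled to estimate load. */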
struct rga_timer {
	u32 busy_time;
	u32 busy_time_record;
};

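/* Per-core state: each RGA hardware core gets one scheduler instance. */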
struct rga_scheduler_t {
	struct device *dev;
	void __iomem *rga_base;
	struct rga_iommu_info *iommu_info;

	struct clk *clks[RGA_MAX_BUS_CLK];
	int num_clks;

	enum rga_scheduler_status status;
	int pd_refcount;

	struct rga_job *running_job;
	struct list_head todo_list;
	spinlock_t irq_lock;
	wait_queue_head_t job_done_wq;
	const struct rga_backend_ops *ops;
	const struct rga_hw_data *data;
	int job_count;
	int irq;
	struct rga_version_t version;
	int core;

	struct rga_timer timer;
};

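/*
 * A userspace submission: a batch of tasks imported via RGA_START_CONFIG
 * and tracked through acquire/release fences until all tasks complete.
 */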
struct rga_request {
	struct rga_req *task_list;
	int task_count;
	uint32_t finished_task_count;
	uint32_t failed_task_count;

	bool use_batch_mode;
	bool is_running;
	bool is_done;
	int ret;
	uint32_t sync_mode;

	int32_t acquire_fence_fd;
	int32_t release_fence_fd;
	struct dma_fence *release_fence;
	spinlock_t fence_lock;

	wait_queue_head_t finished_wq;

	int flags;
	uint8_t mpi_config_flags;
	int id;
	struct rga_session *session;

	spinlock_t lock;
	struct kref refcount;

	pid_t pid;

	/*
	 * Mapping virtual addresses to physical addresses requires the
	 * memory-mapping information (mm_struct) of the submitting process.
	 */
	struct mm_struct *current_mm;

	/* TODO: add some common work */
};

struct rga_pending_request_manager {
	struct mutex lock;

	/*
	 * @request_idr:
	 *
	 * Mapping of request IDs to request pointers. Protected by @lock.
	 */
	struct idr request_idr;

	int request_count;
};

struct rga_session_manager {
	struct mutex lock;

	struct idr ctx_id_idr;

	int session_cnt;
};

struct rga_drvdata_t {
	/* lock used by RGA2's MMU */
	struct mutex lock;

	struct rga_scheduler_t *scheduler[RGA_MAX_SCHEDULER];
	int num_of_scheduler;
	/* The scheduler index used by default for memory mapping. */
	int map_scheduler_index;
	struct rga_mmu_base *mmu_base;

	struct delayed_work power_off_work;

	struct rga_mm *mm;

	/* Pending rga_job manager; requests are imported via RGA_START_CONFIG. */
	struct rga_pending_request_manager *pend_request_manager;

	struct rga_session_manager *session_manager;

#ifdef CONFIG_ROCKCHIP_RGA_ASYNC
	struct rga_fence_context *fence_ctx;
#endif

#ifdef CONFIG_ROCKCHIP_RGA_DEBUGGER
	struct rga_debugger *debugger;
#endif
};

struct rga_irqs_data_t {
	const char *name;
	irqreturn_t (*irq_hdl)(int irq, void *ctx);
	irqreturn_t (*irq_thread)(int irq, void *ctx);
};

struct rga_match_data_t {
	const char * const *clks;
	int num_clks;
};

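/*
 * MMIO accessors for a scheduler's register window. Example (register
 * offsets are illustrative only):
 *	rga_write(0x1, 0x100, scheduler);
 *	val = rga_read(0x100, scheduler);
 */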
static inline int rga_read(int offset, struct rga_scheduler_t *scheduler)
{
	return readl(scheduler->rga_base + offset);
}

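/* Write a 32-bit @value to the register at @offset. */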
static inline void rga_write(int value, int offset, struct rga_scheduler_t *scheduler)
{
	writel(value, scheduler->rga_base + offset);
}

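/* Power a scheduler core up/down (clocks and runtime PM). */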
int rga_power_enable(struct rga_scheduler_t *scheduler);
int rga_power_disable(struct rga_scheduler_t *scheduler);

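/* Synchronous job submission path for in-kernel users. */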
int rga_kernel_commit(struct rga_req *cmd);

#endif /* __LINUX_RGA_DRV_H_ */