1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2015-2018 Etnaviv Project
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #ifndef __ETNAVIV_GPU_H__
7*4882a593Smuzhiyun #define __ETNAVIV_GPU_H__
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include "etnaviv_cmdbuf.h"
10*4882a593Smuzhiyun #include "etnaviv_gem.h"
11*4882a593Smuzhiyun #include "etnaviv_mmu.h"
12*4882a593Smuzhiyun #include "etnaviv_drv.h"
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun struct etnaviv_gem_submit;
15*4882a593Smuzhiyun struct etnaviv_vram_mapping;
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun struct etnaviv_chip_identity {
18*4882a593Smuzhiyun u32 model;
19*4882a593Smuzhiyun u32 revision;
20*4882a593Smuzhiyun u32 product_id;
21*4882a593Smuzhiyun u32 customer_id;
22*4882a593Smuzhiyun u32 eco_id;
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun /* Supported feature fields. */
25*4882a593Smuzhiyun u32 features;
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun /* Supported minor feature fields. */
28*4882a593Smuzhiyun u32 minor_features0;
29*4882a593Smuzhiyun u32 minor_features1;
30*4882a593Smuzhiyun u32 minor_features2;
31*4882a593Smuzhiyun u32 minor_features3;
32*4882a593Smuzhiyun u32 minor_features4;
33*4882a593Smuzhiyun u32 minor_features5;
34*4882a593Smuzhiyun u32 minor_features6;
35*4882a593Smuzhiyun u32 minor_features7;
36*4882a593Smuzhiyun u32 minor_features8;
37*4882a593Smuzhiyun u32 minor_features9;
38*4882a593Smuzhiyun u32 minor_features10;
39*4882a593Smuzhiyun u32 minor_features11;
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun /* Number of streams supported. */
42*4882a593Smuzhiyun u32 stream_count;
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun /* Total number of temporary registers per thread. */
45*4882a593Smuzhiyun u32 register_max;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /* Maximum number of threads. */
48*4882a593Smuzhiyun u32 thread_count;
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun /* Number of shader cores. */
51*4882a593Smuzhiyun u32 shader_core_count;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun /* Size of the vertex cache. */
54*4882a593Smuzhiyun u32 vertex_cache_size;
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun /* Number of entries in the vertex output buffer. */
57*4882a593Smuzhiyun u32 vertex_output_buffer_size;
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun /* Number of pixel pipes. */
60*4882a593Smuzhiyun u32 pixel_pipes;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun /* Number of instructions. */
63*4882a593Smuzhiyun u32 instruction_count;
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun /* Number of constants. */
66*4882a593Smuzhiyun u32 num_constants;
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /* Buffer size */
69*4882a593Smuzhiyun u32 buffer_size;
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun /* Number of varyings */
72*4882a593Smuzhiyun u8 varyings_count;
73*4882a593Smuzhiyun };
74*4882a593Smuzhiyun
/*
 * Security handling mode of the GPU core.
 * NOTE(review): exact semantics of each mode are defined by the users of
 * gpu->sec_mode elsewhere in the driver — names suggest no security
 * handling, kernel-driver-managed security, and TrustZone-managed
 * security; confirm against the .c files.
 */
enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};
80*4882a593Smuzhiyun
/*
 * Per-event bookkeeping; one slot per hardware event
 * (gpu->event[] below, ETNA_NR_EVENTS entries).
 */
struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	/* Optional callback invoked for sync-point events; may be NULL. */
	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun struct etnaviv_cmdbuf_suballoc;
89*4882a593Smuzhiyun struct regulator;
90*4882a593Smuzhiyun struct clk;
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun #define ETNA_NR_EVENTS 30
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun struct etnaviv_gpu {
95*4882a593Smuzhiyun struct drm_device *drm;
96*4882a593Smuzhiyun struct thermal_cooling_device *cooling;
97*4882a593Smuzhiyun struct device *dev;
98*4882a593Smuzhiyun struct mutex lock;
99*4882a593Smuzhiyun struct etnaviv_chip_identity identity;
100*4882a593Smuzhiyun enum etnaviv_sec_mode sec_mode;
101*4882a593Smuzhiyun struct workqueue_struct *wq;
102*4882a593Smuzhiyun struct drm_gpu_scheduler sched;
103*4882a593Smuzhiyun bool initialized;
104*4882a593Smuzhiyun bool fe_running;
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun /* 'ring'-buffer: */
107*4882a593Smuzhiyun struct etnaviv_cmdbuf buffer;
108*4882a593Smuzhiyun int exec_state;
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun /* event management: */
111*4882a593Smuzhiyun DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
112*4882a593Smuzhiyun struct etnaviv_event event[ETNA_NR_EVENTS];
113*4882a593Smuzhiyun struct completion event_free;
114*4882a593Smuzhiyun spinlock_t event_spinlock;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun u32 idle_mask;
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun /* Fencing support */
119*4882a593Smuzhiyun struct mutex fence_lock;
120*4882a593Smuzhiyun struct idr fence_idr;
121*4882a593Smuzhiyun u32 next_fence;
122*4882a593Smuzhiyun u32 completed_fence;
123*4882a593Smuzhiyun wait_queue_head_t fence_event;
124*4882a593Smuzhiyun u64 fence_context;
125*4882a593Smuzhiyun spinlock_t fence_spinlock;
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun /* worker for handling 'sync' points: */
128*4882a593Smuzhiyun struct work_struct sync_point_work;
129*4882a593Smuzhiyun int sync_point_event;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun /* hang detection */
132*4882a593Smuzhiyun u32 hangcheck_dma_addr;
133*4882a593Smuzhiyun u32 hangcheck_fence;
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun void __iomem *mmio;
136*4882a593Smuzhiyun int irq;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun struct etnaviv_iommu_context *mmu_context;
139*4882a593Smuzhiyun unsigned int flush_seq;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun /* Power Control: */
142*4882a593Smuzhiyun struct clk *clk_bus;
143*4882a593Smuzhiyun struct clk *clk_reg;
144*4882a593Smuzhiyun struct clk *clk_core;
145*4882a593Smuzhiyun struct clk *clk_shader;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun unsigned int freq_scale;
148*4882a593Smuzhiyun unsigned long base_rate_core;
149*4882a593Smuzhiyun unsigned long base_rate_shader;
150*4882a593Smuzhiyun };
151*4882a593Smuzhiyun
/* Write a 32-bit value to the GPU MMIO register at byte offset @reg. */
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}
156*4882a593Smuzhiyun
/* Read a 32-bit value from the GPU MMIO register at byte offset @reg. */
static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
165*4882a593Smuzhiyun bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
168*4882a593Smuzhiyun int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
169*4882a593Smuzhiyun #endif
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
172*4882a593Smuzhiyun void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
173*4882a593Smuzhiyun int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
174*4882a593Smuzhiyun u32 fence, struct drm_etnaviv_timespec *timeout);
175*4882a593Smuzhiyun int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
176*4882a593Smuzhiyun struct etnaviv_gem_object *etnaviv_obj,
177*4882a593Smuzhiyun struct drm_etnaviv_timespec *timeout);
178*4882a593Smuzhiyun struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
179*4882a593Smuzhiyun int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
180*4882a593Smuzhiyun void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
181*4882a593Smuzhiyun int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
182*4882a593Smuzhiyun void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun extern struct platform_driver etnaviv_gpu_driver;
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun #endif /* __ETNAVIV_GPU_H__ */
187