/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_DRV_H__
#define __ETNAVIV_DRV_H__

#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
#include <drm/gpu_scheduler.h>

struct etnaviv_cmdbuf;
struct etnaviv_gpu;
struct etnaviv_mmu;
struct etnaviv_gem_object;
struct etnaviv_gem_submit;
struct etnaviv_iommu_global;

#define ETNAVIV_SOFTPIN_START_ADDRESS	SZ_4M /* must be >= SUBALLOC_SIZE */

struct etnaviv_file_private {
	struct etnaviv_iommu_context	*mmu;
	struct drm_sched_entity		sched_entity[ETNA_MAX_PIPES];
};

struct etnaviv_drm_private {
	int num_gpus;
	struct device_dma_parameters dma_parms;
	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
	gfp_t shm_gfp_mask;

	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
	struct etnaviv_iommu_global *mmu_global;

	/* list of GEM objects: */
	struct mutex gem_lock;
	struct list_head gem_list;
};

int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file);

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
void *etnaviv_gem_vmap(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle);
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	struct etnaviv_iommu_context *mmu,
	unsigned int event, struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
	u32 *stream, unsigned int size,
	struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);

#ifdef CONFIG_DEBUG_FS
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m);
#endif

#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)

/*
 * Return the storage size of a structure with a variable length array.
 * The array is nelem elements of elem_size, where the base structure
 * is defined by base.  If the size overflows size_t, return zero.
 */
static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
{
	if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
		return 0;
	return base + nelem * elem_size;
}
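
/*
 * Illustrative sketch only (not part of the original header): a hypothetical
 * caller sizing an allocation that ends in a variable-length array.  The
 * struct and field names below are made up for the example; the point is
 * that size_vstruct() returns 0 on size_t overflow, so the caller can bail
 * out instead of under-allocating.
 *
 *	struct example_submit {
 *		unsigned int nr_bos;
 *		struct example_bo bos[];	// variable-length tail
 *	};
 *
 *	sz = size_vstruct(nr_bos, sizeof(struct example_bo),
 *			  sizeof(struct example_submit));
 *	if (!sz)
 *		return -EINVAL;	// nr_bos would overflow size_t
 *	submit = kzalloc(sz, GFP_KERNEL);
 */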

/*
 * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
 * We need to calculate the timeout in terms of number of jiffies
 * between the specified timeout and the current CLOCK_MONOTONIC time.
 */
static inline unsigned long etnaviv_timeout_to_jiffies(
	const struct drm_etnaviv_timespec *timeout)
{
	struct timespec64 ts, to = {
		.tv_sec = timeout->tv_sec,
		.tv_nsec = timeout->tv_nsec,
	};

	ktime_get_ts64(&ts);

	/* timeouts before "now" have already expired */
	if (timespec64_compare(&to, &ts) <= 0)
		return 0;

	ts = timespec64_sub(to, ts);

	return timespec64_to_jiffies(&ts);
}
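
/*
 * Illustrative sketch only (not part of the original header): a hypothetical
 * wait built on etnaviv_timeout_to_jiffies().  The absolute CLOCK_MONOTONIC
 * deadline supplied by userspace is converted to a relative jiffies count,
 * which is what the kernel wait_event_*_timeout() helpers expect.  "wq" and
 * "done_condition" are placeholders, not real driver symbols.
 *
 *	unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
 *	long ret;
 *
 *	ret = wait_event_interruptible_timeout(wq, done_condition, remaining);
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// deadline already passed or expired
 *	if (ret < 0)
 *		return ret;		// interrupted by a signal
 */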

#endif /* __ETNAVIV_DRV_H__ */