xref: /OK3568_Linux_fs/kernel/include/drm/drm_legacy.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__
/*
 * Legacy driver interfaces for the Direct Rendering Manager
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm.h>
#include <drm/drm_auth.h>
#include <drm/drm_hashtab.h>

struct drm_device;
struct drm_driver;
struct file;
struct pci_driver;

/*
 * Legacy Support for paleontologic DRM drivers
 *
 * If you add a new driver and it uses any of these functions or structures,
 * you're doing it terribly wrong.
 */

/**
 * DMA buffer.
 */
struct drm_buf {
	int idx;		       /**< Index into master buflist */
	int total;		       /**< Buffer size */
	int order;		       /**< log-base-2(total) */
	int used;		       /**< Amount of buffer in use (for DMA) */
	unsigned long offset;	       /**< Byte offset (used internally) */
	void *address;		       /**< Address of buffer */
	unsigned long bus_address;     /**< Bus address of buffer */
	struct drm_buf *next;	       /**< Kernel-only: used for free list */
	__volatile__ int waiting;      /**< On kernel DMA queue */
	__volatile__ int pending;      /**< On hardware DMA queue */
	struct drm_file *file_priv;    /**< Private of holding file descr */
	int context;		       /**< Kernel queue for this buffer */
	int while_locked;	       /**< Dispatch this buffer while locked */
	enum {
		DRM_LIST_NONE = 0,
		DRM_LIST_FREE = 1,
		DRM_LIST_WAIT = 2,
		DRM_LIST_PEND = 3,
		DRM_LIST_PRIO = 4,
		DRM_LIST_RECLAIM = 5
	} list;			       /**< Which list we're on */

	int dev_priv_size;		 /**< Size of buffer private storage */
	void *dev_private;		 /**< Per-buffer private storage */
};

typedef struct drm_dma_handle {
	dma_addr_t busaddr;
	void *vaddr;
	size_t size;
} drm_dma_handle_t;

/**
 * Buffer entry.  There is one of these for each buffer size order.
 */
struct drm_buf_entry {
	int buf_size;			/**< size */
	int buf_count;			/**< number of buffers */
	struct drm_buf *buflist;	/**< buffer list */
	int seg_count;
	int page_order;
	struct drm_dma_handle **seglist;

	int low_mark;			/**< Low water mark */
	int high_mark;			/**< High water mark */
};

/**
 * DMA data.
 */
struct drm_device_dma {

	struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
	int buf_count;			/**< total number of buffers */
	struct drm_buf **buflist;	/**< Vector of pointers into drm_device_dma::bufs */
	int seg_count;
	int page_count;			/**< number of pages */
	unsigned long *pagelist;	/**< page list */
	unsigned long byte_count;
	enum {
		_DRM_DMA_USE_AGP = 0x01,
		_DRM_DMA_USE_SG = 0x02,
		_DRM_DMA_USE_FB = 0x04,
		_DRM_DMA_USE_PCI_RO = 0x08
	} flags;

};
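
/*
 * Illustrative sketch, not part of the original header: legacy drivers
 * reach these buffers through the drm_device_dma hanging off the device
 * (dev->dma), roughly as below.  The index variable and ownership check
 * are hypothetical but follow the usual legacy-driver pattern.
 *
 *	struct drm_device_dma *dma = dev->dma;
 *	struct drm_buf *buf;
 *
 *	if (idx < 0 || idx >= dma->buf_count)
 *		return -EINVAL;
 *	buf = dma->buflist[idx];
 *	if (buf->file_priv != file_priv)
 *		return -EINVAL;
 *
 * buf->address and buf->bus_address are then used to queue the actual DMA.
 */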

/**
 * Scatter-gather memory.
 */
struct drm_sg_mem {
	unsigned long handle;
	void *virtual;
	int pages;
	struct page **pagelist;
	dma_addr_t *busaddr;
};

/**
 * Kernel side of a mapping
 */
struct drm_local_map {
	dma_addr_t offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
};

typedef struct drm_local_map drm_local_map_t;

/**
 * Mappings list
 */
struct drm_map_list {
	struct list_head head;		/**< list head */
	struct drm_hash_item hash;
	struct drm_local_map *map;	/**< mapping */
	uint64_t user_token;
	struct drm_master *master;
};

int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_p);
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
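
/*
 * Illustrative sketch, not part of the original header: a legacy driver
 * would typically set up its register mapping at load time roughly like
 * this.  "pdev" (the device's struct pci_dev), the BAR index and the
 * dev_priv field are hypothetical.
 *
 *	struct drm_local_map *mmio;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 2),
 *				pci_resource_len(pdev, 2),
 *				_DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
 *	if (ret)
 *		return ret;
 *	dev_priv->mmio = mmio;
 *
 * The mapping is removed again with drm_legacy_rmmap(), or with
 * drm_legacy_rmmap_locked() when the caller already holds struct_mutex.
 */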

int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);

/**
 * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
 *
 * \param dev DRM device.
 * \param _file_priv DRM file private of the caller.
 */
#define LOCK_TEST_WITH_RETURN( dev, _file_priv )				\
do {										\
	if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||	\
	    _file_priv->master->lock.file_priv != _file_priv)	{		\
		DRM_ERROR( "%s called without lock held, held  %d owner %p %p\n",\
			   __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
			   _file_priv->master->lock.file_priv, _file_priv);	\
		return -EINVAL;							\
	}									\
} while (0)
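
/*
 * Illustrative sketch, not part of the original header: legacy ioctl
 * handlers place this macro at the top so they return -EINVAL unless the
 * caller actually holds the hardware lock.  foo_dma_ioctl is hypothetical.
 *
 *	static int foo_dma_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file_priv)
 *	{
 *		LOCK_TEST_WITH_RETURN(dev, file_priv);
 *
 *		... dispatch DMA buffers ...
 *		return 0;
 *	}
 */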

void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);

/* drm_pci.c */

#ifdef CONFIG_PCI

struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
				     size_t align);
void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah);

int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
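
/*
 * Illustrative sketch, not part of the original header: a legacy PCI
 * driver registers itself from module_init() via drm_legacy_pci_init()
 * and tears down via drm_legacy_pci_exit().  foo_driver and
 * foo_pci_driver are hypothetical.
 *
 *	static int __init foo_init(void)
 *	{
 *		return drm_legacy_pci_init(&foo_driver, &foo_pci_driver);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		drm_legacy_pci_exit(&foo_driver, &foo_pci_driver);
 *	}
 *
 * drm_pci_alloc()/drm_pci_free() hand back and release a drm_dma_handle_t
 * whose vaddr/busaddr describe a single physically contiguous,
 * DMA-coherent buffer (e.g. for a legacy ring buffer or its read pointer).
 */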

#else

static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
						   size_t size, size_t align)
{
	return NULL;
}

static inline void drm_pci_free(struct drm_device *dev,
				struct drm_dma_handle *dmah)
{
}

static inline int drm_legacy_pci_init(struct drm_driver *driver,
				      struct pci_driver *pdriver)
{
	return -EINVAL;
}

static inline void drm_legacy_pci_exit(struct drm_driver *driver,
				       struct pci_driver *pdriver)
{
}

#endif

/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
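
/*
 * Illustrative sketch, not part of the original header: these helpers
 * fill in and release map->handle for a drm_local_map obtained through
 * drm_legacy_addmap().  The dev_priv->mmio field is hypothetical.
 *
 *	drm_legacy_ioremap(dev_priv->mmio, dev);
 *	if (!dev_priv->mmio->handle)
 *		return -ENOMEM;
 *
 *	... register access through dev_priv->mmio->handle ...
 *
 *	drm_legacy_ioremapfree(dev_priv->mmio, dev);
 */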

#endif /* __DRM_DRM_LEGACY_H__ */