xref: /OK3568_Linux_fs/kernel/include/drm/drm_device.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
#ifndef _DRM_DEVICE_H_
#define _DRM_DEVICE_H_

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/idr.h>

#include <drm/drm_hashtab.h>
#include <drm/drm_mode_config.h>

struct drm_driver;
struct drm_minor;
struct drm_master;
struct drm_device_dma;
struct drm_vblank_crtc;
struct drm_sg_mem;
struct drm_local_map;
struct drm_vma_offset_manager;
struct drm_vram_mm;
struct drm_fb_helper;

struct inode;

struct pci_dev;
struct pci_controller;

/**
 * enum switch_power_state - power state of drm device
 */
enum switch_power_state {
	/** @DRM_SWITCH_POWER_ON: Power state is ON */
	DRM_SWITCH_POWER_ON = 0,

	/** @DRM_SWITCH_POWER_OFF: Power state is OFF */
	DRM_SWITCH_POWER_OFF = 1,

	/** @DRM_SWITCH_POWER_CHANGING: Power state is changing */
	DRM_SWITCH_POWER_CHANGING = 2,

	/** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */
	DRM_SWITCH_POWER_DYNAMIC_OFF = 3,
};

/**
 * struct drm_device - DRM device structure
 *
 * This structure represents a complete card that
 * may contain multiple heads.
 */
struct drm_device {
	/**
	 * @legacy_dev_list:
	 *
	 * List of devices per driver for stealth attach cleanup
	 */
	struct list_head legacy_dev_list;

	/** @if_version: Highest interface version set */
	int if_version;

	/** @ref: Object ref-count */
	struct kref ref;

	/** @dev: Device structure of bus-device */
	struct device *dev;

	/**
	 * @managed:
	 *
	 * Managed resources linked to the lifetime of this &drm_device as
	 * tracked by @ref.
	 */
	struct {
		/** @managed.resources: managed resources list */
		struct list_head resources;
		/** @managed.final_kfree: pointer for final kfree() call */
		void *final_kfree;
		/** @managed.lock: protects @managed.resources */
		spinlock_t lock;
	} managed;

	/** @driver: DRM driver managing the device */
	struct drm_driver *driver;

	/**
	 * @dev_private:
	 *
	 * DRM driver private data. This is deprecated and should be left set to
	 * NULL.
	 *
	 * Instead of using this pointer it is recommended that drivers use
	 * devm_drm_dev_alloc() and embed struct &drm_device in their larger
	 * per-device structure.
	 */
	void *dev_private;

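	/*
	 * Illustrative sketch, not part of this header: instead of using
	 * @dev_private, a driver is expected to embed &struct drm_device in
	 * its own device structure and allocate it with devm_drm_dev_alloc().
	 * The names foo_device and foo_drm_driver below are hypothetical.
	 *
	 *	struct foo_device {
	 *		struct drm_device drm;
	 *		void __iomem *regs;
	 *	};
	 *
	 *	struct foo_device *foo;
	 *
	 *	foo = devm_drm_dev_alloc(parent, &foo_drm_driver,
	 *				 struct foo_device, drm);
	 *	if (IS_ERR(foo))
	 *		return PTR_ERR(foo);
	 */
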
	/** @primary: Primary node */
	struct drm_minor *primary;

	/** @render: Render node */
	struct drm_minor *render;

	/**
	 * @registered:
	 *
	 * Internally used by drm_dev_register() and drm_connector_register().
	 */
	bool registered;

	/**
	 * @master:
	 *
	 * Currently active master for this device.
	 * Protected by &master_mutex
	 */
	struct drm_master *master;

	/**
	 * @driver_features: per-device driver features
	 *
	 * Drivers can clear specific flags here to disallow
	 * certain features on a per-device basis while still
	 * sharing a single &struct drm_driver instance across
	 * all devices.
	 */
	u32 driver_features;

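	/*
	 * Illustrative sketch, not part of this header: a driver sharing one
	 * &struct drm_driver across several devices could mask out a feature
	 * on hardware that cannot support it. The has_atomic_hw flag below is
	 * hypothetical.
	 *
	 *	if (!foo->has_atomic_hw)
	 *		drm->driver_features &= ~DRIVER_ATOMIC;
	 */
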
	/**
	 * @unplugged:
	 *
	 * Flag to tell if the device has been unplugged.
	 * See drm_dev_enter() and drm_dev_is_unplugged().
	 */
	bool unplugged;

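	/*
	 * Illustrative sketch, not part of this header: hardware access paths
	 * typically bracket themselves with drm_dev_enter()/drm_dev_exit() so
	 * they back off once the device has been unplugged:
	 *
	 *	int idx;
	 *
	 *	if (!drm_dev_enter(drm, &idx))
	 *		return -ENODEV;
	 *	... access the hardware ...
	 *	drm_dev_exit(idx);
	 */
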
	/** @anon_inode: inode for private address-space */
	struct inode *anon_inode;

	/** @unique: Unique name of the device */
	char *unique;

	/**
	 * @struct_mutex:
	 *
	 * Lock for others (not &drm_minor.master and &drm_file.is_master)
	 *
	 * WARNING:
	 * Only drivers annotated with DRIVER_LEGACY should be using this.
	 */
	struct mutex struct_mutex;

	/**
	 * @master_mutex:
	 *
	 * Lock for &drm_minor.master and &drm_file.is_master
	 */
	struct mutex master_mutex;

	/**
	 * @open_count:
	 *
	 * Usage counter for outstanding files open,
	 * protected by drm_global_mutex
	 */
	atomic_t open_count;

	/** @filelist_mutex: Protects @filelist. */
	struct mutex filelist_mutex;
	/**
	 * @filelist:
	 *
	 * List of userspace clients, linked through &drm_file.lhead.
	 */
	struct list_head filelist;

	/**
	 * @filelist_internal:
	 *
	 * List of open DRM files for in-kernel clients.
	 * Protected by &filelist_mutex.
	 */
	struct list_head filelist_internal;

	/**
	 * @clientlist_mutex:
	 *
	 * Protects &clientlist access.
	 */
	struct mutex clientlist_mutex;

	/**
	 * @clientlist:
	 *
	 * List of in-kernel clients. Protected by &clientlist_mutex.
	 */
	struct list_head clientlist;

	/**
	 * @irq_enabled:
	 *
	 * Indicates that interrupt handling is enabled, specifically vblank
	 * handling. Drivers which don't use drm_irq_install() need to set this
	 * to true manually.
	 */
	bool irq_enabled;

	/**
	 * @irq: Used by the drm_irq_install() and drm_irq_uninstall() helpers.
	 */
	int irq;

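	/*
	 * Illustrative sketch, not part of this header: drivers relying on the
	 * legacy IRQ helpers usually wire this up from their probe path, e.g.
	 * for a PCI device (pdev is hypothetical here):
	 *
	 *	ret = drm_irq_install(drm, pdev->irq);
	 *	if (ret)
	 *		return ret;
	 */
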
	/**
	 * @vblank_disable_immediate:
	 *
	 * If true, vblank interrupt will be disabled immediately when the
	 * refcount drops to zero, as opposed to via the vblank disable
	 * timer.
	 *
	 * This can be set to true if the hardware has a working vblank counter
	 * with high-precision timestamping (otherwise there are races) and the
	 * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
	 * appropriately. See also @max_vblank_count and
	 * &drm_crtc_funcs.get_vblank_counter.
	 */
	bool vblank_disable_immediate;

	/**
	 * @vblank:
	 *
	 * Array of vblank tracking structures, one per &struct drm_crtc. For
	 * historical reasons (vblank support predates kernel modesetting) this
	 * is free-standing and not part of &struct drm_crtc itself. It must be
	 * initialized explicitly by calling drm_vblank_init().
	 */
	struct drm_vblank_crtc *vblank;

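	/*
	 * Illustrative sketch, not part of this header: drivers with vblank
	 * support typically allocate this array during load, once the number
	 * of CRTCs is known:
	 *
	 *	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	 *	if (ret)
	 *		return ret;
	 */
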
	/**
	 * @vblank_time_lock:
	 *
	 * Protects vblank count and time updates during vblank enable/disable
	 */
	spinlock_t vblank_time_lock;
	/**
	 * @vbl_lock: Top-level vblank references lock, wraps the low-level
	 * @vblank_time_lock.
	 */
	spinlock_t vbl_lock;

	/**
	 * @max_vblank_count:
	 *
	 * Maximum value of the vblank registers. This value +1 will result in a
	 * wrap-around of the vblank register. It is used by the vblank core to
	 * handle wrap-arounds.
	 *
	 * If set to zero the vblank core will try to guess the elapsed vblanks
	 * between times when the vblank interrupt is disabled through
	 * high-precision timestamps. That approach suffers from small races
	 * and imprecision over longer time periods, hence exposing a hardware
	 * vblank counter is always recommended.
	 *
	 * This is the statically configured device wide maximum. The driver
	 * can instead choose to use a runtime configurable per-crtc value
	 * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
	 * must be left at zero. See drm_crtc_set_max_vblank_count() on how
	 * to use the per-crtc value.
	 *
	 * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
	 */
	u32 max_vblank_count;

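	/*
	 * Illustrative sketch, not part of this header: a device with, say, a
	 * 24-bit hardware frame counter would set the device-wide maximum from
	 * its init code, while a per-CRTC limit would instead be set through
	 * drm_crtc_set_max_vblank_count():
	 *
	 *	drm->max_vblank_count = 0xffffff;
	 */
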
	/** @vblank_event_list: List of vblank events */
	struct list_head vblank_event_list;

	/**
	 * @event_lock:
	 *
	 * Protects @vblank_event_list and event delivery in
	 * general. See drm_send_event() and drm_send_event_locked().
	 */
	spinlock_t event_lock;

	/** @agp: AGP data */
	struct drm_agp_head *agp;

	/** @pdev: PCI device structure */
	struct pci_dev *pdev;

#ifdef __alpha__
	/** @hose: PCI hose, only used on ALPHA platforms. */
	struct pci_controller *hose;
#endif
	/** @num_crtcs: Number of CRTCs on this device */
	unsigned int num_crtcs;

	/** @mode_config: Current mode config */
	struct drm_mode_config mode_config;

	/** @object_name_lock: GEM information */
	struct mutex object_name_lock;

	/** @object_name_idr: GEM information */
	struct idr object_name_idr;

	/** @vma_offset_manager: GEM information */
	struct drm_vma_offset_manager *vma_offset_manager;

	/** @vram_mm: VRAM MM memory manager */
	struct drm_vram_mm *vram_mm;

	/**
	 * @switch_power_state:
	 *
	 * Power state of the client.
	 * Used by drivers supporting the switcheroo driver.
	 * The state is maintained in the
	 * &vga_switcheroo_client_ops.set_gpu_state callback
	 */
	enum switch_power_state switch_power_state;

	/**
	 * @fb_helper:
	 *
	 * Pointer to the fbdev emulation structure.
	 * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini().
	 */
	struct drm_fb_helper *fb_helper;

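	/*
	 * Illustrative sketch, not part of this header: drivers normally do
	 * not touch this pointer directly; generic fbdev emulation is
	 * requested after drm_dev_register(), e.g. with a preferred depth of
	 * 32 bits per pixel:
	 *
	 *	drm_fbdev_generic_setup(drm, 32);
	 */
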
	/* Everything below here is for legacy drivers, never use! */
	/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
	/* Context handle management - linked list of context handles */
	struct list_head ctxlist;

	/* Context handle management - mutex for &ctxlist */
	struct mutex ctxlist_mutex;

	/* Context handle management */
	struct idr ctx_idr;

	/* Memory management - linked list of regions */
	struct list_head maplist;

	/* Memory management - user token hash table for maps */
	struct drm_open_hash map_hash;

	/* Context handle management - list of vmas (for debugging) */
	struct list_head vmalist;

	/* Optional pointer for DMA support */
	struct drm_device_dma *dma;

	/* Context swapping flag */
	__volatile__ long context_flag;

	/* Last current context */
	int last_context;

	/* Lock for &buf_use and a few other things. */
	spinlock_t buf_lock;

	/* Usage counter for buffers in use -- cannot alloc */
	int buf_use;

	/* Buffer allocation in progress */
	atomic_t buf_alloc;

	/* Signal handling data for the legacy hardware lock */
	struct {
		int context;
		struct drm_hw_lock *lock;
	} sigdata;

	/* Mapping of the AGP DMA buffers and its user-space token */
	struct drm_local_map *agp_buffer_map;
	unsigned int agp_buffer_token;

	/* Scatter gather memory */
	struct drm_sg_mem *sg;
#endif
};

#endif