/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * Copyright 2016 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_DRV_H_
#define _DRM_DRV_H_

#include <linux/list.h>
#include <linux/irqreturn.h>
#include <linux/uuid.h>

#include <drm/drm_device.h>

struct drm_file;
struct drm_gem_object;
struct drm_master;
struct drm_minor;
struct dma_buf_attachment;
struct drm_display_mode;
struct drm_mode_create_dumb;
struct drm_printer;

/**
 * enum drm_driver_feature - feature flags
 *
 * See &drm_driver.driver_features, &drm_device.driver_features and
 * drm_core_check_feature().
 */
enum drm_driver_feature {
	/**
	 * @DRIVER_GEM:
	 *
	 * Driver uses the GEM memory manager. This should be set for all
	 * modern drivers.
	 */
	DRIVER_GEM = BIT(0),
	/**
	 * @DRIVER_MODESET:
	 *
	 * Driver supports mode setting interfaces (KMS).
	 */
	DRIVER_MODESET = BIT(1),
	/**
	 * @DRIVER_RENDER:
	 *
	 * Driver supports dedicated render nodes. See also the :ref:`section on
	 * render nodes <drm_render_node>` for details.
	 */
	DRIVER_RENDER = BIT(3),
	/**
	 * @DRIVER_ATOMIC:
	 *
	 * Driver supports the full atomic modesetting userspace API. Drivers
	 * which only use atomic internally, but do not support the full
	 * userspace API (e.g. not all properties converted to atomic, or
	 * multi-plane updates are not guaranteed to be tear-free) should not
	 * set this flag.
	 */
	DRIVER_ATOMIC = BIT(4),
	/**
	 * @DRIVER_SYNCOBJ:
	 *
	 * Driver supports &drm_syncobj for explicit synchronization of command
	 * submission.
	 */
	DRIVER_SYNCOBJ = BIT(5),
	/**
	 * @DRIVER_SYNCOBJ_TIMELINE:
	 *
	 * Driver supports the timeline flavor of &drm_syncobj for explicit
	 * synchronization of command submission.
	 */
	DRIVER_SYNCOBJ_TIMELINE = BIT(6),

	/* IMPORTANT: Below are all the legacy flags, add new ones above. */

	/**
	 * @DRIVER_USE_AGP:
	 *
	 * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage
	 * AGP resources. New drivers don't need this.
	 */
	DRIVER_USE_AGP = BIT(25),
	/**
	 * @DRIVER_LEGACY:
	 *
	 * Denote a legacy driver using shadow attach. Do not use.
	 */
	DRIVER_LEGACY = BIT(26),
	/**
	 * @DRIVER_PCI_DMA:
	 *
	 * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace
	 * will be enabled. Only for legacy drivers. Do not use.
	 */
	DRIVER_PCI_DMA = BIT(27),
	/**
	 * @DRIVER_SG:
	 *
	 * Driver can perform scatter/gather DMA, allocation and mapping of
	 * scatter/gather buffers will be enabled. Only for legacy drivers. Do
	 * not use.
	 */
	DRIVER_SG = BIT(28),

	/**
	 * @DRIVER_HAVE_DMA:
	 *
	 * Driver supports DMA, the userspace DMA API will be supported. Only
	 * for legacy drivers. Do not use.
	 */
	DRIVER_HAVE_DMA = BIT(29),
	/**
	 * @DRIVER_HAVE_IRQ:
	 *
	 * Legacy irq support. Only for legacy drivers. Do not use.
	 *
	 * New drivers can either use the drm_irq_install() and
	 * drm_irq_uninstall() helper functions, or roll their own irq support
	 * code by calling request_irq() directly.
	 */
	DRIVER_HAVE_IRQ = BIT(30),
	/**
	 * @DRIVER_KMS_LEGACY_CONTEXT:
	 *
	 * Used only by nouveau for backwards compatibility with existing
	 * userspace. Do not use.
	 */
	DRIVER_KMS_LEGACY_CONTEXT = BIT(31),
};

/**
 * struct drm_driver - DRM driver structure
 *
 * This structure represents the common code for a family of cards. There will
 * be one &struct drm_device for each card present in this family. It contains
 * lots of vfunc entries, and a pile of those probably should be moved to more
 * appropriate places like &drm_mode_config_funcs or into a new operations
 * structure for GEM drivers.
 */
struct drm_driver {
	/**
	 * @load:
	 *
	 * Backward-compatible driver callback to complete initialization steps
	 * after the driver is registered. For this reason, it may suffer from
	 * race conditions and its use is deprecated for new drivers. It is
	 * therefore only supported for existing drivers not yet converted to
	 * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for
	 * a proper and race-free way to set up a &struct drm_device.
	 *
	 * This is deprecated, do not use!
	 *
	 * Returns:
	 *
	 * Zero on success, non-zero value on failure.
	 */
	int (*load) (struct drm_device *, unsigned long flags);

	/**
	 * @open:
	 *
	 * Driver callback when a new &struct drm_file is opened. Useful for
	 * setting up driver-private data structures like buffer allocators,
	 * execution contexts or similar things. Such driver-private resources
	 * must be released again in @postclose.
	 *
	 * Since the display/modeset side of DRM can only be owned by exactly
	 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
	 * there should never be a need to set up any modeset related resources
	 * in this callback. Doing so would be a driver design bug.
	 *
	 * Returns:
	 *
	 * 0 on success, a negative error code on failure, which will be
	 * promoted to userspace as the result of the open() system call.
	 */
	int (*open) (struct drm_device *, struct drm_file *);

	/**
	 * @postclose:
	 *
	 * One of the driver callbacks when a new &struct drm_file is closed.
	 * Useful for tearing down driver-private data structures allocated in
	 * @open like buffer allocators, execution contexts or similar things.
	 *
	 * Since the display/modeset side of DRM can only be owned by exactly
	 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
	 * there should never be a need to tear down any modeset related
	 * resources in this callback. Doing so would be a driver design bug.
	 */
	void (*postclose) (struct drm_device *, struct drm_file *);

	/**
	 * @lastclose:
	 *
	 * Called when the last &struct drm_file has been closed and there's
	 * currently no userspace client for the &struct drm_device.
	 *
	 * Modern drivers should only use this to force-restore the fbdev
	 * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked().
	 * Anything else would indicate there's something seriously wrong.
	 * Modern drivers can also use this to execute delayed power switching
	 * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
	 * infrastructure.
	 *
	 * This is called after the @postclose hook has been called.
	 *
	 * NOTE:
	 *
	 * All legacy drivers use this callback to de-initialize the hardware.
	 * This is purely because of the shadow-attach model, where the DRM
	 * kernel driver does not really own the hardware. Instead ownership is
	 * handled with the help of userspace through an inherently racy dance
	 * to set/unset the VT into raw mode.
	 *
	 * Legacy drivers initialize the hardware in the @firstopen callback,
	 * which isn't even called for modern drivers.
	 */
	void (*lastclose) (struct drm_device *);

	/**
	 * @unload:
	 *
	 * Reverse the effects of the driver load callback. Ideally,
	 * the clean up performed by the driver should happen in the
	 * reverse order of the initialization. Similarly to the load
	 * hook, this handler is deprecated and its usage should be
	 * dropped in favor of an open-coded teardown function at the
	 * driver layer. See drm_dev_unregister() and drm_dev_put()
	 * for the proper way to remove a &struct drm_device.
	 *
	 * The unload() hook is called right after unregistering
	 * the device.
	 */
	void (*unload) (struct drm_device *);

	/**
	 * @release:
	 *
	 * Optional callback for destroying device data after the final
	 * reference is released, i.e. the device is being destroyed.
	 *
	 * This is deprecated, clean up all memory allocations associated with a
	 * &drm_device using drmm_add_action(), drmm_kmalloc() and related
	 * managed resources functions.
	 */
	void (*release) (struct drm_device *);

	/**
	 * @irq_handler:
	 *
	 * Interrupt handler called when using drm_irq_install(). Not used by
	 * drivers which implement their own interrupt handling.
	 */
	irqreturn_t (*irq_handler) (int irq, void *arg);

	/**
	 * @irq_preinstall:
	 *
	 * Optional callback used by drm_irq_install() which is called before
	 * the interrupt handler is registered. This should be used to clear out
	 * any pending interrupts (e.g. from firmware-based drivers) and reset
	 * the interrupt handling registers.
	 */
	void (*irq_preinstall) (struct drm_device *dev);

	/**
	 * @irq_postinstall:
	 *
	 * Optional callback used by drm_irq_install() which is called after
	 * the interrupt handler is registered. This should be used to enable
	 * interrupt generation in the hardware.
	 */
	int (*irq_postinstall) (struct drm_device *dev);

	/**
	 * @irq_uninstall:
	 *
	 * Optional callback used by drm_irq_uninstall() which is called before
	 * the interrupt handler is unregistered. This should be used to disable
	 * interrupt generation in the hardware.
	 */
	void (*irq_uninstall) (struct drm_device *dev);

	/**
	 * @master_set:
	 *
	 * Called whenever the minor master is set. Only used by vmwgfx.
	 */
	void (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
			   bool from_open);
	/**
	 * @master_drop:
	 *
	 * Called whenever the minor master is dropped. Only used by vmwgfx.
	 */
	void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);

	/**
	 * @debugfs_init:
	 *
	 * Allows drivers to create driver-specific debugfs files.
	 */
	void (*debugfs_init)(struct drm_minor *minor);

	/**
	 * @gem_free_object_unlocked: destructor for drm_gem_objects
	 *
	 * This is deprecated and should not be used by new drivers. Use
	 * &drm_gem_object_funcs.free instead.
	 */
	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);

	/**
	 * @gem_open_object:
	 *
	 * This callback is deprecated in favour of &drm_gem_object_funcs.open.
	 *
	 * Driver hook called upon gem handle creation.
	 */
	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);

	/**
	 * @gem_close_object:
	 *
	 * This callback is deprecated in favour of &drm_gem_object_funcs.close.
	 *
	 * Driver hook called upon gem handle release.
	 */
	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);

	/**
	 * @gem_create_object: constructor for gem objects
	 *
	 * Hook for allocating the GEM object struct, for use by the CMA and
	 * SHMEM GEM helpers.
	 */
	struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
						    size_t size);
	/**
	 * @prime_handle_to_fd:
	 *
	 * Main PRIME export function. Should be implemented with
	 * drm_gem_prime_handle_to_fd() for GEM based drivers.
	 *
	 * For an in-depth discussion see :ref:`PRIME buffer sharing
	 * documentation <prime_buffer_sharing>`.
	 */
	int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags, int *prime_fd);
	/**
	 * @prime_fd_to_handle:
	 *
	 * Main PRIME import function. Should be implemented with
	 * drm_gem_prime_fd_to_handle() for GEM based drivers.
	 *
	 * For an in-depth discussion see :ref:`PRIME buffer sharing
	 * documentation <prime_buffer_sharing>`.
	 */
	int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
				  int prime_fd, uint32_t *handle);
	/**
	 * @gem_prime_export:
	 *
	 * Export hook for GEM drivers. Deprecated in favour of
	 * &drm_gem_object_funcs.export.
	 */
	struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
					     int flags);
	/**
	 * @gem_prime_import:
	 *
	 * Import hook for GEM drivers.
	 *
	 * This defaults to drm_gem_prime_import() if not set.
	 */
	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
						    struct dma_buf *dma_buf);

	/**
	 * @gem_prime_pin:
	 *
	 * Deprecated hook in favour of &drm_gem_object_funcs.pin.
	 */
	int (*gem_prime_pin)(struct drm_gem_object *obj);

	/**
	 * @gem_prime_unpin:
	 *
	 * Deprecated hook in favour of &drm_gem_object_funcs.unpin.
	 */
	void (*gem_prime_unpin)(struct drm_gem_object *obj);

	/**
	 * @gem_prime_get_sg_table:
	 *
	 * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table.
	 */
	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);

	/**
	 * @gem_prime_import_sg_table:
	 *
	 * Optional hook used by the PRIME helper functions
	 * drm_gem_prime_import() and drm_gem_prime_import_dev().
	 */
	struct drm_gem_object *(*gem_prime_import_sg_table)(
				struct drm_device *dev,
				struct dma_buf_attachment *attach,
				struct sg_table *sgt);
	/**
	 * @gem_prime_vmap:
	 *
	 * Deprecated vmap hook for GEM drivers. Please use
	 * &drm_gem_object_funcs.vmap instead.
	 */
	void *(*gem_prime_vmap)(struct drm_gem_object *obj);

	/**
	 * @gem_prime_vunmap:
	 *
	 * Deprecated vunmap hook for GEM drivers. Please use
	 * &drm_gem_object_funcs.vunmap instead.
	 */
	void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);

	/**
	 * @gem_prime_mmap:
	 *
	 * mmap hook for GEM drivers, used to implement dma-buf mmap in the
	 * PRIME helpers.
	 *
	 * FIXME: There's way too much duplication going on here, and also moved
	 * to &drm_gem_object_funcs.
	 */
	int (*gem_prime_mmap)(struct drm_gem_object *obj,
			      struct vm_area_struct *vma);

	/**
	 * @gem_prime_get_uuid:
	 *
	 * get_uuid hook for GEM drivers. Retrieves the virtio uuid of the
	 * given GEM buffer.
	 */
	int (*gem_prime_get_uuid)(struct drm_gem_object *obj,
				  uuid_t *uuid);

	/**
	 * @dumb_create:
	 *
	 * This creates a new dumb buffer in the driver's backing storage
	 * manager (GEM, TTM or something else entirely) and returns the
	 * resulting buffer handle. This handle can then be wrapped up into a
	 * framebuffer modeset object.
	 *
	 * Note that userspace is not allowed to use such objects for render
	 * acceleration - drivers must create their own private ioctls for such
	 * a use case.
	 *
	 * Width, height and depth are specified in the &drm_mode_create_dumb
	 * argument. The callback needs to fill the handle, pitch and size for
	 * the created buffer.
	 *
	 * Called by the user via ioctl.
	 *
	 * Returns:
	 *
	 * Zero on success, negative errno on failure.
	 */
	int (*dumb_create)(struct drm_file *file_priv,
			   struct drm_device *dev,
			   struct drm_mode_create_dumb *args);
	/**
	 * @dumb_map_offset:
	 *
	 * Allocate an offset in the drm device node's address space to be able
	 * to memory map a dumb buffer.
	 *
	 * The default implementation is drm_gem_create_mmap_offset(). GEM based
	 * drivers must not overwrite this.
	 *
	 * Called by the user via ioctl.
	 *
	 * Returns:
	 *
	 * Zero on success, negative errno on failure.
	 */
	int (*dumb_map_offset)(struct drm_file *file_priv,
			       struct drm_device *dev, uint32_t handle,
			       uint64_t *offset);
	/**
	 * @dumb_destroy:
	 *
	 * This destroys the userspace handle for the given dumb backing storage
	 * buffer. Since buffer objects must be reference counted in the kernel
	 * a buffer object won't be immediately freed if a framebuffer modeset
	 * object still uses it.
	 *
	 * Called by the user via ioctl.
	 *
	 * The default implementation is drm_gem_dumb_destroy(). GEM based
	 * drivers must not overwrite this.
	 *
	 * Returns:
	 *
	 * Zero on success, negative errno on failure.
	 */
	int (*dumb_destroy)(struct drm_file *file_priv,
			    struct drm_device *dev,
			    uint32_t handle);

	/**
	 * @gem_vm_ops: Driver private ops for this object
	 *
	 * For GEM drivers this is deprecated in favour of
	 * &drm_gem_object_funcs.vm_ops.
	 */
	const struct vm_operations_struct *gem_vm_ops;

	/** @major: driver major number */
	int major;
	/** @minor: driver minor number */
	int minor;
	/** @patchlevel: driver patch level */
	int patchlevel;
	/** @name: driver name */
	char *name;
	/** @desc: driver description */
	char *desc;
	/** @date: driver date */
	char *date;

	/**
	 * @driver_features:
	 *
	 * Driver features, see &enum drm_driver_feature. Drivers can disable
	 * some features on a per-instance basis using
	 * &drm_device.driver_features.
	 */
	u32 driver_features;

	/**
	 * @ioctls:
	 *
	 * Array of driver-private IOCTL description entries. See the chapter on
	 * :ref:`IOCTL support in the userland interfaces
	 * chapter<drm_driver_ioctl>` for the full details.
	 */
	const struct drm_ioctl_desc *ioctls;
	/** @num_ioctls: Number of entries in @ioctls. */
	int num_ioctls;

	/**
	 * @fops:
	 *
	 * File operations for the DRM device node. See the discussion in
	 * :ref:`file operations<drm_driver_fops>` for in-depth coverage and
	 * some examples.
	 */
	const struct file_operations *fops;

	/* Everything below here is for legacy drivers, never use! */
	/* private: */

	/* List of devices hanging off this driver with stealth attach. */
	struct list_head legacy_dev_list;
	int (*firstopen) (struct drm_device *);
	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
	int (*dma_quiescent) (struct drm_device *);
	int (*context_dtor) (struct drm_device *dev, int context);
	u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
	int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
	void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
	int dev_priv_size;
};
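
/*
 * A minimal sketch of how a modern GEM + KMS driver might fill in this
 * structure, assuming the CMA GEM helpers from <drm/drm_gem_cma_helper.h> are
 * used; the "foo_" identifiers are hypothetical placeholders, not part of the
 * DRM core API:
 *
 *	DEFINE_DRM_GEM_CMA_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_drm_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops		 = &foo_fops,
 *		.dumb_create	 = drm_gem_cma_dumb_create,
 *		.name		 = "foo",
 *		.desc		 = "Foo display controller",
 *		.date		 = "20200101",
 *		.major		 = 1,
 *		.minor		 = 0,
 *	};
 */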

void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
			   size_t size, size_t offset);

/**
 * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
 * @parent: Parent device object
 * @driver: DRM driver
 * @type: the type of the struct which contains struct &drm_device
 * @member: the name of the &drm_device within @type.
 *
 * This allocates and initializes a new DRM device. No device registration is
 * done. Call drm_dev_register() to advertise the device to user space and
 * register it with other core subsystems. This should be done last in the
 * device initialization sequence to make sure userspace can't access an
 * inconsistent state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_get() and
 * drm_dev_put() to take and drop further ref-counts.
 *
 * It is recommended that drivers embed &struct drm_device into their own device
 * structure.
 *
 * Note that this manages the lifetime of the resulting &drm_device
 * automatically using devres. The DRM device initialized with this function is
 * automatically put on driver detach using drm_dev_put().
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
#define devm_drm_dev_alloc(parent, driver, type, member) \
	((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
				       offsetof(type, member)))
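
/*
 * Sketch of the intended usage, assuming a platform device driver that embeds
 * &struct drm_device in its own state structure; the "foo_" names are
 * hypothetical placeholders:
 *
 *	struct foo_device {
 *		struct drm_device drm;
 *		struct clk *pclk;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_device *foo;
 *		int ret;
 *
 *		foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
 *					 struct foo_device, drm);
 *		if (IS_ERR(foo))
 *			return PTR_ERR(foo);
 *
 *		(set up hardware and modeset objects here)
 *
 *		ret = drm_dev_register(&foo->drm, 0);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 */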

struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);

void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
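
/*
 * Sketch of a removal path for a hotpluggable device, assuming the device was
 * allocated with devm_drm_dev_alloc() (so the final drm_dev_put() happens
 * automatically on driver detach) and uses the atomic helpers for shutdown;
 * the "foo_" names are hypothetical placeholders:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_device *foo = platform_get_drvdata(pdev);
 *
 *		drm_dev_unplug(&foo->drm);
 *		drm_atomic_helper_shutdown(&foo->drm);
 *
 *		return 0;
 *	}
 */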

/**
 * drm_dev_is_unplugged - is a DRM device unplugged
 * @dev: DRM device
 *
 * This function can be called to check whether a hotpluggable device is
 * unplugged. Unplugging itself is signalled through drm_dev_unplug(). If a
 * device is unplugged, these two functions guarantee that any store before
 * calling drm_dev_unplug() is visible to callers of this function after it
 * completes.
 *
 * WARNING: This function fundamentally races against drm_dev_unplug(). It is
 * recommended that drivers instead use the underlying drm_dev_enter() and
 * drm_dev_exit() function pairs.
 */
static inline bool drm_dev_is_unplugged(struct drm_device *dev)
{
	int idx;

	if (drm_dev_enter(dev, &idx)) {
		drm_dev_exit(idx);
		return false;
	}

	return true;
}
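
/*
 * The recommended pattern is to bracket hardware access with drm_dev_enter()
 * and drm_dev_exit() rather than polling drm_dev_is_unplugged(). A minimal
 * sketch, with foo_hw_commit() standing in for driver-specific hardware
 * programming:
 *
 *	static void foo_flush_to_hardware(struct drm_crtc *crtc)
 *	{
 *		struct drm_device *dev = crtc->dev;
 *		int idx;
 *
 *		if (!drm_dev_enter(dev, &idx))
 *			return;	(device is gone, skip the hardware access)
 *
 *		foo_hw_commit(crtc);
 *
 *		drm_dev_exit(idx);
 *	}
 */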

/**
 * drm_core_check_all_features - check driver feature flags mask
 * @dev: DRM device to check
 * @features: feature flag(s) mask
 *
 * This checks @dev for driver features, see &drm_driver.driver_features,
 * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
 *
 * Returns true if all features in the @features mask are supported, false
 * otherwise.
 */
static inline bool drm_core_check_all_features(const struct drm_device *dev,
					       u32 features)
{
	u32 supported = dev->driver->driver_features & dev->driver_features;

	return features && (supported & features) == features;
}

/**
 * drm_core_check_feature - check driver feature flags
 * @dev: DRM device to check
 * @feature: feature flag
 *
 * This checks @dev for driver features, see &drm_driver.driver_features,
 * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
 *
 * Returns true if the @feature is supported, false otherwise.
 */
static inline bool drm_core_check_feature(const struct drm_device *dev,
					  enum drm_driver_feature feature)
{
	return drm_core_check_all_features(dev, feature);
}
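
/*
 * Typical use is to gate optional paths on a feature bit, for example in a
 * driver ioctl handler (a sketch, not any specific driver's code):
 *
 *	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 *		return -EOPNOTSUPP;
 */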

/**
 * drm_drv_uses_atomic_modeset - check if the driver implements
 * atomic_commit()
 * @dev: DRM device
 *
 * This check is useful if drivers do not have DRIVER_ATOMIC set but
 * have atomic modesetting internally implemented.
 */
static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
	return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
		(dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
}


int drm_dev_set_unique(struct drm_device *dev, const char *name);


#endif