xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/drm_drv.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5*4882a593Smuzhiyun  * All Rights Reserved.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
10*4882a593Smuzhiyun  * copy of this software and associated documentation files (the "Software"),
11*4882a593Smuzhiyun  * to deal in the Software without restriction, including without limitation
12*4882a593Smuzhiyun  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13*4882a593Smuzhiyun  * and/or sell copies of the Software, and to permit persons to whom the
14*4882a593Smuzhiyun  * Software is furnished to do so, subject to the following conditions:
15*4882a593Smuzhiyun  *
16*4882a593Smuzhiyun  * The above copyright notice and this permission notice (including the next
17*4882a593Smuzhiyun  * paragraph) shall be included in all copies or substantial portions of the
18*4882a593Smuzhiyun  * Software.
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23*4882a593Smuzhiyun  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24*4882a593Smuzhiyun  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25*4882a593Smuzhiyun  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26*4882a593Smuzhiyun  * DEALINGS IN THE SOFTWARE.
27*4882a593Smuzhiyun  */
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #include <linux/debugfs.h>
30*4882a593Smuzhiyun #include <linux/fs.h>
31*4882a593Smuzhiyun #include <linux/module.h>
32*4882a593Smuzhiyun #include <linux/moduleparam.h>
33*4882a593Smuzhiyun #include <linux/mount.h>
34*4882a593Smuzhiyun #include <linux/pseudo_fs.h>
35*4882a593Smuzhiyun #include <linux/slab.h>
36*4882a593Smuzhiyun #include <linux/srcu.h>
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #include <drm/drm_client.h>
39*4882a593Smuzhiyun #include <drm/drm_color_mgmt.h>
40*4882a593Smuzhiyun #include <drm/drm_drv.h>
41*4882a593Smuzhiyun #include <drm/drm_file.h>
42*4882a593Smuzhiyun #include <drm/drm_managed.h>
43*4882a593Smuzhiyun #include <drm/drm_mode_object.h>
44*4882a593Smuzhiyun #include <drm/drm_print.h>
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun #include "drm_crtc_internal.h"
47*4882a593Smuzhiyun #include "drm_internal.h"
48*4882a593Smuzhiyun #include "drm_legacy.h"
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
51*4882a593Smuzhiyun MODULE_DESCRIPTION("DRM shared core routines");
52*4882a593Smuzhiyun MODULE_LICENSE("GPL and additional rights");
53*4882a593Smuzhiyun 
/* Protects drm_minors_idr against concurrent insert/remove/lookup. */
static DEFINE_SPINLOCK(drm_minor_lock);
/* Maps minor indices to &struct drm_minor; slot holds NULL until registered. */
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete = false;

/* Root dentry of the /sys/kernel/debug/dri hierarchy. */
static struct dentry *drm_debugfs_root;

/* Read-side sections via drm_dev_enter/exit(); drm_dev_unplug() synchronizes. */
DEFINE_STATIC_SRCU(drm_unplug_srcu);
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun /*
71*4882a593Smuzhiyun  * DRM Minors
72*4882a593Smuzhiyun  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
73*4882a593Smuzhiyun  * of them is represented by a drm_minor object. Depending on the capabilities
74*4882a593Smuzhiyun  * of the device-driver, different interfaces are registered.
75*4882a593Smuzhiyun  *
76*4882a593Smuzhiyun  * Minors can be accessed via dev->$minor_name. This pointer is either
77*4882a593Smuzhiyun  * NULL or a valid drm_minor pointer and stays valid as long as the device is
78*4882a593Smuzhiyun  * valid. This means, DRM minors have the same life-time as the underlying
79*4882a593Smuzhiyun  * device. However, this doesn't mean that the minor is active. Minors are
80*4882a593Smuzhiyun  * registered and unregistered dynamically according to device-state.
81*4882a593Smuzhiyun  */
82*4882a593Smuzhiyun 
drm_minor_get_slot(struct drm_device * dev,unsigned int type)83*4882a593Smuzhiyun static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
84*4882a593Smuzhiyun 					     unsigned int type)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	switch (type) {
87*4882a593Smuzhiyun 	case DRM_MINOR_PRIMARY:
88*4882a593Smuzhiyun 		return &dev->primary;
89*4882a593Smuzhiyun 	case DRM_MINOR_RENDER:
90*4882a593Smuzhiyun 		return &dev->render;
91*4882a593Smuzhiyun 	default:
92*4882a593Smuzhiyun 		BUG();
93*4882a593Smuzhiyun 	}
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun 
/*
 * drmm release action paired with drm_minor_alloc(): drops the sysfs device
 * reference and frees the minor's IDR slot. The minor struct itself is
 * drmm-allocated and is freed by the managed-resource machinery.
 */
static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
}
109*4882a593Smuzhiyun 
/*
 * Allocate a minor of the given @type for @dev and store it in the matching
 * dev->primary/dev->render slot. The IDR entry is created with a NULL value
 * so lookups fail until drm_minor_register() publishes the minor. Cleanup is
 * handled entirely through the drmm release action registered below.
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/* Each minor type owns a disjoint range of 64 minor numbers. */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	/* From here on, teardown (IDR slot + kdev ref) is drmm-managed. */
	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}
149*4882a593Smuzhiyun 
/*
 * Register a previously allocated minor: set up its debugfs directory, add
 * the char-dev, and finally publish it by swapping the NULL IDR placeholder
 * for the real pointer so drm_minor_acquire() can find it.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	/* A minor of this type may simply not have been allocated. */
	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}
184*4882a593Smuzhiyun 
/*
 * Unregister a minor: first hide it from lookups by restoring the NULL IDR
 * placeholder, then tear down the char-dev and debugfs entries. Safe to call
 * for minors that were never allocated or never registered.
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun /*
205*4882a593Smuzhiyun  * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
207*4882a593Smuzhiyun  * object with drm_minor_release().
208*4882a593Smuzhiyun  *
209*4882a593Smuzhiyun  * As long as you hold this minor, it is guaranteed that the object and the
210*4882a593Smuzhiyun  * minor->dev pointer will stay valid! However, the device may get unplugged and
211*4882a593Smuzhiyun  * unregistered while you hold the minor.
212*4882a593Smuzhiyun  */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	/*
	 * Take the device reference under drm_minor_lock so the minor cannot
	 * be torn down between the lookup and the drm_dev_get().
	 */
	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		/* Unplugged devices are no longer accessible; undo the get. */
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}
233*4882a593Smuzhiyun 
void drm_minor_release(struct drm_minor *minor)
{
	/* Drop the device reference taken in drm_minor_acquire(). */
	drm_dev_put(minor->dev);
}
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun /**
240*4882a593Smuzhiyun  * DOC: driver instance overview
241*4882a593Smuzhiyun  *
242*4882a593Smuzhiyun  * A device instance for a drm driver is represented by &struct drm_device. This
243*4882a593Smuzhiyun  * is allocated and initialized with devm_drm_dev_alloc(), usually from
244*4882a593Smuzhiyun  * bus-specific ->probe() callbacks implemented by the driver. The driver then
245*4882a593Smuzhiyun  * needs to initialize all the various subsystems for the drm device like memory
246*4882a593Smuzhiyun  * management, vblank handling, modesetting support and initial output
247*4882a593Smuzhiyun  * configuration plus obviously initialize all the corresponding hardware bits.
248*4882a593Smuzhiyun  * Finally when everything is up and running and ready for userspace the device
249*4882a593Smuzhiyun  * instance can be published using drm_dev_register().
250*4882a593Smuzhiyun  *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
256*4882a593Smuzhiyun  *
257*4882a593Smuzhiyun  * When cleaning up a device instance everything needs to be done in reverse:
258*4882a593Smuzhiyun  * First unpublish the device instance with drm_dev_unregister(). Then clean up
259*4882a593Smuzhiyun  * any other resources allocated at device initialization and drop the driver's
260*4882a593Smuzhiyun  * reference to &drm_device using drm_dev_put().
261*4882a593Smuzhiyun  *
262*4882a593Smuzhiyun  * Note that any allocation or resource which is visible to userspace must be
263*4882a593Smuzhiyun  * released only when the final drm_dev_put() is called, and not when the
264*4882a593Smuzhiyun  * driver is unbound from the underlying physical struct &device. Best to use
265*4882a593Smuzhiyun  * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
266*4882a593Smuzhiyun  * related functions.
267*4882a593Smuzhiyun  *
268*4882a593Smuzhiyun  * devres managed resources like devm_kmalloc() can only be used for resources
269*4882a593Smuzhiyun  * directly related to the underlying hardware device, and only used in code
270*4882a593Smuzhiyun  * paths fully protected by drm_dev_enter() and drm_dev_exit().
271*4882a593Smuzhiyun  *
272*4882a593Smuzhiyun  * Display driver example
273*4882a593Smuzhiyun  * ~~~~~~~~~~~~~~~~~~~~~~
274*4882a593Smuzhiyun  *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present and serve as a demonstration of devm_drm_dev_alloc().
278*4882a593Smuzhiyun  *
279*4882a593Smuzhiyun  * .. code-block:: c
280*4882a593Smuzhiyun  *
281*4882a593Smuzhiyun  *	struct driver_device {
282*4882a593Smuzhiyun  *		struct drm_device drm;
283*4882a593Smuzhiyun  *		void *userspace_facing;
284*4882a593Smuzhiyun  *		struct clk *pclk;
285*4882a593Smuzhiyun  *	};
286*4882a593Smuzhiyun  *
287*4882a593Smuzhiyun  *	static struct drm_driver driver_drm_driver = {
288*4882a593Smuzhiyun  *		[...]
289*4882a593Smuzhiyun  *	};
290*4882a593Smuzhiyun  *
291*4882a593Smuzhiyun  *	static int driver_probe(struct platform_device *pdev)
292*4882a593Smuzhiyun  *	{
293*4882a593Smuzhiyun  *		struct driver_device *priv;
294*4882a593Smuzhiyun  *		struct drm_device *drm;
295*4882a593Smuzhiyun  *		int ret;
296*4882a593Smuzhiyun  *
297*4882a593Smuzhiyun  *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
298*4882a593Smuzhiyun  *					  struct driver_device, drm);
299*4882a593Smuzhiyun  *		if (IS_ERR(priv))
300*4882a593Smuzhiyun  *			return PTR_ERR(priv);
301*4882a593Smuzhiyun  *		drm = &priv->drm;
302*4882a593Smuzhiyun  *
303*4882a593Smuzhiyun  *		ret = drmm_mode_config_init(drm);
304*4882a593Smuzhiyun  *		if (ret)
305*4882a593Smuzhiyun  *			return ret;
306*4882a593Smuzhiyun  *
307*4882a593Smuzhiyun  *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
308*4882a593Smuzhiyun  *		if (!priv->userspace_facing)
309*4882a593Smuzhiyun  *			return -ENOMEM;
310*4882a593Smuzhiyun  *
311*4882a593Smuzhiyun  *		priv->pclk = devm_clk_get(dev, "PCLK");
312*4882a593Smuzhiyun  *		if (IS_ERR(priv->pclk))
313*4882a593Smuzhiyun  *			return PTR_ERR(priv->pclk);
314*4882a593Smuzhiyun  *
315*4882a593Smuzhiyun  *		// Further setup, display pipeline etc
316*4882a593Smuzhiyun  *
317*4882a593Smuzhiyun  *		platform_set_drvdata(pdev, drm);
318*4882a593Smuzhiyun  *
319*4882a593Smuzhiyun  *		drm_mode_config_reset(drm);
320*4882a593Smuzhiyun  *
321*4882a593Smuzhiyun  *		ret = drm_dev_register(drm);
322*4882a593Smuzhiyun  *		if (ret)
323*4882a593Smuzhiyun  *			return ret;
324*4882a593Smuzhiyun  *
325*4882a593Smuzhiyun  *		drm_fbdev_generic_setup(drm, 32);
326*4882a593Smuzhiyun  *
327*4882a593Smuzhiyun  *		return 0;
328*4882a593Smuzhiyun  *	}
329*4882a593Smuzhiyun  *
330*4882a593Smuzhiyun  *	// This function is called before the devm_ resources are released
331*4882a593Smuzhiyun  *	static int driver_remove(struct platform_device *pdev)
332*4882a593Smuzhiyun  *	{
333*4882a593Smuzhiyun  *		struct drm_device *drm = platform_get_drvdata(pdev);
334*4882a593Smuzhiyun  *
335*4882a593Smuzhiyun  *		drm_dev_unregister(drm);
336*4882a593Smuzhiyun  *		drm_atomic_helper_shutdown(drm)
337*4882a593Smuzhiyun  *
338*4882a593Smuzhiyun  *		return 0;
339*4882a593Smuzhiyun  *	}
340*4882a593Smuzhiyun  *
341*4882a593Smuzhiyun  *	// This function is called on kernel restart and shutdown
342*4882a593Smuzhiyun  *	static void driver_shutdown(struct platform_device *pdev)
343*4882a593Smuzhiyun  *	{
344*4882a593Smuzhiyun  *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
345*4882a593Smuzhiyun  *	}
346*4882a593Smuzhiyun  *
347*4882a593Smuzhiyun  *	static int __maybe_unused driver_pm_suspend(struct device *dev)
348*4882a593Smuzhiyun  *	{
349*4882a593Smuzhiyun  *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
350*4882a593Smuzhiyun  *	}
351*4882a593Smuzhiyun  *
352*4882a593Smuzhiyun  *	static int __maybe_unused driver_pm_resume(struct device *dev)
353*4882a593Smuzhiyun  *	{
354*4882a593Smuzhiyun  *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
355*4882a593Smuzhiyun  *
356*4882a593Smuzhiyun  *		return 0;
357*4882a593Smuzhiyun  *	}
358*4882a593Smuzhiyun  *
359*4882a593Smuzhiyun  *	static const struct dev_pm_ops driver_pm_ops = {
360*4882a593Smuzhiyun  *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
361*4882a593Smuzhiyun  *	};
362*4882a593Smuzhiyun  *
363*4882a593Smuzhiyun  *	static struct platform_driver driver_driver = {
364*4882a593Smuzhiyun  *		.driver = {
365*4882a593Smuzhiyun  *			[...]
366*4882a593Smuzhiyun  *			.pm = &driver_pm_ops,
367*4882a593Smuzhiyun  *		},
368*4882a593Smuzhiyun  *		.probe = driver_probe,
369*4882a593Smuzhiyun  *		.remove = driver_remove,
370*4882a593Smuzhiyun  *		.shutdown = driver_shutdown,
371*4882a593Smuzhiyun  *	};
372*4882a593Smuzhiyun  *	module_platform_driver(driver_driver);
373*4882a593Smuzhiyun  *
 * Drivers that want to support device unplugging (USB, DT overlay unload) should
 * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 * regions that access device resources to prevent use after they're
 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
 * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
 * drm_atomic_helper_shutdown() is called. This means that if the disable code
 * paths are protected, they will not run on regular driver module unload,
 * possibly leaving the hardware enabled.
382*4882a593Smuzhiyun  */
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun /**
385*4882a593Smuzhiyun  * drm_put_dev - Unregister and release a DRM device
386*4882a593Smuzhiyun  * @dev: DRM device
387*4882a593Smuzhiyun  *
388*4882a593Smuzhiyun  * Called at module unload time or when a PCI device is unplugged.
389*4882a593Smuzhiyun  *
390*4882a593Smuzhiyun  * Cleans up all DRM device, calling drm_lastclose().
391*4882a593Smuzhiyun  *
392*4882a593Smuzhiyun  * Note: Use of this function is deprecated. It will eventually go away
393*4882a593Smuzhiyun  * completely.  Please use drm_dev_unregister() and drm_dev_put() explicitly
394*4882a593Smuzhiyun  * instead to make sure that the device isn't userspace accessible any more
395*4882a593Smuzhiyun  * while teardown is in progress, ensuring that userspace can't access an
396*4882a593Smuzhiyun  * inconsistent state.
397*4882a593Smuzhiyun  */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev) {
		drm_dev_unregister(dev);
		drm_dev_put(dev);
	} else {
		DRM_ERROR("cleanup called no dev\n");
	}
}
410*4882a593Smuzhiyun EXPORT_SYMBOL(drm_put_dev);
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun /**
413*4882a593Smuzhiyun  * drm_dev_enter - Enter device critical section
414*4882a593Smuzhiyun  * @dev: DRM device
415*4882a593Smuzhiyun  * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
416*4882a593Smuzhiyun  *
417*4882a593Smuzhiyun  * This function marks and protects the beginning of a section that should not
418*4882a593Smuzhiyun  * be entered after the device has been unplugged. The section end is marked
419*4882a593Smuzhiyun  * with drm_dev_exit(). Calls to this function can be nested.
420*4882a593Smuzhiyun  *
421*4882a593Smuzhiyun  * Returns:
422*4882a593Smuzhiyun  * True if it is OK to enter the section, false otherwise.
423*4882a593Smuzhiyun  */
drm_dev_enter(struct drm_device * dev,int * idx)424*4882a593Smuzhiyun bool drm_dev_enter(struct drm_device *dev, int *idx)
425*4882a593Smuzhiyun {
426*4882a593Smuzhiyun 	*idx = srcu_read_lock(&drm_unplug_srcu);
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun 	if (dev->unplugged) {
429*4882a593Smuzhiyun 		srcu_read_unlock(&drm_unplug_srcu, *idx);
430*4882a593Smuzhiyun 		return false;
431*4882a593Smuzhiyun 	}
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun 	return true;
434*4882a593Smuzhiyun }
435*4882a593Smuzhiyun EXPORT_SYMBOL(drm_dev_enter);
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun /**
438*4882a593Smuzhiyun  * drm_dev_exit - Exit device critical section
439*4882a593Smuzhiyun  * @idx: index returned from drm_dev_enter()
440*4882a593Smuzhiyun  *
441*4882a593Smuzhiyun  * This function marks the end of a section that should not be entered after
442*4882a593Smuzhiyun  * the device has been unplugged.
443*4882a593Smuzhiyun  */
void drm_dev_exit(int idx)
{
	/* Close the SRCU read-side section opened by drm_dev_enter(). */
	srcu_read_unlock(&drm_unplug_srcu, idx);
}
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun /**
451*4882a593Smuzhiyun  * drm_dev_unplug - unplug a DRM device
452*4882a593Smuzhiyun  * @dev: DRM device
453*4882a593Smuzhiyun  *
454*4882a593Smuzhiyun  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
455*4882a593Smuzhiyun  * userspace operations. Entry-points can use drm_dev_enter() and
456*4882a593Smuzhiyun  * drm_dev_exit() to protect device resources in a race free manner. This
457*4882a593Smuzhiyun  * essentially unregisters the device like drm_dev_unregister(), but can be
458*4882a593Smuzhiyun  * called while there are still open users of @dev.
459*4882a593Smuzhiyun  */
void drm_dev_unplug(struct drm_device *dev)
{
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	/* No reader can be inside a device section anymore; unregister. */
	drm_dev_unregister(dev);
}
473*4882a593Smuzhiyun EXPORT_SYMBOL(drm_dev_unplug);
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun /*
476*4882a593Smuzhiyun  * DRM internal mount
477*4882a593Smuzhiyun  * We want to be able to allocate our own "struct address_space" to control
478*4882a593Smuzhiyun  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
479*4882a593Smuzhiyun  * stand-alone address_space objects, so we need an underlying inode. As there
480*4882a593Smuzhiyun  * is no way to allocate an independent inode easily, we need a fake internal
481*4882a593Smuzhiyun  * VFS mount-point.
482*4882a593Smuzhiyun  *
483*4882a593Smuzhiyun  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
484*4882a593Smuzhiyun  * frees it again. You are allowed to use iget() and iput() to get references to
485*4882a593Smuzhiyun  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
486*4882a593Smuzhiyun  * drm_fs_inode_free() call (which does not have to be the last iput()).
487*4882a593Smuzhiyun  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
488*4882a593Smuzhiyun  * between multiple inode-users. You could, technically, call
489*4882a593Smuzhiyun  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
490*4882a593Smuzhiyun  * iput(), but this way you'd end up with a new vfsmount for each inode.
491*4882a593Smuzhiyun  */
492*4882a593Smuzhiyun 
/* Pin count and mount point of the shared DRM-internal pseudo filesystem. */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;
495*4882a593Smuzhiyun 
drm_fs_init_fs_context(struct fs_context * fc)496*4882a593Smuzhiyun static int drm_fs_init_fs_context(struct fs_context *fc)
497*4882a593Smuzhiyun {
498*4882a593Smuzhiyun 	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
499*4882a593Smuzhiyun }
500*4882a593Smuzhiyun 
/* Anonymous pseudo filesystem backing the DRM-internal inodes. */
static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
507*4882a593Smuzhiyun 
drm_fs_inode_new(void)508*4882a593Smuzhiyun static struct inode *drm_fs_inode_new(void)
509*4882a593Smuzhiyun {
510*4882a593Smuzhiyun 	struct inode *inode;
511*4882a593Smuzhiyun 	int r;
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
514*4882a593Smuzhiyun 	if (r < 0) {
515*4882a593Smuzhiyun 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
516*4882a593Smuzhiyun 		return ERR_PTR(r);
517*4882a593Smuzhiyun 	}
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
520*4882a593Smuzhiyun 	if (IS_ERR(inode))
521*4882a593Smuzhiyun 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun 	return inode;
524*4882a593Smuzhiyun }
525*4882a593Smuzhiyun 
drm_fs_inode_free(struct inode * inode)526*4882a593Smuzhiyun static void drm_fs_inode_free(struct inode *inode)
527*4882a593Smuzhiyun {
528*4882a593Smuzhiyun 	if (inode) {
529*4882a593Smuzhiyun 		iput(inode);
530*4882a593Smuzhiyun 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
531*4882a593Smuzhiyun 	}
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun /**
535*4882a593Smuzhiyun  * DOC: component helper usage recommendations
536*4882a593Smuzhiyun  *
537*4882a593Smuzhiyun  * DRM drivers that drive hardware where a logical device consists of a pile of
538*4882a593Smuzhiyun  * independent hardware blocks are recommended to use the :ref:`component helper
539*4882a593Smuzhiyun  * library<component>`. For consistency and better options for code reuse the
540*4882a593Smuzhiyun  * following guidelines apply:
541*4882a593Smuzhiyun  *
542*4882a593Smuzhiyun  *  - The entire device initialization procedure should be run from the
543*4882a593Smuzhiyun  *    &component_master_ops.master_bind callback, starting with
544*4882a593Smuzhiyun  *    devm_drm_dev_alloc(), then binding all components with
545*4882a593Smuzhiyun  *    component_bind_all() and finishing with drm_dev_register().
546*4882a593Smuzhiyun  *
547*4882a593Smuzhiyun  *  - The opaque pointer passed to all components through component_bind_all()
548*4882a593Smuzhiyun  *    should point at &struct drm_device of the device instance, not some driver
549*4882a593Smuzhiyun  *    specific private structure.
550*4882a593Smuzhiyun  *
551*4882a593Smuzhiyun  *  - The component helper fills the niche where further standardization of
552*4882a593Smuzhiyun  *    interfaces is not practical. When there already is, or will be, a
553*4882a593Smuzhiyun  *    standardized interface like &drm_bridge or &drm_panel, providing its own
554*4882a593Smuzhiyun  *    functions to find such components at driver load time, like
555*4882a593Smuzhiyun  *    drm_of_find_panel_or_bridge(), then the component helper should not be
556*4882a593Smuzhiyun  *    used.
557*4882a593Smuzhiyun  */
558*4882a593Smuzhiyun 
/*
 * drmm release action registered by drm_dev_init(): tears down everything
 * that drm_dev_init() set up before the action was added, in reverse order.
 * Runs when the last drm_dev_put() drops the device.
 */
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}
575*4882a593Smuzhiyun 
/*
 * Initialize @dev for use with @driver, taking a reference on @parent.
 * All teardown is funnelled through the drm_dev_init_release() managed
 * action registered below, so the error path only needs to run
 * drm_managed_release(). Returns 0 on success or a negative errno.
 */
static int drm_dev_init(struct drm_device *dev,
			struct drm_driver *driver,
			struct device *parent)
{
	struct inode *inode;
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	/* Paired with the put_device() in drm_dev_init_release(). */
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);

	/*
	 * Registered first so the release action can clean up everything
	 * allocated below; _or_reset means the action itself runs if the
	 * registration fails.
	 */
	ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	inode = drm_fs_inode_new();
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	dev->anon_inode = inode;

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err;

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	/* Default unique name is the parent device name; see drm_dev_set_unique(). */
	ret = drm_dev_set_unique(dev, dev_name(parent));
	if (ret)
		goto err;

	return 0;

err:
	/* Runs all managed actions, including drm_dev_init_release() above. */
	drm_managed_release(dev);

	return ret;
}
661*4882a593Smuzhiyun 
/* devres action: drop the reference owned by the parent device. */
static void devm_drm_dev_init_release(void *data)
{
	struct drm_device *dev = data;

	drm_dev_put(dev);
}
666*4882a593Smuzhiyun 
devm_drm_dev_init(struct device * parent,struct drm_device * dev,struct drm_driver * driver)667*4882a593Smuzhiyun static int devm_drm_dev_init(struct device *parent,
668*4882a593Smuzhiyun 			     struct drm_device *dev,
669*4882a593Smuzhiyun 			     struct drm_driver *driver)
670*4882a593Smuzhiyun {
671*4882a593Smuzhiyun 	int ret;
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	ret = drm_dev_init(dev, driver, parent);
674*4882a593Smuzhiyun 	if (ret)
675*4882a593Smuzhiyun 		return ret;
676*4882a593Smuzhiyun 
677*4882a593Smuzhiyun 	ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
678*4882a593Smuzhiyun 	if (ret)
679*4882a593Smuzhiyun 		devm_drm_dev_init_release(dev);
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 	return ret;
682*4882a593Smuzhiyun }
683*4882a593Smuzhiyun 
/*
 * Allocate a zeroed container of @size bytes whose embedded &struct drm_device
 * lives at @offset, initialize it devres-managed against @parent, and register
 * the container for kfree() on final release. Returns the container pointer
 * or an ERR_PTR() on failure.
 */
void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
			   size_t size, size_t offset)
{
	struct drm_device *drm;
	void *ptr;
	int ret;

	ptr = kzalloc(size, GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	drm = ptr + offset;

	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(ptr);
		return ERR_PTR(ret);
	}

	/* The whole container is freed once the last device reference drops. */
	drmm_add_final_kfree(drm, ptr);

	return ptr;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun /**
708*4882a593Smuzhiyun  * drm_dev_alloc - Allocate new DRM device
709*4882a593Smuzhiyun  * @driver: DRM driver to allocate device for
710*4882a593Smuzhiyun  * @parent: Parent device object
711*4882a593Smuzhiyun  *
712*4882a593Smuzhiyun  * This is the deprecated version of devm_drm_dev_alloc(), which does not support
713*4882a593Smuzhiyun  * subclassing through embedding the struct &drm_device in a driver private
714*4882a593Smuzhiyun  * structure, and which does not support automatic cleanup through devres.
715*4882a593Smuzhiyun  *
716*4882a593Smuzhiyun  * RETURNS:
717*4882a593Smuzhiyun  * Pointer to new DRM device, or ERR_PTR on failure.
718*4882a593Smuzhiyun  */
drm_dev_alloc(struct drm_driver * driver,struct device * parent)719*4882a593Smuzhiyun struct drm_device *drm_dev_alloc(struct drm_driver *driver,
720*4882a593Smuzhiyun 				 struct device *parent)
721*4882a593Smuzhiyun {
722*4882a593Smuzhiyun 	struct drm_device *dev;
723*4882a593Smuzhiyun 	int ret;
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
726*4882a593Smuzhiyun 	if (!dev)
727*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun 	ret = drm_dev_init(dev, driver, parent);
730*4882a593Smuzhiyun 	if (ret) {
731*4882a593Smuzhiyun 		kfree(dev);
732*4882a593Smuzhiyun 		return ERR_PTR(ret);
733*4882a593Smuzhiyun 	}
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	drmm_add_final_kfree(dev, dev);
736*4882a593Smuzhiyun 
737*4882a593Smuzhiyun 	return dev;
738*4882a593Smuzhiyun }
739*4882a593Smuzhiyun EXPORT_SYMBOL(drm_dev_alloc);
740*4882a593Smuzhiyun 
/*
 * Final kref release callback for drm_dev_put(): runs the driver's optional
 * &drm_driver.release hook, executes all drmm managed cleanup actions, then
 * frees the memory registered via drmm_add_final_kfree() (the containing
 * structure for embedded devices, or the drm_device itself).
 */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun /**
754*4882a593Smuzhiyun  * drm_dev_get - Take reference of a DRM device
755*4882a593Smuzhiyun  * @dev: device to take reference of or NULL
756*4882a593Smuzhiyun  *
757*4882a593Smuzhiyun  * This increases the ref-count of @dev by one. You *must* already own a
758*4882a593Smuzhiyun  * reference when calling this. Use drm_dev_put() to drop this reference
759*4882a593Smuzhiyun  * again.
760*4882a593Smuzhiyun  *
761*4882a593Smuzhiyun  * This function never fails. However, this function does not provide *any*
762*4882a593Smuzhiyun  * guarantee whether the device is alive or running. It only provides a
763*4882a593Smuzhiyun  * reference to the object and the memory associated with it.
764*4882a593Smuzhiyun  */
drm_dev_get(struct drm_device * dev)765*4882a593Smuzhiyun void drm_dev_get(struct drm_device *dev)
766*4882a593Smuzhiyun {
767*4882a593Smuzhiyun 	if (dev)
768*4882a593Smuzhiyun 		kref_get(&dev->ref);
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun EXPORT_SYMBOL(drm_dev_get);
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun /**
773*4882a593Smuzhiyun  * drm_dev_put - Drop reference of a DRM device
774*4882a593Smuzhiyun  * @dev: device to drop reference of or NULL
775*4882a593Smuzhiyun  *
776*4882a593Smuzhiyun  * This decreases the ref-count of @dev by one. The device is destroyed if the
777*4882a593Smuzhiyun  * ref-count drops to zero.
778*4882a593Smuzhiyun  */
drm_dev_put(struct drm_device * dev)779*4882a593Smuzhiyun void drm_dev_put(struct drm_device *dev)
780*4882a593Smuzhiyun {
781*4882a593Smuzhiyun 	if (dev)
782*4882a593Smuzhiyun 		kref_put(&dev->ref, drm_dev_release);
783*4882a593Smuzhiyun }
784*4882a593Smuzhiyun EXPORT_SYMBOL(drm_dev_put);
785*4882a593Smuzhiyun 
/*
 * Create the legacy controlD* sysfs symlink next to the primary minor's
 * device node for modeset drivers. Returns 0 (including when there is
 * nothing to do) or a negative errno.
 */
static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the controlD*
	 * sysfs files to figure out whether it's a modeset driver. It only does
	 * readdir, hence a symlink is sufficient (and the least confusing
	 * option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardev have been allocated in the range
	 * 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}
820*4882a593Smuzhiyun 
remove_compat_control_link(struct drm_device * dev)821*4882a593Smuzhiyun static void remove_compat_control_link(struct drm_device *dev)
822*4882a593Smuzhiyun {
823*4882a593Smuzhiyun 	struct drm_minor *minor;
824*4882a593Smuzhiyun 	char *name;
825*4882a593Smuzhiyun 
826*4882a593Smuzhiyun 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
827*4882a593Smuzhiyun 		return;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
830*4882a593Smuzhiyun 	if (!minor)
831*4882a593Smuzhiyun 		return;
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
834*4882a593Smuzhiyun 	if (!name)
835*4882a593Smuzhiyun 		return;
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	sysfs_remove_link(minor->kdev->kobj.parent, name);
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun 	kfree(name);
840*4882a593Smuzhiyun }
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun /**
843*4882a593Smuzhiyun  * drm_dev_register - Register DRM device
844*4882a593Smuzhiyun  * @dev: Device to register
845*4882a593Smuzhiyun  * @flags: Flags passed to the driver's .load() function
846*4882a593Smuzhiyun  *
847*4882a593Smuzhiyun  * Register the DRM device @dev with the system, advertise device to user-space
848*4882a593Smuzhiyun  * and start normal device operation. @dev must be initialized via drm_dev_init()
849*4882a593Smuzhiyun  * previously.
850*4882a593Smuzhiyun  *
851*4882a593Smuzhiyun  * Never call this twice on any device!
852*4882a593Smuzhiyun  *
853*4882a593Smuzhiyun  * NOTE: To ensure backward compatibility with existing drivers method this
854*4882a593Smuzhiyun  * function calls the &drm_driver.load method after registering the device
855*4882a593Smuzhiyun  * nodes, creating race conditions. Usage of the &drm_driver.load methods is
856*4882a593Smuzhiyun  * therefore deprecated, drivers must perform all initialization before calling
857*4882a593Smuzhiyun  * drm_dev_register().
858*4882a593Smuzhiyun  *
859*4882a593Smuzhiyun  * RETURNS:
860*4882a593Smuzhiyun  * 0 on success, negative error code on failure.
861*4882a593Smuzhiyun  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	struct drm_driver *driver = dev->driver;
	int ret;

	/* Drivers without the legacy ->load hook must be fully set up already,
	 * so their mode config can be sanity-checked before registration. */
	if (!driver->load)
		drm_mode_config_validate(dev);

	/* Catch drivers that forgot drmm_add_final_kfree() / the alloc helpers. */
	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	/* Deprecated: see the NOTE in the kernel-doc above — ->load runs after
	 * the device nodes are already visible to userspace. */
	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	ret = 0;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	goto out_unlock;

err_minors:
	/* Unregister helpers tolerate partially-registered state. */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun /**
920*4882a593Smuzhiyun  * drm_dev_unregister - Unregister DRM device
921*4882a593Smuzhiyun  * @dev: Device to unregister
922*4882a593Smuzhiyun  *
923*4882a593Smuzhiyun  * Unregister the DRM device from the system. This does the reverse of
924*4882a593Smuzhiyun  * drm_dev_register() but does not deallocate the device. The caller must call
925*4882a593Smuzhiyun  * drm_dev_put() to drop their final reference.
926*4882a593Smuzhiyun  *
927*4882a593Smuzhiyun  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
928*4882a593Smuzhiyun  * which can be called while there are still open users of @dev.
929*4882a593Smuzhiyun  *
930*4882a593Smuzhiyun  * This should be called first in the device teardown code to make sure
931*4882a593Smuzhiyun  * userspace can't access the device instance any more.
932*4882a593Smuzhiyun  */
void drm_dev_unregister(struct drm_device *dev)
{
	/* Legacy drivers expect a final lastclose before teardown. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_legacy_rmmaps(dev);

	/* Reverse order of drm_dev_register(): link first, then the minors. */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun /**
960*4882a593Smuzhiyun  * drm_dev_set_unique - Set the unique name of a DRM device
961*4882a593Smuzhiyun  * @dev: device of which to set the unique name
962*4882a593Smuzhiyun  * @name: unique name
963*4882a593Smuzhiyun  *
964*4882a593Smuzhiyun  * Sets the unique name of a DRM device using the specified string. This is
965*4882a593Smuzhiyun  * already done by drm_dev_init(), drivers should only override the default
966*4882a593Smuzhiyun  * unique name for backwards compatibility reasons.
967*4882a593Smuzhiyun  *
968*4882a593Smuzhiyun  * Return: 0 on success or a negative error code on failure.
969*4882a593Smuzhiyun  */
drm_dev_set_unique(struct drm_device * dev,const char * name)970*4882a593Smuzhiyun int drm_dev_set_unique(struct drm_device *dev, const char *name)
971*4882a593Smuzhiyun {
972*4882a593Smuzhiyun 	drmm_kfree(dev, dev->unique);
973*4882a593Smuzhiyun 	dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
974*4882a593Smuzhiyun 
975*4882a593Smuzhiyun 	return dev->unique ? 0 : -ENOMEM;
976*4882a593Smuzhiyun }
977*4882a593Smuzhiyun EXPORT_SYMBOL(drm_dev_set_unique);
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun /*
980*4882a593Smuzhiyun  * DRM Core
981*4882a593Smuzhiyun  * The DRM core module initializes all global DRM objects and makes them
982*4882a593Smuzhiyun  * available to drivers. Once setup, drivers can probe their respective
983*4882a593Smuzhiyun  * devices.
984*4882a593Smuzhiyun  * Currently, core management includes:
985*4882a593Smuzhiyun  *  - The "DRM-Global" key/value database
986*4882a593Smuzhiyun  *  - Global ID management for connectors
987*4882a593Smuzhiyun  *  - DRM major number allocation
988*4882a593Smuzhiyun  *  - DRM minor management
989*4882a593Smuzhiyun  *  - DRM sysfs class
990*4882a593Smuzhiyun  *  - DRM debugfs root
991*4882a593Smuzhiyun  *
992*4882a593Smuzhiyun  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
993*4882a593Smuzhiyun  * interface registered on a DRM device, you can request minor numbers from DRM
994*4882a593Smuzhiyun  * core. DRM core takes care of major-number management and char-dev
995*4882a593Smuzhiyun  * registration. A stub ->open() callback forwards any open() requests to the
996*4882a593Smuzhiyun  * registered minor.
997*4882a593Smuzhiyun  */
998*4882a593Smuzhiyun 
/*
 * ->open() for the DRM major's stub chardev: look up the minor for this
 * inode, swap in the owning driver's file_operations, and forward the open
 * to the driver. Returns 0 or a negative errno.
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	/* Takes a module reference on the driver's fops owner. */
	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	/* From here on the file is serviced directly by the driver. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}
1028*4882a593Smuzhiyun 
/* fops for the DRM major: every open() is redirected to the minor's driver. */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
1034*4882a593Smuzhiyun 
/*
 * Tear down global DRM core state in reverse order of drm_core_init().
 * Also used as the error-unwind path of drm_core_init(); all callees
 * tolerate their counterpart never having run.
 */
static void drm_core_exit(void)
{
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
1043*4882a593Smuzhiyun 
/*
 * Set up global DRM core state: connector IDA, minor IDR, sysfs class,
 * debugfs root and the DRM major chardev. On any failure the whole init
 * is unwound via drm_core_exit(). Returns 0 or a negative errno.
 */
static int __init drm_core_init(void)
{
	int ret;

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	/* debugfs failure is non-fatal by convention; no error check needed. */
	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	/* Gate for drm_dev_init(): drivers may now create devices. */
	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}
1072*4882a593Smuzhiyun 
/*
 * Vendor change: with CONFIG_VIDEO_REVERSE_IMAGE the DRM core is initialized
 * at fs_initcall time instead of module_init, presumably so display output is
 * available earlier in boot — NOTE(review): confirm against the vendor
 * (Rockchip) config documentation.
 */
#ifdef CONFIG_VIDEO_REVERSE_IMAGE
fs_initcall(drm_core_init);
#else
module_init(drm_core_init);
#endif
module_exit(drm_core_exit);
1079