// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device,
 * usually when userspace still holds open files and other handles to its
 * resources.
 *
 * Release actions can be added with drmm_add_action(), and memory allocations
 * can be done directly with drmm_kmalloc() and the related functions.
 * Everything is released on the final drm_dev_put(), in reverse order of how
 * the release actions were added and the memory was allocated since driver
 * loading started with devm_drm_dev_alloc().
 *
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver; all of these functions are fully
 * concurrency-safe. But it is recommended to use managed resources only for
 * resources that change rarely, if ever, during the lifetime of the
 * &drm_device instance.
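 *
 * A minimal usage sketch (the "foo" driver, its structures and its functions
 * are purely illustrative)::
 *
 *	struct foo_device {
 *		struct drm_device drm;
 *		void *scratch;
 *	};
 *
 *	static void foo_release_hw(struct drm_device *drm, void *data)
 *	{
 *		struct foo_device *foo = data;
 *
 *		foo_hw_shutdown(foo);
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_device *foo;
 *		int ret;
 *
 *		foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
 *					 struct foo_device, drm);
 *		if (IS_ERR(foo))
 *			return PTR_ERR(foo);
 *
 *		foo->scratch = drmm_kmalloc(&foo->drm, 4096, GFP_KERNEL);
 *		if (!foo->scratch)
 *			return -ENOMEM;
 *
 *		ret = drmm_add_action_or_reset(&foo->drm, foo_release_hw, foo);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dev_register(&foo->drm, 0);
 *	}
 *
 * Both the scratch allocation and the foo_release_hw() action are released
 * automatically on the final drm_dev_put() of &foo->drm.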
 */

struct drmres_node {
	struct list_head	entry;
	drmres_release_t	release;
	const char		*name;
	size_t			size;
};

struct drmres {
	struct drmres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

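/*
 * Releases all managed resources of @dev: each registered release action is
 * invoked and each managed allocation is freed. add_dr() prepends entries to
 * the list, so resources are torn down in reverse order of registration.
 * This runs during teardown of the &drm_device (ultimately from the final
 * drm_dev_put()), when no other references remain, hence no locking.
 */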
void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres *alloc_dr(drmres_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

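/*
 * Registers the kmalloc'ed container that embeds @dev so it is freed with
 * kfree() once all other managed resources have been released. The allocation
 * embedding &struct drm_device cannot itself come from drmm_kmalloc();
 * devm_drm_dev_alloc() calls this on behalf of drivers.
 */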
void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}

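/*
 * Backend for the drmm_add_action() macro in drm_managed.h, which passes the
 * stringified action callback as @name for the debug output.
 */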
int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void*) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

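/*
 * Backend for the drmm_add_action_or_reset() macro: like __drmm_add_action(),
 * except that when registering the release action fails the action is called
 * immediately, so callers only have to check the return value.
 */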
int __drmm_add_action_or_reset(struct drm_device *dev,
			       drmres_release_t action,
			       void *data, const char *name)
{
	int ret;

	ret = __drmm_add_action(dev, action, data, name);
	if (ret)
		action(dev, data);

	return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);

/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
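 *
 * A short sketch (the "foo" names are purely illustrative)::
 *
 *	foo->lut = drmm_kmalloc(&foo->drm, lut_size, GFP_KERNEL);
 *	if (!foo->lut)
 *		return -ENOMEM;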
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);

/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
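 *
 * For example (illustrative)::
 *
 *	foo->name = drmm_kstrdup(&foo->drm, "foo controller", GFP_KERNEL);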
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);

/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any of its related
 * functions before the final drm_dev_put() of @dev.
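 *
 * For example (illustrative), pairing with an earlier drmm_kmalloc()::
 *
 *	drmm_kfree(&foo->drm, foo->lut);
 *	foo->lut = NULL;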
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);