xref: /OK3568_Linux_fs/kernel/Documentation/driver-api/driver-model/devres.rst (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun================================
2*4882a593SmuzhiyunDevres - Managed Device Resource
3*4882a593Smuzhiyun================================
4*4882a593Smuzhiyun
5*4882a593SmuzhiyunTejun Heo	<teheo@suse.de>
6*4882a593Smuzhiyun
7*4882a593SmuzhiyunFirst draft	10 January 2007
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun.. contents
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun   1. Intro			: Huh? Devres?
12*4882a593Smuzhiyun   2. Devres			: Devres in a nutshell
13*4882a593Smuzhiyun   3. Devres Group		: Group devres'es and release them together
14*4882a593Smuzhiyun   4. Details			: Life time rules, calling context, ...
15*4882a593Smuzhiyun   5. Overhead			: How much do we have to pay for this?
16*4882a593Smuzhiyun   6. List of managed interfaces: Currently implemented managed interfaces
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun1. Intro
20*4882a593Smuzhiyun--------
21*4882a593Smuzhiyun
22*4882a593Smuzhiyundevres came up while trying to convert libata to use iomap.  Each
23*4882a593Smuzhiyuniomapped address should be kept and unmapped on driver detach.  For
24*4882a593Smuzhiyunexample, a plain SFF ATA controller (that is, good old PCI IDE) in
25*4882a593Smuzhiyunnative mode makes use of 5 PCI BARs and all of them should be
26*4882a593Smuzhiyunmaintained.
27*4882a593Smuzhiyun
28*4882a593SmuzhiyunAs with many other device drivers, libata low level drivers have
29*4882a593Smuzhiyunsufficient bugs in ->remove and ->probe failure path.  Well, yes,
30*4882a593Smuzhiyunthat's probably because libata low level driver developers are lazy
31*4882a593Smuzhiyunbunch, but aren't all low level driver developers?  After spending a
32*4882a593Smuzhiyunday fiddling with braindamaged hardware with no document or
33*4882a593Smuzhiyunbraindamaged document, if it's finally working, well, it's working.
34*4882a593Smuzhiyun
35*4882a593SmuzhiyunFor one reason or another, low level drivers don't receive as much
36*4882a593Smuzhiyunattention or testing as core code, and bugs on driver detach or
37*4882a593Smuzhiyuninitialization failure don't happen often enough to be noticeable.
38*4882a593SmuzhiyunInit failure path is worse because it's much less travelled while it
39*4882a593Smuzhiyunstill needs to handle multiple entry points.
40*4882a593Smuzhiyun
41*4882a593SmuzhiyunSo, many low level drivers end up leaking resources on driver detach
42*4882a593Smuzhiyunand having half broken failure path implementation in ->probe() which
43*4882a593Smuzhiyunwould leak resources or even cause oops when failure occurs.  iomap
44*4882a593Smuzhiyunadds more to this mix.  So do msi and msix.
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun2. Devres
48*4882a593Smuzhiyun---------
49*4882a593Smuzhiyun
50*4882a593Smuzhiyundevres is basically linked list of arbitrarily sized memory areas
51*4882a593Smuzhiyunassociated with a struct device.  Each devres entry is associated with
52*4882a593Smuzhiyuna release function.  A devres can be released in several ways.  No
53*4882a593Smuzhiyunmatter what, all devres entries are released on driver detach.  On
54*4882a593Smuzhiyunrelease, the associated release function is invoked and then the
55*4882a593Smuzhiyundevres entry is freed.
56*4882a593Smuzhiyun
57*4882a593SmuzhiyunA managed interface is created for resources commonly used by device
58*4882a593Smuzhiyundrivers using devres.  For example, coherent DMA memory is acquired
59*4882a593Smuzhiyunusing dma_alloc_coherent().  The managed version is called
60*4882a593Smuzhiyundmam_alloc_coherent().  It is identical to dma_alloc_coherent() except
61*4882a593Smuzhiyunthat the DMA memory allocated using it is managed and will be
62*4882a593Smuzhiyunautomatically released on driver detach.  Implementation looks like
63*4882a593Smuzhiyunthe following::
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun  struct dma_devres {
66*4882a593Smuzhiyun	size_t		size;
67*4882a593Smuzhiyun	void		*vaddr;
68*4882a593Smuzhiyun	dma_addr_t	dma_handle;
69*4882a593Smuzhiyun  };
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun  static void dmam_coherent_release(struct device *dev, void *res)
72*4882a593Smuzhiyun  {
73*4882a593Smuzhiyun	struct dma_devres *this = res;
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun	dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
76*4882a593Smuzhiyun  }
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun  dmam_alloc_coherent(dev, size, dma_handle, gfp)
79*4882a593Smuzhiyun  {
80*4882a593Smuzhiyun	struct dma_devres *dr;
81*4882a593Smuzhiyun	void *vaddr;
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun	dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
84*4882a593Smuzhiyun	...
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun	/* alloc DMA memory as usual */
87*4882a593Smuzhiyun	vaddr = dma_alloc_coherent(...);
88*4882a593Smuzhiyun	...
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun	/* record size, vaddr, dma_handle in dr */
91*4882a593Smuzhiyun	dr->vaddr = vaddr;
92*4882a593Smuzhiyun	...
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun	devres_add(dev, dr);
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun	return vaddr;
97*4882a593Smuzhiyun  }
98*4882a593Smuzhiyun
99*4882a593SmuzhiyunIf a driver uses dmam_alloc_coherent(), the area is guaranteed to be
100*4882a593Smuzhiyunfreed whether initialization fails half-way or the device gets
101*4882a593Smuzhiyundetached.  If most resources are acquired using managed interface, a
102*4882a593Smuzhiyundriver can have much simpler init and exit code.  Init path basically
103*4882a593Smuzhiyunlooks like the following::
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun  my_init_one()
106*4882a593Smuzhiyun  {
107*4882a593Smuzhiyun	struct mydev *d;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
110*4882a593Smuzhiyun	if (!d)
111*4882a593Smuzhiyun		return -ENOMEM;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun	d->ring = dmam_alloc_coherent(...);
114*4882a593Smuzhiyun	if (!d->ring)
115*4882a593Smuzhiyun		return -ENOMEM;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun	if (check something)
118*4882a593Smuzhiyun		return -EINVAL;
119*4882a593Smuzhiyun	...
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun	return register_to_upper_layer(d);
122*4882a593Smuzhiyun  }
123*4882a593Smuzhiyun
124*4882a593SmuzhiyunAnd exit path::
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun  my_remove_one()
127*4882a593Smuzhiyun  {
128*4882a593Smuzhiyun	unregister_from_upper_layer(d);
129*4882a593Smuzhiyun	shutdown_my_hardware();
130*4882a593Smuzhiyun  }
131*4882a593Smuzhiyun
132*4882a593SmuzhiyunAs shown above, low level drivers can be simplified a lot by using
133*4882a593Smuzhiyundevres.  Complexity is shifted from less maintained low level drivers
134*4882a593Smuzhiyunto better maintained higher layer.  Also, as init failure path is
135*4882a593Smuzhiyunshared with exit path, both can get more testing.
136*4882a593Smuzhiyun
137*4882a593SmuzhiyunNote though that when converting current calls or assignments to
138*4882a593Smuzhiyunmanaged devm_* versions it is up to you to check if internal operations
139*4882a593Smuzhiyunlike allocating memory, have failed. Managed resources pertain to the
140*4882a593Smuzhiyunfreeing of these resources *only* - all other checks needed are still
141*4882a593Smuzhiyunon you. In some cases this may mean introducing checks that were not
142*4882a593Smuzhiyunnecessary before moving to the managed devm_* calls.
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun3. Devres group
146*4882a593Smuzhiyun---------------
147*4882a593Smuzhiyun
148*4882a593SmuzhiyunDevres entries can be grouped using devres group.  When a group is
149*4882a593Smuzhiyunreleased, all contained normal devres entries and properly nested
150*4882a593Smuzhiyungroups are released.  One usage is to roll back a series of acquired
151*4882a593Smuzhiyunresources on failure.  For example::
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun  if (!devres_open_group(dev, NULL, GFP_KERNEL))
154*4882a593Smuzhiyun	return -ENOMEM;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun  acquire A;
157*4882a593Smuzhiyun  if (failed)
158*4882a593Smuzhiyun	goto err;
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun  acquire B;
161*4882a593Smuzhiyun  if (failed)
162*4882a593Smuzhiyun	goto err;
163*4882a593Smuzhiyun  ...
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun  devres_remove_group(dev, NULL);
166*4882a593Smuzhiyun  return 0;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun err:
169*4882a593Smuzhiyun  devres_release_group(dev, NULL);
170*4882a593Smuzhiyun  return err_code;
171*4882a593Smuzhiyun
172*4882a593SmuzhiyunAs resource acquisition failure usually means probe failure, constructs
173*4882a593Smuzhiyunlike above are usually useful in a midlayer driver (e.g. libata core
174*4882a593Smuzhiyunlayer) where interface function shouldn't have side effect on failure.
175*4882a593SmuzhiyunFor LLDs, just returning error code suffices in most cases.
176*4882a593Smuzhiyun
177*4882a593SmuzhiyunEach group is identified by `void *id`.  It can either be explicitly
178*4882a593Smuzhiyunspecified by @id argument to devres_open_group() or automatically
179*4882a593Smuzhiyuncreated by passing NULL as @id as in the above example.  In both
180*4882a593Smuzhiyuncases, devres_open_group() returns the group's id.  The returned id
181*4882a593Smuzhiyuncan be passed to other devres functions to select the target group.
182*4882a593SmuzhiyunIf NULL is given to those functions, the latest open group is
183*4882a593Smuzhiyunselected.
184*4882a593Smuzhiyun
185*4882a593SmuzhiyunFor example, you can do something like the following::
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun  int my_midlayer_create_something()
188*4882a593Smuzhiyun  {
189*4882a593Smuzhiyun	if (!devres_open_group(dev, my_midlayer_create_something, GFP_KERNEL))
190*4882a593Smuzhiyun		return -ENOMEM;
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun	...
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun	devres_close_group(dev, my_midlayer_create_something);
195*4882a593Smuzhiyun	return 0;
196*4882a593Smuzhiyun  }
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun  void my_midlayer_destroy_something()
199*4882a593Smuzhiyun  {
200*4882a593Smuzhiyun	devres_release_group(dev, my_midlayer_create_something);
201*4882a593Smuzhiyun  }
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun4. Details
205*4882a593Smuzhiyun----------
206*4882a593Smuzhiyun
207*4882a593SmuzhiyunLifetime of a devres entry begins on devres allocation and finishes
208*4882a593Smuzhiyunwhen it is released or destroyed (removed and freed) - no reference
209*4882a593Smuzhiyuncounting.
210*4882a593Smuzhiyun
211*4882a593Smuzhiyundevres core guarantees atomicity to all basic devres operations and
212*4882a593Smuzhiyunhas support for single-instance devres types (atomic
213*4882a593Smuzhiyunlookup-and-add-if-not-found).  Other than that, synchronizing
214*4882a593Smuzhiyunconcurrent accesses to allocated devres data is caller's
215*4882a593Smuzhiyunresponsibility.  This is usually a non-issue because bus ops and
216*4882a593Smuzhiyunresource allocations already do the job.
217*4882a593Smuzhiyun
218*4882a593SmuzhiyunFor an example of single-instance devres type, read pcim_iomap_table()
219*4882a593Smuzhiyunin lib/devres.c.
220*4882a593Smuzhiyun
221*4882a593SmuzhiyunAll devres interface functions can be called without context if the
222*4882a593Smuzhiyunright gfp mask is given.
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun5. Overhead
226*4882a593Smuzhiyun-----------
227*4882a593Smuzhiyun
228*4882a593SmuzhiyunEach devres bookkeeping info is allocated together with requested data
229*4882a593Smuzhiyunarea.  With debug option turned off, bookkeeping info occupies 16
230*4882a593Smuzhiyunbytes on 32bit machines and 24 bytes on 64bit (three pointers rounded
231*4882a593Smuzhiyunup to ull alignment).  If singly linked list is used, it can be
232*4882a593Smuzhiyunreduced to two pointers (8 bytes on 32bit, 16 bytes on 64bit).
233*4882a593Smuzhiyun
234*4882a593SmuzhiyunEach devres group occupies 8 pointers.  It can be reduced to 6 if
235*4882a593Smuzhiyunsingly linked list is used.
236*4882a593Smuzhiyun
237*4882a593SmuzhiyunMemory space overhead on ahci controller with two ports is between 300
238*4882a593Smuzhiyunand 400 bytes on 32bit machine after naive conversion (we can
239*4882a593Smuzhiyuncertainly invest a bit more effort into libata core layer).
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun6. List of managed interfaces
243*4882a593Smuzhiyun-----------------------------
244*4882a593Smuzhiyun
245*4882a593SmuzhiyunCLOCK
246*4882a593Smuzhiyun  devm_clk_get()
247*4882a593Smuzhiyun  devm_clk_get_optional()
248*4882a593Smuzhiyun  devm_clk_put()
249*4882a593Smuzhiyun  devm_clk_bulk_get()
250*4882a593Smuzhiyun  devm_clk_bulk_get_all()
251*4882a593Smuzhiyun  devm_clk_bulk_get_optional()
252*4882a593Smuzhiyun  devm_get_clk_from_child()
253*4882a593Smuzhiyun  devm_clk_hw_register()
254*4882a593Smuzhiyun  devm_of_clk_add_hw_provider()
255*4882a593Smuzhiyun  devm_clk_hw_register_clkdev()
256*4882a593Smuzhiyun
257*4882a593SmuzhiyunDMA
258*4882a593Smuzhiyun  dmaenginem_async_device_register()
259*4882a593Smuzhiyun  dmam_alloc_coherent()
260*4882a593Smuzhiyun  dmam_alloc_attrs()
261*4882a593Smuzhiyun  dmam_free_coherent()
262*4882a593Smuzhiyun  dmam_pool_create()
263*4882a593Smuzhiyun  dmam_pool_destroy()
264*4882a593Smuzhiyun
265*4882a593SmuzhiyunDRM
266*4882a593Smuzhiyun  devm_drm_dev_alloc()
267*4882a593Smuzhiyun
268*4882a593SmuzhiyunGPIO
269*4882a593Smuzhiyun  devm_gpiod_get()
270*4882a593Smuzhiyun  devm_gpiod_get_array()
271*4882a593Smuzhiyun  devm_gpiod_get_array_optional()
272*4882a593Smuzhiyun  devm_gpiod_get_index()
273*4882a593Smuzhiyun  devm_gpiod_get_index_optional()
274*4882a593Smuzhiyun  devm_gpiod_get_optional()
275*4882a593Smuzhiyun  devm_gpiod_put()
276*4882a593Smuzhiyun  devm_gpiod_unhinge()
277*4882a593Smuzhiyun  devm_gpiochip_add_data()
278*4882a593Smuzhiyun  devm_gpio_request()
279*4882a593Smuzhiyun  devm_gpio_request_one()
280*4882a593Smuzhiyun  devm_gpio_free()
281*4882a593Smuzhiyun
282*4882a593SmuzhiyunI2C
283*4882a593Smuzhiyun  devm_i2c_new_dummy_device()
284*4882a593Smuzhiyun
285*4882a593SmuzhiyunIIO
286*4882a593Smuzhiyun  devm_iio_device_alloc()
287*4882a593Smuzhiyun  devm_iio_device_register()
288*4882a593Smuzhiyun  devm_iio_kfifo_allocate()
289*4882a593Smuzhiyun  devm_iio_triggered_buffer_setup()
290*4882a593Smuzhiyun  devm_iio_trigger_alloc()
291*4882a593Smuzhiyun  devm_iio_trigger_register()
292*4882a593Smuzhiyun  devm_iio_channel_get()
293*4882a593Smuzhiyun  devm_iio_channel_get_all()
294*4882a593Smuzhiyun
295*4882a593SmuzhiyunINPUT
296*4882a593Smuzhiyun  devm_input_allocate_device()
297*4882a593Smuzhiyun
298*4882a593SmuzhiyunIO region
299*4882a593Smuzhiyun  devm_release_mem_region()
300*4882a593Smuzhiyun  devm_release_region()
301*4882a593Smuzhiyun  devm_release_resource()
302*4882a593Smuzhiyun  devm_request_mem_region()
303*4882a593Smuzhiyun  devm_request_region()
304*4882a593Smuzhiyun  devm_request_resource()
305*4882a593Smuzhiyun
306*4882a593SmuzhiyunIOMAP
307*4882a593Smuzhiyun  devm_ioport_map()
308*4882a593Smuzhiyun  devm_ioport_unmap()
309*4882a593Smuzhiyun  devm_ioremap()
310*4882a593Smuzhiyun  devm_ioremap_uc()
311*4882a593Smuzhiyun  devm_ioremap_wc()
312*4882a593Smuzhiyun  devm_ioremap_resource() : checks resource, requests memory region, ioremaps
313*4882a593Smuzhiyun  devm_ioremap_resource_wc()
314*4882a593Smuzhiyun  devm_platform_ioremap_resource() : calls devm_ioremap_resource() for platform device
315*4882a593Smuzhiyun  devm_platform_ioremap_resource_wc()
316*4882a593Smuzhiyun  devm_platform_ioremap_resource_byname()
317*4882a593Smuzhiyun  devm_platform_get_and_ioremap_resource()
318*4882a593Smuzhiyun  devm_iounmap()
319*4882a593Smuzhiyun  pcim_iomap()
320*4882a593Smuzhiyun  pcim_iomap_regions()	: do request_region() and iomap() on multiple BARs
321*4882a593Smuzhiyun  pcim_iomap_table()	: array of mapped addresses indexed by BAR
322*4882a593Smuzhiyun  pcim_iounmap()
323*4882a593Smuzhiyun
324*4882a593SmuzhiyunIRQ
325*4882a593Smuzhiyun  devm_free_irq()
326*4882a593Smuzhiyun  devm_request_any_context_irq()
327*4882a593Smuzhiyun  devm_request_irq()
328*4882a593Smuzhiyun  devm_request_threaded_irq()
329*4882a593Smuzhiyun  devm_irq_alloc_descs()
330*4882a593Smuzhiyun  devm_irq_alloc_desc()
331*4882a593Smuzhiyun  devm_irq_alloc_desc_at()
332*4882a593Smuzhiyun  devm_irq_alloc_desc_from()
333*4882a593Smuzhiyun  devm_irq_alloc_descs_from()
334*4882a593Smuzhiyun  devm_irq_alloc_generic_chip()
335*4882a593Smuzhiyun  devm_irq_setup_generic_chip()
336*4882a593Smuzhiyun  devm_irq_sim_init()
337*4882a593Smuzhiyun
338*4882a593SmuzhiyunLED
339*4882a593Smuzhiyun  devm_led_classdev_register()
340*4882a593Smuzhiyun  devm_led_classdev_unregister()
341*4882a593Smuzhiyun
342*4882a593SmuzhiyunMDIO
343*4882a593Smuzhiyun  devm_mdiobus_alloc()
344*4882a593Smuzhiyun  devm_mdiobus_alloc_size()
345*4882a593Smuzhiyun  devm_mdiobus_register()
346*4882a593Smuzhiyun  devm_of_mdiobus_register()
347*4882a593Smuzhiyun
348*4882a593SmuzhiyunMEM
349*4882a593Smuzhiyun  devm_free_pages()
350*4882a593Smuzhiyun  devm_get_free_pages()
351*4882a593Smuzhiyun  devm_kasprintf()
352*4882a593Smuzhiyun  devm_kcalloc()
353*4882a593Smuzhiyun  devm_kfree()
354*4882a593Smuzhiyun  devm_kmalloc()
355*4882a593Smuzhiyun  devm_kmalloc_array()
356*4882a593Smuzhiyun  devm_kmemdup()
357*4882a593Smuzhiyun  devm_krealloc()
358*4882a593Smuzhiyun  devm_kstrdup()
359*4882a593Smuzhiyun  devm_kvasprintf()
360*4882a593Smuzhiyun  devm_kzalloc()
361*4882a593Smuzhiyun
362*4882a593SmuzhiyunMFD
363*4882a593Smuzhiyun  devm_mfd_add_devices()
364*4882a593Smuzhiyun
365*4882a593SmuzhiyunMUX
366*4882a593Smuzhiyun  devm_mux_chip_alloc()
367*4882a593Smuzhiyun  devm_mux_chip_register()
368*4882a593Smuzhiyun  devm_mux_control_get()
369*4882a593Smuzhiyun
370*4882a593SmuzhiyunNET
371*4882a593Smuzhiyun  devm_alloc_etherdev()
372*4882a593Smuzhiyun  devm_alloc_etherdev_mqs()
373*4882a593Smuzhiyun  devm_register_netdev()
374*4882a593Smuzhiyun
375*4882a593SmuzhiyunPER-CPU MEM
376*4882a593Smuzhiyun  devm_alloc_percpu()
377*4882a593Smuzhiyun  devm_free_percpu()
378*4882a593Smuzhiyun
379*4882a593SmuzhiyunPCI
380*4882a593Smuzhiyun  devm_pci_alloc_host_bridge()  : managed PCI host bridge allocation
381*4882a593Smuzhiyun  devm_pci_remap_cfgspace()	: ioremap PCI configuration space
382*4882a593Smuzhiyun  devm_pci_remap_cfg_resource()	: ioremap PCI configuration space resource
383*4882a593Smuzhiyun  pcim_enable_device()		: after success, all PCI ops become managed
384*4882a593Smuzhiyun  pcim_pin_device()		: keep PCI device enabled after release
385*4882a593Smuzhiyun
386*4882a593SmuzhiyunPHY
387*4882a593Smuzhiyun  devm_usb_get_phy()
388*4882a593Smuzhiyun  devm_usb_put_phy()
389*4882a593Smuzhiyun
390*4882a593SmuzhiyunPINCTRL
391*4882a593Smuzhiyun  devm_pinctrl_get()
392*4882a593Smuzhiyun  devm_pinctrl_put()
393*4882a593Smuzhiyun  devm_pinctrl_register()
394*4882a593Smuzhiyun  devm_pinctrl_unregister()
395*4882a593Smuzhiyun
396*4882a593SmuzhiyunPOWER
397*4882a593Smuzhiyun  devm_reboot_mode_register()
398*4882a593Smuzhiyun  devm_reboot_mode_unregister()
399*4882a593Smuzhiyun
400*4882a593SmuzhiyunPWM
401*4882a593Smuzhiyun  devm_pwm_get()
402*4882a593Smuzhiyun  devm_pwm_put()
403*4882a593Smuzhiyun
404*4882a593SmuzhiyunREGULATOR
405*4882a593Smuzhiyun  devm_regulator_bulk_get()
406*4882a593Smuzhiyun  devm_regulator_get()
407*4882a593Smuzhiyun  devm_regulator_put()
408*4882a593Smuzhiyun  devm_regulator_register()
409*4882a593Smuzhiyun
410*4882a593SmuzhiyunRESET
411*4882a593Smuzhiyun  devm_reset_control_get()
412*4882a593Smuzhiyun  devm_reset_controller_register()
413*4882a593Smuzhiyun
414*4882a593SmuzhiyunSERDEV
415*4882a593Smuzhiyun  devm_serdev_device_open()
416*4882a593Smuzhiyun
417*4882a593SmuzhiyunSLAVE DMA ENGINE
418*4882a593Smuzhiyun  devm_acpi_dma_controller_register()
419*4882a593Smuzhiyun
420*4882a593SmuzhiyunSPI
421*4882a593Smuzhiyun  devm_spi_register_master()
422*4882a593Smuzhiyun
423*4882a593SmuzhiyunWATCHDOG
424*4882a593Smuzhiyun  devm_watchdog_register_device()
425