// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Reset Controller framework
 *
 * Copyright 2013 Philipp Zabel, Pengutronix
 */
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);

static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);

/**
 * struct reset_control - a reset control
 * @rcdev: a pointer to the reset controller device
 *         this reset control belongs to
 * @list: list entry for the rcdev's reset controller list
 * @id: ID of the reset controller in the reset
 *      controller device
 * @refcnt: Number of gets of this reset_control
 * @acquired: Only one reset_control may be acquired for a given rcdev and id.
 * @shared: Is this a shared (1), or an exclusive (0) reset_control?
 * @array: Is this an array of reset controls (1)?
 * @deassert_count: Number of times this reset line has been deasserted
 * @triggered_count: Number of times this reset line has been reset. Currently
 *                   only used for shared resets, which means that the value
 *                   will be either 0 or 1.
 */
struct reset_control {
	struct reset_controller_dev *rcdev;
	struct list_head list;
	unsigned int id;
	struct kref refcnt;
	bool acquired;
	bool shared;
	bool array;
	atomic_t deassert_count;
	atomic_t triggered_count;
};

/**
 * struct reset_control_array - an array of reset controls
 * @base: reset control for compatibility with reset control API functions
 * @num_rstcs: number of reset controls
 * @rstc: array of reset controls
 */
struct reset_control_array {
	struct reset_control base;
	unsigned int num_rstcs;
	struct reset_control *rstc[];
};

static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
	if (rcdev->dev)
		return dev_name(rcdev->dev);

	if (rcdev->of_node)
		return rcdev->of_node->full_name;

	return NULL;
}

/**
 * of_reset_simple_xlate - translate reset_spec to the reset line number
 * @rcdev: a pointer to the reset controller device
 * @reset_spec: reset line specifier as found in the device tree
 *
 * This static translation function is used by default if of_xlate in
 * :c:type:`reset_controller_dev` is not set. It is useful for all reset
 * controllers with 1:1 mapping, where reset lines can be indexed by number
 * without gaps.
 */
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
			  const struct of_phandle_args *reset_spec)
{
	if (reset_spec->args[0] >= rcdev->nr_resets)
		return -EINVAL;

	return reset_spec->args[0];
}

/**
 * reset_controller_register - register a reset controller device
 * @rcdev: a pointer to the initialized reset controller device
 */
int reset_controller_register(struct reset_controller_dev *rcdev)
{
	if (!rcdev->of_xlate) {
		rcdev->of_reset_n_cells = 1;
		rcdev->of_xlate = of_reset_simple_xlate;
	}

	INIT_LIST_HEAD(&rcdev->reset_control_head);

	mutex_lock(&reset_list_mutex);
	list_add(&rcdev->list, &reset_controller_list);
	mutex_unlock(&reset_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(reset_controller_register);

/**
 * reset_controller_unregister - unregister a reset controller device
 * @rcdev: a pointer to the reset controller device
 */
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
	mutex_lock(&reset_list_mutex);
	list_del(&rcdev->list);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);

static void devm_reset_controller_release(struct device *dev, void *res)
{
	reset_controller_unregister(*(struct reset_controller_dev **)res);
}

/**
 * devm_reset_controller_register - resource managed reset_controller_register()
 * @dev: device that is registering this reset controller
 * @rcdev: a pointer to the initialized reset controller device
 *
 * Managed reset_controller_register(). For reset controllers registered by
 * this function, reset_controller_unregister() is automatically called on
 * driver detach. See reset_controller_register() for more information.
 */
int devm_reset_controller_register(struct device *dev,
				   struct reset_controller_dev *rcdev)
{
	struct reset_controller_dev **rcdevp;
	int ret;

	rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
			      GFP_KERNEL);
	if (!rcdevp)
		return -ENOMEM;

	ret = reset_controller_register(rcdev);
	if (ret) {
		devres_free(rcdevp);
		return ret;
	}

	*rcdevp = rcdev;
	devres_add(dev, rcdevp);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);
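
/*
 * Illustrative provider-side sketch (not part of this file): a hypothetical
 * platform driver registering a reset controller that relies on the default
 * of_reset_simple_xlate() translation (no .of_xlate, one reset cell). The
 * "foo" names and callbacks are assumptions made up for this example.
 *
 *	struct foo_reset {
 *		struct reset_controller_dev rcdev;
 *		void __iomem *base;
 *	};
 *
 *	static const struct reset_control_ops foo_reset_ops = {
 *		.assert		= foo_reset_assert,
 *		.deassert	= foo_reset_deassert,
 *	};
 *
 *	static int foo_reset_probe(struct platform_device *pdev)
 *	{
 *		struct foo_reset *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->rcdev.owner = THIS_MODULE;
 *		priv->rcdev.ops = &foo_reset_ops;
 *		priv->rcdev.of_node = pdev->dev.of_node;
 *		priv->rcdev.nr_resets = 8;
 *
 *		return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
 *	}
 */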

/**
 * reset_controller_add_lookup - register a set of lookup entries
 * @lookup: array of reset lookup entries
 * @num_entries: number of entries in the lookup array
 */
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
				 unsigned int num_entries)
{
	struct reset_control_lookup *entry;
	unsigned int i;

	mutex_lock(&reset_lookup_mutex);
	for (i = 0; i < num_entries; i++) {
		entry = &lookup[i];

		if (!entry->dev_id || !entry->provider) {
			pr_warn("%s(): reset lookup entry badly specified, skipping\n",
				__func__);
			continue;
		}

		list_add_tail(&entry->list, &reset_lookup_list);
	}
	mutex_unlock(&reset_lookup_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
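
/*
 * Illustrative sketch (not part of this file): a board file describing reset
 * lines for non-DT consumers, assuming the RESET_LOOKUP() helper from
 * <linux/reset-controller.h>. The provider and consumer device names are
 * made up for the example.
 *
 *	static struct reset_control_lookup foo_reset_lookup[] = {
 *		RESET_LOOKUP("foo-reset-controller", 0, "foo-usb", "usb"),
 *		RESET_LOOKUP("foo-reset-controller", 3, "foo-mmc", NULL),
 *	};
 *
 *	reset_controller_add_lookup(foo_reset_lookup,
 *				    ARRAY_SIZE(foo_reset_lookup));
 */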

static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc) {
	return container_of(rstc, struct reset_control_array, base);
}

static int reset_control_array_reset(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_reset(resets->rstc[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int reset_control_array_assert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_assert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_deassert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_deassert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_deassert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_assert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_acquire(struct reset_control_array *resets)
{
	unsigned int i;
	int err;

	for (i = 0; i < resets->num_rstcs; i++) {
		err = reset_control_acquire(resets->rstc[i]);
		if (err < 0)
			goto release;
	}

	return 0;

release:
	while (i--)
		reset_control_release(resets->rstc[i]);

	return err;
}

static void reset_control_array_release(struct reset_control_array *resets)
{
	unsigned int i;

	for (i = 0; i < resets->num_rstcs; i++)
		reset_control_release(resets->rstc[i]);
}

static inline bool reset_control_is_array(struct reset_control *rstc)
{
	return rstc->array;
}

/**
 * reset_control_reset - reset the controlled device
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance: for all but the first caller this is
 * a no-op.
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_reset(struct reset_control *rstc)
{
	int ret;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_reset(rstc_to_array(rstc));

	if (!rstc->rcdev->ops->reset)
		return -ENOTSUPP;

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->triggered_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
	if (rstc->shared && ret)
		atomic_dec(&rstc->triggered_count);

	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
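
/*
 * Illustrative consumer-side sketch (not part of this file): pulsing a
 * dedicated, self-deasserting reset line from a hypothetical probe path.
 * The device and the NULL connection id are assumptions for the example.
 *
 *	struct reset_control *rstc;
 *	int err;
 *
 *	rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	err = reset_control_reset(rstc);
 *	if (err)
 *		return err;
 *
 * With a control obtained via devm_reset_control_get_shared() instead, the
 * same call only guarantees that the line has been triggered at least once
 * since the reset_control was created.
 */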

/**
 * reset_control_assert - asserts the reset line
 * @rstc: reset controller
 *
 * Calling this on an exclusive reset controller guarantees that the reset
 * will be asserted. When called on a shared reset controller the line may
 * still be deasserted, as long as other users keep it so.
 *
 * For shared reset controls a driver cannot expect the hw's registers and
 * internal state to be reset, but must be prepared for this to happen.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_assert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_assert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
			return -EINVAL;

		if (atomic_dec_return(&rstc->deassert_count) != 0)
			return 0;

		/*
		 * Shared reset controls allow the reset line to be in any state
		 * after this call, so doing nothing is a valid option.
		 */
		if (!rstc->rcdev->ops->assert)
			return 0;
	} else {
		/*
		 * If the reset controller does not implement .assert(), there
		 * is no way to guarantee that the reset line is asserted after
		 * this call.
		 */
		if (!rstc->rcdev->ops->assert)
			return -ENOTSUPP;

		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);

/**
 * reset_control_deassert - deasserts the reset line
 * @rstc: reset controller
 *
 * After calling this function, the reset is guaranteed to be deasserted.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_deassert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_deassert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->deassert_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	/*
	 * If the reset controller does not implement .deassert(), we assume
	 * that it handles self-deasserting reset lines via .reset(). In that
	 * case, the reset lines are deasserted by default. If that is not the
	 * case, the reset controller driver should implement .deassert() and
	 * return -ENOTSUPP.
	 */
	if (!rstc->rcdev->ops->deassert)
		return 0;

	return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
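
/*
 * Illustrative consumer-side sketch (not part of this file): the usual
 * level-controlled pairing on a shared line, which stays deasserted for as
 * long as at least one consumer needs the device out of reset. The "bus"
 * connection id and the probe context are assumptions for the example.
 *
 *	rstc = devm_reset_control_get_shared(&pdev->dev, "bus");
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	err = reset_control_deassert(rstc);
 *	if (err)
 *		return err;
 *
 * and, once the consumer no longer needs the device (remove() or an error
 * path), the matching:
 *
 *	reset_control_assert(rstc);
 */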

/**
 * reset_control_status - returns a negative errno if not supported, a
 * positive value if the reset line is asserted, or zero if the reset
 * line is not asserted or if the desc is NULL (optional reset).
 * @rstc: reset controller
 */
int reset_control_status(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
		return -EINVAL;

	if (rstc->rcdev->ops->status)
		return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(reset_control_status);
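
/*
 * Illustrative sketch (not part of this file): reading back the line state,
 * treating -ENOTSUPP as "unknown". Whether a meaningful status is returned
 * depends entirely on the provider implementing the .status() callback.
 *
 *	err = reset_control_status(rstc);
 *	if (err > 0)
 *		dev_dbg(dev, "reset line is asserted\n");
 *	else if (err == 0)
 *		dev_dbg(dev, "reset line is deasserted\n");
 *	else
 *		dev_dbg(dev, "reset line status unavailable: %d\n", err);
 */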

/**
 * reset_control_acquire() - acquires a reset control for exclusive use
 * @rstc: reset control
 *
 * This is used to explicitly acquire a reset control for exclusive use. Note
 * that exclusive resets are requested as acquired by default. In order for a
 * second consumer to be able to control the reset, the first consumer has to
 * release it first. Typically the easiest way to achieve this is to call
 * reset_control_get_exclusive_released() to obtain an instance of the reset
 * control. Such reset controls are not acquired by default.
 *
 * Consumers implementing shared access to an exclusive reset need to follow
 * a specific protocol in order to work together. Before consumers can change
 * a reset they must acquire exclusive access using reset_control_acquire().
 * After they are done operating the reset, they must release exclusive access
 * with a call to reset_control_release(). Consumers are not granted exclusive
 * access to the reset as long as another consumer hasn't released it.
 *
 * See also: reset_control_release()
 */
int reset_control_acquire(struct reset_control *rstc)
{
	struct reset_control *rc;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_acquire(rstc_to_array(rstc));

	mutex_lock(&reset_list_mutex);

	if (rstc->acquired) {
		mutex_unlock(&reset_list_mutex);
		return 0;
	}

	list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
		if (rstc != rc && rstc->id == rc->id) {
			if (rc->acquired) {
				mutex_unlock(&reset_list_mutex);
				return -EBUSY;
			}
		}
	}

	rstc->acquired = true;

	mutex_unlock(&reset_list_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);

/**
 * reset_control_release() - releases exclusive access to a reset control
 * @rstc: reset control
 *
 * Releases exclusive access right to a reset control previously obtained by a
 * call to reset_control_acquire(). Until a consumer calls this function, no
 * other consumers will be granted exclusive access.
 *
 * See also: reset_control_acquire()
 */
void reset_control_release(struct reset_control *rstc)
{
	if (!rstc || WARN_ON(IS_ERR(rstc)))
		return;

	if (reset_control_is_array(rstc))
		reset_control_array_release(rstc_to_array(rstc));
	else
		rstc->acquired = false;
}
EXPORT_SYMBOL_GPL(reset_control_release);
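
/*
 * Illustrative consumer-side sketch (not part of this file): the
 * acquire/release protocol described above, with the control requested in
 * the released state. The NULL connection id and probe context are
 * assumptions for the example.
 *
 *	rstc = devm_reset_control_get_exclusive_released(&pdev->dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	err = reset_control_acquire(rstc);
 *	if (err)
 *		return err;
 *
 *	err = reset_control_assert(rstc);
 *	...
 *	err = reset_control_deassert(rstc);
 *
 *	reset_control_release(rstc);
 *
 * While one consumer holds the acquired control, reset_control_acquire() on
 * another consumer's control for the same line fails with -EBUSY.
 */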

static struct reset_control *__reset_control_get_internal(
				struct reset_controller_dev *rcdev,
				unsigned int index, bool shared, bool acquired)
{
	struct reset_control *rstc;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
		if (rstc->id == index) {
			/*
			 * Allow creating a secondary exclusive reset_control
			 * that is initially not acquired for an already
			 * controlled reset line.
			 */
			if (!rstc->shared && !shared && !acquired)
				break;

			if (WARN_ON(!rstc->shared || !shared))
				return ERR_PTR(-EBUSY);

			kref_get(&rstc->refcnt);
			return rstc;
		}
	}

	rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
	if (!rstc)
		return ERR_PTR(-ENOMEM);

	if (!try_module_get(rcdev->owner)) {
		kfree(rstc);
		return ERR_PTR(-ENODEV);
	}

	rstc->rcdev = rcdev;
	list_add(&rstc->list, &rcdev->reset_control_head);
	rstc->id = index;
	kref_init(&rstc->refcnt);
	rstc->acquired = acquired;
	rstc->shared = shared;

	return rstc;
}

static void __reset_control_release(struct kref *kref)
{
	struct reset_control *rstc = container_of(kref, struct reset_control,
						  refcnt);

	lockdep_assert_held(&reset_list_mutex);

	module_put(rstc->rcdev->owner);

	list_del(&rstc->list);
	kfree(rstc);
}

static void __reset_control_put_internal(struct reset_control *rstc)
{
	lockdep_assert_held(&reset_list_mutex);

	kref_put(&rstc->refcnt, __reset_control_release);
}

struct reset_control *__of_reset_control_get(struct device_node *node,
				     const char *id, int index, bool shared,
				     bool optional, bool acquired)
{
	struct reset_control *rstc;
	struct reset_controller_dev *r, *rcdev;
	struct of_phandle_args args;
	int rstc_id;
	int ret;

	if (!node)
		return ERR_PTR(-EINVAL);

	if (id) {
		index = of_property_match_string(node,
						 "reset-names", id);
		if (index == -EILSEQ)
			return ERR_PTR(index);
		if (index < 0)
			return optional ? NULL : ERR_PTR(-ENOENT);
	}

	ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
					 index, &args);
	if (ret == -EINVAL)
		return ERR_PTR(ret);
	if (ret)
		return optional ? NULL : ERR_PTR(ret);

	mutex_lock(&reset_list_mutex);
	rcdev = NULL;
	list_for_each_entry(r, &reset_controller_list, list) {
		if (args.np == r->of_node) {
			rcdev = r;
			break;
		}
	}

	if (!rcdev) {
		rstc = ERR_PTR(-EPROBE_DEFER);
		goto out;
	}

	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
		rstc = ERR_PTR(-EINVAL);
		goto out;
	}

	rstc_id = rcdev->of_xlate(rcdev, &args);
	if (rstc_id < 0) {
		rstc = ERR_PTR(rstc_id);
		goto out;
	}

	/* reset_list_mutex also protects the rcdev's reset_control list */
	rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);

out:
	mutex_unlock(&reset_list_mutex);
	of_node_put(args.np);

	return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);

static struct reset_controller_dev *
__reset_controller_by_name(const char *name)
{
	struct reset_controller_dev *rcdev;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rcdev, &reset_controller_list, list) {
		if (!rcdev->dev)
			continue;

		if (!strcmp(name, dev_name(rcdev->dev)))
			return rcdev;
	}

	return NULL;
}

static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
				bool shared, bool optional, bool acquired)
{
	const struct reset_control_lookup *lookup;
	struct reset_controller_dev *rcdev;
	const char *dev_id = dev_name(dev);
	struct reset_control *rstc = NULL;

	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (strcmp(lookup->dev_id, dev_id))
			continue;

		if ((!con_id && !lookup->con_id) ||
		    ((con_id && lookup->con_id) &&
		     !strcmp(con_id, lookup->con_id))) {
			mutex_lock(&reset_list_mutex);
			rcdev = __reset_controller_by_name(lookup->provider);
			if (!rcdev) {
				mutex_unlock(&reset_list_mutex);
				mutex_unlock(&reset_lookup_mutex);
				/* Reset provider may not be ready yet. */
				return ERR_PTR(-EPROBE_DEFER);
			}

			rstc = __reset_control_get_internal(rcdev,
							    lookup->index,
							    shared, acquired);
			mutex_unlock(&reset_list_mutex);
			break;
		}
	}

	mutex_unlock(&reset_lookup_mutex);

	if (!rstc)
		return optional ? NULL : ERR_PTR(-ENOENT);

	return rstc;
}

struct reset_control *__reset_control_get(struct device *dev, const char *id,
					  int index, bool shared, bool optional,
					  bool acquired)
{
	if (WARN_ON(shared && acquired))
		return ERR_PTR(-EINVAL);

	if (dev->of_node)
		return __of_reset_control_get(dev->of_node, id, index, shared,
					      optional, acquired);

	return __reset_control_get_from_lookup(dev, id, shared, optional,
					       acquired);
}
EXPORT_SYMBOL_GPL(__reset_control_get);

static void reset_control_array_put(struct reset_control_array *resets)
{
	int i;

	mutex_lock(&reset_list_mutex);
	for (i = 0; i < resets->num_rstcs; i++)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);
	kfree(resets);
}

/**
 * reset_control_put - free the reset controller
 * @rstc: reset controller
 */
void reset_control_put(struct reset_control *rstc)
{
	if (IS_ERR_OR_NULL(rstc))
		return;

	if (reset_control_is_array(rstc)) {
		reset_control_array_put(rstc_to_array(rstc));
		return;
	}

	mutex_lock(&reset_list_mutex);
	__reset_control_put_internal(rstc);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);

static void devm_reset_control_release(struct device *dev, void *res)
{
	reset_control_put(*(struct reset_control **)res);
}

struct reset_control *__devm_reset_control_get(struct device *dev,
				     const char *id, int index, bool shared,
				     bool optional, bool acquired)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);
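
/*
 * Note (not part of this file): consumers normally do not call
 * __devm_reset_control_get() directly but use the static inline wrappers
 * declared in <linux/reset.h>. From memory of that header, the common ones
 * expand roughly as follows (treat the exact flag values as an assumption):
 *
 *	devm_reset_control_get_exclusive(dev, id)
 *		-> __devm_reset_control_get(dev, id, 0, false, false, true)
 *	devm_reset_control_get_shared(dev, id)
 *		-> __devm_reset_control_get(dev, id, 0, true, false, false)
 *	devm_reset_control_get_optional_exclusive(dev, id)
 *		-> __devm_reset_control_get(dev, id, 0, false, true, true)
 */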

/**
 * device_reset - find reset controller associated with the device
 *                and perform reset
 * @dev: device to be reset by the controller
 * @optional: whether it is optional to reset the device
 *
 * Convenience wrapper for __reset_control_get() and reset_control_reset().
 * This is useful for the common case of devices with single, dedicated reset
 * lines.
 */
int __device_reset(struct device *dev, bool optional)
{
	struct reset_control *rstc;
	int ret;

	rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);

	reset_control_put(rstc);

	return ret;
}
EXPORT_SYMBOL_GPL(__device_reset);
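
/*
 * Illustrative sketch (not part of this file): the one-line consumer case
 * this wrapper exists for, using the device_reset() helper from
 * <linux/reset.h> (device_reset_optional() is the optional variant). The
 * probe context is an assumption for the example.
 *
 *	err = device_reset(&pdev->dev);
 *	if (err)
 *		return err;
 */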

/*
 * APIs to manage an array of reset controls.
 */

/**
 * of_reset_control_get_count - Count number of resets available with a device
 *
 * @node: device node that contains 'resets'.
 *
 * Returns positive reset count on success, or error number on failure and
 * on count being zero.
 */
static int of_reset_control_get_count(struct device_node *node)
{
	int count;

	if (!node)
		return -EINVAL;

	count = of_count_phandle_with_args(node, "resets", "#reset-cells");
	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * of_reset_control_array_get - Get a list of reset controls using
 *				device node.
 *
 * @np: device node for the device that requests the reset controls array
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 * @acquired: only one reset control may be acquired for a given controller
 *            and ID
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
			   bool acquired)
{
	struct reset_control_array *resets;
	struct reset_control *rstc;
	int num, i;

	num = of_reset_control_get_count(np);
	if (num < 0)
		return optional ? NULL : ERR_PTR(num);

	resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
	if (!resets)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num; i++) {
		rstc = __of_reset_control_get(np, NULL, i, shared, optional,
					      acquired);
		if (IS_ERR(rstc))
			goto err_rst;
		resets->rstc[i] = rstc;
	}
	resets->num_rstcs = num;
	resets->base.array = true;

	return &resets->base;

err_rst:
	mutex_lock(&reset_list_mutex);
	while (--i >= 0)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);

	kfree(resets);

	return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);

/**
 * devm_reset_control_array_get - Resource managed reset control array get
 *
 * @dev: device that requests the list of reset controls
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 *
 * The reset control array APIs are intended for a list of resets
 * that just have to be asserted or deasserted, without any
 * requirements on the order.
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
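
/*
 * Illustrative consumer-side sketch (not part of this file): grabbing all
 * resets listed in the consumer's "resets" property as a single handle and
 * deasserting them together, when the order does not matter. The probe
 * context is an assumption for the example.
 *
 *	struct reset_control *rstc;
 *	int err;
 *
 *	rstc = devm_reset_control_array_get(&pdev->dev, false, false);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	err = reset_control_deassert(rstc);
 *	if (err)
 *		return err;
 */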

static int reset_control_get_count_from_lookup(struct device *dev)
{
	const struct reset_control_lookup *lookup;
	const char *dev_id;
	int count = 0;

	if (!dev)
		return -EINVAL;

	dev_id = dev_name(dev);
	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (!strcmp(lookup->dev_id, dev_id))
			count++;
	}

	mutex_unlock(&reset_lookup_mutex);

	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * reset_control_get_count - Count number of resets available with a device
 *
 * @dev: device for which to return the number of resets
 *
 * Returns positive reset count on success, or error number on failure and
 * on count being zero.
 */
int reset_control_get_count(struct device *dev)
{
	if (dev->of_node)
		return of_reset_control_get_count(dev->of_node);

	return reset_control_get_count_from_lookup(dev);
}
EXPORT_SYMBOL_GPL(reset_control_get_count);