/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01	/* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02	/* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03	/* No sw protection; caller must serialize (see *_raw helpers) */
#define HWLOCK_IN_ATOMIC	0x04	/* Called while in atomic context */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
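
/*
 * Example: a minimal sketch of how a board with two hwspinlock devices
 * could hand out non-overlapping base ids through this platform data.
 * The variable names and the 8-lock size of the first device are
 * illustrative assumptions, not requirements of this header.
 *
 *	static struct hwspinlock_pdata first_bank_pdata = {
 *		.base_id = 0,		// ids 0..7
 *	};
 *
 *	static struct hwspinlock_pdata second_bank_pdata = {
 *		.base_id = 8,		// ids 8 and up
 *	};
 */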

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
			  unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks);

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, users of this API will still build and run.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (there is no point in registering
 * hwspinlock instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for users that
 * only NULL-check the result. Users that do care can still detect the
 * error with IS_ERR().
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}

static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */
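
/*
 * Example: a minimal sketch of a typical consumer flow, assuming the lock
 * id is described by the first "hwlocks" entry of the consumer's device
 * tree node. The 100 msec timeout and the -EBUSY fallback below are
 * illustrative choices, not values mandated by this header.
 *
 *	static int example_touch_shared_resource(struct device_node *np)
 *	{
 *		struct hwspinlock *hwlock;
 *		int id, ret;
 *
 *		id = of_hwspin_lock_get_id(np, 0);
 *		if (id < 0)
 *			return id;
 *
 *		hwlock = hwspin_lock_request_specific(id);
 *		if (!hwlock)
 *			return -EBUSY;
 *
 *		ret = hwspin_lock_timeout(hwlock, 100);
 *		if (!ret) {
 *			// ... access the shared resource ...
 *			hwspin_unlock(hwlock);
 *		}
 *
 *		hwspin_lock_free(hwlock);
 *		return ret;
 *	}
 */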

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
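
/*
 * Example: a minimal sketch of pairing hwspin_trylock_irqsave() with
 * hwspin_unlock_irqrestore(); @hwlock is assumed to have been requested
 * beforehand, and the register write done under the lock is illustrative.
 *
 *	static int example_poke_shared_reg(struct hwspinlock *hwlock,
 *					   void __iomem *reg, u32 val)
 *	{
 *		unsigned long flags;
 *		int ret;
 *
 *		ret = hwspin_trylock_irqsave(hwlock, &flags);
 *		if (ret)
 *			return ret;	// -EBUSY or -EINVAL
 *
 *		writel(val, reg);	// short, non-sleeping critical section
 *
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *		return 0;
 *	}
 */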

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the user must protect the routine that takes the hardware lock
 * with a mutex or spinlock to avoid deadlock; doing so lets the user perform
 * time-consuming or sleepable operations while holding the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus the previous interrupt state is saved), so the caller
 * must not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
					      unsigned int to,
					      unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the user must protect the routine that takes the hardware lock
 * with a mutex or spinlock to avoid deadlock; doing so lets the user perform
 * time-consuming or sleepable operations while holding the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}
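
/*
 * Example: a minimal sketch of the serialization the caution above asks
 * for: a driver-private mutex makes local callers of the raw helpers
 * mutually exclusive, so the (possibly sleeping) work done while holding
 * the hardware lock cannot deadlock against another local caller. The
 * mutex, the 50 msec timeout and the callback are illustrative.
 *
 *	static DEFINE_MUTEX(example_hwlock_mutex);
 *
 *	static int example_raw_section(struct hwspinlock *hwlock,
 *				       int (*update)(void *data), void *data)
 *	{
 *		int ret;
 *
 *		mutex_lock(&example_hwlock_mutex);
 *		ret = hwspin_lock_timeout_raw(hwlock, 50);
 *		if (!ret) {
 *			ret = update(data);	// may sleep in raw mode
 *			hwspin_unlock_raw(hwlock);
 *		}
 *		mutex_unlock(&example_hwlock_mutex);
 *
 *		return ret;
 *	}
 */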

/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context and the timeout
 * value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
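
/*
 * Example: a minimal sketch of using the _in_atomic variant from a context
 * that is already atomic (here, under a driver-private spinlock); the
 * spinlock, the 2 msec timeout and the register write are illustrative
 * assumptions.
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	static int example_atomic_update(struct hwspinlock *hwlock,
 *					 void __iomem *reg, u32 val)
 *	{
 *		int ret;
 *
 *		spin_lock(&example_lock);	// now in atomic context
 *		ret = hwspin_lock_timeout_in_atomic(hwlock, 2);
 *		if (!ret) {
 *			writel(val, reg);
 *			hwspin_unlock_in_atomic(hwlock);
 *		}
 *		spin_unlock(&example_lock);
 *
 *		return ret;
 *	}
 */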

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					    unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo hwspin_trylock_irq() or
 * hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */