/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)
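
/*
 * Illustrative sketch (not part of the original header): with this split,
 * the low 16 bits of current->lockdep_recursion count lockdep's internal
 * recursion, while each lockdep_off() call adds LOCKDEP_OFF (0x10000) to
 * the high bits. For example, after two lockdep_off() calls:
 *
 *	current->lockdep_recursion == 0x20000;
 *	(current->lockdep_recursion & LOCKDEP_RECURSION_MASK) == 0;
 *
 * so "lockdep switched off" and "lockdep recursing" stay distinguishable
 * within a single counter.
 */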

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
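
/*
 * Illustrative sketch (not from the original header): a subsystem whose
 * locks would otherwise be lumped into one overly-broad class by a generic
 * initializer can move them into a class of their own after init. The
 * "mydev" structure and key below are hypothetical:
 *
 *	static struct lock_class_key mydev_lock_key;
 *
 *	static void mydev_init(struct mydev *dev)
 *	{
 *		spin_lock_init(&dev->lock);
 *		lockdep_set_class(&dev->lock, &mydev_lock_key);
 *	}
 */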

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
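
/*
 * Illustrative sketch (not from the original header): a locking primitive
 * reports a non-try, exclusive (read=0), fully-checked (check=1)
 * acquisition before taking the lock, and the release before dropping it.
 * The "mylock" type and its raw helpers are hypothetical:
 *
 *	void mylock_lock(struct mylock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		do_raw_mylock_lock(l);
 *	}
 *
 *	void mylock_unlock(struct mylock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		do_raw_mylock_unlock(l);
 *	}
 */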

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
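
/*
 * Illustrative sketch (not from the original header): functions that rely
 * on a caller-held lock can document and enforce that precondition, e.g.
 * for a hypothetical list protected by dev->lock:
 *
 *	static void mydev_add_entry(struct mydev *dev, struct entry *e)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		list_add(&e->node, &dev->entries);
 *	}
 *
 * With lockdep enabled this warns if dev->lock is not held; without
 * lockdep it compiles away (see the !CONFIG_LOCKDEP stubs below).
 */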

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
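
/*
 * Illustrative sketch (not from the original header): pinning lets a
 * holder assert that a lock is not dropped and re-acquired behind its
 * back over some region, as the scheduler does with the runqueue lock.
 * Unpinning requires the cookie returned by the pin:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	... call code that must not release rq->lock ...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */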

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should rather #ifdef the call site.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
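
/*
 * Illustrative sketch (not from the original header): a statically
 * initialized map is typically paired with a static key, e.g. for an
 * annotation-only dependency map with a hypothetical name:
 *
 *	static struct lock_class_key mymap_key;
 *	static struct lockdep_map mymap =
 *		STATIC_LOCKDEP_MAP_INIT("mymap", &mymap_key);
 *
 * It can then be used with lock_map_acquire()/lock_map_release()
 * (defined further below) to teach lockdep about the dependency.
 */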

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
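
/*
 * Illustrative sketch (not from the original header): LOCK_CONTENDED()
 * records a contention event only when the fast-path trylock fails.
 * This is roughly how the spinlock code uses it:
 *
 *	static inline void __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *	}
 */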

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
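
/*
 * Illustrative sketch (not from the original header): when two locks of
 * the same class legitimately nest (e.g. a parent and a child of the
 * same structure type), the inner acquisition passes
 * SINGLE_DEPTH_NESTING so lockdep treats it as a distinct subclass:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */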

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
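
/*
 * Illustrative sketch (not from the original header): might_lock()
 * documents "this function may take this lock" on a path that only
 * sometimes acquires it, so lockdep records the dependency on every
 * call rather than just on the rare slow path. The "mydev" helpers
 * below are hypothetical:
 *
 *	int mydev_read(struct mydev *dev)
 *	{
 *		might_lock(&dev->mutex);
 *		if (fast_path_available(dev))
 *			return fast_read(dev);
 *		mutex_lock(&dev->mutex);
 *		...
 *	}
 */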

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)
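
/*
 * Illustrative sketch (not from the original header): the context asserts
 * above let code verify its execution-context assumptions, e.g. a per-CPU
 * update (hypothetical counter) that is only safe with preemption off:
 *
 *	static void mydev_count_event(void)
 *	{
 *		lockdep_assert_preemption_disabled();
 *		__this_cpu_inc(mydev_events);
 *	}
 */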

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */