/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/locking/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES,
};
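/*
 * Illustrative expansion (not part of the original header), assuming the
 * HARDIRQ and SOFTIRQ states listed in lockdep_states.h: the X-macro above
 * produces
 *
 *	LOCK_USED_IN_HARDIRQ,	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,	LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ,	LOCK_USED_IN_SOFTIRQ_READ,
 *	LOCK_ENABLED_SOFTIRQ,	LOCK_ENABLED_SOFTIRQ_READ,
 *	LOCK_USED, LOCK_USED_READ, LOCK_USAGE_STATES
 *
 * so each state occupies a naturally aligned group of four usage bits.
 */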

/* states after LOCK_USED_READ are not traced or printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
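/*
 * A minimal sketch of how these masks decompose a usage bit (the helpers
 * are illustrative, not part of the lockdep API): bit 0 selects the _READ
 * variant, bit 1 selects ENABLED_* vs. USED_IN_*, and the remaining bits
 * index the state. E.g. LOCK_ENABLED_HARDIRQ_READ (= 3) has both low bits set.
 */
static inline int lock_usage_is_read(enum lock_usage_bit bit)
{
	return bit & LOCK_USAGE_READ_MASK;	/* nonzero for *_READ bits */
}

static inline int lock_usage_is_enabled(enum lock_usage_bit bit)
{
	return bit & LOCK_USAGE_DIR_MASK;	/* nonzero for ENABLED_* bits */
}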

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
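/*
 * Illustrative expansion, again assuming the HARDIRQ and SOFTIRQ states
 * from lockdep_states.h: the #include trick above evaluates to
 *
 *	LOCKF_ENABLED_IRQ == LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ
 *	LOCKF_USED_IN_IRQ == LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ
 *
 * and likewise for the _READ variants. The trailing "0;" after each
 * #include terminates the "x |" chain that every LOCKDEP_STATE()
 * expansion leaves behind.
 */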

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the 32MB limit required for the kernel.
 * With CONFIG_LOCKDEP we could exceed this limit and cause system
 * boot-up problems. So, reduce the static allocations for the
 * lockdep-related structures so that everything fits within the
 * current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	(1UL << CONFIG_LOCKDEP_BITS)

#define MAX_LOCKDEP_CHAINS_BITS	CONFIG_LOCKDEP_CHAINS_BITS

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	(1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
#define STACK_TRACE_HASH_SIZE	(1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
#endif
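/*
 * For scale, with what are believed to be the default Kconfig values
 * (LOCKDEP_BITS=15, LOCKDEP_CHAINS_BITS=16, LOCKDEP_STACK_TRACE_BITS=19,
 * LOCKDEP_STACK_TRACE_HASH_BITS=14), the full-size build tracks 32768
 * dependency entries, 65536 chains, 524288 stack trace entries and a
 * 16384-bucket trace hash: exactly double each CONFIG_LOCKDEP_SMALL
 * limit above.
 */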

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)
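/*
 * A hypothetical sketch of how these bits combine (the real encoding is
 * computed in kernel/locking/lockdep.c from the running context):
 */
static inline unsigned int lock_chain_irq_context_bits(bool hardirq, bool softirq)
{
	return (hardirq ? LOCK_CHAIN_HARDIRQ_CONTEXT : 0) |
	       (softirq ? LOCK_CHAIN_SOFTIRQ_CONTEXT : 0);
}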

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
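/* The factor of 5 assumes chains hold about five locks each on average. */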

extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
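/*
 * A minimal usage sketch, mirroring what the lockdep /proc code does:
 *
 *	char usage[LOCK_USAGE_CHARS];
 *
 *	get_usage_chars(class, usage);
 *	seq_printf(m, " %s", usage);
 */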

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;
extern unsigned long max_lock_class_idx;

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
extern unsigned long lock_classes_in_use[];

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We keep them per-CPU, as they are often accessed in the fast path
 * and we want to avoid cache-line bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
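/*
 * A short usage sketch (field names from struct lockdep_stats above):
 * the inc/dec side must run with IRQs disabled, which is what makes the
 * cheap non-atomic __this_cpu ops safe; the read side sums a possibly
 * stale snapshot over all CPUs.
 *
 *	debug_atomic_inc(chain_lookup_misses);
 *	seq_printf(m, "%llu\n", debug_atomic_read(chain_lookup_hits));
 */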

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif