/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * Freeing of debug objects via the workqueue is rate limited to a
 * maximum frequency of 10Hz and at most 1024 objects per freeing
 * operation, i.e. at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
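
/*
 * Worked example (assuming a kernel configured with HZ=250):
 * ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies = 100ms,
 * so the delayed work runs at most 10 times per second and frees at
 * most 10 * ODEBUG_FREE_WORK_MAX = 10240 objects per second.
 */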

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};
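
/*
 * Access pattern sketch (illustrative only; it mirrors what
 * alloc_object() and __free_object() below actually do):
 *
 *	unsigned long flags;
 *	struct debug_percpu_free *pcp;
 *
 *	local_irq_save(flags);
 *	pcp = this_cpu_ptr(&percpu_obj_pool);
 *	... push to / pop from pcp->free_objs, adjusting pcp->obj_free ...
 *	local_irq_restore(flags);
 */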

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, refill it
	 * from the global free list. As it is likely that a workload may be
	 * gearing up to use more and more objects, don't free any of them
	 * until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
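
/*
 * Example (illustrative, assuming PAGE_SHIFT == 12): the addresses
 * 0xffff888012345008 and 0xffff888012345ff0 lie in the same 4k chunk,
 * so get_bucket() maps both to the same bucket. When that page is
 * freed, debug_check_no_obj_freed() only needs to scan the buckets
 * covering the freed range to find stale tracking entries.
 */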

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	bool check_stack = false;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		check_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_stack)
		debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
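
/*
 * Usage sketch for the debug_object_*() API (hypothetical "foo" object;
 * real users such as the timer and workqueue code follow the same
 * pattern, typically also providing fixup callbacks):
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 */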

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We get here when a static object is activated. We let the type
	 * specific code confirm whether this really is a static object.
	 * If so, we just make sure that it is tracked in the object
	 * tracker. If not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
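
/*
 * Return value check sketch (hypothetical foo_start(), continuing the
 * example above; many callers ignore the return value, but users that
 * must not operate on an invalid object can bail out):
 *
 *	int foo_start(struct foo *f)
 *	{
 *		if (debug_object_activate(f, &foo_debug_descr))
 *			return -EINVAL;
 *		...
 *	}
 */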

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * The object might be static; let the type specific code
		 * confirm. If so, track this static object, else invoke
		 * the fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
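
/*
 * Usage sketch (hypothetical FOO_* states; RCU uses this mechanism in a
 * similar way to track the queueing state of callback heads via
 * obj->astate). Move a tracked, active object from READY to QUEUED,
 * warning if it was not in the expected READY state:
 *
 *	#define FOO_STATE_READY		0
 *	#define FOO_STATE_QUEUED	1
 *
 *	debug_object_active_state(f, &foo_debug_descr,
 *				  FOO_STATE_READY, FOO_STATE_QUEUED);
 */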
952*4882a593Smuzhiyun
953*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_OBJECTS_FREE
__debug_check_no_obj_freed(const void * address,unsigned long size)954*4882a593Smuzhiyun static void __debug_check_no_obj_freed(const void *address, unsigned long size)
955*4882a593Smuzhiyun {
956*4882a593Smuzhiyun unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
957*4882a593Smuzhiyun const struct debug_obj_descr *descr;
958*4882a593Smuzhiyun enum debug_obj_state state;
959*4882a593Smuzhiyun struct debug_bucket *db;
960*4882a593Smuzhiyun struct hlist_node *tmp;
961*4882a593Smuzhiyun struct debug_obj *obj;
962*4882a593Smuzhiyun int cnt, objs_checked = 0;
963*4882a593Smuzhiyun
964*4882a593Smuzhiyun saddr = (unsigned long) address;
965*4882a593Smuzhiyun eaddr = saddr + size;
966*4882a593Smuzhiyun paddr = saddr & ODEBUG_CHUNK_MASK;
967*4882a593Smuzhiyun chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
968*4882a593Smuzhiyun chunks >>= ODEBUG_CHUNK_SHIFT;
969*4882a593Smuzhiyun
970*4882a593Smuzhiyun for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
971*4882a593Smuzhiyun db = get_bucket(paddr);
972*4882a593Smuzhiyun
973*4882a593Smuzhiyun repeat:
974*4882a593Smuzhiyun cnt = 0;
975*4882a593Smuzhiyun raw_spin_lock_irqsave(&db->lock, flags);
976*4882a593Smuzhiyun hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
977*4882a593Smuzhiyun cnt++;
978*4882a593Smuzhiyun oaddr = (unsigned long) obj->object;
979*4882a593Smuzhiyun if (oaddr < saddr || oaddr >= eaddr)
980*4882a593Smuzhiyun continue;
981*4882a593Smuzhiyun
982*4882a593Smuzhiyun switch (obj->state) {
983*4882a593Smuzhiyun case ODEBUG_STATE_ACTIVE:
984*4882a593Smuzhiyun descr = obj->descr;
985*4882a593Smuzhiyun state = obj->state;
986*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&db->lock, flags);
987*4882a593Smuzhiyun debug_print_object(obj, "free");
988*4882a593Smuzhiyun debug_object_fixup(descr->fixup_free,
989*4882a593Smuzhiyun (void *) oaddr, state);
990*4882a593Smuzhiyun goto repeat;
991*4882a593Smuzhiyun default:
992*4882a593Smuzhiyun hlist_del(&obj->node);
993*4882a593Smuzhiyun __free_object(obj);
994*4882a593Smuzhiyun break;
995*4882a593Smuzhiyun }
996*4882a593Smuzhiyun }
997*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&db->lock, flags);
998*4882a593Smuzhiyun
999*4882a593Smuzhiyun if (cnt > debug_objects_maxchain)
1000*4882a593Smuzhiyun debug_objects_maxchain = cnt;
1001*4882a593Smuzhiyun
1002*4882a593Smuzhiyun objs_checked += cnt;
1003*4882a593Smuzhiyun }
1004*4882a593Smuzhiyun
1005*4882a593Smuzhiyun if (objs_checked > debug_objects_maxchecked)
1006*4882a593Smuzhiyun debug_objects_maxchecked = objs_checked;
1007*4882a593Smuzhiyun
1008*4882a593Smuzhiyun /* Schedule work to actually kmem_cache_free() objects */
1009*4882a593Smuzhiyun if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1010*4882a593Smuzhiyun WRITE_ONCE(obj_freeing, true);
1011*4882a593Smuzhiyun schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1012*4882a593Smuzhiyun }
1013*4882a593Smuzhiyun }
1014*4882a593Smuzhiyun
debug_check_no_obj_freed(const void * address,unsigned long size)1015*4882a593Smuzhiyun void debug_check_no_obj_freed(const void *address, unsigned long size)
1016*4882a593Smuzhiyun {
1017*4882a593Smuzhiyun if (debug_objects_enabled)
1018*4882a593Smuzhiyun __debug_check_no_obj_freed(address, size);
1019*4882a593Smuzhiyun }
1020*4882a593Smuzhiyun #endif
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
1023*4882a593Smuzhiyun
debug_stats_show(struct seq_file * m,void * v)1024*4882a593Smuzhiyun static int debug_stats_show(struct seq_file *m, void *v)
1025*4882a593Smuzhiyun {
1026*4882a593Smuzhiyun int cpu, obj_percpu_free = 0;
1027*4882a593Smuzhiyun
1028*4882a593Smuzhiyun for_each_possible_cpu(cpu)
1029*4882a593Smuzhiyun obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1030*4882a593Smuzhiyun
1031*4882a593Smuzhiyun seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
1032*4882a593Smuzhiyun seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
1033*4882a593Smuzhiyun seq_printf(m, "warnings :%d\n", debug_objects_warnings);
1034*4882a593Smuzhiyun seq_printf(m, "fixups :%d\n", debug_objects_fixups);
1035*4882a593Smuzhiyun seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1036*4882a593Smuzhiyun seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1037*4882a593Smuzhiyun seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1038*4882a593Smuzhiyun seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
1039*4882a593Smuzhiyun seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1040*4882a593Smuzhiyun seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
1041*4882a593Smuzhiyun seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1042*4882a593Smuzhiyun seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
1043*4882a593Smuzhiyun return 0;
1044*4882a593Smuzhiyun }
1045*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(debug_stats);
1046*4882a593Smuzhiyun
debug_objects_init_debugfs(void)1047*4882a593Smuzhiyun static int __init debug_objects_init_debugfs(void)
1048*4882a593Smuzhiyun {
1049*4882a593Smuzhiyun struct dentry *dbgdir;
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun if (!debug_objects_enabled)
1052*4882a593Smuzhiyun return 0;
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun dbgdir = debugfs_create_dir("debug_objects", NULL);
1055*4882a593Smuzhiyun
1056*4882a593Smuzhiyun debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun return 0;
1059*4882a593Smuzhiyun }
1060*4882a593Smuzhiyun __initcall(debug_objects_init_debugfs);
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun #else
debug_objects_init_debugfs(void)1063*4882a593Smuzhiyun static inline void debug_objects_init_debugfs(void) { }
1064*4882a593Smuzhiyun #endif
1065*4882a593Smuzhiyun
1066*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1067*4882a593Smuzhiyun
1068*4882a593Smuzhiyun /* Random data structure for the self test */
1069*4882a593Smuzhiyun struct self_test {
1070*4882a593Smuzhiyun unsigned long dummy1[6];
1071*4882a593Smuzhiyun int static_init;
1072*4882a593Smuzhiyun unsigned long dummy2[3];
1073*4882a593Smuzhiyun };
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun static __initconst const struct debug_obj_descr descr_type_test;
1076*4882a593Smuzhiyun
is_static_object(void * addr)1077*4882a593Smuzhiyun static bool __init is_static_object(void *addr)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun struct self_test *obj = addr;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun return obj->static_init;
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun
1084*4882a593Smuzhiyun /*
1085*4882a593Smuzhiyun * fixup_init is called when:
1086*4882a593Smuzhiyun * - an active object is initialized
1087*4882a593Smuzhiyun */
fixup_init(void * addr,enum debug_obj_state state)1088*4882a593Smuzhiyun static bool __init fixup_init(void *addr, enum debug_obj_state state)
1089*4882a593Smuzhiyun {
1090*4882a593Smuzhiyun struct self_test *obj = addr;
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun switch (state) {
1093*4882a593Smuzhiyun case ODEBUG_STATE_ACTIVE:
1094*4882a593Smuzhiyun debug_object_deactivate(obj, &descr_type_test);
1095*4882a593Smuzhiyun debug_object_init(obj, &descr_type_test);
1096*4882a593Smuzhiyun return true;
1097*4882a593Smuzhiyun default:
1098*4882a593Smuzhiyun return false;
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun /*
1103*4882a593Smuzhiyun * fixup_activate is called when:
1104*4882a593Smuzhiyun * - an active object is activated
1105*4882a593Smuzhiyun * - an unknown non-static object is activated
1106*4882a593Smuzhiyun */
fixup_activate(void * addr,enum debug_obj_state state)1107*4882a593Smuzhiyun static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun struct self_test *obj = addr;
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun switch (state) {
1112*4882a593Smuzhiyun case ODEBUG_STATE_NOTAVAILABLE:
1113*4882a593Smuzhiyun return true;
1114*4882a593Smuzhiyun case ODEBUG_STATE_ACTIVE:
1115*4882a593Smuzhiyun debug_object_deactivate(obj, &descr_type_test);
1116*4882a593Smuzhiyun debug_object_activate(obj, &descr_type_test);
1117*4882a593Smuzhiyun return true;
1118*4882a593Smuzhiyun
1119*4882a593Smuzhiyun default:
1120*4882a593Smuzhiyun return false;
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun /*
1125*4882a593Smuzhiyun * fixup_destroy is called when:
1126*4882a593Smuzhiyun * - an active object is destroyed
1127*4882a593Smuzhiyun */
fixup_destroy(void * addr,enum debug_obj_state state)1128*4882a593Smuzhiyun static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1129*4882a593Smuzhiyun {
1130*4882a593Smuzhiyun struct self_test *obj = addr;
1131*4882a593Smuzhiyun
1132*4882a593Smuzhiyun switch (state) {
1133*4882a593Smuzhiyun case ODEBUG_STATE_ACTIVE:
1134*4882a593Smuzhiyun debug_object_deactivate(obj, &descr_type_test);
1135*4882a593Smuzhiyun debug_object_destroy(obj, &descr_type_test);
1136*4882a593Smuzhiyun return true;
1137*4882a593Smuzhiyun default:
1138*4882a593Smuzhiyun return false;
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun }
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun /*
1143*4882a593Smuzhiyun * fixup_free is called when:
1144*4882a593Smuzhiyun * - an active object is freed
1145*4882a593Smuzhiyun */
fixup_free(void * addr,enum debug_obj_state state)1146*4882a593Smuzhiyun static bool __init fixup_free(void *addr, enum debug_obj_state state)
1147*4882a593Smuzhiyun {
1148*4882a593Smuzhiyun struct self_test *obj = addr;
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun switch (state) {
1151*4882a593Smuzhiyun case ODEBUG_STATE_ACTIVE:
1152*4882a593Smuzhiyun debug_object_deactivate(obj, &descr_type_test);
1153*4882a593Smuzhiyun debug_object_free(obj, &descr_type_test);
1154*4882a593Smuzhiyun return true;
1155*4882a593Smuzhiyun default:
1156*4882a593Smuzhiyun return false;
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun }
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun static int __init
check_results(void * addr,enum debug_obj_state state,int fixups,int warnings)1161*4882a593Smuzhiyun check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1162*4882a593Smuzhiyun {
1163*4882a593Smuzhiyun struct debug_bucket *db;
1164*4882a593Smuzhiyun struct debug_obj *obj;
1165*4882a593Smuzhiyun unsigned long flags;
1166*4882a593Smuzhiyun int res = -EINVAL;
1167*4882a593Smuzhiyun
1168*4882a593Smuzhiyun db = get_bucket((unsigned long) addr);
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun raw_spin_lock_irqsave(&db->lock, flags);
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun obj = lookup_object(addr, db);
1173*4882a593Smuzhiyun if (!obj && state != ODEBUG_STATE_NONE) {
1174*4882a593Smuzhiyun WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1175*4882a593Smuzhiyun goto out;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun if (obj && obj->state != state) {
1178*4882a593Smuzhiyun WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1179*4882a593Smuzhiyun obj->state, state);
1180*4882a593Smuzhiyun goto out;
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun if (fixups != debug_objects_fixups) {
1183*4882a593Smuzhiyun WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1184*4882a593Smuzhiyun fixups, debug_objects_fixups);
1185*4882a593Smuzhiyun goto out;
1186*4882a593Smuzhiyun }
1187*4882a593Smuzhiyun if (warnings != debug_objects_warnings) {
1188*4882a593Smuzhiyun WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1189*4882a593Smuzhiyun warnings, debug_objects_warnings);
1190*4882a593Smuzhiyun goto out;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun res = 0;
1193*4882a593Smuzhiyun out:
1194*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&db->lock, flags);
1195*4882a593Smuzhiyun if (res)
1196*4882a593Smuzhiyun debug_objects_enabled = 0;
1197*4882a593Smuzhiyun return res;
1198*4882a593Smuzhiyun }

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};
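
/*
 * Illustrative sketch (not compiled): how a client subsystem would
 * typically wire up a descriptor like the one above and drive an object
 * through its tracked lifetime. The names example_obj, example_descr,
 * example_fixup_free and example_lifetime are hypothetical, not part of
 * this file.
 */
#if 0
struct example_obj {
	int dummy;
};

static bool example_fixup_free(void *addr, enum debug_obj_state state);

static const struct debug_obj_descr example_descr = {
	.name		= "example",
	.fixup_free	= example_fixup_free,
};

/* Called when an object is freed while still active: untrack it. */
static bool example_fixup_free(void *addr, enum debug_obj_state state)
{
	if (state == ODEBUG_STATE_ACTIVE) {
		debug_object_deactivate(addr, &example_descr);
		debug_object_free(addr, &example_descr);
		return true;
	}
	return false;
}

static void example_lifetime(struct example_obj *obj)
{
	debug_object_init(obj, &example_descr);		/* -> ODEBUG_STATE_INIT */
	debug_object_activate(obj, &example_descr);	/* -> ODEBUG_STATE_ACTIVE */
	debug_object_deactivate(obj, &example_descr);	/* -> ODEBUG_STATE_INACTIVE */
	debug_object_free(obj, &example_descr);		/* untracked again */
}
#endif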

static __initdata struct self_test obj = { .static_init = 0 };

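/*
 * Walk a test object through the tracker's state machine: legal
 * transitions (init -> activate -> deactivate -> destroy -> free) must
 * yield the predicted state, while illegal operations (e.g. activating
 * an already active or destroyed object) must bump the fixup and/or
 * warning counters instead.
 */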
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
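/* No-op stub when the selftest is configured out. */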
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
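
/*
 * For reference, a minimal sketch (not compiled) of how an address maps
 * to one of the ODEBUG_HASH_SIZE buckets initialized above, assuming the
 * hash_long() based scheme used by this file's get_bucket() helper; the
 * name example_bucket is hypothetical.
 */
#if 0
static struct debug_bucket *example_bucket(unsigned long addr)
{
	unsigned long hash;

	/* Addresses within the same ODEBUG_CHUNK share a bucket. */
	hash = hash_long(addr >> ODEBUG_CHUNK_SHIFT, ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
#endif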

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is called early, while only one CPU is up
	 * and interrupts are still disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* Copy the object's data; its node link is re-established below. */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n", cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, which would recurse.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools.
	 *
	 * Initialization is not strictly necessary, but is done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
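		/* A NULL cache (allocation failure) is ignored by kmem_cache_destroy(). */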
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Scale the allocation and freeing thresholds with the number of
	 * possible CPUs in the system; e.g. with 4 possible CPUs and an
	 * ODEBUG_BATCH_SIZE of 16, both thresholds grow by 64 objects.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}