xref: /OK3568_Linux_fs/kernel/kernel/dma/debug.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2008 Advanced Micro Devices, Inc.
4  *
5  * Author: Joerg Roedel <joerg.roedel@amd.com>
6  */
7 
8 #define pr_fmt(fmt)	"DMA-API: " fmt
9 
10 #include <linux/sched/task_stack.h>
11 #include <linux/scatterlist.h>
12 #include <linux/dma-map-ops.h>
13 #include <linux/sched/task.h>
14 #include <linux/stacktrace.h>
15 #include <linux/spinlock.h>
16 #include <linux/vmalloc.h>
17 #include <linux/debugfs.h>
18 #include <linux/uaccess.h>
19 #include <linux/export.h>
20 #include <linux/device.h>
21 #include <linux/types.h>
22 #include <linux/sched.h>
23 #include <linux/ctype.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <asm/sections.h>
27 #include "debug.h"
28 
29 #define HASH_SIZE       16384ULL
30 #define HASH_FN_SHIFT   13
31 #define HASH_FN_MASK    (HASH_SIZE - 1)
32 
33 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
34 /* If the pool runs out, add this many new entries at once */
35 #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
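/*
 * Rough sizing note (illustrative; exact numbers depend on the config):
 * with 4 KiB pages and a cacheline-aligned entry of about 128 bytes on a
 * 64-bit build with CONFIG_STACKTRACE, each page added above yields
 * roughly 32 new entries.
 */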
36 
37 enum {
38 	dma_debug_single,
39 	dma_debug_sg,
40 	dma_debug_coherent,
41 	dma_debug_resource,
42 };
43 
44 enum map_err_types {
45 	MAP_ERR_CHECK_NOT_APPLICABLE,
46 	MAP_ERR_NOT_CHECKED,
47 	MAP_ERR_CHECKED,
48 };
49 
50 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
51 
52 /**
53  * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
54  * @list: node on pre-allocated free_entries list
55  * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @dev_addr: dma address of this mapping
56  * @size: length of the mapping
57  * @type: single, page, sg, coherent
58  * @direction: enum dma_data_direction
59  * @sg_call_ents: 'nents' from dma_map_sg
60  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
61  * @pfn: page frame of the start address
62  * @offset: offset of mapping relative to pfn
63  * @map_err_type: track whether dma_mapping_error() was checked
64  * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of backtrace history
65  */
66 struct dma_debug_entry {
67 	struct list_head list;
68 	struct device    *dev;
69 	u64              dev_addr;
70 	u64              size;
71 	int              type;
72 	int              direction;
73 	int		 sg_call_ents;
74 	int		 sg_mapped_ents;
75 	unsigned long	 pfn;
76 	size_t		 offset;
77 	enum map_err_types  map_err_type;
78 #ifdef CONFIG_STACKTRACE
79 	unsigned int	stack_len;
80 	unsigned long	stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
81 #endif
82 } ____cacheline_aligned_in_smp;
83 
84 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
85 
86 struct hash_bucket {
87 	struct list_head list;
88 	spinlock_t lock;
89 };
90 
91 /* Hash list to save the allocated dma addresses */
92 static struct hash_bucket dma_entry_hash[HASH_SIZE];
93 /* List of pre-allocated dma_debug_entry's */
94 static LIST_HEAD(free_entries);
95 /* Lock for the list above */
96 static DEFINE_SPINLOCK(free_entries_lock);
97 
98 /* Global disable flag - will be set in case of an error */
99 static bool global_disable __read_mostly;
100 
101 /* Early initialization disable flag, set at the end of dma_debug_init */
102 static bool dma_debug_initialized __read_mostly;
103 
104 static inline bool dma_debug_disabled(void)
105 {
106 	return global_disable || !dma_debug_initialized;
107 }
108 
109 /* Global error count */
110 static u32 error_count;
111 
112 /* Global error show enable */
113 static u32 show_all_errors __read_mostly;
114 /* Number of errors to show */
115 static u32 show_num_errors = 1;
116 
117 static u32 num_free_entries;
118 static u32 min_free_entries;
119 static u32 nr_total_entries;
120 
121 /* number of preallocated entries requested by kernel cmdline */
122 static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
123 
124 /* per-driver filter related state */
125 
126 #define NAME_MAX_LEN	64
127 
128 static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
129 static struct device_driver *current_driver                    __read_mostly;
130 
131 static DEFINE_RWLOCK(driver_name_lock);
132 
133 static const char *const maperr2str[] = {
134 	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
135 	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
136 	[MAP_ERR_CHECKED] = "dma map error checked",
137 };
138 
139 static const char *type2name[] = {
140 	[dma_debug_single] = "single",
141 	[dma_debug_sg] = "scatter-gather",
142 	[dma_debug_coherent] = "coherent",
143 	[dma_debug_resource] = "resource",
144 };
145 
146 static const char *dir2name[] = {
147 	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
148 	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
149 	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
150 	[DMA_NONE]		= "DMA_NONE",
151 };
152 
153 /*
154  * The access to some variables in this macro is racy. We can't use atomic_t
155  * here because all these variables are exported to debugfs. Some of them are
156  * even writeable. This is also the reason why a lock won't help much. But anyway,
157  * the races are no big deal. Here is why:
158  *
159  *   error_count: the addition is racy, but the worst thing that can happen is
160  *                that we don't count some errors
161  *   show_num_errors: the subtraction is racy. Also no big deal because in
162  *                    worst case this will result in one warning more in the
163  *                    system log than the user configured. This variable is
164  *                    writeable via debugfs.
165  */
166 static inline void dump_entry_trace(struct dma_debug_entry *entry)
167 {
168 #ifdef CONFIG_STACKTRACE
169 	if (entry) {
170 		pr_warn("Mapped at:\n");
171 		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
172 	}
173 #endif
174 }
175 
176 static bool driver_filter(struct device *dev)
177 {
178 	struct device_driver *drv;
179 	unsigned long flags;
180 	bool ret;
181 
182 	/* driver filter off */
183 	if (likely(!current_driver_name[0]))
184 		return true;
185 
186 	/* driver filter on and initialized */
187 	if (current_driver && dev && dev->driver == current_driver)
188 		return true;
189 
190 	/* driver filter on, but we can't filter on a NULL device... */
191 	if (!dev)
192 		return false;
193 
194 	if (current_driver || !current_driver_name[0])
195 		return false;
196 
197 	/* driver filter on but not yet initialized */
198 	drv = dev->driver;
199 	if (!drv)
200 		return false;
201 
202 	/* lock to protect against change of current_driver_name */
203 	read_lock_irqsave(&driver_name_lock, flags);
204 
205 	ret = false;
206 	if (drv->name &&
207 	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
208 		current_driver = drv;
209 		ret = true;
210 	}
211 
212 	read_unlock_irqrestore(&driver_name_lock, flags);
213 
214 	return ret;
215 }
216 
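/*
 * Report a DMA-API violation for @dev via WARN(), optionally dumping the
 * allocation stack trace stored in @entry. The report honors the
 * per-driver filter and the all_errors/num_errors debugfs knobs; a
 * hypothetical report starts roughly with
 * "DMA-API: <driver> <device>: ..." followed by the formatted message.
 */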
217 #define err_printk(dev, entry, format, arg...) do {			\
218 		error_count += 1;					\
219 		if (driver_filter(dev) &&				\
220 		    (show_all_errors || show_num_errors > 0)) {		\
221 			WARN(1, pr_fmt("%s %s: ") format,		\
222 			     dev ? dev_driver_string(dev) : "NULL",	\
223 			     dev ? dev_name(dev) : "NULL", ## arg);	\
224 			dump_entry_trace(entry);			\
225 		}							\
226 		if (!show_all_errors && show_num_errors > 0)		\
227 			show_num_errors -= 1;				\
228 	} while (0);
229 
230 /*
231  * Hash related functions
232  *
233  * Every DMA-API request is saved into a struct dma_debug_entry. To
234  * have quick access to these structs they are stored into a hash.
235  */
236 static int hash_fn(struct dma_debug_entry *entry)
237 {
238 	/*
239 	 * Hash function is based on the dma address.
240 	 * We use bits 13-26 here as the index into the hash
241 	 */
242 	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
243 }
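/*
 * Worked example (illustrative values): for dev_addr 0x12345678,
 * 0x12345678 >> HASH_FN_SHIFT == 0x91a2 and 0x91a2 & HASH_FN_MASK ==
 * 0x11a2, so the entry lands in dma_entry_hash[0x11a2].
 */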
244 
245 /*
246  * Request exclusive access to a hash bucket for a given dma_debug_entry.
247  */
248 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
249 					   unsigned long *flags)
250 	__acquires(&dma_entry_hash[idx].lock)
251 {
252 	int idx = hash_fn(entry);
253 	unsigned long __flags;
254 
255 	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
256 	*flags = __flags;
257 	return &dma_entry_hash[idx];
258 }
259 
260 /*
261  * Give up exclusive access to the hash bucket
262  */
263 static void put_hash_bucket(struct hash_bucket *bucket,
264 			    unsigned long flags)
265 	__releases(&bucket->lock)
266 {
267 	spin_unlock_irqrestore(&bucket->lock, flags);
268 }
269 
270 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
271 {
272 	return ((a->dev_addr == b->dev_addr) &&
273 		(a->dev == b->dev)) ? true : false;
274 }
275 
276 static bool containing_match(struct dma_debug_entry *a,
277 			     struct dma_debug_entry *b)
278 {
279 	if (a->dev != b->dev)
280 		return false;
281 
282 	if ((b->dev_addr <= a->dev_addr) &&
283 	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
284 		return true;
285 
286 	return false;
287 }
288 
289 /*
290  * Search a given entry in the hash bucket list
291  */
292 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
293 						  struct dma_debug_entry *ref,
294 						  match_fn match)
295 {
296 	struct dma_debug_entry *entry, *ret = NULL;
297 	int matches = 0, match_lvl, last_lvl = -1;
298 
299 	list_for_each_entry(entry, &bucket->list, list) {
300 		if (!match(ref, entry))
301 			continue;
302 
303 		/*
304 		 * Some drivers map the same physical address multiple
305 		 * times. Without a hardware IOMMU this results in the
306 		 * same device addresses being put into the dma-debug
307 		 * hash multiple times too. This can result in false
308 		 * positives being reported. Therefore we implement a
309 		 * best-fit algorithm here which returns the entry from
310 		 * the hash which fits best to the reference value
311 		 * instead of the first-fit.
312 		 */
313 		matches += 1;
314 		match_lvl = 0;
315 		entry->size         == ref->size         ? ++match_lvl : 0;
316 		entry->type         == ref->type         ? ++match_lvl : 0;
317 		entry->direction    == ref->direction    ? ++match_lvl : 0;
318 		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
319 
320 		if (match_lvl == 4) {
321 			/* perfect-fit - return the result */
322 			return entry;
323 		} else if (match_lvl > last_lvl) {
324 			/*
325 			 * We found an entry that fits better than the
326 			 * previous one or it is the 1st match.
327 			 */
328 			last_lvl = match_lvl;
329 			ret      = entry;
330 		}
331 	}
332 
333 	/*
334 	 * If we have multiple matches but no perfect-fit, just return
335 	 * NULL.
336 	 */
337 	ret = (matches == 1) ? ret : NULL;
338 
339 	return ret;
340 }
341 
342 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
343 						 struct dma_debug_entry *ref)
344 {
345 	return __hash_bucket_find(bucket, ref, exact_match);
346 }
347 
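/*
 * Like bucket_find_exact(), but for a reference that may point into the
 * middle of a mapping. A containing mapping must start at or below
 * ref->dev_addr, so walk backwards bucket by bucket - each step covers
 * another (1 << HASH_FN_SHIFT) bytes of dma address space - until the
 * device's maximum segment size has been scanned.
 */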
348 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
349 						   struct dma_debug_entry *ref,
350 						   unsigned long *flags)
351 {
352 
353 	unsigned int max_range = dma_get_max_seg_size(ref->dev);
354 	struct dma_debug_entry *entry, index = *ref;
355 	unsigned int range = 0;
356 
357 	while (range <= max_range) {
358 		entry = __hash_bucket_find(*bucket, ref, containing_match);
359 
360 		if (entry)
361 			return entry;
362 
363 		/*
364 		 * Nothing found, go back a hash bucket
365 		 */
366 		put_hash_bucket(*bucket, *flags);
367 		range          += (1 << HASH_FN_SHIFT);
368 		index.dev_addr -= (1 << HASH_FN_SHIFT);
369 		*bucket = get_hash_bucket(&index, flags);
370 	}
371 
372 	return NULL;
373 }
374 
375 /*
376  * Add an entry to a hash bucket
377  */
378 static void hash_bucket_add(struct hash_bucket *bucket,
379 			    struct dma_debug_entry *entry)
380 {
381 	list_add_tail(&entry->list, &bucket->list);
382 }
383 
384 /*
385  * Remove entry from a hash bucket list
386  */
387 static void hash_bucket_del(struct dma_debug_entry *entry)
388 {
389 	list_del(&entry->list);
390 }
391 
392 static unsigned long long phys_addr(struct dma_debug_entry *entry)
393 {
394 	if (entry->type == dma_debug_resource)
395 		return __pfn_to_phys(entry->pfn) + entry->offset;
396 
397 	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
398 }
399 
400 /*
401  * Dump mapping entries for debugging purposes
402  */
403 void debug_dma_dump_mappings(struct device *dev)
404 {
405 	int idx;
406 
407 	for (idx = 0; idx < HASH_SIZE; idx++) {
408 		struct hash_bucket *bucket = &dma_entry_hash[idx];
409 		struct dma_debug_entry *entry;
410 		unsigned long flags;
411 
412 		spin_lock_irqsave(&bucket->lock, flags);
413 
414 		list_for_each_entry(entry, &bucket->list, list) {
415 			if (!dev || dev == entry->dev) {
416 				dev_info(entry->dev,
417 					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
418 					 type2name[entry->type], idx,
419 					 phys_addr(entry), entry->pfn,
420 					 entry->dev_addr, entry->size,
421 					 dir2name[entry->direction],
422 					 maperr2str[entry->map_err_type]);
423 			}
424 		}
425 
426 		spin_unlock_irqrestore(&bucket->lock, flags);
427 		cond_resched();
428 	}
429 }
430 
431 /*
432  * For each mapping (initial cacheline in the case of
433  * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
434  * scatterlist, or the cacheline specified in dma_map_single) insert
435  * into this tree using the cacheline as the key. At
436  * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
437  * the entry already exists at insertion time add a tag as a reference
438  * count for the overlapping mappings.  For now, the overlap tracking
439  * just ensures that 'unmaps' balance 'maps' before marking the
440  * cacheline idle, but we should also be flagging overlaps as an API
441  * violation.
442  *
443  * Memory usage is mostly constrained by the maximum number of available
444  * dma-debug entries in that we need a free dma_debug_entry before
445  * inserting into the tree.  In the case of dma_map_page and
446  * dma_alloc_coherent there is only one dma_debug_entry and one
447  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
448  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
449  * entries into the tree.
450  */
451 static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
452 static DEFINE_SPINLOCK(radix_lock);
453 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
454 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
455 #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
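/*
 * Example (illustrative): with PAGE_SHIFT == 12 and L1_CACHE_SHIFT == 6
 * there are 64 cachelines per page, and with the usual three radix-tree
 * tags ACTIVE_CACHELINE_MAX_OVERLAP == 7.
 */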
456 
457 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
458 {
459 	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
460 		(entry->offset >> L1_CACHE_SHIFT);
461 }
462 
463 static int active_cacheline_read_overlap(phys_addr_t cln)
464 {
465 	int overlap = 0, i;
466 
467 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
468 		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
469 			overlap |= 1 << i;
470 	return overlap;
471 }
472 
473 static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
474 {
475 	int i;
476 
477 	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
478 		return overlap;
479 
480 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
481 		if (overlap & 1 << i)
482 			radix_tree_tag_set(&dma_active_cacheline, cln, i);
483 		else
484 			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
485 
486 	return overlap;
487 }
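/*
 * The overlap count lives entirely in the radix-tree tag bits of the
 * cacheline's slot, so tracking it needs no extra allocation; values that
 * would exceed ACTIVE_CACHELINE_MAX_OVERLAP are not stored, leaving the
 * counter saturated.
 */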
488 
489 static void active_cacheline_inc_overlap(phys_addr_t cln)
490 {
491 	int overlap = active_cacheline_read_overlap(cln);
492 
493 	overlap = active_cacheline_set_overlap(cln, ++overlap);
494 
495 	/* If we overflowed the overlap counter then we're potentially
496 	 * leaking dma-mappings.
497 	 */
498 	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
499 		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
500 		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
501 }
502 
503 static int active_cacheline_dec_overlap(phys_addr_t cln)
504 {
505 	int overlap = active_cacheline_read_overlap(cln);
506 
507 	return active_cacheline_set_overlap(cln, --overlap);
508 }
509 
510 static int active_cacheline_insert(struct dma_debug_entry *entry)
511 {
512 	phys_addr_t cln = to_cacheline_number(entry);
513 	unsigned long flags;
514 	int rc;
515 
516 	/* If the device is not writing memory then we don't have any
517 	 * concerns about the cpu consuming stale data.  This mitigates
518 	 * legitimate usages of overlapping mappings.
519 	 */
520 	if (entry->direction == DMA_TO_DEVICE)
521 		return 0;
522 
523 	spin_lock_irqsave(&radix_lock, flags);
524 	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
525 	if (rc == -EEXIST)
526 		active_cacheline_inc_overlap(cln);
527 	spin_unlock_irqrestore(&radix_lock, flags);
528 
529 	return rc;
530 }
531 
532 static void active_cacheline_remove(struct dma_debug_entry *entry)
533 {
534 	phys_addr_t cln = to_cacheline_number(entry);
535 	unsigned long flags;
536 
537 	/* ...mirror the insert case */
538 	if (entry->direction == DMA_TO_DEVICE)
539 		return;
540 
541 	spin_lock_irqsave(&radix_lock, flags);
542 	/* since we are counting overlaps the final put of the
543 	 * cacheline will occur when the overlap count is 0.
544 	 * active_cacheline_dec_overlap() returns -1 in that case
545 	 */
546 	if (active_cacheline_dec_overlap(cln) < 0)
547 		radix_tree_delete(&dma_active_cacheline, cln);
548 	spin_unlock_irqrestore(&radix_lock, flags);
549 }
550 
551 /*
552  * Wrapper function for adding an entry to the hash.
553  * This function takes care of locking itself.
554  */
555 static void add_dma_entry(struct dma_debug_entry *entry)
556 {
557 	struct hash_bucket *bucket;
558 	unsigned long flags;
559 	int rc;
560 
561 	bucket = get_hash_bucket(entry, &flags);
562 	hash_bucket_add(bucket, entry);
563 	put_hash_bucket(bucket, flags);
564 
565 	rc = active_cacheline_insert(entry);
566 	if (rc == -ENOMEM) {
567 		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
568 		global_disable = true;
569 	}
570 
571 	/* TODO: report -EEXIST errors here as overlapping mappings are
572 	 * not supported by the DMA API
573 	 */
574 }
575 
576 static int dma_debug_create_entries(gfp_t gfp)
577 {
578 	struct dma_debug_entry *entry;
579 	int i;
580 
581 	entry = (void *)get_zeroed_page(gfp);
582 	if (!entry)
583 		return -ENOMEM;
584 
585 	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
586 		list_add_tail(&entry[i].list, &free_entries);
587 
588 	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
589 	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
590 
591 	return 0;
592 }
593 
594 static struct dma_debug_entry *__dma_entry_alloc(void)
595 {
596 	struct dma_debug_entry *entry;
597 
598 	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
599 	list_del(&entry->list);
600 	memset(entry, 0, sizeof(*entry));
601 
602 	num_free_entries -= 1;
603 	if (num_free_entries < min_free_entries)
604 		min_free_entries = num_free_entries;
605 
606 	return entry;
607 }
608 
609 static void __dma_entry_alloc_check_leak(void)
610 {
611 	u32 tmp = nr_total_entries % nr_prealloc_entries;
612 
613 	/* Shout each time we tick over some multiple of the initial pool */
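	/* e.g. (illustrative) a pool grown to 3x nr_prealloc_entries prints "(300%)" */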
614 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
615 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
616 			nr_total_entries,
617 			(nr_total_entries / nr_prealloc_entries));
618 	}
619 }
620 
621 /* struct dma_entry allocator
622  *
623  * The next two functions implement the allocator for
624  * struct dma_debug_entries.
625  */
626 static struct dma_debug_entry *dma_entry_alloc(void)
627 {
628 	struct dma_debug_entry *entry;
629 	unsigned long flags;
630 
631 	spin_lock_irqsave(&free_entries_lock, flags);
632 	if (num_free_entries == 0) {
633 		if (dma_debug_create_entries(GFP_ATOMIC)) {
634 			global_disable = true;
635 			spin_unlock_irqrestore(&free_entries_lock, flags);
636 			pr_err("debugging out of memory - disabling\n");
637 			return NULL;
638 		}
639 		__dma_entry_alloc_check_leak();
640 	}
641 
642 	entry = __dma_entry_alloc();
643 
644 	spin_unlock_irqrestore(&free_entries_lock, flags);
645 
646 #ifdef CONFIG_STACKTRACE
647 	entry->stack_len = stack_trace_save(entry->stack_entries,
648 					    ARRAY_SIZE(entry->stack_entries),
649 					    1);
650 #endif
651 	return entry;
652 }
653 
654 static void dma_entry_free(struct dma_debug_entry *entry)
655 {
656 	unsigned long flags;
657 
658 	active_cacheline_remove(entry);
659 
660 	/*
661 	 * add to beginning of the list - this way the entries are
662 	 * more likely cache hot when they are reallocated.
663 	 */
664 	spin_lock_irqsave(&free_entries_lock, flags);
665 	list_add(&entry->list, &free_entries);
666 	num_free_entries += 1;
667 	spin_unlock_irqrestore(&free_entries_lock, flags);
668 }
669 
670 /*
671  * DMA-API debugging init code
672  *
673  * The init code does two things:
674  *   1. Initialize core data structures
675  *   2. Preallocate a given number of dma_debug_entry structs
676  */
677 
678 static ssize_t filter_read(struct file *file, char __user *user_buf,
679 			   size_t count, loff_t *ppos)
680 {
681 	char buf[NAME_MAX_LEN + 1];
682 	unsigned long flags;
683 	int len;
684 
685 	if (!current_driver_name[0])
686 		return 0;
687 
688 	/*
689 	 * We can't copy to userspace directly because current_driver_name can
690 	 * only be read under the driver_name_lock with irqs disabled. So
691 	 * create a temporary copy first.
692 	 */
693 	read_lock_irqsave(&driver_name_lock, flags);
694 	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
695 	read_unlock_irqrestore(&driver_name_lock, flags);
696 
697 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
698 }
699 
700 static ssize_t filter_write(struct file *file, const char __user *userbuf,
701 			    size_t count, loff_t *ppos)
702 {
703 	char buf[NAME_MAX_LEN];
704 	unsigned long flags;
705 	size_t len;
706 	int i;
707 
708 	/*
709 	 * We can't copy from userspace directly. Access to
710 	 * current_driver_name is protected with a write_lock with irqs
711 	 * disabled. Since copy_from_user can fault and may sleep we
712 	 * need to copy to temporary buffer first
713 	 */
714 	len = min(count, (size_t)(NAME_MAX_LEN - 1));
715 	if (copy_from_user(buf, userbuf, len))
716 		return -EFAULT;
717 
718 	buf[len] = 0;
719 
720 	write_lock_irqsave(&driver_name_lock, flags);
721 
722 	/*
723 	 * Now handle the string we got from userspace very carefully.
724 	 * The rules are:
725 	 *         - only use the first token we got
726 	 *         - token delimiter is everything looking like a space
727 	 *           character (' ', '\n', '\t' ...)
728 	 *
729 	 */
730 	if (!isalnum(buf[0])) {
731 		/*
732 		 * If the first character userspace gave us is not
733 		 * alphanumerical then assume the filter should be
734 		 * switched off.
735 		 */
736 		if (current_driver_name[0])
737 			pr_info("switching off dma-debug driver filter\n");
738 		current_driver_name[0] = 0;
739 		current_driver = NULL;
740 		goto out_unlock;
741 	}
742 
743 	/*
744 	 * Now parse out the first token and use it as the name for the
745 	 * driver to filter for.
746 	 */
747 	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
748 		current_driver_name[i] = buf[i];
749 		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
750 			break;
751 	}
752 	current_driver_name[i] = 0;
753 	current_driver = NULL;
754 
755 	pr_info("enable driver filter for driver [%s]\n",
756 		current_driver_name);
757 
758 out_unlock:
759 	write_unlock_irqrestore(&driver_name_lock, flags);
760 
761 	return count;
762 }
763 
764 static const struct file_operations filter_fops = {
765 	.read  = filter_read,
766 	.write = filter_write,
767 	.llseek = default_llseek,
768 };
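/*
 * Typical usage from userspace (illustrative driver name, assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   echo "e1000" > /sys/kernel/debug/dma-api/driver_filter
 *   echo ""      > /sys/kernel/debug/dma-api/driver_filter   (filter off)
 */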
769 
770 static int dump_show(struct seq_file *seq, void *v)
771 {
772 	int idx;
773 
774 	for (idx = 0; idx < HASH_SIZE; idx++) {
775 		struct hash_bucket *bucket = &dma_entry_hash[idx];
776 		struct dma_debug_entry *entry;
777 		unsigned long flags;
778 
779 		spin_lock_irqsave(&bucket->lock, flags);
780 		list_for_each_entry(entry, &bucket->list, list) {
781 			seq_printf(seq,
782 				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
783 				   dev_name(entry->dev),
784 				   dev_driver_string(entry->dev),
785 				   type2name[entry->type], idx,
786 				   phys_addr(entry), entry->pfn,
787 				   entry->dev_addr, entry->size,
788 				   dir2name[entry->direction],
789 				   maperr2str[entry->map_err_type]);
790 		}
791 		spin_unlock_irqrestore(&bucket->lock, flags);
792 	}
793 	return 0;
794 }
795 DEFINE_SHOW_ATTRIBUTE(dump);
796 
797 static int __init dma_debug_fs_init(void)
798 {
799 	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
800 
801 	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
802 	debugfs_create_u32("error_count", 0444, dentry, &error_count);
803 	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
804 	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
805 	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
806 	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
807 	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
808 	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
809 	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
810 
811 	return 0;
812 }
813 core_initcall_sync(dma_debug_fs_init);
814 
815 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
816 {
817 	struct dma_debug_entry *entry;
818 	unsigned long flags;
819 	int count = 0, i;
820 
821 	for (i = 0; i < HASH_SIZE; ++i) {
822 		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
823 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
824 			if (entry->dev == dev) {
825 				count += 1;
826 				*out_entry = entry;
827 			}
828 		}
829 		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
830 	}
831 
832 	return count;
833 }
834 
835 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
836 {
837 	struct device *dev = data;
838 	struct dma_debug_entry *entry;
839 	int count;
840 
841 	if (dma_debug_disabled())
842 		return 0;
843 
844 	switch (action) {
845 	case BUS_NOTIFY_UNBOUND_DRIVER:
846 		count = device_dma_allocations(dev, &entry);
847 		if (count == 0)
848 			break;
849 		err_printk(dev, entry, "device driver has pending "
850 				"DMA allocations while released from device "
851 				"[count=%d]\n"
852 				"One of leaked entries details: "
853 				"[device address=0x%016llx] [size=%llu bytes] "
854 				"[mapped with %s] [mapped as %s]\n",
855 			count, entry->dev_addr, entry->size,
856 			dir2name[entry->direction], type2name[entry->type]);
857 		break;
858 	default:
859 		break;
860 	}
861 
862 	return 0;
863 }
864 
865 void dma_debug_add_bus(struct bus_type *bus)
866 {
867 	struct notifier_block *nb;
868 
869 	if (dma_debug_disabled())
870 		return;
871 
872 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
873 	if (nb == NULL) {
874 		pr_err("dma_debug_add_bus: out of memory\n");
875 		return;
876 	}
877 
878 	nb->notifier_call = dma_debug_device_change;
879 
880 	bus_register_notifier(bus, nb);
881 }
882 
883 static int dma_debug_init(void)
884 {
885 	int i, nr_pages;
886 
887 	/* Do not use dma_debug_initialized here, since we really want to be
888 	 * called to set dma_debug_initialized
889 	 */
890 	if (global_disable)
891 		return 0;
892 
893 	for (i = 0; i < HASH_SIZE; ++i) {
894 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
895 		spin_lock_init(&dma_entry_hash[i].lock);
896 	}
897 
898 	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
899 	for (i = 0; i < nr_pages; ++i)
900 		dma_debug_create_entries(GFP_KERNEL);
901 	if (num_free_entries >= nr_prealloc_entries) {
902 		pr_info("preallocated %d debug entries\n", nr_total_entries);
903 	} else if (num_free_entries > 0) {
904 		pr_warn("%d debug entries requested but only %d allocated\n",
905 			nr_prealloc_entries, nr_total_entries);
906 	} else {
907 		pr_err("debugging out of memory error - disabled\n");
908 		global_disable = true;
909 
910 		return 0;
911 	}
912 	min_free_entries = num_free_entries;
913 
914 	dma_debug_initialized = true;
915 
916 	pr_info("debugging enabled by kernel config\n");
917 	return 0;
918 }
919 core_initcall(dma_debug_init);
920 
921 static __init int dma_debug_cmdline(char *str)
922 {
923 	if (!str)
924 		return -EINVAL;
925 
926 	if (strncmp(str, "off", 3) == 0) {
927 		pr_info("debugging disabled on kernel command line\n");
928 		global_disable = true;
929 	}
930 
931 	return 1;
932 }
933 
934 static __init int dma_debug_entries_cmdline(char *str)
935 {
936 	if (!str)
937 		return -EINVAL;
938 	if (!get_option(&str, &nr_prealloc_entries))
939 		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
940 	return 1;
941 }
942 
943 __setup("dma_debug=", dma_debug_cmdline);
944 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
945 
946 static void check_unmap(struct dma_debug_entry *ref)
947 {
948 	struct dma_debug_entry *entry;
949 	struct hash_bucket *bucket;
950 	unsigned long flags;
951 
952 	bucket = get_hash_bucket(ref, &flags);
953 	entry = bucket_find_exact(bucket, ref);
954 
955 	if (!entry) {
956 		/* must drop lock before calling dma_mapping_error */
957 		put_hash_bucket(bucket, flags);
958 
959 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
960 			err_printk(ref->dev, NULL,
961 				   "device driver tries to free an "
962 				   "invalid DMA memory address\n");
963 		} else {
964 			err_printk(ref->dev, NULL,
965 				   "device driver tries to free DMA "
966 				   "memory it has not allocated [device "
967 				   "address=0x%016llx] [size=%llu bytes]\n",
968 				   ref->dev_addr, ref->size);
969 		}
970 		return;
971 	}
972 
973 	if (ref->size != entry->size) {
974 		err_printk(ref->dev, entry, "device driver frees "
975 			   "DMA memory with different size "
976 			   "[device address=0x%016llx] [map size=%llu bytes] "
977 			   "[unmap size=%llu bytes]\n",
978 			   ref->dev_addr, entry->size, ref->size);
979 	}
980 
981 	if (ref->type != entry->type) {
982 		err_printk(ref->dev, entry, "device driver frees "
983 			   "DMA memory with wrong function "
984 			   "[device address=0x%016llx] [size=%llu bytes] "
985 			   "[mapped as %s] [unmapped as %s]\n",
986 			   ref->dev_addr, ref->size,
987 			   type2name[entry->type], type2name[ref->type]);
988 	} else if ((entry->type == dma_debug_coherent) &&
989 		   (phys_addr(ref) != phys_addr(entry))) {
990 		err_printk(ref->dev, entry, "device driver frees "
991 			   "DMA memory with different CPU address "
992 			   "[device address=0x%016llx] [size=%llu bytes] "
993 			   "[cpu alloc address=0x%016llx] "
994 			   "[cpu free address=0x%016llx]",
995 			   ref->dev_addr, ref->size,
996 			   phys_addr(entry),
997 			   phys_addr(ref));
998 	}
999 
1000 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1001 	    ref->sg_call_ents != entry->sg_call_ents) {
1002 		err_printk(ref->dev, entry, "device driver frees "
1003 			   "DMA sg list with different entry count "
1004 			   "[map count=%d] [unmap count=%d]\n",
1005 			   entry->sg_call_ents, ref->sg_call_ents);
1006 	}
1007 
1008 	/*
1009 	 * This may not be a bug in reality - but most implementations of the
1010 	 * DMA API don't handle this properly, so check for it here
1011 	 */
1012 	if (ref->direction != entry->direction) {
1013 		err_printk(ref->dev, entry, "device driver frees "
1014 			   "DMA memory with different direction "
1015 			   "[device address=0x%016llx] [size=%llu bytes] "
1016 			   "[mapped with %s] [unmapped with %s]\n",
1017 			   ref->dev_addr, ref->size,
1018 			   dir2name[entry->direction],
1019 			   dir2name[ref->direction]);
1020 	}
1021 
1022 	/*
1023 	 * Drivers should use dma_mapping_error() to check the returned
1024 	 * addresses of dma_map_single() and dma_map_page().
1025 	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1026 	 */
1027 	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1028 		err_printk(ref->dev, entry,
1029 			   "device driver failed to check map error"
1030 			   "[device address=0x%016llx] [size=%llu bytes] "
1031 			   "[mapped as %s]",
1032 			   ref->dev_addr, ref->size,
1033 			   type2name[entry->type]);
1034 	}
1035 
1036 	hash_bucket_del(entry);
1037 	dma_entry_free(entry);
1038 
1039 	put_hash_bucket(bucket, flags);
1040 }
1041 
1042 static void check_for_stack(struct device *dev,
1043 			    struct page *page, size_t offset)
1044 {
1045 	void *addr;
1046 	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1047 
1048 	if (!stack_vm_area) {
1049 		/* Stack is direct-mapped. */
1050 		if (PageHighMem(page))
1051 			return;
1052 		addr = page_address(page) + offset;
1053 		if (object_is_on_stack(addr))
1054 			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1055 	} else {
1056 		/* Stack is vmalloced. */
1057 		int i;
1058 
1059 		for (i = 0; i < stack_vm_area->nr_pages; i++) {
1060 			if (page != stack_vm_area->pages[i])
1061 				continue;
1062 
1063 			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1064 			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1065 			break;
1066 		}
1067 	}
1068 }
1069 
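/*
 * Both ranges are treated as half-open intervals, [addr, addr + len) vs.
 * [start, end), so ranges that merely share a boundary do not count as
 * overlapping.
 */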
1070 static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
1071 {
1072 	unsigned long a1 = (unsigned long)addr;
1073 	unsigned long b1 = a1 + len;
1074 	unsigned long a2 = (unsigned long)start;
1075 	unsigned long b2 = (unsigned long)end;
1076 
1077 	return !(b1 <= a2 || a1 >= b2);
1078 }
1079 
1080 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1081 {
1082 	if (overlap(addr, len, _stext, _etext) ||
1083 	    overlap(addr, len, __start_rodata, __end_rodata))
1084 		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1085 }
1086 
1087 static void check_sync(struct device *dev,
1088 		       struct dma_debug_entry *ref,
1089 		       bool to_cpu)
1090 {
1091 	struct dma_debug_entry *entry;
1092 	struct hash_bucket *bucket;
1093 	unsigned long flags;
1094 
1095 	bucket = get_hash_bucket(ref, &flags);
1096 
1097 	entry = bucket_find_contain(&bucket, ref, &flags);
1098 
1099 	if (!entry) {
1100 		err_printk(dev, NULL, "device driver tries "
1101 				"to sync DMA memory it has not allocated "
1102 				"[device address=0x%016llx] [size=%llu bytes]\n",
1103 				(unsigned long long)ref->dev_addr, ref->size);
1104 		goto out;
1105 	}
1106 
1107 	if (ref->size > entry->size) {
1108 		err_printk(dev, entry, "device driver syncs"
1109 				" DMA memory outside allocated range "
1110 				"[device address=0x%016llx] "
1111 				"[allocation size=%llu bytes] "
1112 				"[sync offset+size=%llu]\n",
1113 				entry->dev_addr, entry->size,
1114 				ref->size);
1115 	}
1116 
1117 	if (entry->direction == DMA_BIDIRECTIONAL)
1118 		goto out;
1119 
1120 	if (ref->direction != entry->direction) {
1121 		err_printk(dev, entry, "device driver syncs "
1122 				"DMA memory with different direction "
1123 				"[device address=0x%016llx] [size=%llu bytes] "
1124 				"[mapped with %s] [synced with %s]\n",
1125 				(unsigned long long)ref->dev_addr, entry->size,
1126 				dir2name[entry->direction],
1127 				dir2name[ref->direction]);
1128 	}
1129 
1130 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1131 		      !(ref->direction == DMA_TO_DEVICE))
1132 		err_printk(dev, entry, "device driver syncs "
1133 				"device read-only DMA memory for cpu "
1134 				"[device address=0x%016llx] [size=%llu bytes] "
1135 				"[mapped with %s] [synced with %s]\n",
1136 				(unsigned long long)ref->dev_addr, entry->size,
1137 				dir2name[entry->direction],
1138 				dir2name[ref->direction]);
1139 
1140 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1141 		       !(ref->direction == DMA_FROM_DEVICE))
1142 		err_printk(dev, entry, "device driver syncs "
1143 				"device write-only DMA memory to device "
1144 				"[device address=0x%016llx] [size=%llu bytes] "
1145 				"[mapped with %s] [synced with %s]\n",
1146 				(unsigned long long)ref->dev_addr, entry->size,
1147 				dir2name[entry->direction],
1148 				dir2name[ref->direction]);
1149 
1150 	/* syncing fewer sg entries than were mapped is legal (partial sync), so only flag a larger count */
1151 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1152 	    ref->sg_call_ents > entry->sg_call_ents) {
1153 		err_printk(ref->dev, entry, "device driver syncs "
1154 			   "DMA sg list count larger than map count "
1155 			   "[map count=%d] [sync count=%d]\n",
1156 			   entry->sg_call_ents, ref->sg_call_ents);
1157 	}
1158 
1159 out:
1160 	put_hash_bucket(bucket, flags);
1161 }
1162 
1163 static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1164 {
1165 #ifdef CONFIG_DMA_API_DEBUG_SG
1166 	unsigned int max_seg = dma_get_max_seg_size(dev);
1167 	u64 start, end, boundary = dma_get_seg_boundary(dev);
1168 
1169 	/*
1170 	 * Either the driver forgot to set dma_parms appropriately, or
1171 	 * whoever generated the list forgot to check them.
1172 	 */
1173 	if (sg->length > max_seg)
1174 		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1175 			   sg->length, max_seg);
1176 	/*
1177 	 * In some cases this could potentially be the DMA API
1178 	 * implementation's fault, but it would usually imply that
1179 	 * the scatterlist was built inappropriately to begin with.
1180 	 */
1181 	start = sg_dma_address(sg);
1182 	end = start + sg_dma_len(sg) - 1;
1183 	if ((start ^ end) & ~boundary)
1184 		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1185 			   start, end, boundary);
1186 #endif
1187 }
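/*
 * Boundary-check example (illustrative values): with a segment boundary
 * mask of 0xffff (64 KiB), a segment with start == 0xff00 and len == 0x200
 * ends at 0x100ff; (start ^ end) & ~0xffff == 0x10000, so the segment
 * straddles the boundary and is reported.
 */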
1188 
1189 void debug_dma_map_single(struct device *dev, const void *addr,
1190 			    unsigned long len)
1191 {
1192 	if (unlikely(dma_debug_disabled()))
1193 		return;
1194 
1195 	if (!virt_addr_valid(addr))
1196 		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1197 			   addr, len);
1198 
1199 	if (is_vmalloc_addr(addr))
1200 		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1201 			   addr, len);
1202 }
1203 EXPORT_SYMBOL(debug_dma_map_single);
1204 
1205 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1206 			size_t size, int direction, dma_addr_t dma_addr)
1207 {
1208 	struct dma_debug_entry *entry;
1209 
1210 	if (unlikely(dma_debug_disabled()))
1211 		return;
1212 
1213 	if (dma_mapping_error(dev, dma_addr))
1214 		return;
1215 
1216 	entry = dma_entry_alloc();
1217 	if (!entry)
1218 		return;
1219 
1220 	entry->dev       = dev;
1221 	entry->type      = dma_debug_single;
1222 	entry->pfn	 = page_to_pfn(page);
1223 	entry->offset	 = offset;
1224 	entry->dev_addr  = dma_addr;
1225 	entry->size      = size;
1226 	entry->direction = direction;
1227 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
1228 
1229 	check_for_stack(dev, page, offset);
1230 
1231 	if (!PageHighMem(page)) {
1232 		void *addr = page_address(page) + offset;
1233 
1234 		check_for_illegal_area(dev, addr, size);
1235 	}
1236 
1237 	add_dma_entry(entry);
1238 }
1239 
1240 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1241 {
1242 	struct dma_debug_entry ref;
1243 	struct dma_debug_entry *entry;
1244 	struct hash_bucket *bucket;
1245 	unsigned long flags;
1246 
1247 	if (unlikely(dma_debug_disabled()))
1248 		return;
1249 
1250 	ref.dev = dev;
1251 	ref.dev_addr = dma_addr;
1252 	bucket = get_hash_bucket(&ref, &flags);
1253 
1254 	list_for_each_entry(entry, &bucket->list, list) {
1255 		if (!exact_match(&ref, entry))
1256 			continue;
1257 
1258 		/*
1259 		 * The same physical address can be mapped multiple
1260 		 * times. Without a hardware IOMMU this results in the
1261 		 * same device addresses being put into the dma-debug
1262 		 * hash multiple times too. This can result in false
1263 		 * positives being reported. Therefore we implement a
1264 		 * best-fit algorithm here which updates the first entry
1265 		 * from the hash which fits the reference value and is
1266 		 * not currently listed as being checked.
1267 		 */
1268 		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1269 			entry->map_err_type = MAP_ERR_CHECKED;
1270 			break;
1271 		}
1272 	}
1273 
1274 	put_hash_bucket(bucket, flags);
1275 }
1276 EXPORT_SYMBOL(debug_dma_mapping_error);
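/*
 * Driver-side pattern that keeps map_err_type out of MAP_ERR_NOT_CHECKED
 * (a minimal sketch, not taken from this file):
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;		(mapping failed; entry marked checked)
 */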
1277 
1278 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1279 			  size_t size, int direction)
1280 {
1281 	struct dma_debug_entry ref = {
1282 		.type           = dma_debug_single,
1283 		.dev            = dev,
1284 		.dev_addr       = addr,
1285 		.size           = size,
1286 		.direction      = direction,
1287 	};
1288 
1289 	if (unlikely(dma_debug_disabled()))
1290 		return;
1291 	check_unmap(&ref);
1292 }
1293 
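/*
 * Note the two entry counts: @nents is what the driver passed in,
 * @mapped_ents is what the mapping operation returned - it can be smaller
 * when the implementation coalesces segments. One dma_debug_entry is
 * created per mapped segment.
 */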
1294 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1295 		      int nents, int mapped_ents, int direction)
1296 {
1297 	struct dma_debug_entry *entry;
1298 	struct scatterlist *s;
1299 	int i;
1300 
1301 	if (unlikely(dma_debug_disabled()))
1302 		return;
1303 
1304 	for_each_sg(sg, s, nents, i) {
1305 		check_for_stack(dev, sg_page(s), s->offset);
1306 		if (!PageHighMem(sg_page(s)))
1307 			check_for_illegal_area(dev, sg_virt(s), s->length);
1308 	}
1309 
1310 	for_each_sg(sg, s, mapped_ents, i) {
1311 		entry = dma_entry_alloc();
1312 		if (!entry)
1313 			return;
1314 
1315 		entry->type           = dma_debug_sg;
1316 		entry->dev            = dev;
1317 		entry->pfn	      = page_to_pfn(sg_page(s));
1318 		entry->offset	      = s->offset;
1319 		entry->size           = sg_dma_len(s);
1320 		entry->dev_addr       = sg_dma_address(s);
1321 		entry->direction      = direction;
1322 		entry->sg_call_ents   = nents;
1323 		entry->sg_mapped_ents = mapped_ents;
1324 
1325 		check_sg_segment(dev, s);
1326 
1327 		add_dma_entry(entry);
1328 	}
1329 }
1330 
1331 static int get_nr_mapped_entries(struct device *dev,
1332 				 struct dma_debug_entry *ref)
1333 {
1334 	struct dma_debug_entry *entry;
1335 	struct hash_bucket *bucket;
1336 	unsigned long flags;
1337 	int mapped_ents;
1338 
1339 	bucket       = get_hash_bucket(ref, &flags);
1340 	entry        = bucket_find_exact(bucket, ref);
1341 	mapped_ents  = 0;
1342 
1343 	if (entry)
1344 		mapped_ents = entry->sg_mapped_ents;
1345 	put_hash_bucket(bucket, flags);
1346 
1347 	return mapped_ents;
1348 }
1349 
1350 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1351 			int nelems, int dir)
1352 {
1353 	struct scatterlist *s;
1354 	int mapped_ents = 0, i;
1355 
1356 	if (unlikely(dma_debug_disabled()))
1357 		return;
1358 
1359 	for_each_sg(sglist, s, nelems, i) {
1360 
1361 		struct dma_debug_entry ref = {
1362 			.type           = dma_debug_sg,
1363 			.dev            = dev,
1364 			.pfn		= page_to_pfn(sg_page(s)),
1365 			.offset		= s->offset,
1366 			.dev_addr       = sg_dma_address(s),
1367 			.size           = sg_dma_len(s),
1368 			.direction      = dir,
1369 			.sg_call_ents   = nelems,
1370 		};
1371 
1372 		if (mapped_ents && i >= mapped_ents)
1373 			break;
1374 
1375 		if (!i)
1376 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1377 
1378 		check_unmap(&ref);
1379 	}
1380 }
1381 
1382 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1383 			      dma_addr_t dma_addr, void *virt)
1384 {
1385 	struct dma_debug_entry *entry;
1386 
1387 	if (unlikely(dma_debug_disabled()))
1388 		return;
1389 
1390 	if (unlikely(virt == NULL))
1391 		return;
1392 
1393 	/* handle vmalloc and linear addresses */
1394 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1395 		return;
1396 
1397 	entry = dma_entry_alloc();
1398 	if (!entry)
1399 		return;
1400 
1401 	entry->type      = dma_debug_coherent;
1402 	entry->dev       = dev;
1403 	entry->offset	 = offset_in_page(virt);
1404 	entry->size      = size;
1405 	entry->dev_addr  = dma_addr;
1406 	entry->direction = DMA_BIDIRECTIONAL;
1407 
1408 	if (is_vmalloc_addr(virt))
1409 		entry->pfn = vmalloc_to_pfn(virt);
1410 	else
1411 		entry->pfn = page_to_pfn(virt_to_page(virt));
1412 
1413 	add_dma_entry(entry);
1414 }
1415 
1416 void debug_dma_free_coherent(struct device *dev, size_t size,
1417 			 void *virt, dma_addr_t addr)
1418 {
1419 	struct dma_debug_entry ref = {
1420 		.type           = dma_debug_coherent,
1421 		.dev            = dev,
1422 		.offset		= offset_in_page(virt),
1423 		.dev_addr       = addr,
1424 		.size           = size,
1425 		.direction      = DMA_BIDIRECTIONAL,
1426 	};
1427 
1428 	/* handle vmalloc and linear addresses */
1429 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1430 		return;
1431 
1432 	if (is_vmalloc_addr(virt))
1433 		ref.pfn = vmalloc_to_pfn(virt);
1434 	else
1435 		ref.pfn = page_to_pfn(virt_to_page(virt));
1436 
1437 	if (unlikely(dma_debug_disabled()))
1438 		return;
1439 
1440 	check_unmap(&ref);
1441 }
1442 
1443 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1444 			    int direction, dma_addr_t dma_addr)
1445 {
1446 	struct dma_debug_entry *entry;
1447 
1448 	if (unlikely(dma_debug_disabled()))
1449 		return;
1450 
1451 	entry = dma_entry_alloc();
1452 	if (!entry)
1453 		return;
1454 
1455 	entry->type		= dma_debug_resource;
1456 	entry->dev		= dev;
1457 	entry->pfn		= PHYS_PFN(addr);
1458 	entry->offset		= offset_in_page(addr);
1459 	entry->size		= size;
1460 	entry->dev_addr		= dma_addr;
1461 	entry->direction	= direction;
1462 	entry->map_err_type	= MAP_ERR_NOT_CHECKED;
1463 
1464 	add_dma_entry(entry);
1465 }
1466 
1467 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1468 			      size_t size, int direction)
1469 {
1470 	struct dma_debug_entry ref = {
1471 		.type           = dma_debug_resource,
1472 		.dev            = dev,
1473 		.dev_addr       = dma_addr,
1474 		.size           = size,
1475 		.direction      = direction,
1476 	};
1477 
1478 	if (unlikely(dma_debug_disabled()))
1479 		return;
1480 
1481 	check_unmap(&ref);
1482 }
1483 
1484 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1485 				   size_t size, int direction)
1486 {
1487 	struct dma_debug_entry ref;
1488 
1489 	if (unlikely(dma_debug_disabled()))
1490 		return;
1491 
1492 	ref.type         = dma_debug_single;
1493 	ref.dev          = dev;
1494 	ref.dev_addr     = dma_handle;
1495 	ref.size         = size;
1496 	ref.direction    = direction;
1497 	ref.sg_call_ents = 0;
1498 
1499 	check_sync(dev, &ref, true);
1500 }
1501 
1502 void debug_dma_sync_single_for_device(struct device *dev,
1503 				      dma_addr_t dma_handle, size_t size,
1504 				      int direction)
1505 {
1506 	struct dma_debug_entry ref;
1507 
1508 	if (unlikely(dma_debug_disabled()))
1509 		return;
1510 
1511 	ref.type         = dma_debug_single;
1512 	ref.dev          = dev;
1513 	ref.dev_addr     = dma_handle;
1514 	ref.size         = size;
1515 	ref.direction    = direction;
1516 	ref.sg_call_ents = 0;
1517 
1518 	check_sync(dev, &ref, false);
1519 }
1520 
1521 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1522 			       int nelems, int direction)
1523 {
1524 	struct scatterlist *s;
1525 	int mapped_ents = 0, i;
1526 
1527 	if (unlikely(dma_debug_disabled()))
1528 		return;
1529 
1530 	for_each_sg(sg, s, nelems, i) {
1531 
1532 		struct dma_debug_entry ref = {
1533 			.type           = dma_debug_sg,
1534 			.dev            = dev,
1535 			.pfn		= page_to_pfn(sg_page(s)),
1536 			.offset		= s->offset,
1537 			.dev_addr       = sg_dma_address(s),
1538 			.size           = sg_dma_len(s),
1539 			.direction      = direction,
1540 			.sg_call_ents   = nelems,
1541 		};
1542 
1543 		if (!i)
1544 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1545 
1546 		if (i >= mapped_ents)
1547 			break;
1548 
1549 		check_sync(dev, &ref, true);
1550 	}
1551 }
1552 
1553 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1554 				  int nelems, int direction)
1555 {
1556 	struct scatterlist *s;
1557 	int mapped_ents = 0, i;
1558 
1559 	if (unlikely(dma_debug_disabled()))
1560 		return;
1561 
1562 	for_each_sg(sg, s, nelems, i) {
1563 
1564 		struct dma_debug_entry ref = {
1565 			.type           = dma_debug_sg,
1566 			.dev            = dev,
1567 			.pfn		= page_to_pfn(sg_page(s)),
1568 			.offset		= s->offset,
1569 			.dev_addr       = sg_dma_address(s),
1570 			.size           = sg_dma_len(s),
1571 			.direction      = direction,
1572 			.sg_call_ents   = nelems,
1573 		};
1574 		if (!i)
1575 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1576 
1577 		if (i >= mapped_ents)
1578 			break;
1579 
1580 		check_sync(dev, &ref, false);
1581 	}
1582 }
1583 
1584 static int __init dma_debug_driver_setup(char *str)
1585 {
1586 	int i;
1587 
1588 	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1589 		current_driver_name[i] = *str;
1590 		if (*str == 0)
1591 			break;
1592 	}
1593 
1594 	if (current_driver_name[0])
1595 		pr_info("enable driver filter for driver [%s]\n",
1596 			current_driver_name);
1597 
1598 
1599 	return 1;
1600 }
1601 __setup("dma_debug_driver=", dma_debug_driver_setup);
1602