/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/radix-tree.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE 1024ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5
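/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent, resource
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: device address of the mapping
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */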
struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	unsigned long    pfn;
	size_t           offset;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
	enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long      st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - must be set before dma_debug_init */
static u32 global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;

/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent __read_mostly;
static struct dentry *global_disable_dent __read_mostly;
static struct dentry *error_count_dent __read_mostly;
static struct dentry *show_all_errors_dent __read_mostly;
static struct dentry *show_num_errors_dent __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN 64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
				    "scatter-gather", "coherent",
				    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };
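/*
 * The access to some variables in the err_printk() macro below is racy.
 * We can't use atomic_t here because all these variables are exported to
 * debugfs, some of them even writable. This is also the reason why a lock
 * won't help much. But anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen
 *      is that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *      worst case this will result in one warning more in the system log
 *      than the user configured. This variable is used to determine the
 *      number of printed errors
 */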
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, "%s %s: " format,			\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)
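/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs we apply hashing.
 */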
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash is based on the dma address; with HASH_FN_SHIFT == 13
	 * and HASH_SIZE == 1024, bits 13..22 of the device address select
	 * the bucket.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return (a->dev_addr == b->dev_addr) &&
	       (a->dev == b->dev);
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;
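		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */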
		matches += 1;
		match_lvl = 0;
		if (entry->size == ref->size)
			match_lvl++;
		if (entry->type == ref->type)
			match_lvl++;
		if (entry->direction == ref->direction)
			match_lvl++;
		if (entry->sg_call_ents == ref->sg_call_ents)
			match_lvl++;

		if (match_lvl == 4) {
			/* perfect-fit found */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{

	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, &index, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
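/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * insert/remove/assert_idle time look up the tree. At insert time, if
 * the entry already exists we cannot store a second one for the same
 * cacheline; instead the number of overlapping mappings is kept in the
 * radix tree tags of the existing entry. With RADIX_TREE_MAX_TAGS tag
 * bits per entry the counter can represent up to
 * ACTIVE_CACHELINE_MAX_OVERLAP extra mappings before it saturates.
 */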
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);
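	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.  Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives, but
	 * that's not a big problem given the limited number of tag
	 * bits available to track overlaps.
	 */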
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ..mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}
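/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */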
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}
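/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */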
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}
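/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */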
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		pr_err("DMA-API: debugging out of memory - disabling\n");
		global_disable = true;
		spin_unlock_irqrestore(&free_entries_lock, flags);
		return NULL;
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);
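/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */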
static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;
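	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */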
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;
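	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first.
	 */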
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);
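	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */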
	if (!isalnum(buf[0])) {
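		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */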
		if (current_driver_name[0])
			pr_info("DMA-API: switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("DMA-API: enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		pr_err("DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  &global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	local_irq_save(flags);

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock(&dma_entry_hash[i].lock);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock(&dma_entry_hash[i].lock);
	}

	local_irq_restore(flags);

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "DMA-API: device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}
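/*
 * Let the architectures decide how many entries should be preallocated.
 */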
void dma_debug_init(u32 num_entries)
{
	int i;

	/* Check global_disable directly instead of dma_debug_disabled():
	 * dma_debug_initialized is only set at the end of this function,
	 * so dma_debug_disabled() would always be true at this point.
	 */
	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("DMA-API: error creating debugfs entries - disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		pr_err("DMA-API: debugging out of memory error - disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("DMA-API: debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

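/*
 * Kernel command line usage:
 *   dma_debug=off         - disable the DMA-API debugging facility
 *   dma_debug_entries=<n> - override the preallocated entry count
 */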
__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]\n",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "DMA-API: device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from "
			   "stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _text, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

out:
	put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_page;
	entry->pfn = page_to_pfn(page);
	entry->offset = offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	if (map_single)
		entry->type = dma_debug_single;

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;
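		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */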
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_page,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->pfn = page_to_pfn(sg_page(s));
		entry->offset = s->offset;
		entry->size = sg_dma_len(s);
		entry->dev_addr = sg_dma_address(s);
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_coherent;
	entry->dev = dev;
	entry->pfn = page_to_pfn(virt_to_page(virt));
	entry->offset = (size_t) virt & ~PAGE_MASK;
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.pfn            = page_to_pfn(virt_to_page(virt)),
		.offset         = (size_t) virt & ~PAGE_MASK,
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_resource;
	entry->dev = dev;
	entry->pfn = __phys_to_pfn(addr);
	entry->offset = offset_in_page(addr);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_resource,
		.dev            = dev,
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = offset + size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = offset + size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("DMA-API: enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);