/*
 * DMA engine core: registration of offload-capable DMA devices and their
 * channels, and allocation of those channels to clients.  Public channels
 * are handed out through per-cpu, per-operation lookup tables; private
 * (slave) channels are allocated exclusively via the request APIs below.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
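
/* --- sysfs implementation --- */
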
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
        &dev_attr_memcpy_count.attr,
        &dev_attr_bytes_transferred.attr,
        &dev_attr_in_use.attr,
        NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name = "dma",
        .dev_groups = dma_dev_groups,
        .dev_release = chan_dev_release,
};
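
/* --- client and device registration --- */
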
#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
                            const dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}
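
/*
 * balance_ref_count - take enough module references so that @chan's
 * client count catches up with the global dmaengine reference count.
 *
 * Called with dma_list_mutex held.
 */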
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}
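
/*
 * dma_chan_get - take a reference on a channel, allocating its resources
 * on the first reference.  Returns 0 on success or -ENODEV if the
 * channel's module is going away.
 *
 * Called with dma_list_mutex held.
 */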
static int dma_chan_get(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);
        int ret;

        /* the channel is already in use, just bump the client count */
        if (chan->client_count) {
                __module_get(owner);
                goto out;
        }

        if (!try_module_get(owner))
                return -ENODEV;

        /* allocate the channel's resources on first use */
        if (chan->device->device_alloc_chan_resources) {
                ret = chan->device->device_alloc_chan_resources(chan);
                if (ret < 0)
                        goto err_out;
        }

        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                balance_ref_count(chan);

out:
        chan->client_count++;
        return 0;

err_out:
        module_put(owner);
        return ret;
}
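
/*
 * dma_chan_put - drop a reference on a channel, freeing its resources when
 * the last client reference is gone.
 *
 * Called with dma_list_mutex held.
 */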
static void dma_chan_put(struct dma_chan *chan)
{
        /* nothing to do if the channel was never successfully acquired */
        if (!chan->client_count)
                return;

        chan->client_count--;
        module_put(dma_chan_to_owner(chan));

        /* the last reference went away, release the channel's resources */
        if (!chan->client_count && chan->device->device_free_chan_resources)
                chan->device->device_free_chan_resources(chan);
}

/*
 * dma_sync_wait - spin wait for the dma operation identified by @cookie
 * to complete on @chan.
 */
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s: timeout!\n", __func__);
                        return DMA_ERROR;
                }
                if (status != DMA_IN_PROGRESS)
                        break;
                cpu_relax();
        } while (1);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/*
 * dma_cap_mask_all - a capability mask with every operation type set, used
 * to iterate over all operation types below.
 */
static dma_cap_mask_t dma_cap_mask_all;

/*
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/*
 * channel_table - per-cpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /*
         * 'interrupt', 'private' and 'slave' are channel capabilities,
         * but are not associated with an operation, so they do not need
         * an entry in the channel_table.
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);
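
/*
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */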
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
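
/*
 * dma_issue_pending_all - flush all pending operations across all channels
 */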
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
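
/*
 * dma_chan_is_local - returns true if the channel's device is in the same
 * NUMA node as @cpu (or has no node affinity).
 */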
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
        int node = dev_to_node(chan->device->dev);

        return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}
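
/*
 * min_chan - find the least referenced channel that can carry out @cap,
 * preferring channels that are local to @cpu's NUMA node.  The chosen
 * channel's table_count is incremented.
 *
 * Called with dma_list_mutex held.
 */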
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *min = NULL;
        struct dma_chan *localmin = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min || chan->table_count < min->table_count)
                                min = chan;

                        if (dma_chan_is_local(chan, cpu))
                                if (!localmin ||
                                    chan->table_count < localmin->table_count)
                                        localmin = chan;
                }
        }

        chan = localmin ? localmin : min;

        if (chan)
                chan->table_count++;

        return chan;
}
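
/*
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimizes for cpu isolation (each cpu gets a dedicated channel per
 * operation type) in the SMP case, and operation isolation (avoiding
 * multi-tasking channels) in the non-SMP case.
 *
 * Called with dma_list_mutex held.
 */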
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute the available channels */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        chan = min_chan(cap, cpu);
                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
        struct dma_device *device;

        if (!chan || !caps)
                return -EINVAL;

        device = chan->device;

        /* check if the channel supports slave transactions */
        if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
                return -ENXIO;

        /*
         * Check whether the device reports the generic slave capabilities;
         * if not, it does not support any kind of slave capability
         * reporting and we cannot tell the caller anything useful.
         */
        if (!device->directions)
                return -ENXIO;

        caps->src_addr_widths = device->src_addr_widths;
        caps->dst_addr_widths = device->dst_addr_widths;
        caps->directions = device->directions;
        caps->residue_granularity = device->residue_granularity;

        /*
         * Some devices implement only pause (e.g. to get residuum) but no
         * resume.  However cmd_pause is advertised as pause AND resume.
         */
        caps->cmd_pause = !!(device->device_pause && device->device_resume);
        caps->cmd_terminate = !!device->device_terminate_all;

        return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
                                          struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }

        /*
         * Devices with multiple channels need special handling as we need
         * to make sure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}
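
/*
 * dma_get_slave_channel - try to get the specified channel exclusively
 * @chan: target channel
 */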
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
        int err = -EBUSY;

        /* lock against __dma_request_channel */
        mutex_lock(&dma_list_mutex);

        if (chan->client_count == 0) {
                err = dma_chan_get(chan);
                if (err)
                        pr_debug("%s: failed to get %s: (%d)\n",
                                 __func__, dma_chan_name(chan), err);
        } else
                chan = NULL;

        mutex_unlock(&dma_list_mutex);

        return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        int err;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* lock against __dma_request_channel */
        mutex_lock(&dma_list_mutex);

        chan = private_candidate(&mask, device, NULL, NULL);
        if (chan) {
                dma_cap_set(DMA_PRIVATE, device->cap_mask);
                device->privatecnt++;
                err = dma_chan_get(chan);
                if (err) {
                        pr_debug("%s: failed to get %s: (%d)\n",
                                 __func__, dma_chan_name(chan), err);
                        chan = NULL;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                }
        }

        mutex_unlock(&dma_list_mutex);

        return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
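
/*
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns a pointer to a suitable DMA channel on success or NULL.
 */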
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* find a candidate channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /*
                         * Found a suitable channel, try to grab, prep, and
                         * return it.  Set DMA_PRIVATE first so the channel
                         * is not published in the general-purpose allocator
                         * and balance_ref_count leaves it alone.
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        device->privatecnt++;
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n",
                                         __func__, dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_debug("%s: failed to get %s: (%d)\n",
                                         __func__, dma_chan_name(chan), err);
                        else
                                break;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n",
                 __func__,
                 chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
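
/*
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev: pointer to the client device structure
 * @name: slave channel name
 *
 * Returns a pointer to the requested DMA channel on success or an ERR_PTR.
 */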
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
                                                  const char *name)
{
        /* if a device-tree node is present, get the slave info from there */
        if (dev->of_node)
                return of_dma_request_slave_channel(dev->of_node, name);

        /* if the device was enumerated by ACPI, get the slave info from there */
        if (ACPI_HANDLE(dev))
                return acpi_dma_request_slave_chan_by_name(dev, name);

        return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
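
/*
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to the client device structure
 * @name: slave channel name
 *
 * Returns a pointer to the requested DMA channel on success or NULL.
 */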
struct dma_chan *dma_request_slave_channel(struct device *dev,
                                           const char *name)
{
        struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);

        if (IS_ERR(ch))
                return NULL;
        return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop the PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
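
/*
 * dmaengine_get - register interest in dma_channels
 */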
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed, and in the process it
                                 * called dma_async_device_unregister()
                                 */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_debug("%s: failed to get %s: (%d)\n",
                                         __func__, dma_chan_name(chan), err);
                }
        }

        /*
         * If this is the first reference and there were channels waiting,
         * we need to rebalance to get those channels incorporated into
         * the channel table.
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
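
/*
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */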
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
        /*
         * A device that satisfies this test has channels that will never
         * cause an async_tx channel switch event as all possible operation
         * types can be handled.
         */
        #ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        return true;
}

static int get_dma_id(struct dma_device *device)
{
        int rc;

        mutex_lock(&dma_list_mutex);

        rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
        if (rc >= 0)
                device->dev_id = rc;

        mutex_unlock(&dma_list_mutex);
        return rc < 0 ? rc : 0;
}
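
/*
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */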
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
               !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
               !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
               !device->device_prep_dma_xor_val);
        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
               !device->device_prep_dma_pq);
        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
               !device->device_prep_dma_pq_val);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
               !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
               !device->device_prep_dma_sg);
        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
               !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
               !device->device_prep_interleaved_dma);

        BUG_ON(!device->device_tx_status);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        /*
         * A device that supports every generic operation type can back
         * async_tx without ever forcing a channel switch.
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        rc = get_dma_id(device);
        if (rc != 0) {
                kfree(idr_ref);
                return rc;
        }

        atomic_set(idr_ref, 0);

        /* represent the channels in sysfs */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++;
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, device->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(idr_ref);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
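
/*
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */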
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/*
 * Pools of preallocated struct dmaengine_unmap_data, sized by the number
 * of dma addresses each entry can hold.
 */
struct dmaengine_unmap_pool {
        struct kmem_cache *cache;
        const char *name;
        mempool_t *pool;
        size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
        __UNMAP_POOL(2),
        #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
        __UNMAP_POOL(16),
        __UNMAP_POOL(128),
        __UNMAP_POOL(256),
        #endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
        int order = get_count_order(nr);

        switch (order) {
        case 0 ... 1:
                return &unmap_pool[0];
        case 2 ... 4:
                return &unmap_pool[1];
        case 5 ... 7:
                return &unmap_pool[2];
        case 8:
                return &unmap_pool[3];
        default:
                BUG();
                return NULL;
        }
}

static void dmaengine_unmap(struct kref *kref)
{
        struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
        struct device *dev = unmap->dev;
        int cnt, i;

        cnt = unmap->to_cnt;
        for (i = 0; i < cnt; i++)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_TO_DEVICE);
        cnt += unmap->from_cnt;
        for (; i < cnt; i++)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_FROM_DEVICE);
        cnt += unmap->bidi_cnt;
        for (; i < cnt; i++) {
                if (unmap->addr[i] == 0)
                        continue;
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
        cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
        if (unmap)
                kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
                struct dmaengine_unmap_pool *p = &unmap_pool[i];

                if (p->pool)
                        mempool_destroy(p->pool);
                p->pool = NULL;
                if (p->cache)
                        kmem_cache_destroy(p->cache);
                p->cache = NULL;
        }
}

static int __init dmaengine_init_unmap_pool(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
                struct dmaengine_unmap_pool *p = &unmap_pool[i];
                size_t size;

                size = sizeof(struct dmaengine_unmap_data) +
                       sizeof(dma_addr_t) * p->size;

                p->cache = kmem_cache_create(p->name, size, 0,
                                             SLAB_HWCACHE_ALIGN, NULL);
                if (!p->cache)
                        break;
                p->pool = mempool_create_slab_pool(1, p->cache);
                if (!p->pool)
                        break;
        }

        if (i == ARRAY_SIZE(unmap_pool))
                return 0;

        dmaengine_destroy_unmap_pool();
        return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
        struct dmaengine_unmap_data *unmap;

        unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
        if (!unmap)
                return NULL;

        memset(unmap, 0, sizeof(*unmap));
        kref_init(&unmap->kref);
        unmap->dev = dev;
        unmap->map_cnt = nr;

        return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
        #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
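
/*
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */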
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_COMPLETE;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s timeout waiting for descriptor submission\n",
                               __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
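
/*
 * dma_run_dependencies - helper routine for dma drivers to process
 * (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */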
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = txd_next(tx);
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        txd_clear_next(tx);
        chan = dep->chan;

        /*
         * Keep submitting up until a channel switch is detected; in that
         * case we will be called again as a result of processing the
         * interrupt from the other channel.
         */
        for (; dep; dep = dep_next) {
                txd_lock(dep);
                txd_clear_parent(dep);
                dep_next = txd_next(dep);
                if (dep_next && dep_next->chan == chan)
                        txd_clear_next(dep); /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                txd_unlock(dep);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
        int err = dmaengine_init_unmap_pool();

        if (err)
                return err;
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);