/*
 * DMA engine core.
 *
 * This code implements the DMA subsystem.  It provides a HW-neutral
 * interface for other kernel code to use asynchronous memory copy
 * capabilities, if present, and allows different HW DMA drivers to
 * register as providing this capability.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel().  Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered, it's just setup by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
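
/*
 * Example (illustrative sketch, not part of the original file): building a
 * capability mask for __dma_device_satisfies_mask() to test.  A device
 * satisfies the mask only if every requested capability bit is also set in
 * its cap_mask:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	if (dma_device_satisfies_mask(device, mask))
 *		...	device offloads both memcpy and xor
 */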

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return;
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
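
/*
 * Example (illustrative sketch, not part of the original file): spinning on
 * completion of a previously submitted transaction.  chan and cookie are
 * assumed to come from an earlier dma_async_memcpy_*() submission on this
 * channel:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		pr_err("offloaded copy failed or timed out\n");
 */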

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private' and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	struct dma_chan *chan;
	int cpu;

	cpu = get_cpu();
	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;
}
EXPORT_SYMBOL(dma_find_channel);
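
/*
 * Example (illustrative sketch, not part of the original file): opportunistic
 * use of a public channel.  The caller must hold a dmaengine_get() reference;
 * a NULL return means no registered device currently provides the capability
 * and the caller should fall back to a synchronous implementation:
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	else
 *		memcpy(dest, src, len);
 */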

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}

	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan->private = NULL;
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	chan->private = NULL;
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
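
/*
 * Example (illustrative sketch, not part of the original file): allocating an
 * exclusive channel via the dma_request_channel() wrapper around
 * __dma_request_channel().  The filter callback and the my_match_data
 * parameter are hypothetical:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev->parent == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_match_data);
 *	if (chan) {
 *		...	use the channel exclusively
 *		dma_release_channel(chan);
 *	}
 */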

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
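
/*
 * Example (illustrative sketch, not part of the original file): a subsystem
 * that opportunistically offloads copies brackets its usage with
 * dmaengine_get()/dmaengine_put() so channel providers cannot be unloaded
 * while dma_find_channel() may still be called:
 *
 *	dmaengine_get();
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);	use it, or fall back
 *	...
 *	dmaengine_put();
 */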

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

 idr_retry:
	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
		return -ENOMEM;
	mutex_lock(&dma_list_mutex);
	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
	mutex_unlock(&dma_list_mutex);
	if (rc == -EAGAIN)
		goto idr_retry;
	else if (rc != 0)
		return rc;

	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
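
/*
 * Example (illustrative sketch, not part of the original file): minimal
 * provider-side registration.  The my_* callbacks are hypothetical driver
 * functions, and a real driver must also populate &my_dma_device.channels
 * before registering:
 *
 *	dma_cap_set(DMA_MEMCPY, my_dma_device.cap_mask);
 *	my_dma_device.device_alloc_chan_resources = my_alloc_chan_resources;
 *	my_dma_device.device_free_chan_resources = my_free_chan_resources;
 *	my_dma_device.device_prep_dma_memcpy = my_prep_memcpy;
 *	my_dma_device.device_is_tx_complete = my_is_tx_complete;
 *	my_dma_device.device_issue_pending = my_issue_pending;
 *	my_dma_device.dev = &my_pdev->dev;
 *	err = dma_async_device_register(&my_dma_device);
 */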

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
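
/*
 * Example (illustrative sketch, not part of the original file): submitting a
 * copy and polling for its completion.  A negative cookie is an error; a
 * valid cookie can be polled once pending work is issued:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie)) {
 *		memcpy(dest, src, len);		fall back to a cpu copy
 *	} else {
 *		dma_async_issue_pending(chan);
 *		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *		       DMA_IN_PROGRESS)
 *			cpu_relax();
 *	}
 */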

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
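
/*
 * Example (illustrative sketch, not part of the original file): an async_tx
 * client blocking on a descriptor returned by an async_* submission, where
 * tx is the last descriptor in a dependency chain:
 *
 *	if (dma_wait_for_async_tx(tx) == DMA_ERROR)
 *		pr_err("%s: transaction failed or timed out\n", __func__);
 */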

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	tx->next = NULL;
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the dependency ... from the first call
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
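
/*
 * Example (illustrative sketch, not part of the original file): a DMA
 * driver's completion path (typically a tasklet) invokes
 * dma_run_dependencies() after a descriptor completes, so any dependent
 * async_tx operations get submitted.  my_desc is a hypothetical driver
 * descriptor embedding a dma_async_tx_descriptor:
 *
 *	if (my_desc->txd.callback)
 *		my_desc->txd.callback(my_desc->txd.callback_param);
 *	dma_run_dependencies(&my_desc->txd);
 */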

static int __init dma_bus_init(void)
{
	idr_init(&dma_idr);
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);