#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1
#define DMA_MAX_COOKIE	INT_MAX

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_COMPLETE = 0,
	DMA_SUCCESS = DMA_COMPLETE,	/* deprecated alias for DMA_COMPLETE */
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/**
 * Interleaved Transfer Request
 * ----------------------------
 * A chunk is collection of contiguous bytes to be transferred.
 * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
 * ICGs may or may not change between chunks.
 * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
 *  that when repeated an integral number of times, specifies the transfer.
 * A transfer template is specification of a Frame, the number of times
 *  it is to be repeated and other per-transfer attributes.
 *
 * Practically, a client driver would have ready a template for each
 *  type of transfer it is going to need during its lifetime and
 *  set only 'src_start' and 'dst_start' before submitting the requests.
 *
 *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
 *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
 *
 *    ==  Chunk size
 *    ... ICG
 */

/**
 * struct data_chunk - Element of scatter-gather list that makes a frame.
 * @size: Number of bytes to read from source.
 *	  size_dst := fn(op, size_src), so doesn't mean much for destination.
 * @icg: Number of bytes to jump after last src/dst address of this
 *	 chunk and before first src/dst address for next chunk.
 *	 Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
 *	 Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
 */
struct data_chunk {
	size_t size;
	size_t icg;
};

/**
 * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
 *	 and attributes.
 * @src_start: Bus address of source for the first chunk.
 * @dst_start: Bus address of destination for the first chunk.
 * @dir: Specifies the type of Source and Destination.
 * @src_inc: If the source address increments after reading from it.
 * @dst_inc: If the destination address increments after writing to it.
 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
 *		Otherwise, source is read contiguously (icg ignored).
 *		Ignored if src_inc is false.
 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
 *		Otherwise, destination is filled contiguously (icg ignored).
 *		Ignored if dst_inc is false.
 * @numf: Number of frames in this template.
 * @frame_size: Number of chunks in a frame i.e, size of sgl[].
 * @sgl: Array of {chunk,icg} pairs that make up a frame.
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};
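
/*
 * Illustrative sketch (not part of this header): using the template for a
 * strided 2D memory-to-memory copy.  The variables chan, src, dst, lines,
 * bytes_per_line and stride are hypothetical.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->src_start = src;
 *	xt->dst_start = dst;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->src_sgl = false;		// read the source contiguously
 *	xt->dst_sgl = true;		// skip 'icg' bytes after each line
 *	xt->numf = lines;		// one frame per line
 *	xt->frame_size = 1;		// a frame is a single chunk
 *	xt->sgl[0].size = bytes_per_line;
 *	xt->sgl[0].icg = stride - bytes_per_line;
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */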

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency
 *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
 *  (if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as
 *  single (if not set, do the destination dma-unmapping as page)
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation, in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};
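
/*
 * Illustrative sketch: the flags are OR-ed together and passed to the prep
 * routines.  A typical slave prep call wants a completion callback and
 * marks the descriptor as immediately reusable:
 *
 *	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 *
 *	tx = dmaengine_prep_slave_single(chan, buf, len,
 *					 DMA_MEM_TO_DEV, flags);
 */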

/**
 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
 * on a running channel.
 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
 * @DMA_PAUSE: pause ongoing transfers
 * @DMA_RESUME: resume paused transfer
 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
 * that need to runtime reconfigure the slave channels (as opposed to passing
 * configuration data in statically from the platform). An additional
 * argument of struct dma_slave_config must be passed in with this
 * command.
 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
 * into external start mode.
 */
enum dma_ctrl_cmd {
	DMA_TERMINATE_ALL,
	DMA_PAUSE,
	DMA_RESUME,
	DMA_SLAVE_CONFIG,
	FSLDMA_EXTERNAL_START,
};

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave
 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
 * legal values. DEPRECATED, drivers should use the direction argument
 * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
 * the dir field in the dma_interleaved_template structure.
 * @src_addr: this is the physical address where DMA slave data
 * should be read (RX), if the source is memory this argument is
 * ignored.
 * @dst_addr: this is the physical address where DMA slave data
 * should be written (TX), if the destination is memory this argument
 * is ignored.
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
 * Legal values: 1, 2, 4, 8.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in
 * units of the src_addr_width member, not bytes) that can be sent
 * in one burst to the device. Typically something like half the
 * FIFO depth on I/O peripherals so you don't overflow it.
 * @dst_maxburst: same as src_maxburst but for destination target
 * mutatis mutandis.
 * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
 * with 'true' if peripheral should be flow controller. Direction will be
 * selected at Runtime.
 * @slave_id: Slave requester id. Only valid for slave channels. The dma
 * slave peripheral will have unique id as dma requester which need to be
 * pass as slave config.
 *
 * This struct is passed in as configuration data to a DMA engine
 * in order to set up a certain channel for DMA transport at runtime.
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	bool device_fc;
	unsigned int slave_id;
};
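
/*
 * Illustrative sketch: configuring a channel for memory-to-peripheral
 * transfers before preparing descriptors.  The FIFO address and burst
 * size below are hypothetical device parameters, not part of this API.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *	if (ret)
 *		return ret;
 */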

/**
 * enum dma_residue_granularity - Granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
 *  DMA channel is only able to tell whether a descriptor has been completed or
 *  not; the residue field of dma_tx_state will always be 0.
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
 *  completed segment of the transfer (for cyclic transfers this is after each
 *  period).
 * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
 *  burst. This is typically only supported if the hardware has a progress
 *  register of some sort (e.g. a register with the current read/write address
 *  or a register with the number of bursts/beats/bytes that have been
 *  transferred or still need to be transferred).
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports;
 *	since enum dma_transfer_direction is not defined as bits for each
 *	type of direction, the dma controller should fill (1 << <TYPE>) and
 *	the same should be checked by the controller as well
 * @cmd_pause: true, if pause and thereby resume is supported
 * @cmd_terminate: true, if terminate cmd is supported
 * @residue_granularity: granularity of the reported transfer residue
 */
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	bool cmd_pause;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is provided, the function will be called
 * once for each free channel with a capability in @mask.  When the
 * function returns true the channel will be considered for allocation.
 * When the function returns false it means the channel is not a candidate.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[0];
};

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
 *	descriptor pending.  To be pushed on .issue_pending() call
 * @callback: routine to call after this operation is complete
 * @callback_result: error result from a DMA transaction
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: hook for generic DMA unmap data
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd,
			     struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *
txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *
txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd,
			     struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *
txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *
txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of
 * a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares a memory-to-memory scatter-gather operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function will
 *	be called after period_len bytes have been transferred.
 * @device_prep_interleaved_dma: Transfer expression in a generic way.
 * @device_config: Pushes a new configuration to a channel, return 0 or an
 *	error code
 * @device_control: manipulate all pending operations on a channel, returns
 *	zero or error code (deprecated in favour of the specific
 *	device_pause/device_resume/device_terminate_all hooks)
 * @device_pause: Pauses any transfer happening on a channel. Returns
 *	0 or an error code
 * @device_resume: Resumes any transfer on a channel previously
 *	paused. Returns 0 or an error code
 * @device_terminate_all: Aborts all transfers on a channel. Returns 0
 *	or an error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 * @device_slave_caps: return the slave channel capabilities
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
	int (*device_slave_caps)(struct dma_chan *chan,
				 struct dma_slave_caps *caps);
};

static inline int dmaengine_device_control(struct dma_chan *chan,
					   enum dma_ctrl_cmd cmd,
					   unsigned long arg)
{
	if (chan->device->device_control)
		return chan->device->device_control(chan, cmd, arg);

	return -ENOSYS;
}

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
					(unsigned long)config);
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}
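
/*
 * Illustrative sketch of the full slave transfer sequence built on the
 * helper above: prepare, set the completion callback, submit, then kick
 * the engine.  'buf_dma' must already be DMA-mapped; 'my_done' and
 * 'my_data' are hypothetical names.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done;
 *	tx->callback_param = my_data;
 *	cookie = dmaengine_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);	// nothing moves until this call
 */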

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags,
						    NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
		return -ENXIO;

	if (device->device_slave_caps)
		return device->device_slave_caps(chan, caps);

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->residue_granularity = device->residue_granularity;

	caps->cmd_pause = !!device->device_pause;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
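
/*
 * Illustrative sketch: query the capabilities before relying on optional
 * operations such as pause/resume.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause)
 *		dmaengine_pause(chan);
 */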

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return dmaengine_device_control(chan, DMA_PAUSE, 0);
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return dmaengine_device_control(chan, DMA_RESUME, 0);
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2,
					 size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}
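
/*
 * Worked example: an alignment shift of 2 means 4-byte alignment, so
 * dmaengine_check_align(2, 0, 2, 8) is false (off2 == 2 is not a
 * multiple of 4) while dmaengine_check_align(2, 0, 4, 8) is true.
 */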

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}
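
/*
 * Worked example: for dma_set_maxpq(dma, 8, 0), a continued PQ operation
 * (DMA_PREP_CONTINUE set, P enabled) yields dma_maxpq() == 8 - 3 == 5,
 * since three of the eight source slots re-inject P and Q; with
 * DMA_PREP_PQ_DISABLE_P also set only one extra slot is needed, so
 * dma_maxpq() == 7.
 */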

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
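
/*
 * Illustrative sketch: build a capability mask and request a matching
 * channel with dma_request_channel() (defined later in this header);
 * the filter callback and its parameter are optional.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 */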

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}
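
/*
 * Worked example: cookies increase monotonically and wrap from
 * DMA_MAX_COOKIE back to DMA_MIN_COOKIE.  With last_complete == 90 and
 * last_used == 100, cookie 88 is DMA_COMPLETE and cookie 95 is
 * DMA_IN_PROGRESS; the else-branch handles the inverted window that
 * exists right after a wrap, when last_complete > last_used.
 */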

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
		 u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn,
						     void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
{
	return NULL;
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	return __dma_request_channel(mask, fn, fn_param);
}
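
/*
 * Illustrative sketch: in a driver probe path, prefer the named slave
 * channel from DT/ACPI and fall back to a filter-based request on
 * platforms without such a binding ('my_filter' and 'my_param' are
 * hypothetical names).
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_slave_channel_compat(mask, my_filter, my_param,
 *						&pdev->dev, "tx");
 */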

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* LINUX_DMAENGINE_H */