#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>
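
/*
 * dma_cookie_t - an opaque DMA transaction identifier.
 *
 * Values >= DMA_MIN_COOKIE identify a submitted transaction; negative
 * values are error codes returned by dmaengine_submit().
 */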
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}
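
/*
 * enum dma_status - DMA transaction status, as reported by device_tx_status():
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */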
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};
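
/*
 * enum dma_transaction_type - the set of transaction types (capabilities)
 * a DMA engine can advertise; DMA_TX_TYPE_END must stay last, it sizes the
 * capability bitmap.
 */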
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,

	DMA_TX_TYPE_END,
};
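
/*
 * enum dma_transfer_direction - direction of a transfer with respect to
 * memory and the peripheral device.
 */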
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};
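
/*
 * Interleaved transfer support.
 *
 * struct data_chunk - one element of the scatter-gather template "frame":
 * @size: number of bytes to read/write in this chunk
 * @icg: inter-chunk gap, bytes to jump after this chunk before the next one
 * @dst_icg: inter-chunk gap on the destination side (used instead of @icg
 *	when non-zero)
 * @src_icg: inter-chunk gap on the source side (used instead of @icg
 *	when non-zero)
 */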
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};
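
/*
 * struct dma_interleaved_template - template for an interleaved transfer:
 * @src_start: bus address of the first chunk to read
 * @dst_start: bus address of the first chunk to write
 * @dir: transfer direction
 * @src_inc/@dst_inc: true if the address increments after each chunk
 * @src_sgl/@dst_sgl: true if the inter-chunk gaps are honoured on that side
 * @numf: number of frames in this template
 * @frame_size: number of chunks in a frame, i.e. the size of @sgl[]
 * @sgl: array of @frame_size chunks describing one frame
 */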
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};
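
/*
 * enum dma_ctrl_flags - flags passed to the prep_*() routines to tune how a
 * transaction is prepared:
 * @DMA_PREP_INTERRUPT: invoke the completion callback after this transaction
 * @DMA_CTRL_ACK: the descriptor is acknowledged up front and may be reused
 *	or freed by the engine as soon as it completes
 * @DMA_PREP_PQ_DISABLE_P: do not generate the P (xor) result
 * @DMA_PREP_PQ_DISABLE_Q: do not generate the Q (Reed-Solomon) result
 * @DMA_PREP_CONTINUE: the operation continues a previous P/Q computation
 * @DMA_PREP_FENCE: dependent operations may consume data from this transaction
 * @DMA_CTRL_REUSE: the descriptor may be resubmitted after completion
 */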
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
};

enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};
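
/* dma_cap_mask_t - capability bitmap, one bit per enum dma_transaction_type. */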
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

struct dma_chan_percpu {
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};
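
/*
 * struct dma_chan - devices supply DMA channels, clients use them:
 * @device: parent struct dma_device providing this channel
 * @cookie: last cookie value returned to a client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: entry in the parent device's channel list
 * @local: per-cpu usage statistics
 * @client_count: number of clients holding this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure, if any
 * @route_data: channel-specific data handed back by the router
 * @private: private data for certain client-channel associations
 */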
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	struct dma_router *router;
	void *route_data;

	void *private;
};

struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};
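
/*
 * enum dma_slave_buswidth - bus width of the DMA slave register, FIFO or
 * memory access, in bytes.
 */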
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};
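
/*
 * struct dma_slave_config - runtime configuration for a slave channel,
 * passed to dmaengine_slave_config():
 * @direction: whether data goes in or out on this slave channel; only
 *	DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are valid
 * @src_addr: physical address of the register/FIFO to read from (ignored
 *	when the source is memory)
 * @dst_addr: physical address of the register/FIFO to write to (ignored
 *	when the destination is memory)
 * @src_addr_width: width in bytes of the source register/FIFO access
 * @dst_addr_width: width in bytes of the destination register/FIFO access
 * @src_maxburst: maximum burst length on the source side, counted in words
 *	of @src_addr_width
 * @dst_maxburst: maximum burst length on the destination side, counted in
 *	words of @dst_addr_width
 * @device_fc: true if the peripheral acts as the flow controller
 * @slave_id: slave requester id, for controllers that need it to route the
 *	request signal
 */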
struct dma_slave_config {
	enum dma_transfer_direction direction;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	bool device_fc;
	unsigned int slave_id;
};
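
/*
 * enum dma_residue_granularity - granularity of the residue reported by
 * device_tx_status():
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: residue only changes when a whole
 *	descriptor completes
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: residue is updated after each completed
 *	segment (SG entry or cyclic period)
 * @DMA_RESIDUE_GRANULARITY_BURST: residue is updated after each transferred
 *	burst
 */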
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};
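
/*
 * struct dma_slave_caps - capabilities of a slave channel:
 * @src_addr_widths: bitmask of supported source address widths
 * @dst_addr_widths: bitmask of supported destination address widths
 * @directions: bitmask of supported transfer directions
 * @cmd_pause: true if the channel supports pause and resume
 * @cmd_terminate: true if the channel supports terminate
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: true if descriptors can be resubmitted after completion
 */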
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	bool cmd_pause;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);
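
/*
 * dma_filter_fn - filter function used by dma_request_channel(); returns
 * true if @chan is suitable for the caller, given @filter_param.
 *
 * dma_async_tx_callback - completion callback invoked when a transaction
 * prepared with DMA_PREP_INTERRUPT completes.
 */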
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

struct dmaengine_unmap_data {
	u8 map_cnt;
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[0];
};
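
/*
 * struct dma_async_tx_descriptor - async transaction descriptor:
 * @cookie: tracking cookie for this transaction, set to -EBUSY if a
 *	dependent pre-requisite transaction has not yet completed
 * @flags: enum dma_ctrl_flags for this transaction
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign an ordered cookie and mark the
 *	descriptor pending
 * @desc_free: driver routine to free a reusable descriptor
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: unmap data attached to this descriptor
 * @next, @parent, @lock: dependency chaining, only present when
 *	CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH is enabled
 */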
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif
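
/*
 * struct dma_tx_state - filled in by device_tx_status() to report progress:
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: number of bytes left to transfer for DMA_IN_PROGRESS and
 *	DMA_PAUSED, 0 if the driver does not implement residue reporting
 */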
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};

struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};

struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};
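
/*
 * struct dma_device - info on the entity supplying DMA services:
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many channels are requested through dma_request_channel()
 * @channels: the list of struct dma_chan
 * @global_node: list_head for the global dma_device_list
 * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align, @xor_align, @pq_align, @fill_align: alignment shift for the
 *	respective operations
 * @dev_id: unique device ID
 * @dev: struct device reference for the DMA mapping API
 * @src_addr_widths: bitmask of source address widths the device supports
 * @dst_addr_widths: bitmask of destination address widths the device supports
 * @directions: bitmask of slave directions the device supports
 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @residue_granularity: granularity of the residue reported by tx_status
 * @device_alloc_chan_resources: allocate resources and return the number of
 *	allocated descriptors
 * @device_free_chan_resources: release a channel's resources
 * @device_prep_dma_*: prepare the corresponding asynchronous transaction
 * @device_prep_slave_sg: prepare a slave DMA operation
 * @device_prep_dma_cyclic: prepare a cyclic DMA operation
 * @device_prep_interleaved_dma: prepare an interleaved transfer from a
 *	generic template
 * @device_config: push a new configuration to a channel
 * @device_pause, @device_resume: pause/resume any transfer on a channel
 * @device_terminate_all: abort all transfers on a channel
 * @device_synchronize: synchronize termination with the current context
 * @device_tx_status: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */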
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}
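
/*
 * Illustrative sketch (not part of the original header): a typical slave-DMA
 * client sequence built from the helpers declared in this file. The names
 * my_dev, MY_FIFO_PHYS_ADDR, my_buf_dma_addr, len, my_callback and my_data
 * are placeholders; error handling is omitted.
 *
 *	struct dma_chan *chan = dma_request_chan(my_dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = MY_FIFO_PHYS_ADDR,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, my_buf_dma_addr, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = my_callback;
 *	desc->callback_param = my_data;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */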
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
	struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
	unsigned long flags)
{
	if (!chan || !chan->device)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
	struct dma_chan *chan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
						src_sg, src_nents, flags);
}
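
/*
 * dmaengine_terminate_all() - terminate all active DMA transfers on a channel.
 *
 * This function is DEPRECATED; use dmaengine_terminate_async() or
 * dmaengine_terminate_sync() instead.
 */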
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}
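
/*
 * dmaengine_terminate_async() - terminate all active DMA transfers on a
 * channel without waiting for the terminated transfers to quiesce.
 *
 * After this call previously issued complete callbacks may still be running;
 * call dmaengine_synchronize() before freeing memory those transfers or
 * callbacks might still access. May be called from atomic context or from
 * within a complete callback of a descriptor submitted on the same channel.
 */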
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}
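
/*
 * dmaengine_synchronize() - synchronize DMA channel termination.
 *
 * Waits until all running complete callbacks have finished and no memory
 * associated with previously submitted descriptors is still in use by the
 * DMA controller. Must be called after dmaengine_terminate_async() and must
 * not be called from atomic context or from within a complete callback.
 */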
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}
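
/*
 * dmaengine_terminate_sync() - terminate all active DMA transfers on a
 * channel and wait for any running complete callbacks to finish.
 *
 * Equivalent to dmaengine_terminate_async() followed by
 * dmaengine_synchronize(); must not be called from atomic context or from
 * within a complete callback.
 */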
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}
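
/*
 * dma_maxpq - reduce maxpq in the face of continued operations:
 * @dma: dma device with PQ capability
 * @flags: checked for DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */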
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
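
/*
 * dma_async_issue_pending - flush pending transactions to hardware
 * @chan: target DMA channel
 *
 * Allows drivers to push copies to hardware in batches, reducing MMIO writes
 * where possible.
 */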
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}
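
/*
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, may be NULL
 * @used: returns last issued cookie, may be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver's
 * internal state and can be handed to dma_async_is_complete() to check the
 * status of multiple cookies without re-checking hardware state.
 */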
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}
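
/*
 * dma_async_is_complete - test a cookie against channel state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * Used inside dma_async_is_tx_complete(); the test logic is separated out
 * for lightweight testing of multiple cookies.
 */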
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
	const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;

	dma_get_slave_caps(tx->chan, &caps);

	if (caps.descriptor_reuse) {
		tx->flags |= DMA_CTRL_REUSE;
		return 0;
	} else {
		return -EPERM;
	}
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	if (dmaengine_desc_test_reuse(desc))
		return desc->desc_free(desc);
	else
		return -EPERM;
}

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(mask, fn, fn_param);
}
#endif