#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}

/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/**
 * Interleaved Transfer Request
 * ----------------------------
 * A chunk is a collection of contiguous bytes to be transferred.
 * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
 * A FRAME is the smallest series of contiguous {chunk,icg} pairs that,
 * when repeated an integral number of times, specifies the transfer.
 * A transfer template is a specification of a frame, the number of times
 * it is to be repeated and other per-transfer attributes.
 *
 * Practically, a client driver keeps a template ready for each type of
 * transfer it needs and only updates 'src_start' and 'dst_start' before
 * submitting a request.
 */

/**
 * struct data_chunk - element of the scatter-gather list that makes a frame.
 * @size: number of bytes to read from the source.
 * @icg: number of bytes to jump after the last src/dst address of this
 *	 chunk and before the first src/dst address of the next chunk.
 *	 Ignored if the corresponding directional field below is non-zero,
 *	 or if the respective side is not incrementing or not scattered.
 * @dst_icg: destination-specific ICG override for this chunk.
 * @src_icg: source-specific ICG override for this chunk.
 */
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};

/**
 * struct dma_interleaved_template - template describing the transfer pattern
 *	 and attributes.
 * @src_start: bus address of the source for the first chunk.
 * @dst_start: bus address of the destination for the first chunk.
 * @dir: specifies the type of source and destination.
 * @src_inc: whether the source address increments after reading from it.
 * @dst_inc: whether the destination address increments after writing to it.
 * @src_sgl: whether the 'icg' of sgl[] applies to the source (scattered
 *	 read); otherwise the source is read contiguously (icg ignored).
 *	 Ignored if src_inc is false.
 * @dst_sgl: whether the 'icg' of sgl[] applies to the destination (scattered
 *	 write); otherwise the destination is filled contiguously (icg
 *	 ignored).  Ignored if dst_inc is false.
 * @numf: number of frames in this template.
 * @frame_size: number of chunks in a frame, i.e. the size of sgl[].
 * @sgl: array of {chunk,icg} pairs that make up a frame.
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};
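
/*
 * Illustrative sketch (not part of the API): copying a 'lines' x 'line_len'
 * sub-rectangle out of a buffer whose pitch is 'stride' bytes can be
 * described with one chunk per frame and one frame per line:
 *
 *	xt->dir        = DMA_MEM_TO_MEM;
 *	xt->src_start  = src_bus_addr;
 *	xt->dst_start  = dst_bus_addr;
 *	xt->src_inc    = true;
 *	xt->src_sgl    = true;		// honour the gap on the source side
 *	xt->dst_inc    = true;
 *	xt->dst_sgl    = false;		// destination is written contiguously
 *	xt->numf       = lines;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = line_len;
 *	xt->sgl[0].icg  = stride - line_len;
 *
 * The template is then handed to dmaengine_prep_interleaved_dma() (declared
 * further down in this header).  All names above other than the structure
 * fields are hypothetical.
 */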

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency chains
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation; in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 * @DMA_CTRL_REUSE - client can reuse the descriptor and submit again till
 *  cleared or freed
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
};

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel specific data for the router
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	/* DMA router */
	struct dma_router *router;
	void *route_data;

	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave channel;
 *	DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are the legal values.
 * @src_addr: physical address where DMA slave data should be read (RX);
 *	ignored if the source is memory.
 * @dst_addr: physical address where DMA slave data should be written (TX);
 *	ignored if the destination is memory.
 * @src_addr_width: width in bytes of the source (RX) register where DMA
 *	data shall be read.
 * @dst_addr_width: same as src_addr_width but for the destination (TX).
 * @src_maxburst: maximum number of words (in units of src_addr_width, not
 *	bytes) that can be sent in one burst to the device.
 * @dst_maxburst: same as src_maxburst but for the destination (TX).
 * @device_fc: flow controller setting, only valid for slave channels; set
 *	to true if the peripheral should be the flow controller.
 * @slave_id: slave requester id, only valid for slave channels.
 *
 * This struct is passed in as configuration data to a DMA engine in order
 * to set up a certain channel for DMA transport at runtime, typically via
 * dmaengine_slave_config().
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	bool device_fc;
	unsigned int slave_id;
};
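
/*
 * Illustrative sketch, not part of the API: a peripheral driver preparing a
 * memory-to-device configuration for an 8-bit TX FIFO.  The helper name, the
 * FIFO address argument and the burst size are made-up example values; the
 * filled structure would then be passed to dmaengine_slave_config() (declared
 * further down in this header).
 */
static inline void dmaengine_example_fill_tx_config(struct dma_slave_config *cfg,
						    phys_addr_t fifo_addr)
{
	struct dma_slave_config c = { };	/* zero the fields we do not use */

	c.direction = DMA_MEM_TO_DEV;			/* memory -> peripheral */
	c.dst_addr = fifo_addr;				/* physical FIFO address */
	c.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;	/* 8-bit register */
	c.dst_maxburst = 4;				/* words per burst the FIFO tolerates */
	*cfg = c;
}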

/**
 * enum dma_residue_granularity - granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: residue reporting is not supported;
 *	the channel can only tell whether a descriptor has completed, and the
 *	residue field of dma_tx_state is always 0.
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: residue is updated after each completed
 *	segment of the transfer (for cyclic transfers this is after each
 *	period).
 * @DMA_RESIDUE_GRANULARITY_BURST: residue is updated after each transferred
 *	burst, typically because the hardware exposes a progress register.
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports; since the
 *	enum dma_transfer_direction is not defined as bit flags, the
 *	controller should set and the client should check BIT(<TYPE>)
 * @max_burst: max burst capability per-transfer
 * @cmd_pause: true if the channel supports the pause and resume operations
 * @cmd_terminate: true if the channel supports the terminate operation
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: if a descriptor can be reused by the client and
 *	resubmitted multiple times
 */
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool cmd_pause;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned.  Where 'suitable' indicates a non-NULL channel that
 * has not been actively used by a client.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

struct dmaengine_unmap_data {
	u8 map_cnt;
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[0];
};

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
 *	descriptor pending. To be pushed on .issue_pending() call
 * @desc_free: driver routine to free a reusable descriptor
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: unmap data attached to the transaction
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * enum dmaengine_alignment - defines alignment of the DMA async tx
 * buffers
 */
enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};

/**
 * struct dma_slave_map - associates slave device and its slave channel with
 * the parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */
struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};

/**
 * struct dma_filter - information for slave device/channel to filter_fn/param
 * mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel in the map
 * @map: array of channel to filter mapping data
 */
struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports
 * @max_burst: max burst capability per-transfer
 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares memcpy between two scatter lists
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function
 *	will be called after period_len bytes have been transferred.
 * @device_prep_interleaved_dma: transfer expressed in a template
 * @device_prep_dma_imm_data: DMA an immediate data to destination address
 * @device_config: pushes a new configuration to a channel, return 0 or an
 *	error code
 * @device_pause: pauses any transfer happening on a channel, returns 0 or
 *	an error code
 * @device_resume: resumes any transfer on a channel previously paused,
 *	returns 0 or an error code
 * @device_terminate_all: aborts all transfers on a channel, returns 0 or
 *	an error code
 * @device_synchronize: synchronizes the termination of a transfer to the
 *	current context
 * @device_tx_status: poll for transaction completion, the optional txstate
 *	parameter can be supplied with a pointer to get a struct with
 *	auxiliary transfer status information, otherwise the call will just
 *	return a simple status code
 * @device_issue_pending: pushes pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
{
	if (!chan || !chan->device)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
						src_sg, src_nents, flags);
}

/**
 * dmaengine_terminate_all() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * This function is DEPRECATED, use either dmaengine_terminate_sync() or
 * dmaengine_terminate_async() instead.
 */
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}

/**
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending descriptors
 * that have previously been submitted to the channel. It is not guaranteed
 * that the transfers are already stopped when it returns; callers must use
 * dmaengine_synchronize() before freeing memory accessed by previously
 * submitted descriptors or resources used by their completion callbacks.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
 *
 * If neither of those restrictions applies, consider using
 * dmaengine_terminate_sync() instead.
 */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}

/**
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated with
 * them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 *
 * The behavior of this function is undefined if dma_async_issue_pending() has
 * been called between dmaengine_terminate_async() and this function.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}

/**
 * dmaengine_terminate_sync() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending transfers
 * that have previously been submitted to the channel. It is similar to
 * dmaengine_terminate_async() but guarantees that the DMA transfer has
 * actually been cancelled and that all complete callbacks have finished
 * running when the function returns.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}
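
/*
 * Illustrative sketch, not part of the API: pausing a channel to take a
 * consistent residue snapshot for a cookie, then resuming it.  Whether pause
 * and residue reporting are supported must be checked beforehand via
 * dma_get_slave_caps() (declared further down in this header); the helper
 * name is hypothetical.
 */
static inline u32 dmaengine_example_paused_residue(struct dma_chan *chan,
						   dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;
	u32 residue = 0;

	if (dmaengine_pause(chan))
		return 0;		/* pause not supported or failed */

	/* While paused the hardware no longer updates its progress. */
	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		residue = state.residue;

	dmaengine_resume(chan);
	return residue;
}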

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}
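
/*
 * Illustrative sketch, not part of the API: a copy-offload client deciding
 * whether a memcpy with the given buffer offsets may be handed to the engine
 * or must fall back to the CPU.  A device advertising DMAENGINE_ALIGN_4_BYTES,
 * for example, rejects any request where an offset or the length has one of
 * the low two bits set.  The helper name is hypothetical.
 */
static inline bool dmaengine_example_can_offload_copy(struct dma_chan *chan,
						      size_t dst_off,
						      size_t src_off,
						      size_t len)
{
	return is_dma_copy_aligned(chan->device, dst_off, src_off, len);
}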

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation, 3 extra sources are
 * needed to reproduce the result of the previous operation (only 1 extra
 * source when P generation is disabled), so the reported limit shrinks
 * accordingly.  For example, a device with max_pq = 8 and no native PQ
 * continuation support accepts at most 5 new sources when DMA_PREP_CONTINUE
 * is set.
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

/* --- public DMA engine API --- */
#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
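
/*
 * Illustrative sketch, not part of the API: checking whether a channel's
 * device advertises every capability a client needs before using it for
 * offload.  The set of required transaction types below is just an example;
 * the helper name is hypothetical.
 */
static inline bool dmaengine_example_has_needed_caps(struct dma_chan *chan)
{
	dma_cap_mask_t needed;
	unsigned int cap;

	dma_cap_zero(needed);
	dma_cap_set(DMA_MEMCPY, needed);
	dma_cap_set(DMA_XOR, needed);

	/* Every capability requested above must be present in cap_mask. */
	for_each_dma_cap_mask(cap, needed)
		if (!dma_has_cap(cap, chan->device->cap_mask))
			return false;

	return true;
}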

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}
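
/*
 * Illustrative sketch, not part of the API: the usual slave-DMA submission
 * sequence for a single, already dma-mapped buffer.  The helper name is
 * hypothetical and 'done' is a completion callback supplied by the caller.
 */
static inline int dmaengine_example_tx_one(struct dma_chan *chan,
					   struct dma_slave_config *cfg,
					   dma_addr_t buf, size_t len,
					   dma_async_tx_callback done,
					   void *done_arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	/* 1. Tell the channel about the peripheral side of the transfer. */
	ret = dmaengine_slave_config(chan, cfg);
	if (ret)
		return ret;

	/* 2. Prepare a descriptor and ask for a completion interrupt. */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = done;
	desc->callback_param = done_arg;

	/* 3. Put it on the channel's pending queue... */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	/* 4. ...and kick the hardware. */
	dma_async_issue_pending(chan);
	return 0;
}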

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}
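
/*
 * Illustrative sketch, not part of the API: a progress check that caches the
 * channel state once and then tests several cookies against it with
 * dma_async_is_complete(), avoiding repeated hardware reads.  The helper
 * name is hypothetical.
 */
static inline unsigned int
dmaengine_example_count_done(struct dma_chan *chan,
			     const dma_cookie_t *cookies, unsigned int n)
{
	dma_cookie_t last, used;
	unsigned int i, done = 0;

	if (!n)
		return 0;

	/* One hardware query, valid for every cookie issued before it. */
	dma_async_is_tx_complete(chan, cookies[0], &last, &used);

	for (i = 0; i < n; i++)
		if (dma_async_is_complete(cookies[i], last, used) == DMA_COMPLETE)
			done++;

	return done;
}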

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
					const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
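
/*
 * Illustrative sketch, not part of the API: acquiring the channel named "tx"
 * for a device and verifying that it supports pause before relying on that
 * feature.  "tx" is a hypothetical request line name taken from the device's
 * DT/ACPI description or a dma_slave_map table; the helper name is also
 * hypothetical.
 */
static inline struct dma_chan *dmaengine_example_get_tx_chan(struct device *dev)
{
	struct dma_slave_caps caps;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return chan;		/* may be ERR_PTR(-EPROBE_DEFER) */

	if (dma_get_slave_caps(chan, &caps) || !caps.cmd_pause) {
		dma_release_channel(chan);
		return ERR_PTR(-ENODEV);
	}

	return chan;
}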

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;

	dma_get_slave_caps(tx->chan, &caps);

	if (caps.descriptor_reuse) {
		tx->flags |= DMA_CTRL_REUSE;
		return 0;
	} else {
		return -EPERM;
	}
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* this is supported for reusable desc, so check that */
	if (dmaengine_desc_test_reuse(desc))
		return desc->desc_free(desc);
	else
		return -EPERM;
}
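
/*
 * Illustrative sketch, not part of the API: marking a descriptor reusable so
 * that the completion callback can hand the *same* descriptor back to the
 * hardware without preparing a new one.  On channels without descriptor
 * reuse support dmaengine_desc_set_reuse() fails and the caller must fall
 * back to preparing a fresh descriptor per transfer.  The helper name is
 * hypothetical.
 */
static inline int dmaengine_example_kick_reusable(struct dma_chan *chan,
						  struct dma_async_tx_descriptor *desc)
{
	dma_cookie_t cookie;
	int ret;

	ret = dmaengine_desc_set_reuse(desc);
	if (ret)
		return ret;	/* -EPERM: channel cannot reuse descriptors */

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	dma_async_issue_pending(chan);

	/*
	 * After completion the same descriptor may be submitted again from
	 * the callback; call dmaengine_desc_free() once it is no longer
	 * needed.
	 */
	return 0;
}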

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(mask, fn, fn_param);
}
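
/*
 * Illustrative sketch, not part of the API: grabbing any channel that can do
 * plain memcpy, for callers that are not tied to a specific slave device.
 * No filter function is needed, so both filter arguments are NULL.  The
 * helper name is hypothetical.
 */
static inline struct dma_chan *dmaengine_example_get_memcpy_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Returns NULL if no free capable channel exists (or !CONFIG_DMA_ENGINE). */
	return dma_request_channel(mask, NULL, NULL);
}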
#endif