#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}

/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/**
 * struct data_chunk - Element of scatter-gather list that makes a frame.
 * @size: Number of bytes to read from source.
 * @icg: Number of bytes to jump after the last src/dst address of this
 *	 chunk and before the first src/dst address of the next chunk.
 *	 Ignored for dst (assumed 0) if dst_inc is true and dst_sgl is false.
 *	 Ignored for src (assumed 0) if src_inc is true and src_sgl is false.
 * @dst_icg: Number of bytes to jump after the last dst address of this
 *	 chunk and before the first dst address of the next chunk.
 *	 Ignored if dst_inc is true and dst_sgl is false.
 * @src_icg: Number of bytes to jump after the last src address of this
 *	 chunk and before the first src address of the next chunk.
 *	 Ignored if src_inc is true and src_sgl is false.
 */
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};

/**
 * struct dma_interleaved_template - Template to convey DMAC the transfer
 *	 pattern and attributes.
 * @src_start: Bus address of source for the first chunk.
 * @dst_start: Bus address of destination for the first chunk.
 * @dir: Specifies the type of Source and Destination.
 * @src_inc: If the source address increments after reading from it.
 * @dst_inc: If the destination address increments after writing to it.
 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
 *	 Otherwise, the source is read contiguously (icg ignored).
 *	 Ignored if src_inc is false.
 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
 *	 Otherwise, the destination is filled contiguously (icg ignored).
 *	 Ignored if dst_inc is false.
 * @numf: Number of frames in this template.
 * @frame_size: Number of chunks in a frame, i.e. the size of sgl[].
 * @sgl: Array of {chunk, icg} pairs that make up a frame.
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK: if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has had a chance to establish any dependency
 *  chains
 * @DMA_PREP_PQ_DISABLE_P: prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q: prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE: indicate to a driver that it is reusing buffers that
 *  were the result of a previous operation; in the case of a PQ operation
 *  it continues the calculation with new sources
 * @DMA_PREP_FENCE: tell the driver that subsequent operations depend on the
 *  result of this operation
 * @DMA_CTRL_REUSE: client can reuse the descriptor and submit it again until
 *  the flag is cleared or the descriptor is freed
 * @DMA_PREP_CMD: tell the driver that the data passed to the DMA API is
 *  command data and the descriptor should use a different format than normal
 *  data descriptors
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
	DMA_PREP_CMD = (1 << 7),
};

enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT: 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT: 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device which supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel specific data for the router
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	/* DMA router */
	struct dma_router *router;
	void *route_data;

	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave channel;
 *	right now DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are the only legal
 *	values. DEPRECATED: drivers should use the direction argument to the
 *	device_prep_slave_sg and device_prep_dma_cyclic functions or the dir
 *	field in the dma_interleaved_template structure.
 * @src_addr: physical address where DMA slave data should be read (RX);
 *	ignored if the source is memory.
 * @dst_addr: physical address where DMA slave data should be written (TX);
 *	ignored if the destination is memory.
 * @src_addr_width: width in bytes of the source (RX) register where DMA
 *	data shall be read; legal values are 1, 2, 3, 4, 8, 16, 32 and 64.
 * @dst_addr_width: same as src_addr_width but for the destination (TX)
 *	target.
 * @src_maxburst: maximum number of words (in units of src_addr_width, not
 *	bytes) that can be sent in one burst to the device.
 * @dst_maxburst: same as src_maxburst but for the destination target.
 * @src_port_window_size: length, in words, of the register area on the
 *	device side when a register window rather than a single register is
 *	used to provide the data.
 * @dst_port_window_size: same as src_port_window_size but for the
 *	destination port.
 * @device_fc: flow controller setting, only valid for slave channels; set
 *	to true if the peripheral should be the flow controller.
 * @slave_id: slave requester id, only valid for slave channels.
 *
 * This struct is passed in as configuration data to a DMA engine in order
 * to set up a certain channel for DMA transport at runtime, via the
 * device_config callback in the dma_device structure.
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
	unsigned int slave_id;
};
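
/*
 * Illustrative sketch (not part of this header): a peripheral driver would
 * typically fill a dma_slave_config for a memory-to-device transfer roughly
 * like this, where "fifo_phys" stands for a hypothetical device FIFO
 * address:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */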

/**
 * enum dma_residue_granularity - granularity of the residue reported by
 *	tx_status
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: residue reporting is not supported;
 *	the residue is only valid once a transfer has fully completed, at
 *	which point it is always 0.
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: residue is updated after each
 *	successfully completed segment of the transfer (e.g. a scatter-gather
 *	element or a cyclic period).
 * @DMA_RESIDUE_GRANULARITY_BURST: residue is updated after each transferred
 *	burst, giving burst-level granularity inside a segment or period.
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports.
 *	Since enum dma_transfer_direction is not defined as bit flags for
 *	each type, the dma controller should set BIT(<TYPE>) and the client
 *	should check for the same bits.
 * @max_burst: max burst capability per-transfer
 * @cmd_pause: true if the channel supports the pause and resume commands
 * @cmd_terminate: true if the channel supports the terminate command
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: if a descriptor can be reused by the client and
 *	resubmitted multiple times
 */
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool cmd_pause;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to
 * dma_request_channel, a suitable (non-busy, capability-matching) channel
 * is passed to this routine for further dispositioning before being
 * returned.  The filter returns 'true' to indicate that the channel is
 * suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
	u8 map_cnt;
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[0];
};

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
 *	descriptor pending. To be pushed on the .issue_pending() call
 * @desc_free: driver routine to free a reusable descriptor
 * @callback: routine to call after this operation is complete
 * @callback_result: error-aware variant of @callback
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: hook for generic DMA unmap data
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * enum dmaengine_alignment - defines alignment of the DMA async tx
 * buffers
 */
enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};

/**
 * struct dma_slave_map - associates a slave device and its slave channel
 * with a parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */
struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};

/**
 * struct dma_filter - information for slave device/channel to filter_fn/param
 * mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel entries in the map
 * @map: array of channel to filter mapping data
 */
struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports.
 *	Since enum dma_transfer_direction is not defined as bit flags for
 *	each type, the dma controller should set BIT(<TYPE>) and the client
 *	should check for the same bits.
 * @max_burst: max burst capability per-transfer
 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares a memory-to-memory scatter-gather operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepares a cyclic dma operation suitable for
 *	audio. The function takes a buffer of size buf_len. The callback
 *	function will be called after period_len bytes have been transferred.
 * @device_prep_interleaved_dma: transfer expression in a generic way
 * @device_prep_dma_imm_data: DMA an immediate data value to a destination
 *	address
 * @device_config: pushes a new configuration to a channel, returns 0 or an
 *	error code
 * @device_pause: pauses any transfer happening on a channel, returns 0 or
 *	an error code
 * @device_resume: resumes any transfer on a channel previously paused,
 *	returns 0 or an error code
 * @device_terminate_all: aborts all transfers on a channel, returns 0 or
 *	an error code
 * @device_synchronize: synchronizes the termination of a transfer to the
 *	current context
 * @device_tx_status: poll for transaction completion; the optional txstate
 *	parameter can be supplied with a pointer to get a struct with
 *	auxiliary transfer status information, otherwise the call will just
 *	return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}
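
/*
 * Illustrative client flow, a sketch assuming an already configured slave
 * channel and a buffer "dma_buf" of "len" bytes mapped for DMA; the names
 * my_xfer_done and my_ctx are hypothetical, and nothing is started until
 * dma_async_issue_pending() is called:
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_xfer_done;
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */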

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
		return NULL;

	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
		return NULL;

	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
		return NULL;

	return chan->device->device_prep_dma_memcpy(chan, dest, src,
						    len, flags);
}
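
/*
 * Sketch of an async memcpy offload (assumes "dst" and "src" are dma_addr_t
 * handles already mapped against chan->device->dev); the transfer only
 * starts once dma_async_issue_pending() is called:
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */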

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
		return NULL;

	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
			src_sg, src_nents, flags);
}

/**
 * dmaengine_terminate_all() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * This function is DEPRECATED, use either dmaengine_terminate_sync() or
 * dmaengine_terminate_async() instead.
 */
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}

/**
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending descriptors
 * that have previously been submitted to the channel. It is not guaranteed,
 * though, that the transfer for the active descriptor has stopped when the
 * function returns, and the complete callback of a submitted transfer may
 * still be running. dmaengine_synchronize() must be called before it is safe
 * to free memory accessed by previously submitted descriptors or resources
 * accessed from within their completion callbacks.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel. If
 * neither restriction applies, consider using dmaengine_terminate_sync()
 * instead.
 */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}

/**
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the associated memory, and
 * that all complete callbacks have finished running.
 *
 * The behavior of this function is undefined if dma_async_issue_pending()
 * has been called between dmaengine_terminate_async() and this function.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}

/**
 * dmaengine_terminate_sync() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Like dmaengine_terminate_async(), but guarantees that the DMA transfer has
 * actually been stopped and that all complete callbacks have finished
 * running when the function returns.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}
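
/*
 * Sketch of a teardown path: dmaengine_terminate_sync() may sleep, so a
 * hypothetical driver would call it from its remove()/shutdown path (process
 * context) before giving the channel back:
 *
 *	dmaengine_terminate_sync(chan);
 *	dma_release_channel(chan);
 */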

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete() to test a
 * cookie against the channel's cookie counters; the test logic is separated
 * out for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}
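
/*
 * Polling sketch using the cookie interface (descriptor callbacks are the
 * more common completion mechanism); handle_done and handle_error are
 * hypothetical helpers:
 *
 *	dma_cookie_t last, used;
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, &last, &used);
 *	if (status == DMA_COMPLETE)
 *		handle_done();
 *	else if (status == DMA_ERROR)
 *		handle_error();
 */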

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
					dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
						const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif
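
/*
 * Sketch of channel acquisition: dma_request_chan() returns an ERR_PTR()
 * value on failure (including -EPROBE_DEFER), so it must be checked with
 * IS_ERR(); "rx" here stands for a binding-specific channel name:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */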

#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(tx->chan, &caps);
	if (ret)
		return ret;

	if (caps.descriptor_reuse) {
		tx->flags |= DMA_CTRL_REUSE;
		return 0;
	} else {
		return -EPERM;
	}
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* this is only supported for reusable descriptors, so check that */
	if (dmaengine_desc_test_reuse(desc))
		return desc->desc_free(desc);
	else
		return -EPERM;
}

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(mask, fn, fn_param);
}
#endif /* LINUX_DMAENGINE_H */