#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}

/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
	/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/**
 * Interleaved Transfer Request
 * ----------------------------
 * A chunk is a contiguously transferred block of data; after each chunk a
 * gap (inter-chunk-gap, ICG) may be skipped before the next chunk begins.
 * A frame is the smallest series of contiguous {chunk,icg} pairs, and a
 * transfer template describes 'numf' such frames.
 *
 * struct data_chunk - element of the scatter-gather list that makes a frame.
 * @size: number of bytes to read from the source.
 * @icg: number of bytes to jump after this chunk before the start of the
 *	next chunk; ignored for the destination if @dst_icg is set, ignored
 *	for the source if @src_icg is set.
 * @dst_icg: number of bytes to jump after the last destination address of
 *	this chunk and before the first destination address of the next chunk.
 * @src_icg: number of bytes to jump after the last source address of this
 *	chunk and before the first source address of the next chunk.
 */
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};

/**
 * struct dma_interleaved_template - Template to convey DMAC the transfer
 *	pattern and attributes.
 * @src_start: Bus address of source for the first chunk.
 * @dst_start: Bus address of destination for the first chunk.
 * @dir: Specifies the type of Source and Destination.
 * @src_inc: If the source address increments after reading from it.
 * @dst_inc: If the destination address increments after writing to it.
 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read),
 *	otherwise the source is read contiguously (icg ignored).
 *	Ignored if @src_inc is false.
 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write),
 *	otherwise the destination is filled contiguously (icg ignored).
 *	Ignored if @dst_inc is false.
 * @numf: Number of frames in this template.
 * @frame_size: Number of chunks in a frame, i.e. size of sgl[].
 * @sgl: Array of {chunk,icg} pairs that make up a frame.
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};
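
/*
 * Illustrative sketch (not part of the original header): a client moving a
 * 2D sub-window of 'height' rows of 'width' bytes, with 'stride' bytes
 * between row starts, might describe it as one single-chunk frame per row.
 * The surrounding driver context (src_dma_addr, dst_dma_addr, chan, ...)
 * is hypothetical.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->src_start = src_dma_addr;
 *	xt->dst_start = dst_dma_addr;
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_inc = true;
 *	xt->dst_inc = false;
 *	xt->src_sgl = true;
 *	xt->dst_sgl = false;
 *	xt->numf = height;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width;
 *	xt->sgl[0].icg = stride - width;
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */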

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has had a chance to establish any
 *	dependency chains
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *	sources that were the result of a previous operation; in the case of
 *	a PQ operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *	on the result of this operation
 * @DMA_CTRL_REUSE - client can reuse the descriptor and submit it again
 *	until the flag is cleared or the descriptor is freed
 * @DMA_PREP_CMD - tell the driver that the data passed to the DMA API is
 *	command data and the descriptor should be in a different format from
 *	normal data descriptors
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
	DMA_PREP_CMD = (1 << 7),
};

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel-specific data for the router
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	/* DMA router */
	struct dma_router *router;
	void *route_data;

	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave channel;
 *	right now, DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are allowed.  This is
 *	deprecated in favour of the direction argument passed to the prep
 *	functions.
 * @src_addr: physical address where DMA slave data should be read (RX);
 *	ignored if the source is memory.
 * @dst_addr: physical address where DMA slave data should be written (TX);
 *	ignored if the destination is memory.
 * @src_addr_width: width in bytes of the source (RX) register where DMA
 *	data shall be read.
 * @dst_addr_width: same as src_addr_width but for destination (TX).
 * @src_maxburst: maximum number of words (in units of src_addr_width, not
 *	bytes) that can be sent to the device in one burst.
 * @dst_maxburst: same as src_maxburst but for the destination target.
 * @src_port_window_size: length, in words, of the register area the data
 *	needs to be accessed through on the device side (source).
 * @dst_port_window_size: same as src_port_window_size but for the
 *	destination target (device side).
 * @device_fc: flow controller setting; only valid for slave channels.
 *	Set to 'true' if the peripheral should be the flow controller.
 * @slave_id: slave requester id; only valid for slave channels.
 *
 * This struct is passed in as configuration data to a DMA engine in order
 * to set up a certain channel for DMA transport at runtime.  Fields that
 * are not relevant for the transfer type may be left zeroed.
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
	unsigned int slave_id;
};
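
/*
 * Illustrative sketch (not part of the original header): configuring a
 * channel for memory-to-device transfers towards a peripheral's 8-bit data
 * FIFO.  'uart_fifo_phys' and 'chan' are assumed to come from the client
 * driver.
 *
 *	struct dma_slave_config cfg = { };
 *	int ret;
 *
 *	cfg.direction = DMA_MEM_TO_DEV;
 *	cfg.dst_addr = uart_fifo_phys;
 *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 *	cfg.dst_maxburst = 4;
 *	ret = dmaengine_slave_config(chan, &cfg);
 *	if (ret)
 *		return ret;
 */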

/**
 * enum dma_residue_granularity - Granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported.
 *	The DMA channel is only able to tell whether a descriptor has been
 *	completed or not, so the residue is only ever zero or the full
 *	descriptor size.
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each
 *	successfully completed segment of the transfer (for cyclic transfers
 *	this is after each period).
 * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
 *	burst.  This is typically only supported if the hardware has some
 *	sort of progress register (e.g. the current read/write address or the
 *	number of bursts/beats/bytes still to be transferred).
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports; since
 *	enum dma_transfer_direction is not defined as bit flags, the dma
 *	controller should fill (and the client should check) BIT(<TYPE>).
 * @max_burst: max burst capability per-transfer
 * @cmd_pause: true if the channel supports the pause command
 * @cmd_resume: true if the channel supports the resume command
 * @cmd_terminate: true if the channel supports the terminate command
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: if a descriptor can be reused by the client and
 *	resubmitted multiple times
 */
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool cmd_pause;
	bool cmd_resume;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};
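
/*
 * Illustrative sketch (not part of the original header): a client checking
 * whether the channel it obtained can pause and report per-burst residue.
 * 'chan' and 'dev' are assumed to come from the client driver.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (dma_get_slave_caps(chan, &caps))
 *		return -EINVAL;
 *	if (!caps.cmd_pause ||
 *	    caps.residue_granularity < DMA_RESIDUE_GRANULARITY_BURST)
 *		dev_warn(dev, "channel lacks pause/residue support\n");
 */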

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned.  Where 'suitable' indicates a non-busy channel that
 * satisfies the given capability mask.  It returns 'true' to indicate that
 * the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	u16 map_cnt;
#else
	u8 map_cnt;
#endif
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[0];
};

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign an ordered cookie and mark the
 *	descriptor pending; to be pushed on .issue_pending() call
 * @desc_free: driver routine to free a reusable descriptor
 * @callback: routine to call after this operation is complete
 * @callback_result: routine to call after this operation is complete,
 *	including the transaction result
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: hook for generic DMA unmap data
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};
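
/*
 * Illustrative sketch (not part of the original header): the usual lifecycle
 * of a descriptor on a slave channel.  'chan', 'buf', 'len', 'my_ctx' and
 * the callback 'my_dma_done' are placeholders supplied by the client driver.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_dma_done;
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */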

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * enum dmaengine_alignment - defines alignment of the DMA async tx
 * buffers
 */
enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};

/**
 * struct dma_slave_map - associates a slave device and its slave channel
 * with a parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */
struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};

/**
 * struct dma_filter - information for slave device/channel to filter_fn/param
 * mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel entries in the map
 * @map: array of channel-to-filter mapping data
 */
struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};
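
/*
 * Illustrative sketch (not part of the original header): a DMA controller
 * driver may describe which consumer device gets which request line by
 * filling a dma_slave_map table and hooking it into its dma_device before
 * registration.  The device/channel names, request-line constants and the
 * filter function below are hypothetical.
 *
 *	static const struct dma_slave_map foo_dma_slave_map[] = {
 *		{ "foo-uart.0", "tx", (void *)FOO_DMA_REQ_UART0_TX },
 *		{ "foo-uart.0", "rx", (void *)FOO_DMA_REQ_UART0_RX },
 *	};
 *
 *	dma_dev->filter.map = foo_dma_slave_map;
 *	dma_dev->filter.mapcnt = ARRAY_SIZE(foo_dma_slave_map);
 *	dma_dev->filter.fn = foo_dma_filter_fn;
 */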

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports
 * @max_burst: max burst capability per-transfer
 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepares a cyclic dma operation suitable for
 *	audio; the callback is called after period_len bytes have been
 *	transferred
 * @device_prep_interleaved_dma: prepares a transfer expressed in a generic
 *	way (interleaved template)
 * @device_prep_dma_imm_data: DMA an immediate data value to a destination
 *	address
 * @device_config: pushes a new configuration to a channel, returns 0 or an
 *	error code
 * @device_pause: pauses any transfer happening on a channel, returns 0 or
 *	an error code
 * @device_resume: resumes any transfer on a channel previously paused,
 *	returns 0 or an error code
 * @device_terminate_all: aborts all transfers on a channel, returns 0 or an
 *	error code
 * @device_synchronize: synchronizes the termination of a transfer to the
 *	current context
 * @device_tx_status: poll for transaction completion; the optional txstate
 *	parameter can be supplied with a pointer to get auxiliary transfer
 *	status information, otherwise the call just returns a status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}
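
/*
 * Illustrative sketch (not part of the original header): the 'buf' argument
 * above is a DMA (bus) address, so a client working from a kernel virtual
 * address is expected to map it first.  'dev', 'vaddr' and the error paths
 * are placeholders.
 *
 *	dma_addr_t dma_buf;
 *
 *	dma_buf = dma_map_single(dev, vaddr, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma_buf))
 *		return -ENOMEM;
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */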

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
		return NULL;

	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}
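
/*
 * Illustrative sketch (not part of the original header): a cyclic transfer
 * is typically used for audio-style ring buffers; the callback fires once
 * per period until the channel is terminated.  'ring_dma', 'ring_size',
 * 'period', 'my_period_elapsed' and 'my_ctx' are placeholders.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, ring_dma, ring_size, period,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_period_elapsed;
 *	desc->callback_param = my_ctx;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_sync(chan);		// stop the ring
 */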

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
		return NULL;

	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
		return NULL;

	return chan->device->device_prep_dma_memcpy(chan, dest, src,
						    len, flags);
}

/**
 * dmaengine_terminate_all() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * This function is DEPRECATED, use either dmaengine_terminate_sync() or
 * dmaengine_terminate_async() instead.
 */
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}

/**
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending descriptors
 * that have previously been submitted to the channel.  It is not guaranteed
 * that the transfer for the active descriptor has stopped when the function
 * returns, and it is possible that the complete callback of a submitted
 * transfer is still running when this function returns.
 *
 * dmaengine_synchronize() needs to be called before it is safe to free
 * memory accessed by previously submitted descriptors or to free any
 * resources accessed from within the completion callback of previously
 * submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
 *
 * If neither of those restrictions applies, consider using
 * dmaengine_terminate_sync() instead.
 */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}

/**
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Synchronizes the DMA channel termination to the current context.  When
 * this function returns it is guaranteed that all transfers for previously
 * issued descriptors have stopped and it is safe to free the memory
 * associated with them.  It is also guaranteed that all complete callbacks
 * for previously submitted descriptors have finished running and it is safe
 * to free resources accessed from within those callbacks.
 *
 * The behavior of this function is undefined if dma_async_issue_pending()
 * has been called between dmaengine_terminate_async() and this function.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}

/**
 * dmaengine_terminate_sync() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending transfers
 * that have previously been submitted to the channel.  It is similar to
 * dmaengine_terminate_async() but guarantees that the DMA transfer has
 * actually been stopped and that all complete callbacks have finished
 * running when the function returns.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}
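
/*
 * Illustrative sketch (not part of the original header): when termination
 * has to be started from atomic context (e.g. an interrupt handler), the
 * two-step form is used; the driver's remove/close path then synchronizes
 * before freeing DMA buffers.  Names are placeholders.
 *
 *	// in the ISR / atomic path:
 *	dmaengine_terminate_async(chan);
 *
 *	// later, in process context, before freeing DMA buffers:
 *	dmaengine_synchronize(chan);
 *	dma_unmap_single(dev, dma_buf, len, DMA_FROM_DEVICE);
 */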

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
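
/*
 * Illustrative sketch (not part of the original header): requesting a
 * generic memcpy-capable channel by capability mask.  The filter function
 * is optional; the control flow shown here is a placeholder.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *	...
 *	dma_release_channel(chan);
 */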

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}
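
/*
 * Illustrative sketch (not part of the original header): polling for
 * completion of a previously submitted cookie instead of using a completion
 * callback.  The 'deadline' timeout handling is a placeholder.
 *
 *	enum dma_status status;
 *
 *	dma_async_issue_pending(chan);
 *	do {
 *		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	} while (status == DMA_IN_PROGRESS &&
 *		 !time_after(jiffies, deadline));
 *	if (status != DMA_COMPLETE)
 *		dmaengine_terminate_sync(chan);
 */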

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							  const char *name)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
						const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(tx->chan, &caps);
	if (ret)
		return ret;

	if (caps.descriptor_reuse) {
		tx->flags |= DMA_CTRL_REUSE;
		return 0;
	} else {
		return -EPERM;
	}
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* this is supported for reusable descriptors, so check that */
	if (dmaengine_desc_test_reuse(desc))
		return desc->desc_free(desc);
	else
		return -EPERM;
}

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(mask, fn, fn_param);
}
#endif /* LINUX_DMAENGINE_H */