#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>
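/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * If dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code.
 * dma_submit_error() returns the error code (or 0) for a cookie returned by
 * dmaengine_submit().
 */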
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}
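/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 * @DMA_OUT_OF_ORDER: transaction is completed, but completion may be
 *	reported out of submission (cookie) order
 */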
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
	DMA_OUT_OF_ORDER,
};
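/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 * DMA_TX_TYPE_END must stay last; it is used to size dma_cap_mask_t.
 */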
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_MEMCPY_SG,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
	DMA_COMPLETION_NO_ORDER,
	DMA_REPEAT,
	DMA_LOAD_EOT,

	DMA_TX_TYPE_END,
};
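/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */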
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};
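/**
 * struct data_chunk - element of the scatter-gather list that makes a frame.
 * @size: Number of bytes to read from source.
 * @icg: Number of bytes to jump after the last src/dst address of this
 *	 chunk and before the first src/dst address of the next chunk.
 * @dst_icg: Number of bytes to jump after the last dst address of this
 *	 chunk and before the first dst address of the next chunk.
 * @src_icg: Number of bytes to jump after the last src address of this
 *	 chunk and before the first src address of the next chunk.
 */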
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};
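/**
 * struct dma_interleaved_template - Template to convey the transfer pattern
 *	 and attributes to the DMA controller.
 * @src_start: Bus address of source for the first chunk.
 * @dst_start: Bus address of destination for the first chunk.
 * @dir: Specifies the type of Source and Destination.
 * @src_inc: If the source address increments after reading from it.
 * @dst_inc: If the destination address increments after writing to it.
 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
 *	 Otherwise, the source is read contiguously (icg ignored).
 *	 Ignored if @src_inc is false.
 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
 *	 Otherwise, the destination is filled contiguously (icg ignored).
 *	 Ignored if @dst_inc is false.
 * @numf: Number of frames in this template.
 * @frame_size: Number of chunks in a frame, i.e. size of sgl[].
 * @sgl: Array of {chunk, icg} pairs that make up a frame.
 */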
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[];
};
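/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK: if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency chains
 * @DMA_PREP_PQ_DISABLE_P: prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q: prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE: indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation; for a PQ operation
 *  it continues the calculation with new sources
 * @DMA_PREP_FENCE: tell the driver that subsequent operations depend on the
 *  result of this operation
 * @DMA_CTRL_REUSE: client can reuse the descriptor and submit it again until
 *  the flag is cleared or the descriptor is freed
 * @DMA_PREP_CMD: tell the driver that the data passed to the DMA API is
 *  command data, so the descriptor may use a different format than normal
 *  data descriptors
 * @DMA_PREP_REPEAT: tell the driver that the transaction shall be repeated
 *  automatically when it ends, until a transaction with DMA_PREP_LOAD_EOT is
 *  issued on the same channel; only applicable to interleaved transactions
 * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
 *  currently active repeated transaction when it is issued
 */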
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
	DMA_PREP_CMD = (1 << 7),
	DMA_PREP_REPEAT = (1 << 8),
	DMA_PREP_LOAD_EOT = (1 << 9),
};

enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
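/**
 * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
 * @DESC_METADATA_NONE: no metadata support
 * @DESC_METADATA_CLIENT: the metadata buffer is allocated and provided by the
 *  client driver and attached to the descriptor with
 *  dmaengine_desc_attach_metadata().
 * @DESC_METADATA_ENGINE: the metadata buffer is allocated and managed by the
 *  DMA driver; the client obtains a pointer to it with
 *  dmaengine_desc_get_metadata_ptr() and reports the used length with
 *  dmaengine_desc_set_metadata_len().
 *
 * Clients must check support with dmaengine_is_metadata_mode_supported();
 * the two modes are mutually exclusive for a given descriptor.
 */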
enum dma_desc_metadata_mode {
	DESC_METADATA_NONE = 0,
	DESC_METADATA_CLIENT = BIT(0),
	DESC_METADATA_ENGINE = BIT(1),
};

struct dma_chan_percpu {
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};
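/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */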
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};
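/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @slave: ptr to the device using this channel
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @name: backlink name for sysfs
 * @dbg_client_name: slave name for debugfs in the format
 *	dev_name(requester's dev):channel name
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel specific data for the router
 * @private: private data for certain client-channel associations
 */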
struct dma_chan {
	struct dma_device *device;
	struct device *slave;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	int chan_id;
	struct dma_chan_dev *dev;
	const char *name;
#ifdef CONFIG_DEBUG_FS
	char *dbg_client_name;
#endif

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	struct dma_router *router;
	void *route_data;

	void *private;
};
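/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @chan_dma_dev: the channel is using custom/different dma-mapping
 *	compared to the parent dma_device
 */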
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	bool chan_dma_dev;
};

enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
	DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};
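/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave channel.
 *	DEPRECATED: drivers should use the direction argument to the
 *	device_prep_slave_sg and device_prep_dma_cyclic functions or the
 *	dir field of struct dma_interleaved_template instead.
 * @src_addr: physical address where DMA slave data should be read (RX);
 *	ignored if the source is memory.
 * @dst_addr: physical address where DMA slave data should be written (TX);
 *	ignored if the destination is memory.
 * @src_addr_width: width in bytes of the source (RX) register where DMA
 *	data shall be read.
 * @dst_addr_width: same as @src_addr_width but for destination (TX).
 * @src_maxburst: maximum number of words (in units of @src_addr_width, not
 *	bytes) that can be sent in one burst to the device.
 * @dst_maxburst: same as @src_maxburst but for the destination.
 * @src_port_window_size: length in words of the register area on the device
 *	side when the FIFO is accessed as a circular buffer.
 * @dst_port_window_size: same as @src_port_window_size but for the destination.
 * @device_fc: flow controller setting, only valid for slave channels; set to
 *	true if the peripheral is the flow controller.
 * @peripheral_config: peripheral configuration passed to the DMA driver.
 * @peripheral_size: size of the @peripheral_config buffer.
 *
 * This struct is passed to the DMA engine driver via dmaengine_slave_config()
 * to inform it about the slave transfer attributes.
 */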
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
	void *peripheral_config;
	size_t peripheral_size;
};
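/**
 * enum dma_residue_granularity - Granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported; the
 *  channel can only tell whether a descriptor has completed, and the residue
 *  field of dma_tx_state will always be 0.
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each completed
 *  segment of the transfer (for cyclic transfers, after each period).
 * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
 *  burst, typically backed by a hardware progress register.
 */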
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};
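/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports.
 *	Since enum dma_transfer_direction is not defined as bit flags,
 *	the controller should set and the client should test BIT(<TYPE>).
 * @min_burst: min burst capability per-transfer
 * @max_burst: max burst capability per-transfer
 * @max_sg_burst: max number of SG list entries executed in a single burst
 *	DMA transaction with no software intervention for reinitialization;
 *	zero means an unlimited number of entries.
 * @cmd_pause: true if pause is supported (i.e. for reading residue or
 *	for resuming later)
 * @cmd_resume: true if resume is supported
 * @cmd_terminate: true if the terminate command is supported
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: if a descriptor can be reused by the client and
 *	resubmitted multiple times
 */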
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 min_burst;
	u32 max_burst;
	u32 max_sg_burst;
	bool cmd_pause;
	bool cmd_resume;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);
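/**
 * typedef dma_filter_fn - callback filter for dma_request_channel()
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel()
 *
 * When this optional parameter is provided, the filter function is called
 * once for each free channel that has a capability in the requested mask.
 * The filter is expected to return 'true' when it finds a suitable channel.
 */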
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	u16 map_cnt;
#else
	u8 map_cnt;
#endif
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[];
};

struct dma_async_tx_descriptor;

struct dma_descriptor_metadata_ops {
	int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
		      size_t len);

	void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
			 size_t *payload_len, size_t *max_len);
	int (*set_len)(struct dma_async_tx_descriptor *desc,
		       size_t payload_len);
};
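/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign an ordered cookie and mark the
 *	descriptor pending; pushed to hardware on .issue_pending()
 * @desc_free: driver routine to free a reusable descriptor
 * @callback: routine to call after this operation is complete
 * @callback_result: routine to call after this operation is complete,
 *	including the transaction result
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: hook for generic DMA unmap data
 * @desc_metadata_mode: core managed metadata mode to protect mixed use of
 *	DESC_METADATA_CLIENT or DESC_METADATA_ENGINE; otherwise
 *	DESC_METADATA_NONE
 * @metadata_ops: DMA driver provided metadata ops, set by the DMA driver
 *	if metadata mode is supported for the descriptor
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */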
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
	enum dma_desc_metadata_mode desc_metadata_mode;
	struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (!tx->unmap)
		return;

	dmaengine_unmap_put(tx->unmap);
	tx->unmap = NULL;
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif
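/**
 * struct dma_tx_state - filled in to report the status of a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 * @in_flight_bytes: amount of data in bytes cached by the DMA.
 */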
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
	u32 in_flight_bytes;
};

enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
	DMAENGINE_ALIGN_128_BYTES = 7,
	DMAENGINE_ALIGN_256_BYTES = 8,
};
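/**
 * struct dma_slave_map - associates a slave device and its slave channel with
 *	a parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */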
struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};
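/**
 * struct dma_filter - information for slave device/channel to filter_fn/param
 *	mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel entries in the map
 * @map: array of channel-to-filter mapping data
 */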
struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};
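/**
 * struct dma_device - info on the entity supplying DMA services
 * @ref: reference is taken and put every time a channel is allocated or freed
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @desc_metadata_modes: supported metadata modes by the DMA device
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for the dma mapping api
 * @owner: owner module (automatically set based on the provided dev)
 * @chan_ida: unique channel ID
 * @chan_mutex: protects @chan_ida
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports
 * @min_burst: min burst capability per-transfer
 * @max_burst: max burst capability per-transfer
 * @max_sg_burst: max number of SG list entries executed in a single burst
 *	DMA transaction with no software intervention for reinitialization;
 *	zero means an unlimited number of entries.
 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_router_config: optional callback for DMA router configuration
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_memcpy_sg: prepares a memcpy between scatterlists
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio;
 *	the callback is called after each period_len bytes have been transferred
 * @device_prep_interleaved_dma: transfer expression in a generic way
 * @device_prep_dma_imm_data: DMA an 8 byte immediate value to the dst address
 * @device_caps: may be used to override the generic DMA slave capabilities
 *	with per-channel specific ones
 * @device_config: pushes a new configuration to a channel, returns 0 or an
 *	error code
 * @device_pause: pauses any transfer happening on a channel, returns 0 or an
 *	error code
 * @device_resume: resumes any transfer on a channel previously paused,
 *	returns 0 or an error code
 * @device_terminate_all: aborts all transfers on a channel, returns 0 or an
 *	error code
 * @device_synchronize: synchronizes the termination of a transfer to the
 *	current context
 * @device_tx_status: poll for transaction completion; the optional txstate
 *	parameter can be supplied to receive auxiliary transfer status
 *	information, otherwise the call just returns a simple status code
 * @device_issue_pending: pushes pending transactions to hardware
 * @device_release: optional callback invoked once the last reference to the
 *	device is dropped after dma_async_device_unregister()
 * @dbg_summary_show: optional routine to show contents in debugfs; default
 *	code is used when this is omitted, but custom code can show extra,
 *	device specific information
 * @dbg_dev_root: the root folder in debugfs for this device
 */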
struct dma_device {
	struct kref ref;
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	enum dma_desc_metadata_mode desc_metadata_modes;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;
	struct module *owner;
	struct ida chan_ida;
	struct mutex chan_mutex;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 min_burst;
	u32 max_burst;
	u32 max_sg_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	int (*device_router_config)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	void (*device_caps)(struct dma_chan *chan,
			    struct dma_slave_caps *caps);
	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
	void (*device_release)(struct dma_device *dev);

	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
	struct dentry *dbg_dev_root;
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
		return NULL;

	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
		return NULL;
	if (flags & DMA_PREP_REPEAT &&
	    !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
		return NULL;

	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
		return NULL;

	return chan->device->device_prep_dma_memcpy(chan, dest, src,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy_sg(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy_sg)
		return NULL;

	return chan->device->device_prep_dma_memcpy_sg(chan, dst_sg, dst_nents,
						       src_sg, src_nents,
						       flags);
}

static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
		enum dma_desc_metadata_mode mode)
{
	if (!chan)
		return false;

	return !!(chan->device->desc_metadata_modes & mode);
}

#ifdef CONFIG_DMA_ENGINE
int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len);
void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len);
int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len);
#else
static inline int dmaengine_desc_attach_metadata(
		struct dma_async_tx_descriptor *desc, void *data, size_t len)
{
	return -EINVAL;
}
static inline void *dmaengine_desc_get_metadata_ptr(
		struct dma_async_tx_descriptor *desc, size_t *payload_len,
		size_t *max_len)
{
	return NULL;
}
static inline int dmaengine_desc_set_metadata_len(
		struct dma_async_tx_descriptor *desc, size_t payload_len)
{
	return -EINVAL;
}
#endif
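/**
 * dmaengine_terminate_all() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * This function is DEPRECATED, use either dmaengine_terminate_sync() or
 * dmaengine_terminate_async() instead.
 */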
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}
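/**
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending descriptors
 * that have previously been submitted to the channel. It is not guaranteed
 * though that the transfer for the active descriptor has stopped when the
 * function returns. Furthermore it is possible that the complete callback of
 * a submitted transfer is still running when this function returns.
 *
 * dmaengine_synchronize() needs to be called before it is safe to free
 * memory accessed by previously submitted descriptors or before freeing any
 * resources accessed from within the completion callback of any previously
 * submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
 *
 * If none of the two conditions above apply, consider using
 * dmaengine_terminate_sync() instead.
 */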
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}
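/**
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated with
 * them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 *
 * The behavior of this function is undefined if dma_async_issue_pending() has
 * been called between dmaengine_terminate_async() and this function.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */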
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}
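/**
 * dmaengine_terminate_sync() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending transfers
 * that have previously been submitted to the channel. It is similar to
 * dmaengine_terminate_async() but guarantees that the DMA transfer has
 * actually stopped and that all complete callbacks have finished running
 * when the function returns.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */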
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	return !(((1 << align) - 1) & (off1 | off2 | len));
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}
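/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */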
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
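/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */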
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}
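/*
 * Illustrative sketch (not part of the original header): a minimal
 * memory-to-device slave transfer using the API declared above.  The channel
 * name "tx", the buffer handle, the FIFO address and the callback are
 * hypothetical placeholders, and error handling is reduced to the essentials.
 *
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,	// device FIFO (placeholder)
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_chan(dev, "tx");	// may return ERR_PTR()
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		goto release;
 *	desc->callback = my_tx_complete;	// hypothetical completion callback
 *	desc->callback_param = my_ctx;
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		goto release;
 *	dma_async_issue_pending(chan);		// actually start the transfer
 *	...
 * release:
 *	dma_release_channel(chan);
 */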
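/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */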
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}
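/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */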
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (!st)
		return;

	st->last = last;
	st->used = used;
	st->residue = residue;
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn,
						     void *fn_param,
						     struct device_node *np)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
						const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(tx->chan, &caps);
	if (ret)
		return ret;

	if (!caps.descriptor_reuse)
		return -EPERM;

	tx->flags |= DMA_CTRL_REUSE;
	return 0;
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* only descriptors marked for reuse may be freed by the client */
	if (!dmaengine_desc_test_reuse(desc))
		return -EPERM;

	return desc->desc_free(desc);
}

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan);
void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);

#define dma_request_channel(mask, x, y) \
	__dma_request_channel(&(mask), x, y, NULL)

/* Deprecated, please use dma_request_chan() directly */
static inline struct dma_chan * __deprecated
dma_request_slave_channel(struct device *dev, const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	return IS_ERR(ch) ? NULL : ch;
}

static inline struct dma_chan
*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
				  dma_filter_fn fn, void *fn_param,
				  struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(&mask, fn, fn_param, NULL);
}
static inline const char *
dmaengine_get_direction_text(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		return "invalid";
	}
}

static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
{
	if (chan->dev->chan_dma_dev)
		return &chan->dev->device;

	return chan->device->dev;
}

#endif /* LINUX_DMAENGINE_H */