/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * If dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code.
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}
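
/*
 * Usage sketch (not part of the original header): the cookie returned by
 * dmaengine_submit() should be checked with dma_submit_error() before the
 * transfer is relied on. Everything below other than the dmaengine calls
 * is hypothetical.
 *
 *	dma_cookie_t cookie = dmaengine_submit(desc);
 *
 *	if (dma_submit_error(cookie)) {
 *		dev_err(dev, "DMA submit failed: %d\n", cookie);
 *		return cookie;
 *	}
 *	dma_async_issue_pending(chan);
 */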

/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 * @DMA_OUT_OF_ORDER: transaction is completed but not in order (relevant to
 *	channels with the DMA_COMPLETION_NO_ORDER capability)
 */
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
	DMA_OUT_OF_ORDER,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
	DMA_COMPLETION_NO_ORDER,
	DMA_REPEAT,
	DMA_LOAD_EOT,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/**
 * struct data_chunk - element of the scatter-gather list that makes a frame
 * @size: Number of bytes to read from source.
 * @icg: Number of bytes to jump after the last src/dst address of this
 *	chunk and before the first src/dst address of the next chunk.
 * @dst_icg: Number of bytes to jump after the last dst address of this
 *	chunk and before the first dst address of the next chunk; overrides
 *	@icg on the destination side when set.
 * @src_icg: Number of bytes to jump after the last src address of this
 *	chunk and before the first src address of the next chunk; overrides
 *	@icg on the source side when set.
 */
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};

/**
 * struct dma_interleaved_template - template to convey the transfer pattern
 *	and attributes to the DMA controller
 * @src_start: Bus address of source for the first chunk.
 * @dst_start: Bus address of destination for the first chunk.
 * @dir: Specifies the type of Source and Destination.
 * @src_inc: If the source address increments after reading from it.
 * @dst_inc: If the destination address increments after writing to it.
 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
 *	Otherwise, the source is read contiguously (icg ignored).
 *	Ignored if src_inc is false.
 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
 *	Otherwise, the destination is filled contiguously (icg ignored).
 *	Ignored if dst_inc is false.
 * @numf: Number of frames in this template.
 * @frame_size: Number of chunks in a frame, i.e. the size of sgl[].
 * @sgl: Array of {chunk,icg} pairs that make up a frame.
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[];
};
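
/*
 * Construction sketch (not part of the original header): a caller might
 * build a two-chunks-per-frame mem-to-mem template like this; the sizes
 * and addresses are hypothetical. sgl[] is a flexible array, so the
 * template must be allocated with room for frame_size chunks.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 2), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->src_start = src_dma_addr;
 *	xt->dst_start = dst_dma_addr;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->src_sgl = true;	// honour per-chunk icg on the source side
 *	xt->numf = 16;		// 16 frames
 *	xt->frame_size = 2;	// two chunks per frame
 *	xt->sgl[0] = (struct data_chunk){ .size = 64, .icg = 192 };
 *	xt->sgl[1] = (struct data_chunk){ .size = 32, .icg = 224 };
 */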

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK: if clear, the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has a chance to establish any dependency
 *	chains
 * @DMA_PREP_PQ_DISABLE_P: prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q: prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE: indicate to a driver that it is reusing buffers as
 *	sources that were the result of a previous operation; for PQ it
 *	continues the calculation with new sources
 * @DMA_PREP_FENCE: tell the driver that subsequent operations depend
 *	on the result of this operation
 * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
 *	cleared or freed
 * @DMA_PREP_CMD: tell the driver that the data passed to the DMA API is
 *	command data and the descriptor should be in a different format from
 *	normal data descriptors
 * @DMA_PREP_REPEAT: tell the driver that the transaction shall be
 *	automatically repeated when it ends until a transaction is issued on
 *	the same channel with the DMA_PREP_LOAD_EOT flag set
 * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace
 *	any currently repeated transaction when the latter finishes
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
	DMA_PREP_CMD = (1 << 7),
	DMA_PREP_REPEAT = (1 << 8),
	DMA_PREP_LOAD_EOT = (1 << 9),
};

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT: 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT: 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
 * @DESC_METADATA_CLIENT: the metadata buffer is allocated and provided by
 *	the client driver and it is attached to the descriptor
 *	(dmaengine_desc_attach_metadata())
 * @DESC_METADATA_ENGINE: the metadata buffer is allocated and managed by the
 *	DMA driver; the client driver queries the pointer to the engine's
 *	buffer (dmaengine_desc_get_metadata_ptr()) and sets the amount of
 *	data it has written (dmaengine_desc_set_metadata_len())
 *
 * The two modes are mutually exclusive for a given descriptor.
 */
enum dma_desc_metadata_mode {
	DESC_METADATA_NONE = 0,
	DESC_METADATA_CLIENT = BIT(0),
	DESC_METADATA_ENGINE = BIT(1),
};

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @slave: ptr to the device using this channel
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @name: backlink name for sysfs
 * @dbg_client_name: slave name for debugfs in the format
 *	dev_name(requester's dev):channel name, e.g. "2b00000.mcasp:tx"
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel specific data for the router
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	struct device *slave;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;
	const char *name;
#ifdef CONFIG_DEBUG_FS
	char *dbg_client_name;
#endif

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	/* DMA router */
	struct dma_router *router;
	void *route_data;

	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @chan_dma_dev: the channel is using custom/different dma-mapping
 *	compared to the parent dma_device
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	bool chan_dma_dev;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
	DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave channel;
 *	DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are the valid values
 * @src_addr: physical address where DMA slave data should be read (RX)
 * @dst_addr: physical address where DMA slave data should be written (TX)
 * @src_addr_width: width in bytes of the source register where read data
 *	shall be read from
 * @dst_addr_width: width in bytes of the destination register where data
 *	shall be written to
 * @src_maxburst: maximum number of words that can be sent in one burst to
 *	the device (RX)
 * @dst_maxburst: same as src_maxburst but for the device (TX)
 * @src_port_window_size: size of the address window (in words) accessed
 *	sequentially on the RX side before wrapping
 * @dst_port_window_size: same as src_port_window_size but for TX
 * @device_fc: flow controller settings; true when the device is the flow
 *	controller, false when the DMA engine is
 * @slave_id: slave requester id, only valid for slave channels
 * @peripheral_config: peripheral configuration for programming the
 *	peripheral for a dmaengine transfer
 * @peripheral_size: peripheral configuration buffer size
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
	unsigned int slave_id;
	void *peripheral_config;
	size_t peripheral_size;
};
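
/*
 * Configuration sketch (not part of the original header): a typical
 * mem-to-dev setup writing 32-bit words to a peripheral FIFO. The FIFO
 * address and burst length are hypothetical; dmaengine_slave_config() is
 * defined later in this header.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	int ret = dmaengine_slave_config(chan, &cfg);
 *
 *	if (ret)
 *		return ret;
 */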

/**
 * enum dma_residue_granularity - granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: residue reporting is not supported;
 *	the channel can only tell whether a descriptor has completed or not,
 *	and the residue field of dma_tx_state is always 0
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: residue is updated after each completed
 *	segment of the transfer (for cyclic transfers, after each period)
 * @DMA_RESIDUE_GRANULARITY_BURST: residue is updated after each transferred
 *	burst; typically only supported if the hardware has a progress
 *	register of some sort
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports; since
 *	enum dma_transfer_direction is not defined as bit flags, the DMA
 *	controller should set BIT(<TYPE>) and the same should be checked by
 *	the client
 * @min_burst: min burst capability per-transfer
 * @max_burst: max burst capability per-transfer
 * @max_sg_burst: max number of SG list entries executed in a single burst
 *	DMA transaction with no software intervention for reinitialization;
 *	zero value means unlimited number of entries
 * @cmd_pause: true if pause is supported (i.e. for reading residue or for
 *	resuming later)
 * @cmd_resume: true if resume is supported
 * @cmd_terminate: true if the terminate command is supported
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: if a descriptor can be reused by the client and
 *	resubmitted multiple times
 */
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 min_burst;
	u32 max_burst;
	u32 max_sg_burst;
	bool cmd_pause;
	bool cmd_resume;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned. Where 'suitable' indicates a non-busy channel that
 * satisfies the given capability mask. It returns 'true' to indicate that
 * the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	u16 map_cnt;
#else
	u8 map_cnt;
#endif
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[];
};

struct dma_async_tx_descriptor;

struct dma_descriptor_metadata_ops {
	int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
		      size_t len);

	void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
			 size_t *payload_len, size_t *max_len);
	int (*set_len)(struct dma_async_tx_descriptor *desc,
		       size_t payload_len);
};

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
 *	descriptor pending; to be pushed on .issue_pending() call
 * @desc_free: driver routine to free a reusable descriptor
 * @callback: routine to call after this operation is complete
 * @callback_result: error result from a DMA transaction
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: hook for generic DMA unmap data
 * @desc_metadata_mode: core managed metadata mode to protect mixed use of
 *	DESC_METADATA_CLIENT or DESC_METADATA_ENGINE; otherwise
 *	DESC_METADATA_NONE
 * @metadata_ops: DMA driver provided metadata mode ops, needs to be set by
 *	the DMA driver if metadata mode is supported with the descriptor
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
	enum dma_desc_metadata_mode desc_metadata_mode;
	struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (!tx->unmap)
		return;

	dmaengine_unmap_put(tx->unmap);
	tx->unmap = NULL;
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of a transfer
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 * @in_flight_bytes: amount of data in bytes cached by the DMA
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
	u32 in_flight_bytes;
};

/**
 * enum dmaengine_alignment - defines alignment of the DMA async tx
 * buffers
 */
enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
	DMAENGINE_ALIGN_128_BYTES = 7,
	DMAENGINE_ALIGN_256_BYTES = 8,
};

/**
 * struct dma_slave_map - associates slave device and its slave channel with
 * a parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */
struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};

/**
 * struct dma_filter - information for slave device/channel to filter_fn/param
 * mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel in the map
 * @map: array of channel to filter mapping data
 */
struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @ref: reference is taken and put every time a channel is allocated or freed
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @desc_metadata_modes: supported metadata modes by the DMA device
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @owner: owner module (automatically set based on the provided dev)
 * @chan_ida: unique channel ID
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports
 * @min_burst: min burst capability per-transfer
 * @max_burst: max burst capability per-transfer
 * @max_sg_burst: max number of SG list entries executed in a single burst
 *	DMA transaction with no software intervention for reinitialization;
 *	zero value means unlimited number of entries
 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_router_config: optional callback for DMA router configuration
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio;
 *	the function takes a buffer of size buf_len and the callback function
 *	will be called after period_len bytes have been transferred
 * @device_prep_interleaved_dma: transfer expression in a generic way
 * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
 * @device_caps: may be used to override the generic DMA slave capabilities
 *	with per-channel specific ones
 * @device_config: pushes a new configuration to a channel, return 0 or an
 *	error code
 * @device_pause: pauses any transfer happening on a channel, returns
 *	0 or an error code
 * @device_resume: resumes any transfer on a channel previously
 *	paused, returns 0 or an error code
 * @device_terminate_all: aborts all transfers on a channel, returns 0
 *	or an error code
 * @device_synchronize: synchronizes the termination of a transfer to the
 *	current context
 * @device_tx_status: poll for transaction completion; the optional txstate
 *	parameter can be supplied with a pointer to get auxiliary transfer
 *	status information, otherwise the call will just return a simple
 *	status code
 * @device_issue_pending: pushes pending transactions to hardware
 * @device_release: called sometime after dma_async_device_unregister() is
 *	called and there are no further references to this structure
 * @dbg_summary_show: optional routine to show contents in debugfs; default
 *	code will be used when this is omitted, but custom code can show
 *	extra, device specific information
 * @dbg_dev_root: the root folder in debugfs for this device
 */
struct dma_device {
	struct kref ref;
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	enum dma_desc_metadata_mode desc_metadata_modes;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;
	struct module *owner;
	struct ida chan_ida;
	struct mutex chan_mutex;	/* to protect chan_ida */

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 min_burst;
	u32 max_burst;
	u32 max_sg_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	int (*device_router_config)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	void (*device_caps)(struct dma_chan *chan,
			    struct dma_slave_caps *caps);
	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
	void (*device_release)(struct dma_device *dev);

	/* debugfs support */
#ifdef CONFIG_DEBUG_FS
	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
	struct dentry *dbg_dev_root;
#endif
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}
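
/*
 * End-to-end sketch (not part of the original header): preparing a single
 * mem-to-dev transfer, installing a completion callback, submitting it and
 * kicking the engine. buf_dma, buf_len, my_ctx and my_dma_done() are
 * hypothetical.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_dma_done;
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return cookie;
 *	dma_async_issue_pending(chan);
 */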

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
		return NULL;

	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}
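
/*
 * Cyclic sketch (not part of the original header): an audio-style ring of
 * four periods; with DMA_PREP_INTERRUPT the callback fires once per period.
 * ring_dma, period_bytes, my_ctx and my_period_done() are hypothetical.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, ring_dma, 4 * period_bytes,
 *					 period_bytes, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EINVAL;
 *	desc->callback = my_period_done;
 *	desc->callback_param = my_ctx;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * A cyclic transfer never completes on its own; it runs until the client
 * terminates the channel (see dmaengine_terminate_sync() below).
 */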

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
		return NULL;
	if (flags & DMA_PREP_REPEAT &&
	    !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
		return NULL;

	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
		return NULL;

	return chan->device->device_prep_dma_memcpy(chan, dest, src,
						    len, flags);
}

static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
		enum dma_desc_metadata_mode mode)
{
	if (!chan)
		return false;

	return !!(chan->device->desc_metadata_modes & mode);
}

#ifdef CONFIG_DMA_ENGINE
int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len);
void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len);
int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len);
#else
static inline int dmaengine_desc_attach_metadata(
		struct dma_async_tx_descriptor *desc, void *data, size_t len)
{
	return -EINVAL;
}
static inline void *dmaengine_desc_get_metadata_ptr(
		struct dma_async_tx_descriptor *desc, size_t *payload_len,
		size_t *max_len)
{
	return NULL;
}
static inline int dmaengine_desc_set_metadata_len(
		struct dma_async_tx_descriptor *desc, size_t payload_len)
{
	return -EINVAL;
}
#endif

/**
 * dmaengine_terminate_all() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * This function is DEPRECATED, use either dmaengine_terminate_sync() or
 * dmaengine_terminate_async() instead.
 */
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}

/**
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending descriptors
 * that have previously been submitted to the channel. It is not guaranteed
 * though that the transfer for the active descriptor has stopped when the
 * function returns. Furthermore it is possible the complete callback of a
 * submitted transfer is still running when this function returns.
 *
 * dmaengine_synchronize() needs to be called before it is safe to free
 * memory accessed by previously submitted descriptors or before freeing any
 * resources accessed from within the completion callback of any previously
 * submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
 *
 * If none of the two conditions above apply, consider using
 * dmaengine_terminate_sync() instead.
 */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}

/**
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback
 * functions for a previously submitted descriptor have finished running and
 * it is safe to free resources accessed from within the complete callbacks.
 *
 * The behavior of this function is undefined if dma_async_issue_pending()
 * has been called between dmaengine_terminate_async() and this function.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}

/**
 * dmaengine_terminate_sync() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending transfers
 * that have previously been submitted to the channel. It is similar to
 * dmaengine_terminate_async() but guarantees that the DMA transfer has
 * actually been stopped and that all complete callbacks have finished
 * running when the function returns.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}
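
/*
 * Teardown sketch (not part of the original header): stopping a channel
 * from process context before freeing the buffers it operates on and
 * releasing it. dev is a hypothetical client device.
 *
 *	ret = dmaengine_terminate_sync(chan);	// may sleep
 *	if (ret)
 *		dev_warn(dev, "failed to stop DMA: %d\n", ret);
 *	dma_release_channel(chan);
 */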

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}
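
/*
 * Status sketch (not part of the original header): polling a cookie and
 * reading the residue, which is only meaningful at the granularity the
 * channel advertises in dma_slave_caps.residue_granularity.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
 *		pr_debug("%u bytes still to go\n", state.residue);
 */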

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	return !(((1 << align) - 1) & (off1 | off2 | len));
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
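
/*
 * Capability-mask sketch (not part of the original header): requesting any
 * channel that can do memcpy. dma_request_chan_by_mask() is declared later
 * in this header.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */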

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 * The two branches handle cookie wraparound: when the cookie counter has
 * wrapped, @last_complete can be numerically greater than @last_used.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (!st)
		return;

	st->last = last;
	st->used = used;
	st->residue = residue;
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn,
						     void *fn_param,
						     struct device_node *np)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
						const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(tx->chan, &caps);
	if (ret)
		return ret;

	if (!caps.descriptor_reuse)
		return -EPERM;

	tx->flags |= DMA_CTRL_REUSE;
	return 0;
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* this is supported for reusable desc, so check that */
	if (!dmaengine_desc_test_reuse(desc))
		return -EPERM;

	return desc->desc_free(desc);
}
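
/*
 * Reuse sketch (not part of the original header): marking a descriptor
 * reusable so it can be resubmitted after completion, then freeing it
 * explicitly once done. Requires a channel whose dma_slave_caps report
 * descriptor_reuse; resubmission is typically done from the completion
 * callback.
 *
 *	if (!dmaengine_desc_set_reuse(desc)) {
 *		cookie = dmaengine_submit(desc);	// first run
 *		...
 *		cookie = dmaengine_submit(desc);	// resubmit same desc
 *		...
 *		dmaengine_desc_free(desc);		// release when done
 *	}
 */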

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan);
void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
#define dma_request_channel(mask, x, y) \
	__dma_request_channel(&(mask), x, y, NULL)

/* Deprecated, please use dma_request_chan() directly */
static inline struct dma_chan * __deprecated
dma_request_slave_channel(struct device *dev, const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	return IS_ERR(ch) ? NULL : ch;
}

static inline struct dma_chan
*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
				  dma_filter_fn fn, void *fn_param,
				  struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(&mask, fn, fn_param, NULL);
}

static inline char *
dmaengine_get_direction_text(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		return "invalid";
	}
}

static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
{
	if (chan->dev->chan_dma_dev)
		return &chan->dev->device;

	return chan->device->dev;
}
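
/*
 * Mapping sketch (not part of the original header): DMA mappings for a
 * channel should be made against the device returned here, which may differ
 * from the client's own struct device. buf and len are hypothetical.
 *
 *	struct device *dma_dev = dmaengine_get_dma_device(chan);
 *	dma_addr_t buf_dma;
 *
 *	buf_dma = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma_dev, buf_dma))
 *		return -ENOMEM;
 */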
1634
1635#endif
1636