#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/*
 * dma_cookie_t - an opaque DMA cookie: a value >= DMA_MIN_COOKIE identifies
 * a submitted transaction, a negative value is an error code.
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}

/*
 * enum dma_status - DMA transaction status
 */
enum dma_status {
	DMA_COMPLETE,		/* transaction completed */
	DMA_IN_PROGRESS,	/* transaction not yet processed */
	DMA_PAUSED,		/* transaction is paused */
	DMA_ERROR,		/* transaction failed */
	DMA_OUT_OF_ORDER,	/* transaction is completed, but out of order */
};

enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
	DMA_COMPLETION_NO_ORDER,
	DMA_REPEAT,
	DMA_LOAD_EOT,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/*
 * struct data_chunk - element of the scatter-gather list that forms one
 * frame of an interleaved transfer.
 * @size: number of bytes to read/write before jumping to the next chunk
 * @icg: number of bytes to jump after @size bytes before the next chunk
 * @dst_icg: destination-specific ICG; takes precedence over @icg when set
 * @src_icg: source-specific ICG; takes precedence over @icg when set
 */
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};

/*
 * struct dma_interleaved_template - template for an interleaved (2D)
 * transfer: @numf frames of @frame_size chunks each, starting at
 * @src_start/@dst_start.
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[];
};
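
/*
 * Illustrative sketch (not part of the API): building a one-frame
 * interleaved template that copies 64 rows of 512 bytes and skips 128
 * bytes between rows on the destination. Names such as 'xt',
 * 'src_dma_addr' and 'dst_dma_addr' are hypothetical; a real caller must
 * size the allocation for its own 'frame_size'.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->src_start = src_dma_addr;
 *	xt->dst_start = dst_dma_addr;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->src_sgl = false;
 *	xt->dst_sgl = true;
 *	xt->numf = 64;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = 512;
 *	xt->sgl[0].icg = 128;
 */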

enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
	DMA_PREP_CMD = (1 << 7),
	DMA_PREP_REPEAT = (1 << 8),
	DMA_PREP_LOAD_EOT = (1 << 9),
};

enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

enum dma_desc_metadata_mode {
	DESC_METADATA_NONE = 0,
	DESC_METADATA_CLIENT = BIT(0),
	DESC_METADATA_ENGINE = BIT(1),
};

struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/*
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};

struct dma_chan {
	struct dma_device *device;
	struct device *slave;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;
	const char *name;
#ifdef CONFIG_DEBUG_FS
	char *dbg_client_name;
#endif

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	/* DMA router */
	struct dma_router *router;
	void *route_data;

	void *private;
};

struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
};

/*
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};

/*
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: transfer direction; deprecated, use the direction argument
 *	of the prep call instead
 * @src_addr: physical address where DMA slave data should be read (RX)
 * @dst_addr: physical address where DMA slave data should be written (TX)
 * @src_addr_width: width in bytes of the source register
 * @dst_addr_width: width in bytes of the destination register
 * @src_maxburst: maximum number of words (in units of @src_addr_width)
 *	that can be sent in one burst when reading from the device
 * @dst_maxburst: same as above, but for writes to the device
 * @src_port_window_size: size of the source register area, in words
 * @dst_port_window_size: size of the destination register area, in words
 * @device_fc: true when the peripheral is the flow controller
 * @slave_id: slave requester id; only valid for slave channels
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
	unsigned int slave_id;
};
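
/*
 * Illustrative sketch (not part of the API): a typical memory-to-device
 * configuration for a peripheral with a 32-bit TX FIFO register. The
 * address 'fifo_phys' and the burst size are hypothetical; the direction
 * member is left unset since the direction is passed to the prep call.
 *
 *	struct dma_slave_config cfg = { };
 *
 *	cfg.dst_addr = fifo_phys;
 *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	cfg.dst_maxburst = 8;
 *	if (dmaengine_slave_config(chan, &cfg))
 *		goto err;
 */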

/*
 * enum dma_residue_granularity - granularity of the residue reported by
 * the tx_status callback.
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 min_burst;
	u32 max_burst;
	u32 max_sg_burst;
	bool cmd_pause;
	bool cmd_resume;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};
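
/*
 * Illustrative sketch (not part of the API): querying the capabilities of
 * a requested channel before relying on pause or residue support. 'chan'
 * is assumed to have been obtained with dma_request_chan().
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    caps.cmd_pause &&
 *	    caps.residue_granularity >= DMA_RESIDUE_GRANULARITY_SEGMENT) {
 *		// pause/resume and per-segment residue reporting are usable
 *	}
 */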

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/*
 * typedef dma_filter_fn - filter function used by dma_request_channel();
 * returns true if the channel is suitable for the client.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,
	DMA_TRANS_READ_FAILED,
	DMA_TRANS_WRITE_FAILED,
	DMA_TRANS_ABORTED,
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	u16 map_cnt;
#else
	u8 map_cnt;
#endif
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[];
};

struct dma_async_tx_descriptor;

struct dma_descriptor_metadata_ops {
	int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
		      size_t len);

	void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
			 size_t *payload_len, size_t *max_len);
	int (*set_len)(struct dma_async_tx_descriptor *desc,
		       size_t payload_len);
};

struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
	enum dma_desc_metadata_mode desc_metadata_mode;
	struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (!tx->unmap)
		return;

	dmaengine_unmap_put(tx->unmap);
	tx->unmap = NULL;
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/*
 * struct dma_tx_state - filled in to report the status of a transfer
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 * @in_flight_bytes: amount of data in bytes cached by the DMA
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
	u32 in_flight_bytes;
};

/*
 * enum dmaengine_alignment - defines alignment of the DMA async tx
 * buffers
 */
enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};

/*
 * struct dma_slave_map - associates a slave device and its slave channel
 * with the parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */
struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};

/*
 * struct dma_filter - information for slave device/channel to filter_fn/param
 * mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel entries in the map
 * @map: array of channel-to-filter mapping data
 */
struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};

struct dma_device {
	struct kref ref;
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	enum dma_desc_metadata_mode desc_metadata_modes;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;
	struct module *owner;
	struct ida chan_ida;
	struct mutex chan_mutex;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 min_burst;
	u32 max_burst;
	u32 max_sg_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	void (*device_caps)(struct dma_chan *chan,
			    struct dma_slave_caps *caps);
	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
	void (*device_release)(struct dma_device *dev);

#ifdef CONFIG_DEBUG_FS
	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
	struct dentry *dbg_dev_root;
#endif
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}
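
/*
 * Illustrative sketch (not part of the API): the usual slave TX flow built
 * on the helper above. 'dev', 'buf', 'len', 'my_tx_done' and 'ctx' are
 * hypothetical; error handling is reduced to early returns.
 *
 *	dma_addr_t dma_buf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	if (dma_mapping_error(dev, dma_buf))
 *		return -ENOMEM;
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EIO;
 *
 *	desc->callback = my_tx_done;	// runs when the transfer completes
 *	desc->callback_param = ctx;
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *
 *	dma_async_issue_pending(chan);	// nothing starts until this call
 */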

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
		return NULL;

	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}
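
/*
 * Illustrative sketch (not part of the API): a cyclic transfer as used for
 * audio-style ring buffers; the callback fires once per period and the
 * transfer runs until terminated. Buffer layout and names are hypothetical.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, ring_dma, ring_size,
 *					 period_size, DMA_DEV_TO_MEM,
 *					 DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EIO;
 *	desc->callback = period_elapsed;
 *	desc->callback_param = ctx;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	// ... later: dmaengine_terminate_sync(chan);
 */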

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
		return NULL;
	if (flags & DMA_PREP_REPEAT &&
	    !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
		return NULL;

	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
		return NULL;

	return chan->device->device_prep_dma_memcpy(chan, dest, src,
						    len, flags);
}
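
/*
 * Illustrative sketch (not part of the API): a memcpy offload between two
 * already-mapped DMA addresses. 'dst_dma' and 'src_dma' are hypothetical;
 * the wait is done here by polling with dma_sync_wait() for brevity.
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EIO;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */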

static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
		enum dma_desc_metadata_mode mode)
{
	if (!chan)
		return false;

	return !!(chan->device->desc_metadata_modes & mode);
}

#ifdef CONFIG_DMA_ENGINE
int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len);
void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len);
int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len);
#else
static inline int dmaengine_desc_attach_metadata(
		struct dma_async_tx_descriptor *desc, void *data, size_t len)
{
	return -EINVAL;
}
static inline void *dmaengine_desc_get_metadata_ptr(
		struct dma_async_tx_descriptor *desc, size_t *payload_len,
		size_t *max_len)
{
	return NULL;
}
static inline int dmaengine_desc_set_metadata_len(
		struct dma_async_tx_descriptor *desc, size_t payload_len)
{
	return -EINVAL;
}
#endif

/*
 * dmaengine_terminate_all() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * This function is DEPRECATED. Use either dmaengine_terminate_async()
 * or dmaengine_terminate_sync() instead.
 */
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}

/*
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Terminates all active and pending descriptors on the channel. The
 * transfer of the currently active descriptor may still be running and
 * completion callbacks may still be in flight when this returns; call
 * dmaengine_synchronize() before freeing any memory or resources they
 * use. May be called from atomic context as well as from within a
 * completion callback of a descriptor submitted on the same channel.
 */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}

/*
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Waits until the termination started by dmaengine_terminate_async() has
 * completed: when this returns, all transfers for previously issued
 * descriptors have stopped and their completion callbacks have finished
 * running. Must not be called from atomic context or from within a
 * completion callback of a descriptor submitted on the same channel.
 */
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}

/*
 * dmaengine_terminate_sync() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Same as dmaengine_terminate_async(), but also waits for the termination
 * to complete before returning. Must not be called from atomic context or
 * from within a completion callback of a descriptor submitted on the same
 * channel.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}
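
/*
 * Illustrative sketch (not part of the API): the two-step teardown used
 * when the caller may be in atomic context at first but can sleep later,
 * e.g. stop in an interrupt handler, then clean up in the remove path.
 * 'dev', 'dma_buf' and 'len' are hypothetical.
 *
 *	// atomic context: stop issuing and abort what is queued
 *	dmaengine_terminate_async(chan);
 *	...
 *	// process context: wait until callbacks are done, then free buffers
 *	dmaengine_synchronize(chan);
 *	dma_unmap_single(dev, dma_buf, len, DMA_TO_DEVICE);
 */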

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	return !(((1 << align) - 1) & (off1 | off2 | len));
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}
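
/*
 * Worked example (illustrative): with max_pq = 8 and no native
 * continuation support, a DMA_PREP_CONTINUE request leaves 8 - 3 = 5
 * usable sources, or 8 - 1 = 7 when DMA_PREP_PQ_DISABLE_P is also set.
 */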

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
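
/*
 * Illustrative sketch (not part of the API): requesting a memcpy-capable
 * channel by capability mask. The filter function and its parameter are
 * optional and are passed as NULL here.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *	...
 *	dma_release_channel(chan);
 */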

/*
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

/*
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

/*
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}
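
/*
 * Worked example (illustrative): cookies increase monotonically until they
 * wrap back to DMA_MIN_COOKIE. With last_complete = 5 and last_used = 9,
 * cookie 4 reports DMA_COMPLETE and cookie 7 DMA_IN_PROGRESS. After a
 * wrap, e.g. last_complete = 2147483640 and last_used = 3, cookie
 * 2147483645 is still DMA_IN_PROGRESS while cookie 2147483630 is complete.
 */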

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (!st)
		return;

	st->last = last;
	st->used = used;
	st->residue = residue;
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn,
						     void *fn_param,
						     struct device_node *np)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
	const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(tx->chan, &caps);
	if (ret)
		return ret;

	if (!caps.descriptor_reuse)
		return -EPERM;

	tx->flags |= DMA_CTRL_REUSE;
	return 0;
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* this is supported for reusable desc, so check that */
	if (!dmaengine_desc_test_reuse(desc))
		return -EPERM;

	return desc->desc_free(desc);
}

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan);
void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
#define dma_request_channel(mask, x, y) \
	__dma_request_channel(&(mask), x, y, NULL)

/* Deprecated, please use dma_request_chan() directly */
static inline struct dma_chan * __deprecated
dma_request_slave_channel(struct device *dev, const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	return IS_ERR(ch) ? NULL : ch;
}

static inline struct dma_chan
*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
				  dma_filter_fn fn, void *fn_param,
				  struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(&mask, fn, fn_param, NULL);
}

static inline const char *
dmaengine_get_direction_text(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		return "invalid";
	}
}
#endif /* LINUX_DMAENGINE_H */