#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>
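
/*
 * dma_cookie_t - an opaque DMA transaction cookie.
 *
 * A value > 0 identifies a submitted DMA request; a value < 0 is an
 * error code.  dma_submit_error() below maps a cookie to 0 on success
 * or to the negative error code otherwise.
 */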
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}
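
/*
 * enum dma_status - DMA transaction status, as reported by
 * device_tx_status() / dma_async_is_tx_complete().
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */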
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};
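
/*
 * enum dma_transaction_type - capability/operation types a DMA device may
 * advertise in its cap_mask (memcpy, xor, pq, memset, slave transfers,
 * cyclic and interleaved transfers, etc.).  DMA_TX_TYPE_END is not a real
 * operation; it is a sentinel used to size capability bitmaps such as
 * dma_cap_mask_t.
 */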
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,

	DMA_TX_TYPE_END,
};
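
/*
 * enum dma_transfer_direction - direction of a transfer relative to
 * memory: memory-to-memory, memory-to-device, device-to-memory or
 * device-to-device.
 */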
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};
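
/*
 * struct data_chunk - element of an interleaved transfer template.
 * @size: number of contiguous bytes to read/write before jumping to the
 *	next chunk.
 * @icg: inter-chunk gap, i.e. bytes to skip after this chunk before the
 *	next one starts, used when no per-direction gap is given.
 * @dst_icg: destination-specific inter-chunk gap, preferred over @icg
 *	when non-zero.
 * @src_icg: source-specific inter-chunk gap, preferred over @icg when
 *	non-zero.
 */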
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};
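
/*
 * struct dma_interleaved_template - template describing an interleaved
 * transfer, passed to device_prep_interleaved_dma().
 * @src_start: bus address of the source for the first chunk.
 * @dst_start: bus address of the destination for the first chunk.
 * @dir: transfer direction (see enum dma_transfer_direction).
 * @src_inc: whether the source address advances after each chunk.
 * @dst_inc: whether the destination address advances after each chunk.
 * @src_sgl: whether the per-chunk gap applies on the source side
 *	(only meaningful when @src_inc is true).
 * @dst_sgl: whether the per-chunk gap applies on the destination side
 *	(only meaningful when @dst_inc is true).
 * @numf: number of frames in the template.
 * @frame_size: number of chunks per frame, i.e. entries in @sgl[].
 * @sgl: array of @frame_size data_chunk descriptions.
 */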
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};
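
/*
 * enum dma_ctrl_flags - flags passed to the device_prep_*() routines to
 * augment how an operation is prepared and completed.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) on completion.
 * @DMA_CTRL_ACK: the descriptor may be reused or freed as soon as it
 *	completes; if clear, the client intends to chain dependent
 *	operations and will acknowledge the descriptor later.
 * @DMA_PREP_PQ_DISABLE_P: do not generate the P (xor) result.
 * @DMA_PREP_PQ_DISABLE_Q: do not generate the Q (syndrome) result.
 * @DMA_PREP_CONTINUE: the operation continues a previous P+Q computation,
 *	reusing its results as sources.
 * @DMA_PREP_FENCE: subsequent operations depend on the result of this one.
 * @DMA_CTRL_REUSE: the descriptor may be resubmitted after completion
 *	until the client explicitly frees it.
 * @DMA_PREP_CMD: the data passed describes a device command rather than
 *	a normal data transfer (driver specific).
 */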
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
	DMA_PREP_CMD = (1 << 7),
};

enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

struct dma_chan_percpu {
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};
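
/*
 * struct dma_chan - devices supply DMA channels, clients use them.
 * @device: pointer to the parent dma_device
 * @cookie: last cookie value returned to a client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this channel to the device's channel list
 * @local: per-cpu usage statistics
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure, if any
 * @route_data: channel-specific data for the router, if any
 * @private: private data for certain client-channel associations
 */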
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	struct dma_router *router;
	void *route_data;

	void *private;
};

struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};
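
/*
 * struct dma_slave_config - runtime configuration for a slave channel,
 * normally set by the client via dmaengine_slave_config() before
 * preparing slave transfers.
 * @direction: transfer direction (largely superseded by the direction
 *	argument passed to the prep_*() calls).
 * @src_addr: physical address of the device register to read from
 *	(used for DMA_DEV_TO_MEM).
 * @dst_addr: physical address of the device register to write to
 *	(used for DMA_MEM_TO_DEV).
 * @src_addr_width/@dst_addr_width: register width in bytes.
 * @src_maxburst/@dst_maxburst: largest burst, in units of the address
 *	width, the device side can accept without over/underrun.
 * @src_port_window_size/@dst_port_window_size: size, in address-width
 *	units, of the register window accessed per burst when the device
 *	exposes a FIFO window rather than a single register (0 otherwise).
 * @device_fc: use the peripheral as flow controller, where supported.
 * @slave_id: platform-specific request line identifier for dmaengine
 *	drivers that cannot derive it by other means.
 */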
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
	unsigned int slave_id;
};
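
/*
 * enum dma_residue_granularity - granularity at which a driver can report
 * the transfer residue in dma_tx_state.
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: residue only updates once a whole
 *	descriptor has completed (effectively no in-flight reporting).
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: residue updates after each completed
 *	segment (scatter-gather entry or cyclic period).
 * @DMA_RESIDUE_GRANULARITY_BURST: residue updates after each burst.
 */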
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};
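
/*
 * struct dma_slave_caps - capabilities a slave channel exposes to clients,
 * filled in by dma_get_slave_caps() from the dma_device fields below.
 * @src_addr_widths/@dst_addr_widths: bitmasks of supported
 *	enum dma_slave_buswidth values.
 * @directions: bitmask of supported enum dma_transfer_direction values.
 * @max_burst: maximum supported burst value.
 * @cmd_pause/@cmd_resume/@cmd_terminate: whether the pause, resume and
 *	terminate operations are supported.
 * @residue_granularity: granularity of residue reporting.
 * @descriptor_reuse: whether a descriptor may be resubmitted multiple times.
 */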
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool cmd_pause;
	bool cmd_resume;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);
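
/*
 * dma_filter_fn - predicate passed to dma_request_channel(); it is called
 * for each free channel that satisfies the capability mask and should
 * return true for the channel the caller wants, false otherwise.
 */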
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,
	DMA_TRANS_READ_FAILED,
	DMA_TRANS_WRITE_FAILED,
	DMA_TRANS_ABORTED,
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	u16 map_cnt;
#else
	u8 map_cnt;
#endif
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[0];
};
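
/*
 * struct dma_async_tx_descriptor - asynchronous transaction descriptor,
 * handed back by the device_prep_*() routines and queued via tx_submit().
 * @cookie: tracking cookie for this transaction
 * @flags: dma_ctrl_flags for this transaction
 * @phys: physical address of the descriptor
 * @chan: target channel
 * @tx_submit: accept the descriptor and assign it a cookie
 * @desc_free: driver hook to free a reusable descriptor once the client
 *	no longer needs it
 * @callback / @callback_result / @callback_param: routine (and argument)
 *	invoked after this transaction completes; the _result variant also
 *	receives a struct dmaengine_result
 * @unmap: hook for generic DMA unmap data
 * @next / @parent / @lock: dependency-chaining state, only present when
 *	async_tx channel switching is enabled
 */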
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif
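
/*
 * struct dma_tx_state - filled in by device_tx_status().
 * @last: last completed cookie
 * @used: last issued cookie (i.e. the one in progress or queued)
 * @residue: bytes remaining for the transaction identified by the cookie
 *	passed to the status call, reported at the granularity the driver
 *	advertises.
 */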
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};

struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};

struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};
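
/*
 * struct dma_device - info on the entity supplying DMA services.
 *
 * The first group of members describes the device as a whole: channel
 * bookkeeping (@chancnt, @privatecnt, @channels, @global_node), the
 * optional @filter used for channel matching, the capability mask,
 * offload limits and alignment constraints for memcpy/xor/pq/memset,
 * and the slave capabilities (@src_addr_widths, @dst_addr_widths,
 * @directions, @max_burst, @descriptor_reuse, @residue_granularity)
 * reported through dma_get_slave_caps().
 *
 * The remaining members are the driver entry points: channel resource
 * management, the per-operation device_prep_*() routines that return a
 * dma_async_tx_descriptor, runtime channel control (@device_config,
 * @device_pause, @device_resume, @device_terminate_all,
 * @device_synchronize), @device_tx_status for polling completion and
 * residue, and @device_issue_pending to push queued descriptors to
 * hardware.
 */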
struct dma_device {
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}
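
/*
 * A minimal sketch of a typical slave-DMA client sequence, assuming a
 * channel was already obtained (e.g. via dma_request_chan()) and "buf"
 * is a DMA-mapped buffer of "len" bytes; "cfg", "xfer_done" and "ctx"
 * are the client's own, and error handling is omitted:
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	desc->callback = xfer_done;
 *	desc->callback_param = ctx;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */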

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
		return NULL;

	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
		return NULL;

	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
	struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
	unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
		return NULL;

	return chan->device->device_prep_dma_memcpy(chan, dest, src,
						    len, flags);
}
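
/*
 * dmaengine_terminate_all() - legacy helper to abort all active and
 * pending transfers on a channel.  New code should prefer
 * dmaengine_terminate_async() or dmaengine_terminate_sync() below, which
 * make the completion-callback guarantees explicit.
 */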
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}
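
/*
 * dmaengine_terminate_async() - terminate all active and pending
 * descriptors on the channel without waiting.  Completion callbacks may
 * still run (or already be running) after this returns; call
 * dmaengine_synchronize() before freeing resources they access.  May be
 * called from atomic context or from within a completion callback.
 */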
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}
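
/*
 * dmaengine_synchronize() - wait until all running completion callbacks
 * for previously issued or terminated descriptors have finished.  Must
 * not be called from atomic context; typically used after
 * dmaengine_terminate_async() before freeing memory the callbacks touch.
 */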
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}
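
/*
 * dmaengine_terminate_sync() - terminate all activity on the channel and
 * then synchronize, so that no completion callback is running or will run
 * once this returns.  Must not be called from atomic context or from
 * within a completion callback.
 */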
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}
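
/*
 * dma_maxpq - reduce maxpq in the face of continued operations.
 *
 * When an engine does not natively support continuation
 * (DMA_HAS_PQ_CONTINUE), a continued P+Q operation must reserve source
 * slots to feed the previous results back in: three slots when both P
 * and Q are carried forward, or one when P is disabled, hence the
 * reduced limits returned here.
 */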
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
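
/*
 * dma_async_issue_pending - flush pending transactions to hardware.
 * @chan: target DMA channel
 *
 * Descriptors handed over with dmaengine_submit() are only guaranteed to
 * start once this is called.
 */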
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}
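
/*
 * dma_async_is_complete - test a cookie against channel state.
 * @cookie: transaction identifier to test
 * @last_complete: last cookie the channel has completed
 * @last_used: last cookie the channel has issued
 *
 * Decides whether @cookie lies in the completed range, taking cookie
 * wrap-around into account, and returns DMA_COMPLETE or DMA_IN_PROGRESS
 * accordingly.  Typically used by drivers when implementing
 * device_tx_status().
 */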
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
						const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;
	int ret;

	/* Do not read the caps if querying them failed. */
	ret = dma_get_slave_caps(tx->chan, &caps);
	if (ret)
		return ret;

	if (caps.descriptor_reuse) {
		tx->flags |= DMA_CTRL_REUSE;
		return 0;
	} else {
		return -EPERM;
	}
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	if (dmaengine_desc_test_reuse(desc))
		return desc->desc_free(desc);
	else
		return -EPERM;
}

int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(mask, fn, fn_param);
}
#endif
1435