1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#ifndef LINUX_DMAENGINE_H
18#define LINUX_DMAENGINE_H
19
20#include <linux/device.h>
21#include <linux/err.h>
22#include <linux/uio.h>
23#include <linux/bug.h>
24#include <linux/scatterlist.h>
25#include <linux/bitmap.h>
26#include <linux/types.h>
27#include <asm/page.h>
28
29
30
31
32
33
/*
 * dma_cookie_t - an opaque DMA transaction identifier returned on submit.
 * < 0: error code returned by the driver; >= DMA_MIN_COOKIE: a valid,
 * monotonically increasing transaction id for status queries.
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE 1
36
37static inline int dma_submit_error(dma_cookie_t cookie)
38{
39 return cookie < 0 ? cookie : 0;
40}
41
42
43
44
45
46
47
48
/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};
55
56
57
58
59
60
61
/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: the DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered; used by the raid5/6
 * async_tx layer to pick a channel with generic offload capability.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};
80
81
82
83
84
85
86
87
/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/**
 * struct data_chunk - element of a scatter-gather list that makes one frame
 *	of an interleaved transfer.
 * @size: number of bytes to read from source.
 * @icg: bytes to jump after the last src/dst address of this chunk and
 *	before the first src/dst address of the next chunk; ignored for a
 *	side whose *_inc is true and *_sgl is false.
 * @dst_icg: destination-specific inter-chunk gap; overrides @icg when set.
 * @src_icg: source-specific inter-chunk gap; overrides @icg when set.
 */
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159struct dma_interleaved_template {
160 dma_addr_t src_start;
161 dma_addr_t dst_start;
162 enum dma_transfer_direction dir;
163 bool src_inc;
164 bool dst_inc;
165 bool src_sgl;
166 bool dst_sgl;
167 size_t numf;
168 size_t frame_size;
169 struct data_chunk sgl[0];
170};
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion
 * @DMA_CTRL_ACK: if clear, the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has a chance to establish a dependency chain
 * @DMA_PREP_PQ_DISABLE_P: prevent generation of the P result
 * @DMA_PREP_PQ_DISABLE_Q: prevent generation of the Q result
 * @DMA_PREP_CONTINUE: indicate that this is a continuation of a previous PQ
 *	operation (sources are dma_dev_to_maxpq()-limited, see dma_maxpq())
 * @DMA_PREP_FENCE: the result of this operation is consumed by a dependent
 *	operation; the engine must not speculatively reorder past it
 * @DMA_CTRL_REUSE: the descriptor may be reused after being completed,
 *	and must be explicitly freed by the client
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
};
199
200
201
202
/**
 * enum sum_check_bits - bit positions of pq_check_flags
 * @SUM_CHECK_P: bit index of the xor/P-parity check result
 * @SUM_CHECK_Q: bit index of the Q-syndrome check result
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT: set on xor/P-parity mismatch, cleared otherwise
 * @SUM_CHECK_Q_RESULT: set on Q-syndrome mismatch, cleared otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};
217
218
219
220
221
222
223typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
224
225
226
227
228
229
230
/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};
236
237
238
239
240
241
/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route needs to be freed
 */
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel-specific data for the router
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	/* DMA router */
	struct dma_router *router;
	void *route_data;

	void *private;
};
282
283
284
285
286
287
288
289
/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};
296
297
298
299
300
/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses, in bytes
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave channel;
 *	deprecated — the prep_* calls take the direction instead.
 * @src_addr: physical address where DMA slave data should be read (RX),
 *	ignored for MEM_TO_DEV transfers
 * @dst_addr: physical address where DMA slave data should be written (TX),
 *	ignored for DEV_TO_MEM transfers
 * @src_addr_width: width in bytes of the source register where read data
 *	shall be accessed
 * @dst_addr_width: same as @src_addr_width but for the destination
 * @src_maxburst: maximum number of words (in units of @src_addr_width) that
 *	can be sent in one burst to the device
 * @dst_maxburst: same as @src_maxburst but for the destination
 * @src_port_window_size: size of the source port window in words; used with
 *	devices that expose several contiguous registers (interleaved access)
 * @dst_port_window_size: same as @src_port_window_size for the destination
 * @device_fc: flow controller setting; true when the device is the flow
 *	controller, false for DMA-controlled flow
 * @slave_id: platform-specific requester id of the slave peripheral
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
	unsigned int slave_id;
};
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
/**
 * enum dma_residue_granularity - granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: residue reporting is not supported;
 *	the residue only changes when a whole descriptor completes
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: residue is updated after each
 *	transferred segment (SG entry, or period for cyclic transfers)
 * @DMA_RESIDUE_GRANULARITY_BURST: residue is updated after each burst
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports
 *	(encoded with enum dma_transfer_direction bit numbers)
 * @max_burst: max burst capability per-transfer
 * @cmd_pause: true if the channel supports the pause command
 * @cmd_terminate: true if the channel supports the terminate command
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: if a descriptor can be reused by client and
 *	resubmitted multiple times
 */
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool cmd_pause;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};
429
/* dma_chan_name - the sysfs device name of a channel ("dmaXchanY") */
static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}
434
435void dma_chan_cleanup(struct kref *kref);
436
437
438
439
440
441
442
443
444
445
446
447
/**
 * typedef dma_filter_fn - channel-selection predicate for
 * dma_request_channel(); return true when @chan suits @filter_param.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

/* completion callback invoked when a transaction finishes */
typedef void (*dma_async_tx_callback)(void *dma_async_param);

/**
 * enum dmaengine_tx_result - per-transaction completion result codes
 * @DMA_TRANS_NOERROR: transaction completed successfully
 * @DMA_TRANS_READ_FAILED: error reading the source
 * @DMA_TRANS_WRITE_FAILED: error writing the destination
 * @DMA_TRANS_ABORTED: transaction was aborted before completion
 */
enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,
	DMA_TRANS_READ_FAILED,
	DMA_TRANS_WRITE_FAILED,
	DMA_TRANS_ABORTED,
};

/* result payload handed to a result-aware completion callback */
struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;	/* bytes left untransferred when the result was raised */
};

/* completion callback variant that also receives a dmaengine_result */
typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);
466
467struct dmaengine_unmap_data {
468 u8 map_cnt;
469 u8 to_cnt;
470 u8 from_cnt;
471 u8 bidi_cnt;
472 struct device *dev;
473 struct kref kref;
474 size_t len;
475 dma_addr_t addr[0];
476};
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction: 0 before submit, >0 after,
 *	-EBUSY on a failed attempt to allocate
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
 *	descriptor pending; not callable in interrupt context
 * @desc_free: driver routine to free a reusable descriptor
 * @callback: routine to call after this operation is complete
 * @callback_result: result-aware variant of @callback
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: unmap bookkeeping released on completion
 * ---async_tx api specific fields (CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH)---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};
513
#ifdef CONFIG_DMA_ENGINE
/* Attach unmap bookkeeping to a descriptor, taking a reference that is
 * dropped by dma_descriptor_unmap() on completion. */
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
/* No-op stubs when the DMA engine core is compiled out. */
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif
539
540static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
541{
542 if (tx->unmap) {
543 dmaengine_unmap_put(tx->unmap);
544 tx->unmap = NULL;
545 }
546}
547
/*
 * txd_* helpers manipulate the async_tx dependency chain links.  When
 * channel switching is disabled the chain fields do not exist, so the
 * helpers collapse to no-ops (chaining is a BUG in that configuration).
 */
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
/* Link @next to run after @txd completes; both links are set under txd_lock. */
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif
605
606
607
608
609
610
611
612
613
614
/**
 * struct dma_tx_state - filled in to report the status of a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};
620
621
622
623
624
/**
 * enum dmaengine_alignment - defines alignment of the DMA async tx
 * buffers, expressed as a power-of-two byte exponent.
 */
enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};
634
635
636
637
638
639
640
641
/**
 * struct dma_slave_map - associates slave device and its slave channel with
 * parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */
struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};
647
648
649
650
651
652
653
654
/**
 * struct dma_filter - information for slave device/channel to filter_fn/param
 * mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel in the map
 * @map: array of channel to filter mapping data
 */
struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports
 * @max_burst: max burst capability per-transfer
 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares memcpy between scatterlists
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio
 * @device_prep_interleaved_dma: transfer expressed in a generic way
 * @device_prep_dma_imm_data: DMA an immediate u64 to a destination address
 * @device_config: pushes a new configuration to a channel
 * @device_pause: pauses any transfer happening on a channel
 * @device_resume: resumes any transfer on a channel previously paused
 * @device_terminate_all: aborts all transfers on a channel
 * @device_synchronize: synchronizes the termination of a transfer to the
 *	current context
 * @device_tx_status: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	/* top bit of max_pq flags native PQ-continuation support */
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};
807
808static inline int dmaengine_slave_config(struct dma_chan *chan,
809 struct dma_slave_config *config)
810{
811 if (chan->device->device_config)
812 return chan->device->device_config(chan, config);
813
814 return -ENOSYS;
815}
816
817static inline bool is_slave_direction(enum dma_transfer_direction direction)
818{
819 return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
820}
821
822static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
823 struct dma_chan *chan, dma_addr_t buf, size_t len,
824 enum dma_transfer_direction dir, unsigned long flags)
825{
826 struct scatterlist sg;
827 sg_init_table(&sg, 1);
828 sg_dma_address(&sg) = buf;
829 sg_dma_len(&sg) = len;
830
831 if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
832 return NULL;
833
834 return chan->device->device_prep_slave_sg(chan, &sg, 1,
835 dir, flags, NULL);
836}
837
838static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
839 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
840 enum dma_transfer_direction dir, unsigned long flags)
841{
842 if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
843 return NULL;
844
845 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
846 dir, flags, NULL);
847}
848
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
/*
 * dmaengine_prep_rio_sg - RapidIO variant of dmaengine_prep_slave_sg;
 * passes the RapidIO-specific extension through the context argument.
 */
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif
863
864static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
865 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
866 size_t period_len, enum dma_transfer_direction dir,
867 unsigned long flags)
868{
869 if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
870 return NULL;
871
872 return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
873 period_len, dir, flags);
874}
875
876static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
877 struct dma_chan *chan, struct dma_interleaved_template *xt,
878 unsigned long flags)
879{
880 if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
881 return NULL;
882
883 return chan->device->device_prep_interleaved_dma(chan, xt, flags);
884}
885
886static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
887 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
888 unsigned long flags)
889{
890 if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
891 return NULL;
892
893 return chan->device->device_prep_dma_memset(chan, dest, value,
894 len, flags);
895}
896
897static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
898 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
899 size_t len, unsigned long flags)
900{
901 if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
902 return NULL;
903
904 return chan->device->device_prep_dma_memcpy(chan, dest, src,
905 len, flags);
906}
907
908static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
909 struct dma_chan *chan,
910 struct scatterlist *dst_sg, unsigned int dst_nents,
911 struct scatterlist *src_sg, unsigned int src_nents,
912 unsigned long flags)
913{
914 if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
915 return NULL;
916
917 return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
918 src_sg, src_nents, flags);
919}
920
921
922
923
924
925
926
927
928static inline int dmaengine_terminate_all(struct dma_chan *chan)
929{
930 if (chan->device->device_terminate_all)
931 return chan->device->device_terminate_all(chan);
932
933 return -ENOSYS;
934}
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957static inline int dmaengine_terminate_async(struct dma_chan *chan)
958{
959 if (chan->device->device_terminate_all)
960 return chan->device->device_terminate_all(chan);
961
962 return -EINVAL;
963}
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983static inline void dmaengine_synchronize(struct dma_chan *chan)
984{
985 might_sleep();
986
987 if (chan->device->device_synchronize)
988 chan->device->device_synchronize(chan);
989}
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
/*
 * dmaengine_terminate_sync - abort all transfers and wait for any running
 * completion callbacks to finish before returning.  May sleep; must not be
 * called from within a completion callback.  Returns the async-terminate
 * error, or 0 on success.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret = dmaengine_terminate_async(chan);

	if (!ret)
		dmaengine_synchronize(chan);

	return ret;
}
1017
1018static inline int dmaengine_pause(struct dma_chan *chan)
1019{
1020 if (chan->device->device_pause)
1021 return chan->device->device_pause(chan);
1022
1023 return -ENOSYS;
1024}
1025
1026static inline int dmaengine_resume(struct dma_chan *chan)
1027{
1028 if (chan->device->device_resume)
1029 return chan->device->device_resume(chan);
1030
1031 return -ENOSYS;
1032}
1033
/* Poll the driver for the status of @cookie, optionally filling @state. */
static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}
1039
/* Submit a prepared descriptor to the pending queue; returns its cookie.
 * The transfer does not start until dma_async_issue_pending() is called. */
static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}
1044
1045static inline bool dmaengine_check_align(enum dmaengine_alignment align,
1046 size_t off1, size_t off2, size_t len)
1047{
1048 size_t mask;
1049
1050 if (!align)
1051 return true;
1052 mask = (1 << align) - 1;
1053 if (mask & (off1 | off2 | len))
1054 return false;
1055 return true;
1056}
1057
/* Per-capability wrappers around dmaengine_check_align() using the
 * corresponding alignment field of the dma_device. */
static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}
1081
1082static inline void
1083dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
1084{
1085 dma->max_pq = maxpq;
1086 if (has_pq_continue)
1087 dma->max_pq |= DMA_HAS_PQ_CONTINUE;
1088}
1089
1090static inline bool dmaf_continue(enum dma_ctrl_flags flags)
1091{
1092 return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
1093}
1094
1095static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
1096{
1097 enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
1098
1099 return (flags & mask) == mask;
1100}
1101
1102static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
1103{
1104 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
1105}
1106
1107static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
1108{
1109 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
1110}
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}
1135
1136static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
1137 size_t dir_icg)
1138{
1139 if (inc) {
1140 if (dir_icg)
1141 return dir_icg;
1142 else if (sgl)
1143 return icg;
1144 }
1145
1146 return 0;
1147}
1148
/* Destination-side effective inter-chunk gap for @chunk of template @xt. */
static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

/* Source-side effective inter-chunk gap for @chunk of template @xt. */
static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}
1162
1163
1164
/* --- public DMA engine client reference counting --- */
#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
/* no-op stubs when the DMA engine core is compiled out */
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

/* --- async_tx layer glue: reference counting and channel lookup --- */
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
/* without channel switching, always use the generic offload channel */
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
1198void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1199 struct dma_chan *chan);
1200
/* Mark a descriptor as acknowledged so the driver may recycle it. */
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

/* Clear the ack flag, preventing recycling until acknowledged again. */
static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

/* Test whether the descriptor has been acknowledged by the client. */
static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}
1215
/* Helpers for manipulating dma_cap_mask_t capability masks; the macro
 * forms take the mask by value for convenient call sites. */
#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

/* Iterate over every capability bit set in @mask. */
#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
1245
1246
1247
1248
1249
1250
1251
1252
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
1270 dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
1271{
1272 struct dma_tx_state state;
1273 enum dma_status status;
1274
1275 status = chan->device->device_tx_status(chan, cookie, &state);
1276 if (last)
1277 *last = state.last;
1278 if (used)
1279 *used = state.used;
1280 return status;
1281}
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last know completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete()
 * the test logic is separated for lightweight testing of multiple cookies.
 * The two branches handle cookie-counter wraparound: when last_complete
 * has wrapped past last_used the "in progress" window is inverted.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}
1304
1305static inline void
1306dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
1307{
1308 if (st) {
1309 st->last = last;
1310 st->used = used;
1311 st->residue = residue;
1312 }
1313}
1314
/* --- DMA engine core client API: real implementations when the core is
 * built in, inert stubs (NULL / no-op / error) otherwise so clients can
 * compile unconditionally. --- */
#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
					dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
					      dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
						const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif
1374
1375#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
1376
1377static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
1378{
1379 struct dma_slave_caps caps;
1380
1381 dma_get_slave_caps(tx->chan, &caps);
1382
1383 if (caps.descriptor_reuse) {
1384 tx->flags |= DMA_CTRL_REUSE;
1385 return 0;
1386 } else {
1387 return -EPERM;
1388 }
1389}
1390
/* Revoke a descriptor's reuse marking. */
static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

/* Test whether a descriptor is marked reusable. */
static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}
1400
1401static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
1402{
1403
1404 if (dmaengine_desc_test_reuse(desc))
1405 return desc->desc_free(desc);
1406 else
1407 return -EPERM;
1408}
1409
1410
1411
1412int dma_async_device_register(struct dma_device *device);
1413void dma_async_device_unregister(struct dma_device *device);
1414void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
1415struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
1416struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
1417#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
1418#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
1419 __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
1420
1421static inline struct dma_chan
1422*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
1423 dma_filter_fn fn, void *fn_param,
1424 struct device *dev, const char *name)
1425{
1426 struct dma_chan *chan;
1427
1428 chan = dma_request_slave_channel(dev, name);
1429 if (chan)
1430 return chan;
1431
1432 if (!fn || !fn_param)
1433 return NULL;
1434
1435 return __dma_request_channel(mask, fn, fn_param);
1436}
1437#endif
1438