#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}

/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_MEMSET_SG,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/*
 * Interleaved Transfer Request
 *
 * A chunk is a collection of consecutive bytes to be transferred.
 * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
 * A frame is the smallest series of contiguous {chunk, icg} pairs that is
 * repeated 'numf' times in a transfer.
 */

/**
 * struct data_chunk - element of the scatter-gather list that makes a frame
 * @size: number of bytes to read from the source
 * @icg: number of bytes to jump after the last src/dst address of this
 *	 chunk and before the first src/dst address of the next chunk
 * @dst_icg: number of bytes to jump on the destination side; overrides @icg
 * @src_icg: number of bytes to jump on the source side; overrides @icg
 */
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};

/**
 * struct dma_interleaved_template - template to convey the transfer pattern
 *	 and attributes to the DMAC driver
 * @src_start: bus address of the source of the first chunk
 * @dst_start: bus address of the destination of the first chunk
 * @dir: specifies the type of source and destination
 * @src_inc: whether the source address increments after reading from it
 * @dst_inc: whether the destination address increments after writing to it
 * @src_sgl: whether the 'icg' of @sgl[] applies to the source (only if @src_inc)
 * @dst_sgl: whether the 'icg' of @sgl[] applies to the destination (only if @dst_inc)
 * @numf: number of frames in this template
 * @frame_size: number of chunks in a frame, i.e. entries in @sgl[]
 * @sgl: array of {chunk, icg} pairs that make up a frame
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency chains
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation; in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 * @DMA_CTRL_REUSE - client can reuse the descriptor and submit again till
 *  cleared or freed
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
	DMA_CTRL_REUSE = (1 << 6),
};

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel-specific data for the router
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	/* DMA router */
	struct dma_router *router;
	void *route_data;

	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave channel.
 *	DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are legal values. Deprecated;
 *	drivers should use the direction argument to the prep functions
 *	instead.
 * @src_addr: physical address where DMA slave data should be read (RX);
 *	ignored if the source is memory.
 * @dst_addr: physical address where DMA slave data should be written (TX);
 *	ignored if the destination is memory.
 * @src_addr_width: width in bytes of the source (RX) register where DMA
 *	data shall be read; may be ignored if the source is memory.
 * @dst_addr_width: same as src_addr_width but for the destination (TX).
 * @src_maxburst: maximum number of words (in units of src_addr_width, not
 *	bytes) that can be transferred in one burst from the source.
 * @dst_maxburst: same as src_maxburst but for the destination (TX).
 * @src_port_window_size: length in words of the register area to be accessed
 *	on the device side; only used for devices exposing a FIFO-like
 *	register range instead of a single register.
 * @dst_port_window_size: same as src_port_window_size but for the destination.
 * @device_fc: flow controller settings; only valid for slave channels. Set
 *	to true if the peripheral should be the flow controller.
 * @slave_id: slave requester id, if applicable. Some dma controllers need
 *	the slave (request line) number to configure the proper routing.
 *
 * This struct is passed in as configuration data to a DMA engine in order to
 * set up a certain channel for DMA transport at runtime. The DMA device/engine
 * has to provide support for an additional callback in the dma_device
 * structure, device_config, which takes this struct as an argument.
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
	unsigned int slave_id;
};
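
/*
 * Illustrative sketch (not part of the original header): a typical client
 * fills a dma_slave_config for the direction it is about to use and applies
 * it with dmaengine_slave_config() before preparing descriptors.  The FIFO
 * address and burst size below are hypothetical placeholders.
 *
 *	struct dma_slave_config cfg = { };
 *
 *	cfg.direction = DMA_MEM_TO_DEV;
 *	cfg.dst_addr = fifo_phys_addr;			// device FIFO (example)
 *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	cfg.dst_maxburst = 8;				// words per burst (example)
 *	if (dmaengine_slave_config(chan, &cfg))
 *		goto err;				// controller rejected config
 */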

/**
 * enum dma_residue_granularity - granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
 *  channel can only tell whether a descriptor has completed or not, so the
 *  residue field of dma_tx_state will always be 0.
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
 *  completed segment of the transfer (for cyclic transfers this is after each
 *  period).
 * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
 *  burst, typically because the hardware has a progress register of some sort.
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports; since
 *	enum dma_transfer_direction is not defined as bit flags, the
 *	controller should set BIT(<TYPE>) and clients check the same way
 * @max_burst: max burst capability per-transfer
 * @cmd_pause: true if pause (and thereby resume) is supported
 * @cmd_terminate: true if the terminate command is supported
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: true if a descriptor can be reused by the client and
 *	resubmitted multiple times
 */
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool cmd_pause;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
	bool descriptor_reuse;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - omnibus filter function used by clients
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel,
 * a suitable (non-busy, capability-matching) channel is passed to this routine
 * for further dispositioning before being returned.  It returns 'true' to
 * indicate that the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
	u8 map_cnt;
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[0];
};

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
 *	descriptor pending. To be pushed on .issue_pending() call
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd,
			     struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd,
			     struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of a transfer
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * enum dmaengine_alignment - defines alignment of the DMA async tx
 * buffers
 */
enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};

/**
 * struct dma_slave_map - associates slave device and its slave channel with
 * parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */
struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;
};

/**
 * struct dma_filter - information for slave device/channel to filter_fn/param
 * mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel entries in the map
 * @map: array of channel-to-filter mapping data
 */
struct dma_filter {
	dma_filter_fn fn;
	int mapcnt;
	const struct dma_slave_map *map;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports
 * @max_burst: max burst capability per-transfer
 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares a scatter-gather memory-to-memory operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function will
 *	be called after period_len bytes have been transferred.
 * @device_prep_interleaved_dma: transfer expression in a generic way
 * @device_prep_dma_imm_data: DMA an immediate data to destination address
 * @device_config: pushes a new configuration to a channel, return 0 or an
 *	error code
 * @device_pause: pauses any transfer happening on a channel. Returns
 *	0 or an error code
 * @device_resume: resumes any transfer on a channel previously
 *	paused. Returns 0 or an error code
 * @device_terminate_all: aborts all transfers on a channel. Returns 0
 *	or an error code
 * @device_synchronize: synchronizes the termination of a transfer to the
 *	current context
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	struct dma_filter filter;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	enum dmaengine_alignment copy_align;
	enum dmaengine_alignment xor_align;
	enum dmaengine_alignment pq_align;
	enum dmaengine_alignment fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	u32 max_burst;
	bool descriptor_reuse;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
		struct dma_chan *chan, struct scatterlist *sg,
		unsigned int nents, int value, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
		struct dma_chan *chan, dma_addr_t dst, u64 data,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);
	void (*device_synchronize)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}
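
/*
 * Illustrative sketch (not part of the original header): the usual slave
 * transfer sequence once the channel has been requested and configured.
 * 'buf' is a dma_addr_t obtained from the DMA mapping API and 'my_callback'
 * is a hypothetical client completion handler.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		goto err;
 *	desc->callback = my_callback;
 *	desc->callback_param = my_data;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		goto err;
 *	dma_async_issue_pending(chan);	// nothing starts until this call
 */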

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
		return NULL;

	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
		return NULL;

	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
		return NULL;

	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
			src_sg, src_nents, flags);
}

/**
 * dmaengine_terminate_all() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * This function is DEPRECATED, use either dmaengine_terminate_sync() or
 * dmaengine_terminate_async() instead.
 */
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}

/**
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending descriptors
 * that have previously been submitted to the channel. It is not guaranteed
 * that the transfer for the active descriptor has stopped when the function
 * returns, and the complete callback of a submitted transfer may still be
 * running when this function returns.
 *
 * dmaengine_synchronize() needs to be called before it is safe to free
 * any memory that is accessed by previously submitted descriptors or before
 * freeing any resources accessed from within the completion callback of any
 * previously submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
 *
 * If neither of the two conditions above applies consider using
 * dmaengine_terminate_sync() instead.
 */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}

/**
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated with
 * them. Furthermore it is guaranteed that all complete callback functions for
 * a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
 *
 * The behavior of this function is undefined if dma_async_issue_pending() has
 * been called between dmaengine_terminate_async() and this function.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the same
 * channel.
 */
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}

/**
 * dmaengine_terminate_sync() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending transfers
 * that have previously been submitted to the channel. It is similar to
 * dmaengine_terminate_async() but guarantees that the DMA transfer has
 * actually stopped and that all complete callbacks have finished running
 * when the function returns.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the same
 * channel.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_terminate_async(chan);
	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}
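
/*
 * Illustrative sketch (not part of the original header): typical channel
 * teardown from a driver remove path, assuming 'chan' was obtained with
 * dma_request_chan().
 *
 *	dmaengine_terminate_sync(chan);	// stop transfers, wait for callbacks
 *	dma_release_channel(chan);	// hand the channel back
 */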

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
					 size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

/* --- public DMA engine API --- */
#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
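
/*
 * Illustrative sketch (not part of the original header): requesting an
 * offload channel by capability for async memcpy.  No filter function is
 * used here, so both dma_request_channel() callback arguments may be NULL.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;	// no suitable channel available
 */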

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}
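
/*
 * Illustrative sketch (not part of the original header): busy-polling a
 * previously issued transfer for completion instead of using an interrupt
 * callback.
 *
 *	enum dma_status status;
 *
 *	do {
 *		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	} while (status == DMA_IN_PROGRESS);
 *	if (status != DMA_COMPLETE)
 *		return -EIO;	// DMA_ERROR or channel problem
 */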

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
		 u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn,
						     void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
						const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;

	dma_get_slave_caps(tx->chan, &caps);

	if (caps.descriptor_reuse) {
		tx->flags |= DMA_CTRL_REUSE;
		return 0;
	} else {
		return -EPERM;
	}
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* this is supported for reusable descriptors, so check that */
	if (dmaengine_desc_test_reuse(desc))
		return desc->desc_free(desc);
	else
		return -EPERM;
}
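
/*
 * Illustrative sketch (not part of the original header): reusing one
 * descriptor for repeated identical transfers on a channel whose driver
 * advertises descriptor_reuse.  'done' is a hypothetical completion signaled
 * from the descriptor callback; the client frees the descriptor explicitly
 * once finished.
 *
 *	if (dmaengine_desc_set_reuse(desc))
 *		goto one_shot;			// reuse not supported
 *	for (i = 0; i < n; i++) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *		wait_for_completion(&done);
 *	}
 *	dmaengine_desc_free(desc);
 */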

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(mask, fn, fn_param);
}
#endif /* LINUX_DMAENGINE_H */