#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>
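
/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * If positive, the cookie identifies a submitted DMA transaction;
 * if negative, it is an error code returned at submit time.
 */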
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1
#define DMA_MAX_COOKIE	INT_MAX

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}
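
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */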
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};
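
/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */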
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
	/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};
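
/**
 * enum dma_transfer_direction - DMA transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */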
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};
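
/**
 * Interleaved Transfer Request
 * ----------------------------
 * A chunk is a collection of contiguous bytes to be transferred.
 * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
 * ICGs may or may not change between chunks.
 * A FRAME is the smallest series of contiguous {chunk,icg} pairs that,
 * when repeated an integral number of times, specifies the transfer.
 * A transfer template is the specification of a frame, the number of times
 * it is to be repeated, and other per-transfer attributes.
 *
 * Practically, a client driver would have ready a template for each
 * type of transfer it is going to need during its lifetime and set
 * only the 'frame_size' and 'numf' fields before submitting the requests.
 *
 *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
 *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
 *
 *    ==  Chunk size
 *    ... ICG
 */

/**
 * struct data_chunk - Element of scatter-gather list that makes a frame.
 * @size: Number of bytes to read from source.
 *	  size_dst := fn(op, size_src), so it doesn't mean much for destination.
 * @icg: Number of bytes to jump after the last src/dst address of this
 *	 chunk and before the first src/dst address of the next chunk.
 *	 Ignored for dst (assumed 0) if dst_inc is true and dst_sgl is false.
 *	 Ignored for src (assumed 0) if src_inc is true and src_sgl is false.
 */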
struct data_chunk {
	size_t size;
	size_t icg;
};
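
/**
 * struct dma_interleaved_template - Template to convey DMAC the transfer
 *	 pattern and attributes.
 * @src_start: Bus address of source for the first chunk.
 * @dst_start: Bus address of destination for the first chunk.
 * @dir: Specifies the type of Source and Destination.
 * @src_inc: If the source address increments after reading from it.
 * @dst_inc: If the destination address increments after writing to it.
 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
 *		Otherwise, source is read contiguously (icg ignored).
 *		Ignored if src_inc is false.
 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
 *		Otherwise, destination is written contiguously (icg ignored).
 *		Ignored if dst_inc is false.
 * @numf: Number of frames in this template.
 * @frame_size: Number of chunks in a frame, i.e. size of sgl[].
 * @sgl: Array of {chunk,icg} pairs that make up a frame.
 */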
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};
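
/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency
 *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
 *  (if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as
 *  single (if not set, do the destination dma-unmapping as page)
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation, in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 */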
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};
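
/**
 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
 * on a running channel.
 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
 * @DMA_PAUSE: pause ongoing transfers
 * @DMA_RESUME: resume paused transfer
 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
 * that need to runtime reconfigure the slave channels (as opposed to passing
 * configuration data in statically from the platform). An additional
 * argument of struct dma_slave_config must be passed in with this
 * command.
 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
 * into external start mode.
 */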
enum dma_ctrl_cmd {
	DMA_TERMINATE_ALL,
	DMA_PAUSE,
	DMA_RESUME,
	DMA_SLAVE_CONFIG,
	FSLDMA_EXTERNAL_START,
};
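
/**
 * enum sum_check_bits - bit position of pq_check_flags
 */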
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};
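
/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */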
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};
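
/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */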
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
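
/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */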
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};
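
/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */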
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;
	void *private;
};
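
/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */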
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};
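
/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */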
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};
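
/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave
 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
 * legal values.
 * @src_addr: this is the physical address where DMA slave data
 * should be read (RX), if the source is memory this argument is
 * ignored.
 * @dst_addr: this is the physical address where DMA slave data
 * should be written (TX), if the destination is memory this argument
 * is ignored.
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
 * Legal values: 1, 2, 4, 8.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in
 * units of the src_addr_width member, not bytes) that can be sent
 * in one burst to the device. Typically something like half the
 * FIFO depth on I/O peripherals so you don't overflow it. This
 * may or may not be applicable on memory sources.
 * @dst_maxburst: same as src_maxburst but for destination target
 * mutatis mutandis.
 * @device_fc: flow controller settings. Only valid for slave channels. Fill
 * with 'true' if the peripheral should be the flow controller. Direction
 * will be selected at runtime.
 * @slave_id: slave requester id. Only valid for slave channels. The DMA
 * slave peripheral will have a unique id as DMA requester which needs to
 * be passed in as part of the slave config.
 *
 * This struct is passed in as configuration data to a DMA engine
 * in order to set up a certain channel for DMA transport at runtime.
 * The DMA device/engine has to provide support for an additional
 * command in the channel config interface, DMA_SLAVE_CONFIG,
 * and this struct will then be passed in as an argument to the
 * DMA engine device_control() function.
 */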
struct dma_slave_config {
	enum dma_transfer_direction direction;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	bool device_fc;
	unsigned int slave_id;
};
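
/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports.
 *	Since enum dma_transfer_direction is not defined as bits for each
 *	type of direction, the dma controller should fill (1 << <TYPE>) and
 *	the same should be checked by the client.
 * @cmd_pause: true, if pause and thereby resume is supported
 * @cmd_terminate: true, if terminate cmd is supported
 */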
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dstn_addr_widths;
	u32 directions;
	bool cmd_pause;
	bool cmd_terminate;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);
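
/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned.  Where 'suitable' indicates a non-busy channel that
 * satisfies the given capability mask.  It returns 'true' to indicate that
 * the channel is suitable.
 */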
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);
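
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */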
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd,
			     struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd,
			     struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif
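
/**
 * struct dma_tx_state - filled in to report the status of
 * a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */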
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};
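
/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares a scatter-gather DMA operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function will
 *	be called after period_len bytes have been transferred.
 * @device_prep_interleaved_dma: transfer expression in a generic way.
 * @device_control: manipulate all pending operations on a channel, returns
 *	zero or error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 * @device_slave_caps: return the slave channel capabilities
 */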
struct dma_device {
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
	int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
};

static inline int dmaengine_device_control(struct dma_chan *chan,
					   enum dma_ctrl_cmd cmd,
					   unsigned long arg)
{
	if (chan->device->device_control)
		return chan->device->device_control(chan, cmd, arg);

	return -ENOSYS;
}

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
			(unsigned long)config);
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}
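
/*
 * Example (illustrative sketch, not part of this header's API): a typical
 * slave transmit sequence configures the channel, prepares a descriptor,
 * submits it, and then kicks the engine.  The names fifo_addr, buf_dma,
 * buf_len and my_callback below are hypothetical placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		goto err;
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		goto err;
 *	desc->callback = my_callback;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		goto err;
 *	dma_async_issue_pending(chan);
 */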

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags, NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	if (!chan || !caps)
		return -EINVAL;

	/* check if the channel supports slave transactions */
	if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
		return -ENXIO;

	if (chan->device->device_slave_caps)
		return chan->device->device_slave_caps(chan, caps);

	return -ENXIO;
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_PAUSE, 0);
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_RESUME, 0);
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}
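
/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 4/ {01} * Q : use Q to continue Q' calculation
 */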
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}
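
/* --- public DMA engine API --- */
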
#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
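
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */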
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}
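
/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */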
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}
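
/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */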
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
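
/*
 * Example (illustrative sketch): polling a cookie to completion.  Busy-wait
 * loops like this are only appropriate in contexts that cannot sleep, and
 * handle_error() is a hypothetical placeholder.
 *
 *	enum dma_status status;
 *
 *	do {
 *		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *		cpu_relax();
 *	} while (status == DMA_IN_PROGRESS);
 *	if (status != DMA_SUCCESS)
 *		handle_error();
 */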

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
					dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_SUCCESS;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
{
	return NULL;
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
#endif
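
/* --- DMA device --- */
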
int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	return __dma_request_channel(mask, fn, fn_param);
}
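
/*
 * Example (illustrative sketch): requesting a memcpy-capable channel.
 * Passing NULL for the filter function and its parameter accepts the first
 * free channel that matches the capability mask.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *	...
 *	dma_release_channel(chan);
 */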
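
/* --- Helper iov-locking functions --- */
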
struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* LINUX_DMAENGINE_H */