/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1
#define DMA_MAX_COOKIE	INT_MAX

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

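/*
 * Example (illustrative sketch, not part of this API): checking the
 * cookie returned by a descriptor submission for failure; "dev" is a
 * hypothetical struct device pointer:
 *
 *	cookie = dmaengine_submit(tx);
 *	if (dma_submit_error(cookie))
 *		dev_err(dev, "descriptor submission failed\n");
 */
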
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency
 *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
 *  (if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as
 *  single (if not set, do the destination dma-unmapping as page)
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation, in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};

/**
 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
 * on a running channel.
 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
 * @DMA_PAUSE: pause ongoing transfers
 * @DMA_RESUME: resume paused transfer
 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA-engines
 * that need to runtime reconfigure the slave channels (as opposed to passing
 * configuration data in statically from the platform). An additional
 * argument of struct dma_slave_config must be passed in with this
 * command.
 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
 * into external start mode.
 */
enum dma_ctrl_cmd {
	DMA_TERMINATE_ALL,
	DMA_PAUSE,
	DMA_RESUME,
	DMA_SLAVE_CONFIG,
	FSLDMA_EXTERNAL_START,
};

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave
 * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
 * legal values, DMA_BIDIRECTIONAL is not acceptable since we
 * need to differentiate ourselves.
 * @src_addr: this is the physical address where DMA slave data
 * should be read (RX), if the source is memory this argument is
 * ignored.
 * @dst_addr: this is the physical address where DMA slave data
 * should be written (TX), if the destination is memory this
 * argument is ignored.
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
 * Legal values: 1, 2, 4, 8.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in
 * units of the src_addr_width member, not bytes) that can be sent
 * in one burst to the device. Typically something like half the
 * FIFO depth on I/O peripherals so you don't overflow it. This
 * may or may not be applicable on memory sources.
 * @dst_maxburst: same as src_maxburst but for destination target
 * mutatis mutandis.
 *
 * This struct is passed in as configuration data to a DMA engine
 * in order to set up a certain channel for DMA transport at runtime.
 * The DMA device/engine has to provide support for an additional
 * command in the channel config interface, DMA_SLAVE_CONFIG
 * and this struct will then be passed in as an argument to the
 * DMA engine device_control() function.
 */
struct dma_slave_config {
	enum dma_data_direction direction;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
};

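/*
 * Example (illustrative sketch): a typical memory-to-peripheral setup
 * performed before preparing slave descriptors; "fifo_phys" is a
 * hypothetical FIFO address and the burst size is device specific:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_TO_DEVICE,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		; (the channel rejected the configuration)
 */
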
static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel a
 * suitable channel is passed to this routine for further dispositioning before
 * being returned.  Where 'suitable' indicates a non-NULL channel that
 * satisfies the given capability mask.  It returns 'true' to indicate that the
 * channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

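/*
 * Example (illustrative sketch): a filter that accepts only a channel
 * with a specific id; matching on chan_id is a hypothetical policy,
 * real drivers usually compare driver-private data instead:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		int wanted = *(int *)param;
 *
 *		return chan->chan_id == wanted;
 *	}
 */
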
typedef void (*dma_async_tx_callback)(void *dma_async_param);

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of
 * a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares a memory-to-memory scatter-gather operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function will
 *	be called after period_len bytes have been transferred.
 * @device_control: manipulate all pending operations on a channel, returns
 *	zero or error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction);
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_device_control(struct dma_chan *chan,
					   enum dma_ctrl_cmd cmd,
					   unsigned long arg)
{
	return chan->device->device_control(chan, cmd, arg);
}

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
					(unsigned long)config);
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_PAUSE, 0);
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_RESUME, 0);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

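/*
 * Example (illustrative sketch): the usual descriptor life cycle on a
 * slave channel; "sgl", "sg_len", "my_done" and "my_ctx" are
 * hypothetical caller state:
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						  DMA_TO_DEVICE,
 *						  DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_done;
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */
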
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

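/*
 * Note that the alignment fields below are shifts, not byte counts.
 * Example: with copy_align == 2 the required alignment is 1 << 2 == 4
 * bytes, so is_dma_copy_aligned(dev, 0, 8, 64) succeeds while
 * is_dma_copy_aligned(dev, 1, 8, 64) fails on the source offset.
 */
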
static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete()
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

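/*
 * Example (illustrative sketch): polling a cookie to completion, then
 * re-testing a second cookie against the same snapshot without another
 * trip into the driver:
 *
 *	dma_cookie_t last, used;
 *	enum dma_status status;
 *
 *	do {
 *		status = dma_async_is_tx_complete(chan, cookie1,
 *						  &last, &used);
 *	} while (status == DMA_IN_PROGRESS);
 *
 *	if (dma_async_is_complete(cookie2, last, used) == DMA_SUCCESS)
 *		; (cookie2 is also complete)
 */
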
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
						     dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
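
/*
 * Example (illustrative sketch): requesting a private slave channel with
 * the hypothetical my_filter() shown earlier, falling back when no
 * channel is available:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	int wanted = 0;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &wanted);
 *	if (!chan)
 *		; (fall back to PIO)
 *	...
 *	dma_release_channel(chan);
 */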

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* DMAENGINE_H */