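/*
 * Core header for the Linux DMA engine (dmaengine) framework: capability
 * masks, channel/device structures, and the async_tx descriptor interface
 * shared by DMA drivers and their clients.
 */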
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * A positive value identifies a submitted transaction; a negative value is
 * an error code returned from tx_submit().
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: DMA_ASYNC_TX is not a capability set by drivers; it is reserved
 * for the async_tx API to request a channel that can service dependency
 * chains.  DMA_PRIVATE marks channels that are excluded from the
 * general-purpose (public) allocator.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK: the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has a chance to establish any dependency
 *	chains
 * @DMA_COMPL_SKIP_SRC_UNMAP: set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP: set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE: set to do the source dma-unmapping as single
 *	(if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE: set to do the destination dma-unmapping as
 *	single (if not set, do the destination dma-unmapping as page)
 * @DMA_PREP_PQ_DISABLE_P: prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q: prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE: indicate to a driver that it is reusing buffers as
 *	sources that were previously generated by this operation
 * @DMA_PREP_FENCE: tell the driver that subsequent operations depend
 *	on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};
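
/*
 * Flags are combined with bitwise OR when a descriptor is prepared, e.g.
 * (illustrative) DMA_PREP_INTERRUPT | DMA_CTRL_ACK requests a completion
 * callback on a descriptor that is acknowledged up front so the engine may
 * recycle it as soon as it completes.
 */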

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT: 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT: 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned.  Where 'suitable' indicates a non-private channel
 * that satisfies the requested capability mask.  It returns 'true' to
 * indicate that the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags;
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};
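
/*
 * A prepared descriptor is returned to the client, which may set @callback
 * and @callback_param before calling tx->tx_submit(tx) to obtain a cookie.
 * Illustrative sketch only (the local names below are hypothetical):
 *
 *	tx->callback = my_completion_fn;
 *	tx->callback_param = my_ctx;
 *	cookie = tx->tx_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 */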

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pq validation operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	void (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
		dma_cookie_t cookie, dma_cookie_t *last,
		dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};
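
/*
 * Alignment helpers: the *_align members of struct dma_device are
 * power-of-two shifts (e.g. copy_align == 2 means offsets and length must
 * be 4-byte aligned); a shift of zero means no alignment constraint.
 * Illustrative use (not part of this API):
 *
 *	if (!is_dma_copy_aligned(chan->device, src_off, dst_off, len))
 *		return -EINVAL;	// fall back to a CPU copy instead
 */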
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}
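
/*
 * Illustrative example: a driver whose engine handles 8 PQ sources and
 * supports native continuation calls dma_set_maxpq(dma, 8, 1); bit 15
 * (DMA_HAS_PQ_CONTINUE) is folded into max_pq, dma_dev_to_maxpq() still
 * recovers 8, and dma_dev_has_pq_continue() reports true.
 */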

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}
static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/**
 * dma_maxpq - reduce maxpq in the face of continued operations
 * @dma: dma device with PQ capability
 * @flags: to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to facilitate future continuation
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}
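
/*
 * Worked example: for a device with max_pq == 8 and no native continuation
 * support, an operation flagged DMA_PREP_CONTINUE may take at most
 * 8 - 3 = 5 new sources, or 8 - 1 = 7 if DMA_PREP_PQ_DISABLE_P is also set.
 */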

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))
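
/*
 * Illustrative iteration over a device's capabilities (the 'device' and
 * 'cap' variables below are hypothetical):
 *
 *	enum dma_transaction_type cap;
 *
 *	for_each_dma_cap_mask(cap, device->cap_mask)
 *		pr_debug("transaction type %d supported\n", cap);
 */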

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
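
/*
 * The two comparison branches above account for wrap-around of the cookie
 * counter.  Illustrative polling sketch (hypothetical client code);
 * dma_sync_wait() below wraps an equivalent busy-wait:
 *
 *	dma_cookie_t last, used;
 *
 *	while (dma_async_is_tx_complete(chan, cookie, &last, &used) ==
 *	       DMA_IN_PROGRESS)
 *		cpu_relax();
 */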

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);
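
/*
 * Typical mem-to-mem client flow, shown as an illustrative sketch only:
 * variable names are hypothetical, dst/src must already be DMA-mapped
 * addresses, and error handling is elided.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_CTRL_ACK);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_release_channel(chan);
 */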

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* DMAENGINE_H */