/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
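
/*
 * Usage sketch (not compiled): drivers do not normally call
 * vchan_tx_submit() directly.  vchan_tx_prep() in virt-dma.h installs it
 * as the descriptor's ->tx_submit hook, so a dmaengine client reaches it
 * through dmaengine_submit().  The "foo" names below are hypothetical and
 * only illustrate the flow:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;	// embedded virtual descriptor
 *		// ... hardware-specific fields ...
 *	};
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		// ... set up the hardware descriptor for dst/src/len ...
 *		return vchan_tx_prep(vc, &d->vd, flags);
 *	}
 *
 * The client then calls dmaengine_submit() on the returned descriptor,
 * which lands in vchan_tx_submit() above.
 */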

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way is to clear the DMA_CTRL_REUSE flag and submit one more time the
 * transfer.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
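
/*
 * Usage sketch (not compiled): vchan_tx_prep() also wires this function
 * up as the descriptor's ->desc_free hook, so a client that marked a
 * descriptor as reusable releases it through the generic dmaengine
 * helpers:
 *
 *	tx = dmaengine_prep_slave_single(chan, buf, len, dir, flags);
 *	dmaengine_desc_set_reuse(tx);	// keep the descriptor after completion
 *	cookie = dmaengine_submit(tx);	// may be resubmitted once it completes
 *	...
 *	dmaengine_desc_free(tx);	// ends up in vchan_tx_desc_free()
 */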

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
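
/*
 * Usage sketch (not compiled): drivers typically call vchan_find_desc()
 * from their device_tx_status hook, under vc->lock, to report the residue
 * of a descriptor that has been issued but not yet started.  Here
 * "foo_desc_size" is a hypothetical helper that sums the descriptor's
 * remaining bytes:
 *
 *	spin_lock_irqsave(&vc->lock, flags);
 *	vd = vchan_find_desc(vc, cookie);
 *	if (vd)
 *		residue = foo_desc_size(vd);	// transfer not started yet
 *	spin_unlock_irqrestore(&vc->lock, flags);
 *	dma_set_residue(txstate, residue);
 */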

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	dmaengine_desc_callback_invoke(&cb, NULL);

	while (!list_empty(&head)) {
		vd = list_first_entry(&head, struct virt_dma_desc, node);
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		if (dmaengine_desc_test_reuse(&vd->tx))
			list_add(&vd->node, &vc->desc_allocated);
		else
			vc->desc_free(vd);

		dmaengine_desc_callback_invoke(&cb, NULL);
	}
}
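
/*
 * Usage sketch (not compiled): the tasklet above is scheduled by helpers
 * in virt-dma.h.  A driver's interrupt handler typically does the
 * following ("transfer_done", "cyclic_period_done" and "d" are
 * hypothetical driver state):
 *
 *	spin_lock(&vc->lock);
 *	if (transfer_done)
 *		vchan_cookie_complete(&d->vd);	// queues vd on desc_completed
 *	else if (cyclic_period_done)
 *		vchan_cyclic_callback(&d->vd);	// records vd in vc->cyclic
 *	spin_unlock(&vc->lock);
 *
 * Both helpers end with tasklet_schedule(&vc->task), which runs
 * vchan_complete() in softirq context.
 */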

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct virt_dma_desc *vd = list_first_entry(head,
			struct virt_dma_desc, node);
		if (dmaengine_desc_test_reuse(&vd->tx)) {
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vc->desc_free(vd);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
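
/*
 * Usage sketch (not compiled): this is normally called from a driver's
 * device_terminate_all hook, after collecting all pending descriptors
 * with vchan_get_all_descriptors() while holding vc->lock; the list is
 * then freed after dropping the lock so the free callbacks run outside
 * the spinlock:
 *
 *	spin_lock_irqsave(&vc->lock, flags);
 *	// ... stop the hardware channel ...
 *	vchan_get_all_descriptors(vc, &head);
 *	spin_unlock_irqrestore(&vc->lock, flags);
 *	vchan_dma_desc_free_list(vc, &head);
 */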

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
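
/*
 * Usage sketch (not compiled): a driver's probe routine sets ->desc_free
 * and calls vchan_init() for each channel before registering the
 * dma_device.  The "foo" names are hypothetical:
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;	// embedded virtual channel
 *		// ... hardware channel state ...
 *	};
 *
 *	fc->vc.desc_free = foo_desc_free;	// frees one virt_dma_desc
 *	vchan_init(&fc->vc, &fd->ddev);
 *	...
 *	ret = dma_async_device_register(&fd->ddev);
 */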

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");