// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthru DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 */
#include "ptdma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

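/*
 * Helpers that map the generic dmaengine/virt-dma objects embedded in the
 * driver-private structures back to those private types.
 */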
static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}

static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct pt_dma_desc, vd);
}

static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_free_chan_resources(&chan->vc);
}

static void pt_synchronize(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_synchronize(&chan->vc);
}

static void pt_do_cleanup(struct virt_dma_desc *vd)
{
	struct pt_dma_desc *desc = to_pt_desc(vd);
	struct pt_device *pt = desc->pt;

	kmem_cache_free(pt->dma_desc_cache, desc);
}

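/*
 * Mark a descriptor as issued and run its pass-through command on the
 * device's single command queue.  Always returns 0; command completion
 * is reported through pt_cmd_callback().
 */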
static int pt_dma_start_desc(struct pt_dma_desc *desc)
{
	struct pt_passthru_engine *pt_engine;
	struct pt_device *pt;
	struct pt_cmd *pt_cmd;
	struct pt_cmd_queue *cmd_q;

	desc->issued_to_hw = 1;

	pt_cmd = &desc->pt_cmd;
	pt = pt_cmd->pt;
	cmd_q = &pt->cmd_q;
	pt_engine = &pt_cmd->passthru;

	pt->tdata.cmd = pt_cmd;

	/* Execute the command */
	pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

	return 0;
}

static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
	/* Get the next DMA descriptor on the active list */
	struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);

	return vd ? to_pt_desc(vd) : NULL;
}

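/*
 * Retire finished or errored descriptors and return the next descriptor
 * that still needs to be issued to the hardware, if any.  Completion
 * callbacks are invoked outside of the channel lock.
 */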
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
						 struct pt_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			if (!desc->issued_to_hw) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;
			}

			tx_desc = &desc->vd.tx;
			vd = &desc->vd;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->vc.lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
			list_del(&desc->vd.node);
		}

		desc = pt_next_dma_desc(chan);

		spin_unlock_irqrestore(&chan->vc.lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
			dma_run_dependencies(tx_desc);
			vchan_vdesc_fini(vd);
		}
	} while (desc);

	return NULL;
}

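/*
 * Completion callback for a pass-through command: walk the active list,
 * completing finished descriptors and submitting the next pending one
 * until the queue drains or a submission fails.
 */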
static void pt_cmd_callback(void *data, int err)
{
	struct pt_dma_desc *desc = data;
	struct dma_chan *dma_chan;
	struct pt_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	dma_chan = desc->vd.tx.chan;
	chan = to_pt_chan(dma_chan);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = pt_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc)
			break;

		ret = pt_dma_start_desc(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}
}

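/*
 * Allocate a descriptor from the dedicated slab cache and prepare it for
 * submission through the virt-dma layer.  GFP_NOWAIT is required because
 * dmaengine prep callbacks may be invoked from atomic context.
 */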
static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
					     unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vc, &desc->vd, flags);

	desc->pt = chan->pt;
	desc->issued_to_hw = 0;
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

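/*
 * Build a pass-through engine command describing a single copy of @len
 * bytes from @src to @dst (both DMA addresses).
 */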
static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
					  dma_addr_t dst,
					  dma_addr_t src,
					  unsigned int len,
					  unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_passthru_engine *pt_engine;
	struct pt_dma_desc *desc;
	struct pt_cmd *pt_cmd;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	pt_cmd = &desc->pt_cmd;
	pt_cmd->pt = chan->pt;
	pt_engine = &pt_cmd->passthru;
	pt_cmd->engine = PT_ENGINE_PASSTHRU;
	pt_engine->src_dma = src;
	pt_engine->dst_dma = dst;
	pt_engine->src_len = len;
	pt_cmd->pt_cmd_callback = pt_cmd_callback;
	pt_cmd->data = desc;

	desc->len = len;

	return desc;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
		   dma_addr_t src, size_t len, unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = pt_create_desc(dma_chan, dst, src, len, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}
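
/*
 * A minimal usage sketch: clients consume this engine through the generic
 * dmaengine API rather than calling the pt_* routines directly.  The
 * channel-request string and the callback below are hypothetical; error
 * handling is omitted:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "ptdma");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	tx->callback = my_done_fn;	// hypothetical completion handler
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */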

static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}

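/*
 * Move submitted descriptors onto the active list and start processing
 * the next descriptor if one is ready.
 */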
static void pt_issue_pending(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_issue_pending(&chan->vc);

	desc = pt_next_dma_desc(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		pt_cmd_callback(desc, 0);
}

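/*
 * Pause and resume map directly onto stopping and restarting the hardware
 * command queue; resume also re-starts the next active descriptor, if any.
 */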
static int pt_pause(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_stop_queue(&chan->pt->cmd_q);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int pt_resume(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_start_queue(&chan->pt->cmd_q);
	desc = pt_next_dma_desc(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was something active, re-start */
	if (desc)
		pt_cmd_callback(desc, 0);

	return 0;
}

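/*
 * Drop every queued descriptor; the virt-dma helpers free them through
 * the channel's desc_free callback (pt_do_cleanup).
 */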
static int pt_terminate_all(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	vchan_free_chan_resources(&chan->vc);

	return 0;
}

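/*
 * Register the single pass-through channel with the dmaengine core:
 * create the command and descriptor slab caches, advertise memcpy and
 * interrupt capabilities, and wire up the device callbacks.
 */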
int pt_dmaengine_register(struct pt_device *pt)
{
	struct pt_dma_chan *chan;
	struct dma_device *dma_dev = &pt->dma_dev;
	char *cmd_cache_name;
	char *desc_cache_name;
	int ret;

	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
				       GFP_KERNEL);
	if (!pt->pt_dma_chan)
		return -ENOMEM;

	cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					"%s-dmaengine-cmd-cache",
					dev_name(pt->dev));
	if (!cmd_cache_name)
		return -ENOMEM;

	/*
	 * Create the command cache that the cleanup paths below destroy;
	 * struct pt_dma_cmd is assumed to be defined in ptdma.h.
	 */
	pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
					      sizeof(struct pt_dma_cmd),
					      sizeof(void *),
					      SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_cmd_cache)
		return -ENOMEM;

	desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					 "%s-dmaengine-desc-cache",
					 dev_name(pt->dev));
	if (!desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
					       sizeof(struct pt_dma_desc), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = pt->dev;
	dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/*
	 * PTDMA is intended to be used with the AMD NTB devices, hence
	 * marking it as DMA_PRIVATE.
	 */
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);

	chan = pt->pt_dma_chan;
	chan->pt = pt;

	/* Set base and prep routines */
	dma_dev->device_free_chan_resources = pt_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
	dma_dev->device_issue_pending = pt_issue_pending;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_pause = pt_pause;
	dma_dev->device_resume = pt_resume;
	dma_dev->device_terminate_all = pt_terminate_all;
	dma_dev->device_synchronize = pt_synchronize;

	chan->vc.desc_free = pt_do_cleanup;
	vchan_init(&chan->vc, dma_dev);

	dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(pt->dma_desc_cache);

err_cache:
	kmem_cache_destroy(pt->dma_cmd_cache);

	return ret;
}

void pt_dmaengine_unregister(struct pt_device *pt)
{
	struct dma_device *dma_dev = &pt->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(pt->dma_desc_cache);
	kmem_cache_destroy(pt->dma_cmd_cache);
}