/*
 * Xilinx Scene Change Detection driver - DMA engine
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/slab.h>

#include "../../../dma/dmaengine.h"

#include "xilinx-scenechange.h"
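
/**
 * xscd_dma_start - Start the SCD core
 * @xscd: SCD device
 * @channels: Bitmask of the channels to enable
 *
 * Enable the frame-done interrupt and the selected channels, then start the
 * core. Memory-based pipelines are started for a single frame, stream-based
 * pipelines use auto-restart to keep processing frames.
 */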
static void xscd_dma_start(struct xscd_device *xscd, unsigned int channels)
{
	xscd_write(xscd->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
	xscd_write(xscd->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
	xscd_write(xscd->iomem, XSCD_CHAN_EN_OFFSET, channels);

	xscd_set(xscd->iomem, XSCD_CTRL_OFFSET,
		 xscd->memory_based ? XSCD_CTRL_AP_START
				    : XSCD_CTRL_AP_START |
				      XSCD_CTRL_AUTO_RESTART);

	xscd->running = true;
}
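
/**
 * xscd_dma_stop - Stop the SCD core
 * @xscd: SCD device
 *
 * Clear the start (and, for stream-based pipelines, auto-restart) control
 * bits and mark the core as stopped.
 */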
static void xscd_dma_stop(struct xscd_device *xscd)
{
	xscd_clr(xscd->iomem, XSCD_CTRL_OFFSET,
		 xscd->memory_based ? XSCD_CTRL_AP_START
				    : XSCD_CTRL_AP_START |
				      XSCD_CTRL_AUTO_RESTART);

	xscd->running = false;
}
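
/**
 * xscd_dma_setup_channel - Program the next descriptor on a channel
 * @chan: SCD DMA channel
 *
 * If the channel is enabled and has a pending descriptor, remove it from the
 * pending list, write its luma plane address to the hardware and make it the
 * active descriptor.
 *
 * Return: 1 if a descriptor has been programmed, 0 otherwise.
 */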
static int xscd_dma_setup_channel(struct xscd_dma_chan *chan)
{
	struct xscd_dma_tx_descriptor *desc;

	if (!chan->enabled)
		return 0;

	if (list_empty(&chan->pending_list))
		return 0;

	desc = list_first_entry(&chan->pending_list,
				struct xscd_dma_tx_descriptor, node);
	list_del(&desc->node);

	xscd_write(chan->iomem, XSCD_ADDR_OFFSET, desc->sw.luma_plane_addr);
	chan->active_desc = desc;

	return 1;
}
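
/**
 * xscd_dma_kick - Schedule the next frame on all channels
 * @xscd: SCD device
 *
 * Program one descriptor on every enabled channel and start the core for the
 * corresponding channel mask, or stop it if no channel has work pending.
 * Channels that ran for the previous frame but have no new descriptor are
 * woken up so that xscd_dma_terminate_all() can complete.
 *
 * This function must be called with xscd->lock held, and does nothing if the
 * core is already running.
 */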
static void xscd_dma_kick(struct xscd_device *xscd)
{
	unsigned int channels = 0;
	unsigned int i;

	lockdep_assert_held(&xscd->lock);

	if (xscd->running)
		return;

	for (i = 0; i < xscd->num_streams; i++) {
		struct xscd_dma_chan *chan = xscd->channels[i];
		unsigned long flags;
		unsigned int running;
		bool stopped;

		spin_lock_irqsave(&chan->lock, flags);
		running = xscd_dma_setup_channel(chan);
		stopped = chan->running && !running;
		chan->running = running;
		spin_unlock_irqrestore(&chan->lock, flags);

		channels |= running << chan->id;
		if (stopped)
			wake_up(&chan->wait);
	}

	if (channels)
		xscd_dma_start(xscd, channels);
	else
		xscd_dma_stop(xscd);
}
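
/**
 * xscd_dma_enable_channel - Enable or disable a channel
 * @chan: SCD DMA channel
 * @enable: True to enable the channel, false to disable it
 *
 * For memory-based pipelines, enabling a channel kicks processing of the
 * pending descriptors. For stream-based pipelines the core is started or
 * stopped directly for the channel.
 */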
void xscd_dma_enable_channel(struct xscd_dma_chan *chan, bool enable)
{
	struct xscd_device *xscd = chan->xscd;

	if (enable) {
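		/*
		 * The channel is only marked as enabled here; it is disabled
		 * from xscd_dma_terminate_all(), which also waits for the
		 * channel to stop.
		 */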
		spin_lock_irq(&chan->lock);
		chan->enabled = true;
		spin_unlock_irq(&chan->lock);
	}

	if (xscd->memory_based) {
		if (enable) {
			spin_lock_irq(&xscd->lock);
			xscd_dma_kick(xscd);
			spin_unlock_irq(&xscd->lock);
		}
	} else {
		if (enable)
			xscd_dma_start(xscd, BIT(chan->id));
		else
			xscd_dma_stop(xscd);
	}
}
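
/**
 * xscd_dma_irq_handler - Handle the SCD frame-done interrupt
 * @xscd: SCD device
 *
 * Complete the active descriptor on every channel, notify the corresponding
 * channels and kick processing of the next pending descriptors.
 */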
void xscd_dma_irq_handler(struct xscd_device *xscd)
{
	unsigned int i;
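
	/*
	 * Complete the active descriptors and schedule the per-channel
	 * tasklets that run the descriptor callbacks.
	 */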
	for (i = 0; i < xscd->num_streams; ++i) {
		struct xscd_dma_chan *chan = xscd->channels[i];
		struct xscd_dma_tx_descriptor *desc = chan->active_desc;

		if (!desc)
			continue;

		dma_cookie_complete(&desc->async_tx);
		xscd_chan_event_notify(&xscd->chans[i]);

		spin_lock(&chan->lock);
		list_add_tail(&desc->node, &chan->done_list);
		chan->active_desc = NULL;
		spin_unlock(&chan->lock);

		tasklet_schedule(&chan->tasklet);
	}
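
	/* Mark the core as idle and schedule the next transfers. */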
	spin_lock(&xscd->lock);
	xscd->running = false;
	xscd_dma_kick(xscd);
	spin_unlock(&xscd->lock);
}
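
/**
 * xscd_dma_tx_submit - Submit a descriptor to the channel pending list
 * @tx: DMA engine transaction descriptor
 *
 * Return: The assigned DMA cookie.
 */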
static dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
	struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
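
/**
 * xscd_dma_free_desc_list - Free all descriptors on a list
 * @chan: SCD DMA channel owning the list
 * @list: List of descriptors to free
 */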
static void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
				    struct list_head *list)
{
	struct xscd_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		kfree(desc);
	}
}
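
/**
 * xscd_dma_free_descriptors - Free the pending, done and active descriptors
 * @chan: SCD DMA channel
 */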
static void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xscd_dma_free_desc_list(chan, &chan->pending_list);
	xscd_dma_free_desc_list(chan, &chan->done_list);
	kfree(chan->active_desc);

	chan->active_desc = NULL;
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}
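
/**
 * xscd_dma_chan_desc_cleanup - Run callbacks for completed descriptors
 * @chan: SCD DMA channel
 *
 * Process the done list: invoke the transaction callbacks (with the channel
 * lock released) and free the descriptors.
 */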
static void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
{
	struct xscd_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		list_del(&desc->node);
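
		/* Run the descriptor callback with the channel lock released. */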
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		kfree(desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}
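
/**
 * xscd_dma_prep_interleaved - Prepare a descriptor for an interleaved transfer
 * @dchan: DMA engine channel
 * @xt: Interleaved template describing the frame to process
 * @flags: Transfer flags
 *
 * Return: An async transaction descriptor on success, NULL on allocation
 * failure.
 */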
static struct dma_async_tx_descriptor *
xscd_dma_prep_interleaved(struct dma_chan *dchan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
	struct xscd_dma_tx_descriptor *desc;
	struct xscd_dma_desc *sw;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xscd_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	sw = &desc->sw;
	sw->vsize = xt->numf;
	sw->hsize = xt->sgl[0].size;
	sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
	sw->luma_plane_addr = xt->src_start;

	return &desc->async_tx;
}

static bool xscd_dma_is_running(struct xscd_dma_chan *chan)
{
	bool running;

	spin_lock_irq(&chan->lock);
	running = chan->running;
	spin_unlock_irq(&chan->lock);

	return running;
}
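
/**
 * xscd_dma_terminate_all - Disable the channel and free its descriptors
 * @dchan: DMA engine channel
 *
 * Disable the channel, wait for any in-flight transfer to complete and free
 * all descriptors.
 *
 * Return: Always 0.
 */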
static int xscd_dma_terminate_all(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
	int ret;

	spin_lock_irq(&chan->lock);
	chan->enabled = false;
	spin_unlock_irq(&chan->lock);
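
	/* Wait for any in-flight transfer to complete. */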
	ret = wait_event_timeout(chan->wait, !xscd_dma_is_running(chan),
				 msecs_to_jiffies(100));
	WARN_ON(ret == 0);

	xscd_dma_free_descriptors(chan);
	return 0;
}
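
/**
 * xscd_dma_issue_pending - Kick processing of the pending descriptors
 * @dchan: DMA engine channel
 */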
static void xscd_dma_issue_pending(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
	struct xscd_device *xscd = chan->xscd;
	unsigned long flags;

	spin_lock_irqsave(&xscd->lock, flags);
	xscd_dma_kick(xscd);
	spin_unlock_irqrestore(&xscd->lock, flags);
}

static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}
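
/**
 * xscd_dma_free_chan_resources - Free the channel descriptors
 * @dchan: DMA engine channel
 */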
static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

	xscd_dma_free_descriptors(chan);
}
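
/**
 * xscd_dma_do_tasklet - Per-channel tasklet running descriptor callbacks
 * @data: Pointer to the SCD DMA channel, cast to unsigned long
 */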
static void xscd_dma_do_tasklet(unsigned long data)
{
	struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;

	xscd_dma_chan_desc_cleanup(chan);
}
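
/**
 * xscd_dma_alloc_chan_resources - Initialize the channel cookie
 * @dchan: DMA engine channel
 *
 * Return: Always 0.
 */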
static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	dma_cookie_init(dchan);
	return 0;
}
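
/**
 * of_scdma_xilinx_xlate - Translate a DT DMA specifier to a channel
 * @dma_spec: DT DMA specifier, args[0] is the channel index
 * @ofdma: DT DMA controller data, pointing to the SCD device
 *
 * Return: The requested DMA channel, or NULL if the index is out of range or
 * the channel has not been initialized.
 */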
static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
{
	struct xscd_device *xscd = ofdma->of_dma_data;
	u32 chan_id = dma_spec->args[0];

	if (chan_id >= xscd->num_streams)
		return NULL;

	if (!xscd->channels[chan_id])
		return NULL;

	return dma_get_slave_channel(&xscd->channels[chan_id]->common);
}
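
/**
 * xscd_dma_chan_init - Initialize an SCD DMA channel
 * @xscd: SCD device
 * @chan_id: Index of the channel to initialize
 */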
static void xscd_dma_chan_init(struct xscd_device *xscd, int chan_id)
{
	struct xscd_dma_chan *chan = &xscd->chans[chan_id].dmachan;

	chan->id = chan_id;
	chan->iomem = xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
	chan->xscd = xscd;

	xscd->channels[chan->id] = chan;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
		     (unsigned long)chan);
	init_waitqueue_head(&chan->wait);

	chan->common.device = &xscd->dma_device;
	list_add_tail(&chan->common.device_node, &xscd->dma_device.channels);
}
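
/**
 * xscd_dma_chan_remove - Remove a channel from the DMA device channel list
 * @chan: SCD DMA channel
 */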
static void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
{
	list_del(&chan->common.device_node);
}
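
/**
 * xscd_dma_init - Initialize and register the SCD DMA engine
 * @xscd: SCD device
 *
 * Initialize the DMA device, create one channel per stream and register the
 * device with the DMA engine core and the DT DMA helpers.
 *
 * Return: 0 on success, or a negative error code on failure.
 */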
int xscd_dma_init(struct xscd_device *xscd)
{
	struct dma_device *ddev = &xscd->dma_device;
	unsigned int chan_id;
	int ret;
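
	/* Initialize the DMA engine. */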
	ddev->dev = xscd->dev;
	dma_set_mask(xscd->dev, DMA_BIT_MASK(32));

	INIT_LIST_HEAD(&ddev->channels);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
	ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
	ddev->device_tx_status = xscd_dma_tx_status;
	ddev->device_issue_pending = xscd_dma_issue_pending;
	ddev->device_terminate_all = xscd_dma_terminate_all;
	ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;

	for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
		xscd_dma_chan_init(xscd, chan_id);

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(xscd->dev, "failed to register the dma device\n");
		goto error;
	}

	ret = of_dma_controller_register(xscd->dev->of_node,
					 of_scdma_xilinx_xlate, xscd);
	if (ret) {
		dev_err(xscd->dev, "failed to register DMA to DT DMA helper\n");
		goto error_of_dma;
	}

	dev_info(xscd->dev, "Xilinx Scene Change DMA is initialized!\n");
	return 0;

error_of_dma:
	dma_async_device_unregister(ddev);

error:
	for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
		xscd_dma_chan_remove(xscd->channels[chan_id]);

	return ret;
}
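
/**
 * xscd_dma_cleanup - Unregister the SCD DMA engine
 * @xscd: SCD device
 */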
void xscd_dma_cleanup(struct xscd_device *xscd)
{
	dma_async_device_unregister(&xscd->dma_device);
	of_dma_controller_free(xscd->dev->of_node);
}