// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

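/*
 * Default autosuspend delay for runtime PM, in milliseconds. The value
 * can be overridden after boot through the device's
 * power/autosuspend_delay_ms sysfs attribute.
 */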
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0, use the device-provided count)");

enum hidma_cap {
	HIDMA_MSI_CAP = 1,
	HIDMA_IDENTITY_CAP,
};

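/*
 * Drain the channel's "completed" list: query the low-level driver for
 * each descriptor's final status, complete its cookie, recycle the
 * descriptor onto the free list and invoke the client callback with a
 * DMA_TRANS_NOERROR or DMA_TRANS_ABORTED result.
 */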
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		/* Return the descriptor to the free pool before the callback runs */
		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

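/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */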
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list. */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

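/*
 * Tasklet fallback for hidma_issue_pending(): used when the device
 * cannot be runtime-resumed in atomic context. It resumes the device
 * synchronously and then starts the transfer engine.
 */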
static void hidma_issue_task(struct tasklet_struct *t)
{
	struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

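/*
 * Decide whether a cookie falls in the range of successfully completed
 * transactions. Cookies increase monotonically but eventually wrap, so
 * both orderings of last_success relative to last_used must be handled.
 */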
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

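/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */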
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

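/*
 * Preallocate the channel's descriptor pool. Each descriptor is bound
 * to a low-level TRE via hidma_ll_request() and parked on the free
 * list; returns 1 (descriptors available) on success.
 */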
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

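/*
 * Grab a free descriptor, program it as a memcpy TRE in the low-level
 * driver and park it on the prepared list until tx_submit() queues it.
 */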
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     value, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

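/*
 * Fail all in-flight transactions on the channel: report what already
 * completed, pause the hardware, then unmap, call back and recycle
 * every queued, prepared, active and completed descriptor.
 */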
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to be acknowledged */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the current transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

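/*
 * Release the channel's descriptor pool: terminate anything still in
 * flight, then free every TRE and descriptor allocated by
 * hidma_alloc_chan_resources().
 */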
static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel\n");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

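/*
 * sysfs support: a read-only "chid" attribute exposes the hardware
 * channel index this instance owns.
 */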
static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct hidma_dev *mdev = dev_get_drvdata(dev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	/* the hardware takes one MSI address/data pair, programmed via index 0 */
	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}

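/*
 * Allocate HIDMA_MSI_INTS MSI vectors and install a handler on each.
 * On failure the already-requested vectors are released and the caller
 * falls back to the wired interrupt.
 */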
static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free the already requested IRQs */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* tell the low-level driver to run in MSI mode */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}

static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
	enum hidma_cap cap;

	cap = (enum hidma_cap) device_get_match_data(dev);
	return cap ? ((cap & test_cap) > 0) : 0;
}

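/*
 * Probe: map the transfer (TRCA) and event (EVCA) register blocks, set
 * up runtime PM, pick the descriptor count, initialize the low-level
 * driver and register with the dmaengine core. MSI is used when the
 * match data advertises HIDMA_MSI_CAP, with the wired IRQ as fallback.
 */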
static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = PTR_ERR(trca);
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = PTR_ERR(evca);
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = chirq;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

793
794 INIT_LIST_HEAD(&dmadev->ddev.channels);
795 spin_lock_init(&dmadev->lock);
796 dmadev->ddev.dev = &pdev->dev;
797 pm_runtime_get_sync(dmadev->ddev.dev);
798
799 dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
800 dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
801 if (WARN_ON(!pdev->dev.dma_mask)) {
802 rc = -ENXIO;
803 goto dmafree;
804 }
805
806 dmadev->dev_evca = evca;
807 dmadev->evca_resource = evca_resource;
808 dmadev->dev_trca = trca;
809 dmadev->trca_resource = trca_resource;
810 dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
811 dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
812 dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
813 dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
814 dmadev->ddev.device_tx_status = hidma_tx_status;
815 dmadev->ddev.device_issue_pending = hidma_issue_pending;
816 dmadev->ddev.device_pause = hidma_pause;
817 dmadev->ddev.device_resume = hidma_resume;
818 dmadev->ddev.device_terminate_all = hidma_terminate_all;
819 dmadev->ddev.copy_align = 8;
820
821
822
823
824
	msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
		dmadev->chidx = readl(dmadev->dev_trca + 0x40);
	else
		dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_setup(&dmadev->task, hidma_issue_task);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062", HIDMA_MSI_CAP},
	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
	{.compatible = "qcom,hidma-1.2",
	 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");